cephfs: check volumeOptions.Mounter and choose ceph-fuse or mount.ceph accordingly

gman 2018-03-22 14:11:51 +01:00
parent defc676b3d
commit 4c5c67b8f9
4 changed files with 95 additions and 37 deletions

View File

@@ -4,9 +4,13 @@ metadata:
   name: csi-cephfs
 provisioner: csi-cephfsplugin
 parameters:
-  monitors: 192.168.122.11:6789
+  # The driver can use either ceph-fuse (fuse) or ceph kernel client (kernel)
+  mounter: fuse
+  monitors: mon1:port,mon2:port
   rootPath: /
   user: admin
   csiProvisionerSecretName: csi-cephfs-secret
   csiProvisionerSecretNameSpace: default
 reclaimPolicy: Delete
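
The two values accepted by the new "mounter" parameter correspond to constants the node server compares against (see the nodeserver.go and volumeoptions.go hunks below). A minimal sketch of those constants, assuming they are declared in a cephfs package file this commit does not touch; only the identifiers and the accepted strings "fuse" and "kernel" are visible in the diff:

package cephfs

// Hypothetical sketch: the real declarations live elsewhere in the driver.
// Only the identifiers and the accepted values are confirmed by this commit.
const (
    volumeMounter_fuse   = "fuse"   // mount via the ceph-fuse binary
    volumeMounter_kernel = "kernel" // mount via mount.ceph (kernel client)
)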

View File

@@ -74,12 +74,6 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
 	volId := newVolumeIdentifier(volOptions, req)
 
-	conf := cephConfigData{Monitors: volOptions.Monitors}
-	if err = conf.writeToFile(); err != nil {
-		glog.Errorf("couldn't generate ceph.conf: %v", err)
-		return nil, status.Error(codes.Internal, err.Error())
-	}
-
 	glog.V(4).Infof("cephfs: volume %s successfuly created", volId.id)
 
 	return &csi.CreateVolumeResponse{

View File

@@ -63,32 +63,63 @@ func validateNodeUnpublishVolumeRequest(req *csi.NodeUnpublishVolumeRequest) err
 	return nil
 }
 
+func newMounter(volOptions *volumeOptions, key string, readOnly bool) (volumeMounter, error) {
+	var m volumeMounter
+
+	if volOptions.Mounter == volumeMounter_fuse {
+		keyring := cephKeyringData{
+			User:     volOptions.User,
+			Key:      key,
+			RootPath: volOptions.RootPath,
+			ReadOnly: readOnly,
+		}
+
+		if err := keyring.writeToFile(); err != nil {
+			msg := fmt.Sprintf("couldn't write ceph keyring for user %s: %v", volOptions.User, err)
+			glog.Error(msg)
+			return nil, status.Error(codes.Internal, msg)
+		}
+
+		m = &fuseMounter{}
+	} else if volOptions.Mounter == volumeMounter_kernel {
+		secret := cephSecretData{
+			User: volOptions.User,
+			Key:  key,
+		}
+
+		if err := secret.writeToFile(); err != nil {
+			msg := fmt.Sprintf("couldn't write ceph secret for user %s: %v", volOptions.User, err)
+			glog.Error(msg)
+			return nil, status.Error(codes.Internal, msg)
+		}
+
+		m = &kernelMounter{}
+	}
+
+	return m, nil
+}
+
 func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
 	if err := validateNodePublishVolumeRequest(req); err != nil {
 		return nil, err
 	}
 
-	// Configuration
-
-	targetPath := req.GetTargetPath()
-
-	volOptions, err := newVolumeOptions(req.GetVolumeAttributes())
-	if err != nil {
-		glog.Errorf("error reading volume options: %v", err)
-		return nil, status.Error(codes.InvalidArgument, err.Error())
-	}
-
 	/*
-		volId := req.GetVolumeId()
 		if err = tryLock(volId, nsMtx, "NodeServer"); err != nil {
 			return nil, err
 		}
 		defer nsMtx.UnlockKey(volId)
 	*/
 
-	if err = createMountPoint(targetPath); err != nil {
-		glog.Errorf("failed to create mount point at %s: %v", targetPath, err)
-		return nil, status.Error(codes.Internal, err.Error())
-	}
+	// Configuration
+
+	targetPath := req.GetTargetPath()
+	volId := req.GetVolumeId()
+
+	volOptions, err := newVolumeOptions(req.GetVolumeAttributes())
+	if err != nil {
+		glog.Errorf("error reading volume options: %v", err)
+		return nil, status.Error(codes.InvalidArgument, err.Error())
+	}
 
 	key, err := getKeyFromCredentials(req.GetNodePublishSecrets())
@@ -97,17 +128,15 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
 		return nil, status.Error(codes.InvalidArgument, err.Error())
 	}
 
-	keyring := cephKeyringData{
-		User:     volOptions.User,
-		Key:      key,
-		RootPath: volOptions.RootPath,
-		ReadOnly: req.GetReadonly(),
-	}
-
-	if err = keyring.writeToFile(); err != nil {
-		msg := fmt.Sprintf("couldn't write ceph keyring for user %s: %v", volOptions.User, err)
-		glog.Error(msg)
-		return nil, status.Error(codes.Internal, msg)
-	}
+	if err = createMountPoint(targetPath); err != nil {
+		glog.Errorf("failed to create mount point at %s: %v", targetPath, err)
+		return nil, status.Error(codes.Internal, err.Error())
+	}
+
+	conf := cephConfigData{Monitors: volOptions.Monitors}
+	if err = conf.writeToFile(); err != nil {
+		glog.Errorf("couldn't generate ceph.conf: %v", err)
+		return nil, status.Error(codes.Internal, err.Error())
+	}
 
 	// Check if the volume is already mounted
@@ -120,19 +149,24 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
 	}
 
 	if isMnt {
+		glog.V(4).Infof("cephfs: volume %s is already mounted to %s", volId, targetPath)
 		return &csi.NodePublishVolumeResponse{}, nil
 	}
 
 	// It's not, exec ceph-fuse now
 
-	vol := volume{RootPath: volOptions.RootPath, User: volOptions.User}
-
-	if err := vol.mount(targetPath); err != nil {
-		glog.Errorf("mounting volume %s to %s failed: %v", vol.RootPath, targetPath, err)
+	m, err := newMounter(volOptions, key, req.GetReadonly())
+	if err != nil {
+		glog.Errorf("error while creating volumeMounter: %v", err)
 		return nil, status.Error(codes.Internal, err.Error())
 	}
 
-	glog.V(4).Infof("cephfs: volume %s successfuly mounted to %s", vol.RootPath, targetPath)
+	if err = m.mount(targetPath, volOptions); err != nil {
+		glog.Errorf("mounting volume %s to %s failed: %v", volId, targetPath, err)
+		return nil, status.Error(codes.Internal, err.Error())
+	}
+
+	glog.V(4).Infof("cephfs: volume %s successfuly mounted to %s", volId, targetPath)
 
 	return &csi.NodePublishVolumeResponse{}, nil
 }
@@ -142,8 +176,9 @@ func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpu
 		return nil, err
 	}
 
+	volId := req.GetVolumeId()
+
 	/*
-		volId := req.GetVolumeId()
 		if err := tryLock(volId, nsMtx, "NodeServer"); err != nil {
 			return nil, err
 		}
@@ -154,6 +189,8 @@ func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpu
 		return nil, status.Error(codes.Internal, err.Error())
 	}
 
+	glog.V(4).Infof("cephfs: volume %s successfuly unmounted from %s", volId, req.GetTargetPath())
+
 	return &csi.NodeUnpublishVolumeResponse{}, nil
 }
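
NodePublishVolume now hands the actual mount over to whichever volumeMounter newMounter returned. The fuseMounter and kernelMounter types are not defined in this diff; the sketch below is only a rough illustration of what they might look like, inferred from the m.mount(targetPath, volOptions) call above. The exact ceph-fuse and mount.ceph arguments and the keyring/secret file locations are assumptions, not the driver's actual code.

package cephfs

import (
    "fmt"
    "os/exec"
)

// Sketch only: the real interface and mounter implementations live in a file
// this commit does not show. The method signature is inferred from the
// m.mount(targetPath, volOptions) call in NodePublishVolume.
type volumeMounter interface {
    mount(mountPoint string, volOptions *volumeOptions) error
}

type fuseMounter struct{}

// fuseMounter shells out to ceph-fuse; it assumes the keyring written by
// newMounter is in a location ceph-fuse picks up by default.
func (m *fuseMounter) mount(mountPoint string, volOptions *volumeOptions) error {
    out, err := exec.Command("ceph-fuse",
        "-n", "client."+volOptions.User,
        "-r", volOptions.RootPath,
        mountPoint).CombinedOutput()
    if err != nil {
        return fmt.Errorf("ceph-fuse failed: %v output: %s", err, out)
    }
    return nil
}

type kernelMounter struct{}

// kernelMounter uses the in-kernel cephfs client via mount -t ceph; the
// secretfile path is a placeholder for wherever newMounter wrote the secret.
func (m *kernelMounter) mount(mountPoint string, volOptions *volumeOptions) error {
    src := fmt.Sprintf("%s:%s", volOptions.Monitors, volOptions.RootPath)
    out, err := exec.Command("mount", "-t", "ceph", src, mountPoint,
        "-o", "name="+volOptions.User+",secretfile=/etc/ceph/secret").CombinedOutput()
    if err != nil {
        return fmt.Errorf("mount.ceph failed: %v output: %s", err, out)
    }
    return nil
}

Keeping the choice behind this small interface means another mounter could later be added with just a new constant, a new case in validateMounter, and another volumeMounter implementation.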

View File

@@ -16,12 +16,16 @@ limitations under the License.
 package cephfs
 
-import "errors"
+import (
+	"errors"
+	"fmt"
+)
 
 type volumeOptions struct {
 	Monitors string `json:"monitors"`
 	RootPath string `json:"rootPath"`
 	User     string `json:"user"`
+	Mounter  string `json:"mounter"`
 }
@@ -33,6 +37,17 @@ func extractOption(dest *string, optionLabel string, options map[string]string) error {
 	}
 }
 
+func validateMounter(m string) error {
+	switch m {
+	case volumeMounter_fuse:
+	case volumeMounter_kernel:
+	default:
+		return fmt.Errorf("Unknown mounter '%s'. Valid options are 'fuse' and 'kernel'", m)
+	}
+
+	return nil
+}
+
 func newVolumeOptions(volOptions map[string]string) (*volumeOptions, error) {
 	var opts volumeOptions
@@ -48,5 +63,13 @@ func newVolumeOptions(volOptions map[string]string) (*volumeOptions, error) {
 		return nil, err
 	}
 
+	if err := extractOption(&opts.Mounter, "mounter", volOptions); err != nil {
+		return nil, err
+	}
+
+	if err := validateMounter(opts.Mounter); err != nil {
+		return nil, err
+	}
+
 	return &opts, nil
 }
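
An illustrative check of the new option handling (not part of the commit; it assumes monitors, rootPath, user and mounter are the only attributes newVolumeOptions requires):

package cephfs

import "testing"

// Hypothetical test: exercises the new "mounter" handling in
// newVolumeOptions and validateMounter added by this commit.
func TestNewVolumeOptionsMounter(t *testing.T) {
    attrs := map[string]string{
        "monitors": "mon1:port,mon2:port",
        "rootPath": "/",
        "user":     "admin",
        "mounter":  "fuse",
    }

    if _, err := newVolumeOptions(attrs); err != nil {
        t.Fatalf("expected 'fuse' to be accepted: %v", err)
    }

    attrs["mounter"] = "nfs"
    if _, err := newVolumeOptions(attrs); err == nil {
        t.Fatal("expected an error for an unknown mounter")
    }
}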