cephfs: volumes are now created for separate ceph users with limited access to fs

Uses a slightly modified version of https://github.com/kubernetes-incubator/external-storage/blob/master/ceph/cephfs/cephfs_provisioner/cephfs_provisioner.py
This should be rewritten properly in Go, but it works for now - for demonstration purposes.

TODO:
* readOnly is not taken into account
* controllerServer.DeleteVolume does nothing
Author: gman
Date: 2018-03-09 17:05:19 +01:00
Parent: 3dc810a75b
Commit: 06f411bbf3

9 changed files with 452 additions and 114 deletions
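
The heart of the change is the contract between the driver and the provisioner script: cephfs_provisioner.py creates the volume plus a dedicated ceph user and prints both as JSON, which the driver unmarshals into the new volume struct (see the volume.go changes below). A minimal sketch of that round trip - the JSON keys and the ceph-fuse flags are taken from this diff, while the concrete path, user, and key values are illustrative placeholders:

package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors the volume struct introduced in this commit.
type volume struct {
	Root string `json:"path"`
	User string `json:"user"`
	Key  string `json:"key"`
}

func main() {
	// Illustrative provisioner output; only the JSON keys are from the diff.
	out := []byte(`{"path": "/volumes/csi-cephfs-1234", "user": "user-csi-cephfs-1234", "key": "QVFCa2V5"}`)

	var vol volume
	if err := json.Unmarshal(out, &vol); err != nil {
		panic(err)
	}

	// NodePublishVolume later mounts exactly this subtree as this user:
	fmt.Printf("ceph-fuse <targetPath> -n %s -r %s\n", vol.User, vol.Root)
}

CreateVolume returns vol.makeMap() - the path and the user, but not the key - as the volume attributes, and validateNodePublishVolumeRequest rejects publish requests that are missing either one.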

@@ -39,8 +39,6 @@ type cephfsDriver struct {
}
var (
provisionRoot = "/cephfs"
driver *cephfsDriver
version = csi.Version{
Minor: 1,

@@ -18,8 +18,6 @@ package cephfs
import (
"fmt"
"os"
"path"
"github.com/golang/glog"
"golang.org/x/net/context"
@@ -56,8 +54,6 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
return nil, err
}
// Configuration
volOptions, err := newVolumeOptions(req.GetParameters())
if err != nil {
return nil, err
@@ -70,49 +66,19 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
volSz = int64(req.GetCapacityRange().GetRequiredBytes())
}
if err := createMountPoint(provisionRoot); err != nil {
glog.Errorf("failed to create provision root at %s: %v", provisionRoot, err)
return nil, status.Error(codes.Internal, err.Error())
}
// Exec ceph-fuse only if cephfs has not been mounted yet
isMnt, err := isMountPoint(provisionRoot)
vol, err := newVolume(volId, volOptions)
if err != nil {
glog.Errorf("stat failed: %v", err)
glog.Errorf("failed to create a volume: %v", err)
return nil, status.Error(codes.Internal, err.Error())
}
if !isMnt {
if err = mountFuse(provisionRoot); err != nil {
glog.Error(err)
return nil, status.Error(codes.Internal, err.Error())
}
}
// Create a new directory inside the provision root for bind-mounting done by NodePublishVolume
volPath := path.Join(provisionRoot, volId.id)
if err := os.Mkdir(volPath, 0750); err != nil {
glog.Errorf("failed to create volume %s: %v", volPath, err)
return nil, status.Error(codes.Internal, err.Error())
}
// Set attributes & quotas
if err = setVolAttributes(volPath, volSz); err != nil {
glog.Errorf("failed to set attributes for volume %s: %v", volPath, err)
return nil, status.Error(codes.Internal, err.Error())
}
glog.V(4).Infof("cephfs: created volume %s", volPath)
glog.V(4).Infof("cephfs: volume created at %s", vol.Root)
return &csi.CreateVolumeResponse{
VolumeInfo: &csi.VolumeInfo{
Id: volId.id,
CapacityBytes: uint64(volSz),
Attributes: req.GetParameters(),
Attributes: vol.makeMap(),
},
}, nil
}
@@ -123,28 +89,11 @@ func (cs *controllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol
return nil, err
}
volId := req.GetVolumeId()
volPath := path.Join(provisionRoot, volId)
glog.V(4).Infof("deleting volume %s", volPath)
if err := deleteVolumePath(volPath); err != nil {
glog.Errorf("failed to delete volume %s: %v", volPath, err)
return nil, err
}
// TODO
return &csi.DeleteVolumeResponse{}, nil
}
func (cs *controllerServer) ValidateVolumeCapabilities(ctx context.Context, req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) {
res := &csi.ValidateVolumeCapabilitiesResponse{}
for _, capability := range req.VolumeCapabilities {
if capability.GetAccessMode().GetMode() != csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER {
return res, nil
}
}
res.Supported = true
return res, nil
return &csi.ValidateVolumeCapabilitiesResponse{Supported: true}, nil
}

@@ -18,7 +18,6 @@ package cephfs
import (
"context"
"path"
"github.com/golang/glog"
"google.golang.org/grpc/codes"
@@ -27,7 +26,6 @@ import (
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/kubernetes-csi/drivers/pkg/csi-common"
"k8s.io/kubernetes/pkg/util/keymutex"
"k8s.io/kubernetes/pkg/util/mount"
)
type nodeServer struct {
@@ -53,6 +51,16 @@ func validateNodePublishVolumeRequest(req *csi.NodePublishVolumeRequest) error {
return status.Error(codes.InvalidArgument, "Target path missing in request")
}
attrs := req.GetVolumeAttributes()
if _, ok := attrs["path"]; !ok {
return status.Error(codes.InvalidArgument, "Missing path attribute")
}
if _, ok := attrs["user"]; !ok {
return status.Error(codes.InvalidArgument, "Missing user attribute")
}
return nil
}
@@ -105,20 +113,19 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
return &csi.NodePublishVolumeResponse{}, nil
}
// It's not, do the bind-mount now
// It's not, exec ceph-fuse now
options := []string{"bind"}
if req.GetReadonly() {
options = append(options, "ro")
}
// TODO honor req.GetReadonly()
volPath := path.Join(provisionRoot, req.GetVolumeId())
if err := mount.New("").Mount(volPath, targetPath, "", options); err != nil {
glog.Errorf("bind-mounting %s to %s failed: %v", volPath, targetPath, err)
attrs := req.GetVolumeAttributes()
vol := volume{Root: attrs["path"], User: attrs["user"]}
if err := vol.mount(targetPath); err != nil {
glog.Errorf("mounting volume %s to %s failed: %v", vol.Root, targetPath, err)
return nil, status.Error(codes.Internal, err.Error())
}
glog.V(4).Infof("cephfs: volume %s successfuly mounted to %s", volPath, targetPath)
glog.V(4).Infof("cephfs: volume %s successfuly mounted to %s", vol.Root, targetPath)
return &csi.NodePublishVolumeResponse{}, nil
}
@@ -129,14 +136,13 @@ func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpu
}
volId := req.GetVolumeId()
targetPath := req.GetTargetPath()
if err := tryLock(volId, nsMtx, "NodeServer"); err != nil {
return nil, err
}
defer nsMtx.UnlockKey(volId)
if err := mount.New("").Unmount(targetPath); err != nil {
if err := unmountVolume(req.GetTargetPath()); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}

@@ -17,28 +17,74 @@ limitations under the License.
package cephfs
import (
"encoding/json"
"fmt"
"os"
"os/exec"
)
func createMountPoint(root string) error {
return os.MkdirAll(root, 0750)
const (
// from https://github.com/kubernetes-incubator/external-storage/tree/master/ceph/cephfs/cephfs_provisioner
provisionerCmd = "/cephfs_provisioner.py"
userPrefix = "user-"
)
type volume struct {
Root string `json:"path"`
User string `json:"user"`
Key string `json:"key"`
}
func deleteVolumePath(volPath string) error {
return os.RemoveAll(volPath)
}
func newVolume(volId *volumeIdentifier, volOpts *volumeOptions) (*volume, error) {
cmd := exec.Command(provisionerCmd, "-n", volId.id, "-u", userPrefix+volId.id)
cmd.Env = []string{
"CEPH_CLUSTER_NAME=" + volOpts.ClusterName,
"CEPH_MON=" + volOpts.Monitor,
"CEPH_AUTH_ID=" + volOpts.AdminId,
"CEPH_AUTH_KEY=" + volOpts.AdminSecret,
}
func mountFuse(root string) error {
out, err := execCommand("ceph-fuse", root)
out, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("cephfs: ceph-fuse failed with following error: %v\ncephfs: ceph-fuse output: %s", err, out)
return nil, fmt.Errorf("cephfs: an error occurred while creating the volume: %v\ncephfs: %s", err, out)
}
fmt.Printf("\t\tcephfs_provisioner.py: %s\n", out)
vol := &volume{}
if err = json.Unmarshal(out, vol); err != nil {
return nil, fmt.Errorf("cephfs: malformed json output: %s", err)
}
return vol, nil
}
func (vol *volume) mount(mountPoint string) error {
out, err := execCommand("ceph-fuse", mountPoint, "-n", vol.User, "-r", vol.Root)
if err != nil {
return fmt.Errorf("cephfs: ceph-fuse failed with following error: %s\ncephfs: cephf-fuse output: %s", err, out)
}
return nil
}
func unmountFuse(root string) error {
func (vol *volume) unmount() error {
out, err := execCommand("fusermount", "-u", vol.Root)
if err != nil {
return fmt.Errorf("cephfs: fusermount failed with following error: %v\ncephfs: fusermount output: %s", err, out)
}
return nil
}
func (vol *volume) makeMap() map[string]string {
return map[string]string{
"path": vol.Root,
"user": vol.User,
}
}
func unmountVolume(root string) error {
out, err := execCommand("fusermount", "-u", root)
if err != nil {
return fmt.Errorf("cephfs: fusermount failed with following error: %v\ncephfs: fusermount output: %s", err, out)
@@ -47,12 +93,15 @@ func unmountFuse(root string) error {
return nil
}
func setVolAttributes(volPath string /*opts *fsVolumeOptions*/, maxBytes int64) error {
out, err := execCommand("setfattr", "-n", "ceph.quota.max_bytes",
"-v", fmt.Sprintf("%d", maxBytes), volPath)
func deleteVolume(volId, user string) error {
out, err := execCommand(provisionerCmd, "--remove", "-n", volId, "-u", user)
if err != nil {
return fmt.Errorf("cephfs: setfattr failed with following error: %v\ncephfs: setfattr output: %s", err, out)
return fmt.Errorf("cephfs: failed to delete volume %s following error: %v\ncephfs: output: %s", volId, err, out)
}
return nil
}
func createMountPoint(root string) error {
return os.MkdirAll(root, 0750)
}
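
DeleteVolume in the controller is still a stub (per the TODO in the commit message), even though the deleteVolume() helper above already wraps the provisioner's --remove mode. A hypothetical wiring - not part of this commit - assuming the volume's ceph user follows the same userPrefix+volId convention that newVolume() uses:

func (cs *controllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
	volId := req.GetVolumeId()

	// Hypothetical: reconstruct the user name the same way newVolume() built it.
	if err := deleteVolume(volId, userPrefix+volId); err != nil {
		glog.Errorf("failed to delete volume %s: %v", volId, err)
		return nil, status.Error(codes.Internal, err.Error())
	}

	return &csi.DeleteVolumeResponse{}, nil
}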

@@ -31,7 +31,7 @@ func newVolumeIdentifier(volOptions *volumeOptions, req *csi.CreateVolumeRequest
uuid: uuid.NewUUID().String(),
}
volId.id = "csi-rbd-" + volId.uuid
volId.id = "csi-cephfs-" + volId.uuid
if volId.name == "" {
volId.name = volOptions.Pool + "-dynamic-pvc-" + volId.uuid

@@ -19,9 +19,9 @@ package cephfs
import "errors"
type volumeOptions struct {
VolName string `json:"volName"`
Monitor string `json:"monitor"`
Pool string `json:"pool"`
ClusterName string `json:"clusterName"`
AdminId string `json:"adminID"`
AdminSecret string `json:"adminSecret"`
}
@@ -37,27 +37,26 @@ func extractOption(dest *string, optionLabel string, options map[string]string)
func newVolumeOptions(volOptions map[string]string) (*volumeOptions, error) {
var opts volumeOptions
// XXX early return - we're not reading credentials from volOptions for now...
// i'll finish this once ceph-fuse accepts passing credentials through cmd args
if err := extractOption(&opts.AdminId, "adminID", volOptions); err != nil {
return nil, err
}
if err := extractOption(&opts.AdminSecret, "adminSecret", volOptions); err != nil {
return nil, err
}
if err := extractOption(&opts.Monitor, "monitor", volOptions); err != nil {
return nil, err
}
if err := extractOption(&opts.Pool, "pool", volOptions); err != nil {
return nil, err
}
if err := extractOption(&opts.ClusterName, "clusterName", volOptions); err != nil {
return nil, err
}
return &opts, nil
/*
if err := extractOption(&opts.AdminId, "adminID", volOptions); err != nil {
return nil, err
}
if err := extractOption(&opts.AdminSecret, "adminSecret", volOptions); err != nil {
return nil, err
}
if err := extractOption(&opts.Monitors, "monitors", volOptions); err != nil {
return nil, err
}
if err := extractOption(&opts.Pool, "pool", volOptions); err != nil {
return nil, err
}
return &opts, nil
*/
}
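
For reference, newVolumeOptions() now requires all five keys in the map it is given (req.GetParameters() in CreateVolume, i.e. typically the StorageClass parameters). A minimal sketch of such a map - the key names are taken from the extractOption calls above, the values are placeholders:

// Placeholder values; any missing key surfaces extractOption's error.
params := map[string]string{
	"adminID":     "admin",
	"adminSecret": "<cephx admin key>",
	"monitor":     "192.168.0.1:6789",
	"pool":        "cephfs_data",
	"clusterName": "ceph",
}

volOpts, err := newVolumeOptions(params)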