mirror of https://github.com/ceph/ceph-csi.git
cephfs: volumes are now created for separate ceph users with limited access to fs

Uses a slightly modified version of
https://github.com/kubernetes-incubator/external-storage/blob/master/ceph/cephfs/cephfs_provisioner/cephfs_provisioner.py
This should be rewritten properly in Go, but it works for now, for demonstration purposes.

TODO:
* readOnly is not taken into account
* controllerServer.DeleteVolume does nothing

parent 3dc810a75b
commit 06f411bbf3
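For readers skimming the diff below, the shape of the change is: the plugin now shells out to the bundled cephfs_provisioner.py, which creates the CephFS volume, provisions a dedicated ceph user whose caps are limited to that volume's path, and prints a JSON object with "path", "user" and "key". A minimal Go sketch of that contract follows; it mirrors the newVolume helper added later in this diff, but the provisionShare wrapper, its arguments and the example values are illustrative only, not part of the commit.

package main

import (
    "encoding/json"
    "fmt"
    "os/exec"
)

// share mirrors the JSON printed by cephfs_provisioner.py on success:
//   {"path": "/volumes/...", "user": "client.user-...", "key": "AQ..."}
type share struct {
    Path string `json:"path"`
    User string `json:"user"`
    Key  string `json:"key"`
}

// provisionShare is a hypothetical wrapper: the script takes the share and user
// names via -n/-u and the Ceph connection details via environment variables.
func provisionShare(name, user, cluster, monitor, adminID, adminKey string) (*share, error) {
    cmd := exec.Command("/cephfs_provisioner.py", "-n", name, "-u", user)
    cmd.Env = []string{
        "CEPH_CLUSTER_NAME=" + cluster,
        "CEPH_MON=" + monitor,
        "CEPH_AUTH_ID=" + adminID,
        "CEPH_AUTH_KEY=" + adminKey,
    }

    out, err := cmd.CombinedOutput()
    if err != nil {
        return nil, fmt.Errorf("cephfs_provisioner.py failed: %v: %s", err, out)
    }

    s := &share{}
    if err := json.Unmarshal(out, s); err != nil {
        return nil, fmt.Errorf("unexpected provisioner output %q: %v", out, err)
    }
    return s, nil
}

func main() {
    // Example values only; the real plugin takes these from the StorageClass parameters.
    s, err := provisionShare("csi-cephfs-example", "user-csi-cephfs-example",
        "ceph", "192.168.122.11:6789", "admin", "<admin key>")
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Printf("mount %s with ceph user %s\n", s.Path, s.User)
}

On the node side, NodePublishVolume then mounts the returned path with ceph-fuse as that restricted user (see the volume.mount change below), replacing the old provisionRoot bind-mount approach.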
@@ -5,9 +5,10 @@ LABEL description="CephFS CSI Plugin"
 ENV CEPH_VERSION "luminous"
 
 RUN apt-get update && \
-    apt-get install -y ceph-fuse attr && \
-    apt-get autoremove
+    apt-get install -y ceph-common ceph-fuse && \
+    rm -rf /var/lib/apt/lists/*
 
 COPY cephfsplugin /cephfsplugin
-RUN chmod +x /cephfsplugin
+COPY cephfs_provisioner.py /cephfs_provisioner.py
+RUN chmod +x /cephfsplugin && chmod +x /cephfs_provisioner.py
 ENTRYPOINT ["/cephfsplugin"]
deploy/cephfs/docker/cephfs_provisioner.py (new file, 332 lines):
#!/usr/bin/env python

# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import rados
import getopt
import sys
import json

"""
CEPH_CLUSTER_NAME=test CEPH_MON=172.24.0.4 CEPH_AUTH_ID=admin CEPH_AUTH_KEY=AQCMpH9YM4Q1BhAAXGNQyyOne8ZsXqWGon/dIQ== cephfs_provisioner.py -n foo -u bar
"""

try:
    import ceph_volume_client
    ceph_module_found = True
except ImportError as e:
    ceph_volume_client = None
    ceph_module_found = False

VOlUME_GROUP="kubernetes"
CONF_PATH="/etc/ceph/"


class CephFSNativeDriver(object):
    """Driver for the Ceph Filesystem.

    This driver is 'native' in the sense that it exposes a CephFS filesystem
    for use directly by guests, with no intermediate layer like NFS.
    """

    def __init__(self, *args, **kwargs):
        self._volume_client = None

    def _create_conf(self, cluster_name, mons):
        """ Create conf using monitors
        Create a minimal ceph conf with monitors and cephx
        """
        conf_path = CONF_PATH + cluster_name + ".conf"
        if not os.path.isfile(conf_path) or os.access(conf_path, os.W_OK):
            conf = open(conf_path, 'w')
            conf.write("[global]\n")
            conf.write("mon_host = " + mons + "\n")
            conf.write("auth_cluster_required = cephx\nauth_service_required = cephx\nauth_client_required = cephx\n")
            conf.close()
        return conf_path

    def _create_keyring(self, cluster_name, id, key):
        """ Create client keyring using id and key
        """
        keyring_path = CONF_PATH + cluster_name + "." + "client." + id + ".keyring"
        if not os.path.isfile(keyring_path) or os.access(keyring_path, os.W_OK):
            keyring = open(keyring_path, 'w')
            keyring.write("[client." + id + "]\n")
            keyring.write("key = " + key + "\n")
            keyring.write("caps mds = \"allow *\"\n")
            keyring.write("caps mon = \"allow *\"\n")
            keyring.write("caps osd = \"allow *\"\n")
            keyring.close()

    @property
    def volume_client(self):
        if self._volume_client:
            return self._volume_client

        if not ceph_module_found:
            raise ValueError("Ceph client libraries not found.")

        try:
            cluster_name = os.environ["CEPH_CLUSTER_NAME"]
        except KeyError:
            cluster_name = "ceph"
        try:
            mons = os.environ["CEPH_MON"]
        except KeyError:
            raise ValueError("Missing CEPH_MON env")
        try:
            auth_id = os.environ["CEPH_AUTH_ID"]
        except KeyError:
            raise ValueError("Missing CEPH_AUTH_ID")
        try:
            auth_key = os.environ["CEPH_AUTH_KEY"]
        except:
            raise ValueError("Missing CEPH_AUTH_KEY")

        conf_path = self._create_conf(cluster_name, mons)
        self._create_keyring(cluster_name, auth_id, auth_key)

        self._volume_client = ceph_volume_client.CephFSVolumeClient(
            auth_id, conf_path, cluster_name)
        try:
            self._volume_client.connect(None)
        except Exception:
            self._volume_client = None
            raise

        return self._volume_client

    def _authorize_ceph(self, volume_path, auth_id, readonly):
        path = self._volume_client._get_path(volume_path)

        # First I need to work out what the data pool is for this share:
        # read the layout
        pool_name = self._volume_client._get_ancestor_xattr(path, "ceph.dir.layout.pool")
        namespace = self._volume_client.fs.getxattr(path, "ceph.dir.layout.pool_namespace")

        # Now construct auth capabilities that give the guest just enough
        # permissions to access the share
        client_entity = "client.{0}".format(auth_id)
        want_access_level = 'r' if readonly else 'rw'
        want_mds_cap = 'allow r,allow {0} path={1}'.format(want_access_level, path)
        want_osd_cap = 'allow {0} pool={1} namespace={2}'.format(
            want_access_level, pool_name, namespace)

        try:
            existing = self._volume_client._rados_command(
                'auth get',
                {
                    'entity': client_entity
                }
            )
        # FIXME: rados raising Error instead of ObjectNotFound in auth get failure
        except rados.Error:
            caps = self._volume_client._rados_command(
                'auth get-or-create',
                {
                    'entity': client_entity,
                    'caps': [
                        'mds', want_mds_cap,
                        'osd', want_osd_cap,
                        'mon', 'allow r']
                })
        else:
            # entity exists, update it
            cap = existing[0]

            # Construct auth caps that if present might conflict with the desired
            # auth caps.
            unwanted_access_level = 'r' if want_access_level is 'rw' else 'rw'
            unwanted_mds_cap = 'allow {0} path={1}'.format(unwanted_access_level, path)
            unwanted_osd_cap = 'allow {0} pool={1} namespace={2}'.format(
                unwanted_access_level, pool_name, namespace)

            def cap_update(orig, want, unwanted):
                # Updates the existing auth caps such that there is a single
                # occurrence of wanted auth caps and no occurrence of
                # conflicting auth caps.
                cap_tokens = set(orig.split(","))

                cap_tokens.discard(unwanted)
                cap_tokens.add(want)

                return ",".join(cap_tokens)

            osd_cap_str = cap_update(cap['caps'].get('osd', ""), want_osd_cap, unwanted_osd_cap)
            mds_cap_str = cap_update(cap['caps'].get('mds', ""), want_mds_cap, unwanted_mds_cap)

            caps = self._volume_client._rados_command(
                'auth caps',
                {
                    'entity': client_entity,
                    'caps': [
                        'mds', mds_cap_str,
                        'osd', osd_cap_str,
                        'mon', cap['caps'].get('mon')]
                })
            caps = self._volume_client._rados_command(
                'auth get',
                {
                    'entity': client_entity
                }
            )

        # Result expected like this:
        # [
        #     {
        #         "entity": "client.foobar",
        #         "key": "AQBY0\/pViX\/wBBAAUpPs9swy7rey1qPhzmDVGQ==",
        #         "caps": {
        #             "mds": "allow *",
        #             "mon": "allow *"
        #         }
        #     }
        # ]
        assert len(caps) == 1
        assert caps[0]['entity'] == client_entity
        return caps[0]

    def create_share(self, path, user_id, size=None):
        """Create a CephFS volume.
        """
        volume_path = ceph_volume_client.VolumePath(VOlUME_GROUP, path)

        # Create the CephFS volume
        volume = self.volume_client.create_volume(volume_path, size=size)

        # To mount this you need to know the mon IPs and the path to the volume
        mon_addrs = self.volume_client.get_mon_addrs()

        export_location = "{addrs}:{path}".format(
            addrs=",".join(mon_addrs),
            path=volume['mount_path'])

        """TODO
        restrict to user_id
        """
        auth_result = self._authorize_ceph(volume_path, user_id, False)
        ret = {
            'path': volume['mount_path'],
            'user': auth_result['entity'],
            'key': auth_result['key']
        }

        self._create_keyring(self.volume_client.cluster_name, user_id, auth_result['key'])

        return json.dumps(ret)

    def _deauthorize(self, volume_path, auth_id):
        """
        The volume must still exist.
        NOTE: In our `_authorize_ceph` method we give user extra mds `allow r`
        cap to work around a kernel cephfs issue. So we need a customized
        `_deauthorize` method to remove caps instead of using
        `volume_client._deauthorize`.
        This method is modified from
        https://github.com/ceph/ceph/blob/v13.0.0/src/pybind/ceph_volume_client.py#L1181.
        """
        client_entity = "client.{0}".format(auth_id)
        path = self.volume_client._get_path(volume_path)
        pool_name = self.volume_client._get_ancestor_xattr(path, "ceph.dir.layout.pool")
        namespace = self.volume_client.fs.getxattr(path, "ceph.dir.layout.pool_namespace")

        # The auth_id might have read-only or read-write mount access for the
        # volume path.
        access_levels = ('r', 'rw')
        want_mds_caps = {'allow {0} path={1}'.format(access_level, path)
                         for access_level in access_levels}
        want_osd_caps = {'allow {0} pool={1} namespace={2}'.format(
                             access_level, pool_name, namespace)
                         for access_level in access_levels}

        try:
            existing = self.volume_client._rados_command(
                'auth get',
                {
                    'entity': client_entity
                }
            )

            def cap_remove(orig, want):
                cap_tokens = set(orig.split(","))
                return ",".join(cap_tokens.difference(want))

            cap = existing[0]
            osd_cap_str = cap_remove(cap['caps'].get('osd', ""), want_osd_caps)
            mds_cap_str = cap_remove(cap['caps'].get('mds', ""), want_mds_caps)

            if (not osd_cap_str) and (not osd_cap_str or mds_cap_str == "allow r"):
                # If osd caps are removed and mds caps are removed or only have "allow r", we can remove entity safely.
                self.volume_client._rados_command('auth del', {'entity': client_entity}, decode=False)
            else:
                self.volume_client._rados_command(
                    'auth caps',
                    {
                        'entity': client_entity,
                        'caps': [
                            'mds', mds_cap_str,
                            'osd', osd_cap_str,
                            'mon', cap['caps'].get('mon', 'allow r')]
                    })

        # FIXME: rados raising Error instead of ObjectNotFound in auth get failure
        except rados.Error:
            # Already gone, great.
            return

    def delete_share(self, path, user_id):
        volume_path = ceph_volume_client.VolumePath(VOlUME_GROUP, path)
        self._deauthorize(volume_path, user_id)
        self.volume_client.delete_volume(volume_path)
        self.volume_client.purge_volume(volume_path)

    def __del__(self):
        if self._volume_client:
            self._volume_client.disconnect()
            self._volume_client = None


def main():
    create = True
    share = ""
    user = ""
    cephfs = CephFSNativeDriver()
    try:
        opts, args = getopt.getopt(sys.argv[1:], "rn:u:", ["remove"])
    except getopt.GetoptError:
        print "Usage: " + sys.argv[0] + " --remove -n share_name -u ceph_user_id"
        sys.exit(1)

    for opt, arg in opts:
        if opt == '-n':
            share = arg
        elif opt == '-u':
            user = arg
        elif opt in ("-r", "--remove"):
            create = False

    if share == "" or user == "":
        print "Usage: " + sys.argv[0] + " --remove -n share_name -u ceph_user_id"
        sys.exit(1)

    if create == True:
        print cephfs.create_share(share, user)
    else:
        cephfs.delete_share(share, user)


if __name__ == "__main__":
    main()
@@ -4,5 +4,9 @@ metadata:
   name: cephfs
 provisioner: cephfsplugin
 parameters:
-  provisionRoot: /cephfs
+  adminID: admin
+  adminSecret: AQCdsp9aaowqEhAAHx5EFnTQBnTU7Dr1UzHwmQ==
+  clusterName: ceph
+  pool: cephfs
+  monitor: 192.168.122.11:6789
 reclaimPolicy: Delete
@@ -39,8 +39,6 @@ type cephfsDriver struct {
 }
 
 var (
-    provisionRoot = "/cephfs"
-
     driver  *cephfsDriver
     version = csi.Version{
         Minor: 1,
@@ -18,8 +18,6 @@ package cephfs
 
 import (
     "fmt"
-    "os"
-    "path"
 
     "github.com/golang/glog"
     "golang.org/x/net/context"
@@ -56,8 +54,6 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
         return nil, err
     }
 
-    // Configuration
-
     volOptions, err := newVolumeOptions(req.GetParameters())
     if err != nil {
         return nil, err
@@ -70,49 +66,19 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
         volSz = int64(req.GetCapacityRange().GetRequiredBytes())
     }
 
-    if err := createMountPoint(provisionRoot); err != nil {
-        glog.Errorf("failed to create provision root at %s: %v", provisionRoot, err)
-        return nil, status.Error(codes.Internal, err.Error())
-    }
-
-    // Exec ceph-fuse only if cephfs has not been not mounted yet
-
-    isMnt, err := isMountPoint(provisionRoot)
-
+    vol, err := newVolume(volId, volOptions)
     if err != nil {
-        glog.Errorf("stat failed: %v", err)
+        glog.Errorf("failed to create a volume: %v", err)
         return nil, status.Error(codes.Internal, err.Error())
     }
 
-    if !isMnt {
-        if err = mountFuse(provisionRoot); err != nil {
-            glog.Error(err)
-            return nil, status.Error(codes.Internal, err.Error())
-        }
-    }
-
-    // Create a new directory inside the provision root for bind-mounting done by NodePublishVolume
-
-    volPath := path.Join(provisionRoot, volId.id)
-    if err := os.Mkdir(volPath, 0750); err != nil {
-        glog.Errorf("failed to create volume %s: %v", volPath, err)
-        return nil, status.Error(codes.Internal, err.Error())
-    }
-
-    // Set attributes & quotas
-
-    if err = setVolAttributes(volPath, volSz); err != nil {
-        glog.Errorf("failed to set attributes for volume %s: %v", volPath, err)
-        return nil, status.Error(codes.Internal, err.Error())
-    }
-
-    glog.V(4).Infof("cephfs: created volume %s", volPath)
+    glog.V(4).Infof("cephfs: volume created at %s", vol.Root)
 
     return &csi.CreateVolumeResponse{
         VolumeInfo: &csi.VolumeInfo{
             Id:            volId.id,
             CapacityBytes: uint64(volSz),
-            Attributes:    req.GetParameters(),
+            Attributes:    vol.makeMap(),
         },
     }, nil
 }
@@ -123,28 +89,11 @@ func (cs *controllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol
         return nil, err
     }
 
-    volId := req.GetVolumeId()
-    volPath := path.Join(provisionRoot, volId)
-
-    glog.V(4).Infof("deleting volume %s", volPath)
-
-    if err := deleteVolumePath(volPath); err != nil {
-        glog.Errorf("failed to delete volume %s: %v", volPath, err)
-        return nil, err
-    }
+    // TODO
 
     return &csi.DeleteVolumeResponse{}, nil
 }
 
 func (cs *controllerServer) ValidateVolumeCapabilities(ctx context.Context, req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) {
-    res := &csi.ValidateVolumeCapabilitiesResponse{}
-
-    for _, capability := range req.VolumeCapabilities {
-        if capability.GetAccessMode().GetMode() != csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER {
-            return res, nil
-        }
-    }
-
-    res.Supported = true
-    return res, nil
+    return &csi.ValidateVolumeCapabilitiesResponse{Supported: true}, nil
 }
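As the TODO above says, DeleteVolume is a no-op in this commit. A possible follow-up, sketched here only as an assumption, would call cephfs_provisioner.py --remove for the volume, which is what the new deleteVolume helper further down in this diff wraps; the user name below relies on the "user-"+volume-ID convention used by newVolume, and the script still needs the CEPH_* environment variables that are set during creation.

package main

import (
    "fmt"
    "os/exec"
)

// removeShare is a hypothetical helper showing how a later commit could back
// controllerServer.DeleteVolume: it asks the provisioner script to deauthorize
// the per-volume ceph user and purge the volume.
func removeShare(volID string, cephEnv []string) error {
    cmd := exec.Command("/cephfs_provisioner.py", "--remove", "-n", volID, "-u", "user-"+volID)
    cmd.Env = cephEnv // CEPH_CLUSTER_NAME, CEPH_MON, CEPH_AUTH_ID, CEPH_AUTH_KEY

    if out, err := cmd.CombinedOutput(); err != nil {
        return fmt.Errorf("cephfs: failed to delete volume %s: %v\noutput: %s", volID, err, out)
    }
    return nil
}

func main() {
    // Example invocation with placeholder credentials.
    env := []string{"CEPH_CLUSTER_NAME=ceph", "CEPH_MON=192.168.122.11:6789",
        "CEPH_AUTH_ID=admin", "CEPH_AUTH_KEY=<admin key>"}
    if err := removeShare("csi-cephfs-example", env); err != nil {
        fmt.Println(err)
    }
}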
@@ -18,7 +18,6 @@ package cephfs
 
 import (
     "context"
-    "path"
 
     "github.com/golang/glog"
     "google.golang.org/grpc/codes"
@@ -27,7 +26,6 @@ import (
     "github.com/container-storage-interface/spec/lib/go/csi"
     "github.com/kubernetes-csi/drivers/pkg/csi-common"
     "k8s.io/kubernetes/pkg/util/keymutex"
-    "k8s.io/kubernetes/pkg/util/mount"
 )
 
 type nodeServer struct {
@@ -53,6 +51,16 @@ func validateNodePublishVolumeRequest(req *csi.NodePublishVolumeRequest) error {
         return status.Error(codes.InvalidArgument, "Target path missing in request")
     }
 
+    attrs := req.GetVolumeAttributes()
+
+    if _, ok := attrs["path"]; !ok {
+        return status.Error(codes.InvalidArgument, "Missing path attribute")
+    }
+
+    if _, ok := attrs["user"]; !ok {
+        return status.Error(codes.InvalidArgument, "Missing user attribute")
+    }
+
     return nil
 }
 
@@ -105,20 +113,19 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
         return &csi.NodePublishVolumeResponse{}, nil
     }
 
-    // It's not, do the bind-mount now
+    // It's not, exec ceph-fuse now
 
-    options := []string{"bind"}
-    if req.GetReadonly() {
-        options = append(options, "ro")
-    }
+    // TODO honor req.GetReadOnly()
 
-    volPath := path.Join(provisionRoot, req.GetVolumeId())
-    if err := mount.New("").Mount(volPath, targetPath, "", options); err != nil {
-        glog.Errorf("bind-mounting %s to %s failed: %v", volPath, targetPath, err)
+    attrs := req.GetVolumeAttributes()
+    vol := volume{Root: attrs["path"], User: attrs["user"]}
+
+    if err := vol.mount(targetPath); err != nil {
+        glog.Errorf("mounting volume %s to %s failed: %v", vol.Root, targetPath, err)
         return nil, status.Error(codes.Internal, err.Error())
     }
 
-    glog.V(4).Infof("cephfs: volume %s successfuly mounted to %s", volPath, targetPath)
+    glog.V(4).Infof("cephfs: volume %s successfuly mounted to %s", vol.Root, targetPath)
 
     return &csi.NodePublishVolumeResponse{}, nil
 }
@@ -129,14 +136,13 @@ func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpu
     }
 
     volId := req.GetVolumeId()
-    targetPath := req.GetTargetPath()
 
     if err := tryLock(volId, nsMtx, "NodeServer"); err != nil {
         return nil, err
     }
     defer nsMtx.UnlockKey(volId)
 
-    if err := mount.New("").Unmount(targetPath); err != nil {
+    if err := unmountVolume(req.GetTargetPath()); err != nil {
         return nil, status.Error(codes.Internal, err.Error())
     }
 
@@ -17,28 +17,74 @@ limitations under the License.
 package cephfs
 
 import (
+    "encoding/json"
     "fmt"
     "os"
+    "os/exec"
 )
 
-func createMountPoint(root string) error {
-    return os.MkdirAll(root, 0750)
+const (
+    // from https://github.com/kubernetes-incubator/external-storage/tree/master/ceph/cephfs/cephfs_provisioner
+    provisionerCmd = "/cephfs_provisioner.py"
+    userPrefix     = "user-"
+)
+
+type volume struct {
+    Root string `json:"path"`
+    User string `json:"user"`
+    Key  string `json:"key"`
 }
 
-func deleteVolumePath(volPath string) error {
-    return os.RemoveAll(volPath)
-}
+func newVolume(volId *volumeIdentifier, volOpts *volumeOptions) (*volume, error) {
+    cmd := exec.Command(provisionerCmd, "-n", volId.id, "-u", userPrefix+volId.id)
+    cmd.Env = []string{
+        "CEPH_CLUSTER_NAME=" + volOpts.ClusterName,
+        "CEPH_MON=" + volOpts.Monitor,
+        "CEPH_AUTH_ID=" + volOpts.AdminId,
+        "CEPH_AUTH_KEY=" + volOpts.AdminSecret,
+    }
 
-func mountFuse(root string) error {
-    out, err := execCommand("ceph-fuse", root)
+    out, err := cmd.CombinedOutput()
     if err != nil {
-        return fmt.Errorf("cephfs: ceph-fuse failed with following error: %v\ncephfs: ceph-fuse output: %s", err, out)
+        return nil, fmt.Errorf("cephfs: an error occurred while creating the volume: %v\ncephfs: %s", err, out)
+    }
+
+    fmt.Printf("\t\tcephfs_provisioner.py: %s\n", out)
+
+    vol := &volume{}
+    if err = json.Unmarshal(out, vol); err != nil {
+        return nil, fmt.Errorf("cephfs: malformed json output: %s", err)
+    }
+
+    return vol, nil
+}
+
+func (vol *volume) mount(mountPoint string) error {
+    out, err := execCommand("ceph-fuse", mountPoint, "-n", vol.User, "-r", vol.Root)
+    if err != nil {
+        return fmt.Errorf("cephfs: ceph-fuse failed with following error: %s\ncephfs: cephf-fuse output: %s", err, out)
     }
 
     return nil
 }
 
-func unmountFuse(root string) error {
+func (vol *volume) unmount() error {
+    out, err := execCommand("fusermount", "-u", vol.Root)
+    if err != nil {
+        return fmt.Errorf("cephfs: fusermount failed with following error: %v\ncephfs: fusermount output: %s", err, out)
+    }
+
+    return nil
+}
+
+func (vol *volume) makeMap() map[string]string {
+    return map[string]string{
+        "path": vol.Root,
+        "user": vol.User,
+    }
+}
+
+func unmountVolume(root string) error {
     out, err := execCommand("fusermount", "-u", root)
     if err != nil {
         return fmt.Errorf("cephfs: fusermount failed with following error: %v\ncephfs: fusermount output: %s", err, out)
@@ -47,12 +93,15 @@ func unmountFuse(root string) error {
     return nil
 }
 
-func setVolAttributes(volPath string /*opts *fsVolumeOptions*/, maxBytes int64) error {
-    out, err := execCommand("setfattr", "-n", "ceph.quota.max_bytes",
-        "-v", fmt.Sprintf("%d", maxBytes), volPath)
+func deleteVolume(volId, user string) error {
+    out, err := execCommand(provisionerCmd, "--remove", "-n", volId, "-u", user)
     if err != nil {
-        return fmt.Errorf("cephfs: setfattr failed with following error: %v\ncephfs: setfattr output: %s", err, out)
+        return fmt.Errorf("cephfs: failed to delete volume %s following error: %v\ncephfs: output: %s", volId, err, out)
     }
 
     return nil
 }
+
+func createMountPoint(root string) error {
+    return os.MkdirAll(root, 0750)
+}
@@ -31,7 +31,7 @@ func newVolumeIdentifier(volOptions *volumeOptions, req *csi.CreateVolumeRequest
         uuid: uuid.NewUUID().String(),
     }
 
-    volId.id = "csi-rbd-" + volId.uuid
+    volId.id = "csi-cephfs-" + volId.uuid
 
     if volId.name == "" {
         volId.name = volOptions.Pool + "-dynamic-pvc-" + volId.uuid
@@ -19,9 +19,9 @@ package cephfs
 import "errors"
 
 type volumeOptions struct {
-    VolName     string `json:"volName"`
     Monitor     string `json:"monitor"`
     Pool        string `json:"pool"`
+    ClusterName string `json:"clusterName"`
     AdminId     string `json:"adminID"`
     AdminSecret string `json:"adminSecret"`
 }
@@ -37,27 +37,26 @@ func extractOption(dest *string, optionLabel string, options map[string]string)
 
 func newVolumeOptions(volOptions map[string]string) (*volumeOptions, error) {
     var opts volumeOptions
-    // XXX early return - we're not reading credentials from volOptions for now...
-    // i'll finish this once ceph-fuse accepts passing credentials through cmd args
-    return &opts, nil
 
-    /*
-        if err := extractOption(&opts.AdminId, "adminID", volOptions); err != nil {
-            return nil, err
-        }
-
-        if err := extractOption(&opts.AdminSecret, "adminSecret", volOptions); err != nil {
-            return nil, err
-        }
-
-        if err := extractOption(&opts.Monitors, "monitors", volOptions); err != nil {
-            return nil, err
-        }
-
-        if err := extractOption(&opts.Pool, "pool", volOptions); err != nil {
-            return nil, err
-        }
-
-        return &opts, nil
-    */
+    if err := extractOption(&opts.AdminId, "adminID", volOptions); err != nil {
+        return nil, err
+    }
+
+    if err := extractOption(&opts.AdminSecret, "adminSecret", volOptions); err != nil {
+        return nil, err
+    }
+
+    if err := extractOption(&opts.Monitor, "monitor", volOptions); err != nil {
+        return nil, err
+    }
+
+    if err := extractOption(&opts.Pool, "pool", volOptions); err != nil {
+        return nil, err
+    }
+
+    if err := extractOption(&opts.ClusterName, "clusterName", volOptions); err != nil {
+        return nil, err
+    }
+
+    return &opts, nil
 }