Modify RBD plugin to use a single ID and move the id and key into the secret

RBD plugin needs only a single ID to manage images and operations against a
pool, mentioned in the storage class. The current scheme of 2 IDs is hence not
needed and removed in this commit.

Further, unlike CephFS plugin, the RBD plugin splits the user id and the key
into the storage class and the secret respectively. Also the parameter name
for the key in the secret is noted in the storage class, making it a variant that
hampers usability/comprehension. This is also fixed by moving the id and the key
to the secret and not retaining the same in the storage class, like CephFS.

Fixes #270

Testing done:
- Basic PVC creation and mounting

Signed-off-by: ShyamsundarR <srangana@redhat.com>
This commit is contained in:
ShyamsundarR
2019-06-01 17:26:42 -04:00
committed by mergify[bot]
parent 22ff5c0911
commit c5762b6b5c
25 changed files with 284 additions and 402 deletions

View File

@ -33,15 +33,15 @@ type CephFilesystemDetails struct {
MDSMap MDSMap `json:"mdsmap"`
}
func getFscID(monitors, id, key, fsName string) (int64, error) {
func getFscID(monitors string, cr *util.Credentials, fsName string) (int64, error) {
// ceph fs get myfs --format=json
// {"mdsmap":{...},"id":2}
var fsDetails CephFilesystemDetails
err := execCommandJSON(&fsDetails,
"ceph",
"-m", monitors,
"--id", id,
"--key="+key,
"--id", cr.ID,
"--key="+cr.Key,
"-c", util.CephConfigPath,
"fs", "get", fsName, "--format=json",
)
@ -61,15 +61,15 @@ type CephFilesystem struct {
DataPoolIDs []int `json:"data_pool_ids"`
}
func getMetadataPool(monitors, id, key, fsName string) (string, error) {
func getMetadataPool(monitors string, cr *util.Credentials, fsName string) (string, error) {
// ./tbox ceph fs ls --format=json
// [{"name":"myfs","metadata_pool":"myfs-metadata","metadata_pool_id":4,...},...]
var filesystems []CephFilesystem
err := execCommandJSON(&filesystems,
"ceph",
"-m", monitors,
"--id", id,
"--key="+key,
"--id", cr.ID,
"--key="+cr.Key,
"-c", util.CephConfigPath,
"fs", "ls", "--format=json",
)
@ -91,15 +91,15 @@ type CephFilesystemDump struct {
Filesystems []CephFilesystemDetails `json:"filesystems"`
}
func getFsName(monitors, id, key string, fscID int64) (string, error) {
func getFsName(monitors string, cr *util.Credentials, fscID int64) (string, error) {
// ./tbox ceph fs dump --format=json
// JSON: {...,"filesystems":[{"mdsmap":{},"id":<n>},...],...}
var fsDump CephFilesystemDump
err := execCommandJSON(&fsDump,
"ceph",
"-m", monitors,
"--id", id,
"--key="+key,
"--id", cr.ID,
"--key="+cr.Key,
"-c", util.CephConfigPath,
"fs", "dump", "--format=json",
)

View File

@ -39,10 +39,10 @@ type cephEntity struct {
Caps cephEntityCaps `json:"caps"`
}
func (ent *cephEntity) toCredentials() *credentials {
return &credentials{
id: ent.Entity[len(cephEntityClientPrefix):],
key: ent.Key,
func (ent *cephEntity) toCredentials() *util.Credentials {
return &util.Credentials{
ID: ent.Entity[len(cephEntityClientPrefix):],
Key: ent.Key,
}
}
@ -63,30 +63,30 @@ func getSingleCephEntity(args ...string) (*cephEntity, error) {
return &ents[0], nil
}
func genUserIDs(adminCr *credentials, volID volumeID) (adminID, userID string) {
return cephEntityClientPrefix + adminCr.id, cephEntityClientPrefix + getCephUserName(volID)
func genUserIDs(adminCr *util.Credentials, volID volumeID) (adminID, userID string) {
return cephEntityClientPrefix + adminCr.ID, cephEntityClientPrefix + getCephUserName(volID)
}
func getCephUser(volOptions *volumeOptions, adminCr *credentials, volID volumeID) (*cephEntity, error) {
func getCephUser(volOptions *volumeOptions, adminCr *util.Credentials, volID volumeID) (*cephEntity, error) {
adminID, userID := genUserIDs(adminCr, volID)
return getSingleCephEntity(
"-m", volOptions.Monitors,
"-n", adminID,
"--key="+adminCr.key,
"--key="+adminCr.Key,
"-c", util.CephConfigPath,
"-f", "json",
"auth", "get", userID,
)
}
func createCephUser(volOptions *volumeOptions, adminCr *credentials, volID volumeID) (*cephEntity, error) {
func createCephUser(volOptions *volumeOptions, adminCr *util.Credentials, volID volumeID) (*cephEntity, error) {
adminID, userID := genUserIDs(adminCr, volID)
return getSingleCephEntity(
"-m", volOptions.Monitors,
"-n", adminID,
"--key="+adminCr.key,
"--key="+adminCr.Key,
"-c", util.CephConfigPath,
"-f", "json",
"auth", "get-or-create", userID,
@ -97,14 +97,14 @@ func createCephUser(volOptions *volumeOptions, adminCr *credentials, volID volum
)
}
func deleteCephUser(volOptions *volumeOptions, adminCr *credentials, volID volumeID) error {
func deleteCephUser(volOptions *volumeOptions, adminCr *util.Credentials, volID volumeID) error {
adminID, userID := genUserIDs(adminCr, volID)
// TODO: Need to return success if userID is not found
return execCommandErr("ceph",
"-m", volOptions.Monitors,
"-n", adminID,
"--key="+adminCr.key,
"--key="+adminCr.Key,
"-c", util.CephConfigPath,
"auth", "rm", userID,
)

View File

@ -48,7 +48,7 @@ var (
// createBackingVolume creates the backing subvolume and user/key for the given volOptions and vID,
// and on any error cleans up any created entities
func (cs *ControllerServer) createBackingVolume(volOptions *volumeOptions, vID *volumeIdentifier, secret map[string]string) error {
cr, err := getAdminCredentials(secret)
cr, err := util.GetAdminCredentials(secret)
if err != nil {
return status.Error(codes.InvalidArgument, err.Error())
}
@ -168,14 +168,14 @@ func (cs *ControllerServer) deleteVolumeDeprecated(req *csi.DeleteVolumeRequest)
// mons may have changed since create volume,
// retrieve the latest mons and override old mons
if mon, secretsErr := getMonValFromSecret(secrets); secretsErr == nil && len(mon) > 0 {
if mon, secretsErr := util.GetMonValFromSecret(secrets); secretsErr == nil && len(mon) > 0 {
klog.Infof("overriding monitors [%q] with [%q] for volume %s", ce.VolOptions.Monitors, mon, volID)
ce.VolOptions.Monitors = mon
}
// Deleting a volume requires admin credentials
cr, err := getAdminCredentials(secrets)
cr, err := util.GetAdminCredentials(secrets)
if err != nil {
klog.Errorf("failed to retrieve admin credentials: %v", err)
return nil, status.Error(codes.InvalidArgument, err.Error())
@ -232,7 +232,7 @@ func (cs *ControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol
}
// Deleting a volume requires admin credentials
cr, err := getAdminCredentials(secrets)
cr, err := util.GetAdminCredentials(secrets)
if err != nil {
klog.Errorf("failed to retrieve admin credentials: %v", err)
return nil, status.Error(codes.InvalidArgument, err.Error())

View File

@ -1,64 +0,0 @@
/*
Copyright 2018 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cephfs
import "fmt"
// Secret map keys under which the Ceph credentials and monitor list are stored.
const (
	credUserID   = "userID"
	credUserKey  = "userKey"
	credAdminID  = "adminID"
	credAdminKey = "adminKey"
	credMonitors = "monitors"
)

// credentials pairs a Ceph entity name (without the "client." prefix)
// with its secret key.
type credentials struct {
	id  string
	key string
}

// getCredentials extracts an id/key pair from secrets, reading the entity
// name from idField and the secret key from keyField. It returns an error
// if either field is absent.
func getCredentials(idField, keyField string, secrets map[string]string) (*credentials, error) {
	id, found := secrets[idField]
	if !found {
		return nil, fmt.Errorf("missing ID field '%s' in secrets", idField)
	}

	key, found := secrets[keyField]
	if !found {
		return nil, fmt.Errorf("missing key field '%s' in secrets", keyField)
	}

	return &credentials{id: id, key: key}, nil
}

// getUserCredentials reads the user id and key from secrets.
func getUserCredentials(secrets map[string]string) (*credentials, error) {
	return getCredentials(credUserID, credUserKey, secrets)
}

// getAdminCredentials reads the admin id and key from secrets.
func getAdminCredentials(secrets map[string]string) (*credentials, error) {
	return getCredentials(credAdminID, credAdminKey, secrets)
}

// getMonValFromSecret returns the monitor list stored in secrets, or an
// error when no "monitors" entry is present.
func getMonValFromSecret(secrets map[string]string) (string, error) {
	mons, found := secrets[credMonitors]
	if !found {
		return "", fmt.Errorf("missing %q", credMonitors)
	}
	return mons, nil
}

View File

@ -49,12 +49,12 @@ func checkVolExists(volOptions *volumeOptions, secret map[string]string) (*volum
vid volumeIdentifier
)
cr, err := getAdminCredentials(secret)
cr, err := util.GetAdminCredentials(secret)
if err != nil {
return nil, err
}
imageUUID, err := volJournal.CheckReservation(volOptions.Monitors, cr.id, cr.key,
imageUUID, err := volJournal.CheckReservation(volOptions.Monitors, cr,
volOptions.MetadataPool, volOptions.RequestName, "")
if err != nil {
return nil, err
@ -86,12 +86,12 @@ func checkVolExists(volOptions *volumeOptions, secret map[string]string) (*volum
// undoVolReservation is a helper routine to undo a name reservation for a CSI VolumeName
func undoVolReservation(volOptions *volumeOptions, vid volumeIdentifier, secret map[string]string) error {
cr, err := getAdminCredentials(secret)
cr, err := util.GetAdminCredentials(secret)
if err != nil {
return err
}
err = volJournal.UndoReservation(volOptions.Monitors, cr.id, cr.key, volOptions.MetadataPool,
err = volJournal.UndoReservation(volOptions.Monitors, cr, volOptions.MetadataPool,
vid.FsSubvolName, volOptions.RequestName)
return err
@ -105,12 +105,12 @@ func reserveVol(volOptions *volumeOptions, secret map[string]string) (*volumeIde
vid volumeIdentifier
)
cr, err := getAdminCredentials(secret)
cr, err := util.GetAdminCredentials(secret)
if err != nil {
return nil, err
}
imageUUID, err := volJournal.ReserveName(volOptions.Monitors, cr.id, cr.key,
imageUUID, err := volJournal.ReserveName(volOptions.Monitors, cr,
volOptions.MetadataPool, volOptions.RequestName, "")
if err != nil {
return nil, err

View File

@ -90,13 +90,13 @@ func mountOneCacheEntry(volOptions *volumeOptions, vid *volumeIdentifier, me *vo
var (
err error
cr *credentials
cr *util.Credentials
)
volID := vid.VolumeID
if volOptions.ProvisionVolume {
volOptions.RootPath = getVolumeRootPathCeph(volumeID(vid.FsSubvolName))
cr, err = getAdminCredentials(decodeCredentials(me.Secrets))
cr, err = util.GetAdminCredentials(decodeCredentials(me.Secrets))
if err != nil {
return err
}
@ -107,7 +107,7 @@ func mountOneCacheEntry(volOptions *volumeOptions, vid *volumeIdentifier, me *vo
}
cr = entity.toCredentials()
} else {
cr, err = getUserCredentials(decodeCredentials(me.Secrets))
cr, err = util.GetUserCredentials(decodeCredentials(me.Secrets))
if err != nil {
return err
}

View File

@ -22,6 +22,7 @@ import (
"os"
csicommon "github.com/ceph/ceph-csi/pkg/csi-common"
"github.com/ceph/ceph-csi/pkg/util"
"github.com/container-storage-interface/spec/lib/go/csi"
"google.golang.org/grpc/codes"
@ -40,9 +41,9 @@ var (
mtxNodeVolumeID = keymutex.NewHashed(0)
)
func getCredentialsForVolume(volOptions *volumeOptions, volID volumeID, req *csi.NodeStageVolumeRequest) (*credentials, error) {
func getCredentialsForVolume(volOptions *volumeOptions, volID volumeID, req *csi.NodeStageVolumeRequest) (*util.Credentials, error) {
var (
cr *credentials
cr *util.Credentials
secrets = req.GetSecrets()
)
@ -51,7 +52,7 @@ func getCredentialsForVolume(volOptions *volumeOptions, volID volumeID, req *csi
// First, get admin credentials - those are needed for retrieving the user credentials
adminCr, err := getAdminCredentials(secrets)
adminCr, err := util.GetAdminCredentials(secrets)
if err != nil {
return nil, fmt.Errorf("failed to get admin credentials from node stage secrets: %v", err)
}
@ -67,7 +68,7 @@ func getCredentialsForVolume(volOptions *volumeOptions, volID volumeID, req *csi
} else {
// The volume is pre-made, credentials are in node stage secrets
userCr, err := getUserCredentials(req.GetSecrets())
userCr, err := util.GetUserCredentials(req.GetSecrets())
if err != nil {
return nil, fmt.Errorf("failed to get user credentials from node stage secrets: %v", err)
}

View File

@ -21,6 +21,8 @@ import (
"os"
"path"
"github.com/ceph/ceph-csi/pkg/util"
"k8s.io/klog"
)
@ -50,7 +52,7 @@ func setVolumeAttribute(root, attrName, attrValue string) error {
return execCommandErr("setfattr", "-n", attrName, "-v", attrValue, root)
}
func createVolume(volOptions *volumeOptions, adminCr *credentials, volID volumeID, bytesQuota int64) error {
func createVolume(volOptions *volumeOptions, adminCr *util.Credentials, volID volumeID, bytesQuota int64) error {
if err := mountCephRoot(volID, volOptions, adminCr); err != nil {
return err
}
@ -91,7 +93,7 @@ func createVolume(volOptions *volumeOptions, adminCr *credentials, volID volumeI
return nil
}
func purgeVolume(volID volumeID, adminCr *credentials, volOptions *volumeOptions) error {
func purgeVolume(volID volumeID, adminCr *util.Credentials, volOptions *volumeOptions) error {
if err := mountCephRoot(volID, volOptions, adminCr); err != nil {
return err
}
@ -120,7 +122,7 @@ func purgeVolume(volID volumeID, adminCr *credentials, volOptions *volumeOptions
return nil
}
func mountCephRoot(volID volumeID, volOptions *volumeOptions, adminCr *credentials) error {
func mountCephRoot(volID volumeID, volOptions *volumeOptions, adminCr *util.Credentials) error {
cephRoot := getCephRootPathLocal(volID)
// Root path is not set for dynamically provisioned volumes

View File

@ -70,7 +70,7 @@ func loadAvailableMounters() error {
}
type volumeMounter interface {
mount(mountPoint string, cr *credentials, volOptions *volumeOptions) error
mount(mountPoint string, cr *util.Credentials, volOptions *volumeOptions) error
name() string
}
@ -114,12 +114,12 @@ func newMounter(volOptions *volumeOptions) (volumeMounter, error) {
type fuseMounter struct{}
func mountFuse(mountPoint string, cr *credentials, volOptions *volumeOptions) error {
func mountFuse(mountPoint string, cr *util.Credentials, volOptions *volumeOptions) error {
args := []string{
mountPoint,
"-m", volOptions.Monitors,
"-c", util.CephConfigPath,
"-n", cephEntityClientPrefix + cr.id, "--key=" + cr.key,
"-n", cephEntityClientPrefix + cr.ID, "--key=" + cr.Key,
"-r", volOptions.RootPath,
"-o", "nonempty",
}
@ -154,7 +154,7 @@ func mountFuse(mountPoint string, cr *credentials, volOptions *volumeOptions) er
return nil
}
func (m *fuseMounter) mount(mountPoint string, cr *credentials, volOptions *volumeOptions) error {
func (m *fuseMounter) mount(mountPoint string, cr *util.Credentials, volOptions *volumeOptions) error {
if err := createMountPoint(mountPoint); err != nil {
return err
}
@ -166,7 +166,7 @@ func (m *fuseMounter) name() string { return "Ceph FUSE driver" }
type kernelMounter struct{}
func mountKernel(mountPoint string, cr *credentials, volOptions *volumeOptions) error {
func mountKernel(mountPoint string, cr *util.Credentials, volOptions *volumeOptions) error {
if err := execCommandErr("modprobe", "ceph"); err != nil {
return err
}
@ -176,7 +176,7 @@ func mountKernel(mountPoint string, cr *credentials, volOptions *volumeOptions)
fmt.Sprintf("%s:%s", volOptions.Monitors, volOptions.RootPath),
mountPoint,
}
optionsStr := fmt.Sprintf("name=%s,secret=%s", cr.id, cr.key)
optionsStr := fmt.Sprintf("name=%s,secret=%s", cr.ID, cr.Key)
if volOptions.FsName != "" {
optionsStr += fmt.Sprintf(",mds_namespace=%s", volOptions.FsName)
}
@ -185,7 +185,7 @@ func mountKernel(mountPoint string, cr *credentials, volOptions *volumeOptions)
return execCommandErr("mount", args[:]...)
}
func (m *kernelMounter) mount(mountPoint string, cr *credentials, volOptions *volumeOptions) error {
func (m *kernelMounter) mount(mountPoint string, cr *util.Credentials, volOptions *volumeOptions) error {
if err := createMountPoint(mountPoint); err != nil {
return err
}

View File

@ -149,17 +149,17 @@ func newVolumeOptions(requestName string, size int64, volOptions, secret map[str
opts.RequestName = requestName
opts.Size = size
cr, err := getAdminCredentials(secret)
cr, err := util.GetAdminCredentials(secret)
if err != nil {
return nil, err
}
opts.FscID, err = getFscID(opts.Monitors, cr.id, cr.key, opts.FsName)
opts.FscID, err = getFscID(opts.Monitors, cr, opts.FsName)
if err != nil {
return nil, err
}
opts.MetadataPool, err = getMetadataPool(opts.Monitors, cr.id, cr.key, opts.FsName)
opts.MetadataPool, err = getMetadataPool(opts.Monitors, cr, opts.FsName)
if err != nil {
return nil, err
}
@ -194,23 +194,22 @@ func newVolumeOptionsFromVolID(volID string, volOpt, secrets map[string]string)
return nil, nil, errors.Wrapf(err, "failed to fetch monitor list using clusterID (%s)", vi.ClusterID)
}
cr, err := getAdminCredentials(secrets)
cr, err := util.GetAdminCredentials(secrets)
if err != nil {
return nil, nil, err
}
volOptions.FsName, err = getFsName(volOptions.Monitors, cr.id, cr.key, volOptions.FscID)
volOptions.FsName, err = getFsName(volOptions.Monitors, cr, volOptions.FscID)
if err != nil {
return nil, nil, err
}
volOptions.MetadataPool, err = getMetadataPool(volOptions.Monitors, cr.id, cr.key,
volOptions.FsName)
volOptions.MetadataPool, err = getMetadataPool(volOptions.Monitors, cr, volOptions.FsName)
if err != nil {
return nil, nil, err
}
volOptions.RequestName, _, err = volJournal.GetObjectUUIDData(volOptions.Monitors, cr.id, cr.key,
volOptions.RequestName, _, err = volJournal.GetObjectUUIDData(volOptions.Monitors, cr,
volOptions.MetadataPool, vi.ObjectUUID, false)
if err != nil {
return nil, nil, err
@ -250,7 +249,7 @@ func newVolumeOptionsFromVersion1Context(volID string, options, secrets map[stri
// check if there are mon values in secret and if so override option retrieved monitors from
// monitors in the secret
mon, err := getMonValFromSecret(secrets)
mon, err := util.GetMonValFromSecret(secrets)
if err == nil && len(mon) > 0 {
opts.Monitors = mon
}