Use --keyfile option to pass keys to all Ceph CLIs

Every Ceph CLI invoked at present receives the key via the
--key option, so the key is exposed to anyone on the host who can
inspect the process argument list (for example, with ps).

This commit addresses the issue by stashing the key in a temporary
file created on a tmpfs (an emptyDir volume backed by memory) and by
passing that file to every invoked CLI via the --keyfile option.

This prevents the key from appearing in the argument list of the
invoked program; only the path to the keyfile is visible there.

Fixes: #318

Signed-off-by: ShyamsundarR <srangana@redhat.com>
Author:       ShyamsundarR
Date:         2019-06-25 15:29:17 -04:00
Committed by: mergify[bot]
Parent:       c2835183e5
Commit:       bd204d7d45

24 changed files with 191 additions and 69 deletions
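
Before the diffs, a minimal sketch of the mechanism they implement. This is illustrative only: the helper name, monitor address, and the single ceph invocation are placeholders, and the real implementation is the Credentials change near the end of this commit.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"os/exec"
)

// keyDir is the directory the manifests below mount as a memory-backed
// emptyDir ("keys-tmp-dir"), so the keyfile never touches persistent storage.
const keyDir = "/tmp/csi/keys"

// withKeyFile writes the key to a temporary file under keyDir, runs fn with
// the file's path, and removes the file afterwards.
func withKeyFile(key string, fn func(keyFile string) error) error {
	f, err := ioutil.TempFile(keyDir, "keyfile-")
	if err != nil {
		return fmt.Errorf("error creating a temporary keyfile (%s)", err)
	}
	defer os.Remove(f.Name()) // the keyfile never outlives the call
	if _, err := f.WriteString(key); err != nil {
		f.Close()
		return err
	}
	if err := f.Close(); err != nil {
		return err
	}
	return fn(f.Name())
}

func main() {
	// Placeholder key; in the driver it comes from the CSI secrets.
	err := withKeyFile("<base64-key-from-secret>", func(keyFile string) error {
		// Only the keyfile path appears in the argument list (and hence in
		// `ps` output); the key itself stays inside the tmpfs-backed file.
		out, err := exec.Command("ceph", "-m", "mon1:6789", "--id", "admin",
			"--keyfile="+keyFile, "fs", "ls", "--format=json").CombinedOutput()
		fmt.Println(string(out))
		return err
	})
	if err != nil {
		fmt.Println(err)
	}
}
```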

View File

@ -106,6 +106,8 @@ spec:
readOnly: true
- name: ceph-csi-config
mountPath: /etc/ceph-csi-config/
- name: keys-tmp-dir
mountPath: /tmp/csi/keys
resources:
{{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }}
volumes:
@ -142,6 +144,10 @@ spec:
- name: ceph-csi-config
configMap:
name: {{ .Values.configMapName | quote }}
- name: keys-tmp-dir
emptyDir: {
medium: "Memory"
}
{{- if .Values.nodeplugin.affinity -}}
affinity:
{{ toYaml .Values.nodeplugin.affinity . | indent 8 }}

View File

@ -90,6 +90,8 @@ spec:
mountPath: "/rootfs"
- name: ceph-csi-config
mountPath: /etc/ceph-csi-config/
- name: keys-tmp-dir
mountPath: /tmp/csi/keys
resources:
{{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }}
volumes:
@ -102,6 +104,10 @@ spec:
- name: ceph-csi-config
configMap:
name: {{ .Values.configMapName | quote }}
- name: keys-tmp-dir
emptyDir: {
medium: "Memory"
}
{{- if .Values.provisioner.affinity -}}
affinity:
{{ toYaml .Values.provisioner.affinity . | indent 8 }}

View File

@ -94,6 +94,8 @@ spec:
mountPath: /dev
- name: ceph-csi-config
mountPath: /etc/ceph-csi-config/
- name: keys-tmp-dir
mountPath: /tmp/csi/keys
volumes:
- name: socket-dir
hostPath:
@ -111,3 +113,7 @@ spec:
- name: ceph-csi-config
configMap:
name: ceph-csi-config
- name: keys-tmp-dir
emptyDir: {
medium: "Memory"
}

View File

@ -90,6 +90,8 @@ spec:
mountPath: /dev
- name: ceph-csi-config
mountPath: /etc/ceph-csi-config/
- name: keys-tmp-dir
mountPath: /tmp/csi/keys
volumes:
- name: mount-cache-dir
emptyDir: {}
@ -121,3 +123,7 @@ spec:
- name: ceph-csi-config
configMap:
name: ceph-csi-config
- name: keys-tmp-dir
emptyDir: {
medium: "Memory"
}

View File

@ -105,6 +105,8 @@ spec:
readOnly: true
- name: ceph-csi-config
mountPath: /etc/ceph-csi-config/
- name: keys-tmp-dir
mountPath: /tmp/csi/keys
resources:
{{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }}
volumes:
@ -139,6 +141,10 @@ spec:
- name: ceph-csi-config
configMap:
name: {{ .Values.configMapName | quote }}
- name: keys-tmp-dir
emptyDir: {
medium: "Memory"
}
{{- if .Values.nodeplugin.affinity -}}
affinity:
{{ toYaml .Values.nodeplugin.affinity . | indent 8 }}

View File

@ -107,6 +107,8 @@ spec:
mountPath: "/rootfs"
- name: ceph-csi-config
mountPath: /etc/ceph-csi-config/
- name: keys-tmp-dir
mountPath: /tmp/csi/keys
resources:
{{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }}
volumes:
@ -119,6 +121,10 @@ spec:
- name: ceph-csi-config
configMap:
name: {{ .Values.configMapName | quote }}
- name: keys-tmp-dir
emptyDir: {
medium: "Memory"
}
{{- if .Values.provisioner.affinity -}}
affinity:
{{ toYaml .Values.provisioner.affinity . | indent 8 }}

View File

@ -109,6 +109,8 @@ spec:
readOnly: true
- name: ceph-csi-config
mountPath: /etc/ceph-csi-config/
- name: keys-tmp-dir
mountPath: /tmp/csi/keys
volumes:
- name: host-dev
hostPath:
@ -129,3 +131,7 @@ spec:
- name: ceph-csi-config
configMap:
name: ceph-csi-config
- name: keys-tmp-dir
emptyDir: {
medium: "Memory"
}

View File

@ -90,6 +90,8 @@ spec:
- name: mountpoint-dir
mountPath: /var/lib/kubelet/pods
mountPropagation: "Bidirectional"
- name: keys-tmp-dir
mountPath: /tmp/csi/keys
volumes:
- name: socket-dir
hostPath:
@ -122,3 +124,7 @@ spec:
- name: ceph-csi-config
configMap:
name: ceph-csi-config
- name: keys-tmp-dir
emptyDir: {
medium: "Memory"
}

View File

@ -41,7 +41,7 @@ func getFscID(monitors string, cr *util.Credentials, fsName string) (int64, erro
"ceph",
"-m", monitors,
"--id", cr.ID,
"--key="+cr.Key,
"--keyfile="+cr.KeyFile,
"-c", util.CephConfigPath,
"fs", "get", fsName, "--format=json",
)
@ -69,7 +69,7 @@ func getMetadataPool(monitors string, cr *util.Credentials, fsName string) (stri
"ceph",
"-m", monitors,
"--id", cr.ID,
"--key="+cr.Key,
"--keyfile="+cr.KeyFile,
"-c", util.CephConfigPath,
"fs", "ls", "--format=json",
)
@ -99,7 +99,7 @@ func getFsName(monitors string, cr *util.Credentials, fscID int64) (string, erro
"ceph",
"-m", monitors,
"--id", cr.ID,
"--key="+cr.Key,
"--keyfile="+cr.KeyFile,
"-c", util.CephConfigPath,
"fs", "dump", "--format=json",
)

View File

@ -40,7 +40,7 @@ func deleteCephUserDeprecated(volOptions *volumeOptions, adminCr *util.Credentia
return execCommandErr("ceph",
"-m", volOptions.Monitors,
"-n", adminID,
"--key="+adminCr.Key,
"--keyfile="+adminCr.KeyFile,
"-c", util.CephConfigPath,
"auth", "rm", userID,
)

View File

@ -46,10 +46,11 @@ var (
// createBackingVolume creates the backing subvolume and on any error cleans up any created entities
func (cs *ControllerServer) createBackingVolume(volOptions *volumeOptions, vID *volumeIdentifier, secret map[string]string) error {
cr, err := util.GetAdminCredentials(secret)
cr, err := util.NewAdminCredentials(secret)
if err != nil {
return status.Error(codes.InvalidArgument, err.Error())
}
defer cr.DeleteCredentials()
if err = createVolume(volOptions, cr, volumeID(vID.FsSubvolName), volOptions.Size); err != nil {
klog.Errorf("failed to create volume %s: %v", volOptions.RequestName, err)
@ -168,11 +169,12 @@ func (cs *ControllerServer) deleteVolumeDeprecated(req *csi.DeleteVolumeRequest)
// Deleting a volume requires admin credentials
cr, err := util.GetAdminCredentials(secrets)
cr, err := util.NewAdminCredentials(secrets)
if err != nil {
klog.Errorf("failed to retrieve admin credentials: %v", err)
return nil, status.Error(codes.InvalidArgument, err.Error())
}
defer cr.DeleteCredentials()
idLk := volumeIDLocker.Lock(string(volID))
defer volumeIDLocker.Unlock(idLk, string(volID))
@ -225,11 +227,12 @@ func (cs *ControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol
}
// Deleting a volume requires admin credentials
cr, err := util.GetAdminCredentials(secrets)
cr, err := util.NewAdminCredentials(secrets)
if err != nil {
klog.Errorf("failed to retrieve admin credentials: %v", err)
return nil, status.Error(codes.InvalidArgument, err.Error())
}
defer cr.DeleteCredentials()
// lock out parallel delete and create requests against the same volume name as we
// cleanup the subvolume and associated omaps for the same

View File

@ -49,10 +49,11 @@ func checkVolExists(volOptions *volumeOptions, secret map[string]string) (*volum
vid volumeIdentifier
)
cr, err := util.GetAdminCredentials(secret)
cr, err := util.NewAdminCredentials(secret)
if err != nil {
return nil, err
}
defer cr.DeleteCredentials()
imageUUID, err := volJournal.CheckReservation(volOptions.Monitors, cr,
volOptions.MetadataPool, volOptions.RequestName, "")
@ -86,10 +87,11 @@ func checkVolExists(volOptions *volumeOptions, secret map[string]string) (*volum
// undoVolReservation is a helper routine to undo a name reservation for a CSI VolumeName
func undoVolReservation(volOptions *volumeOptions, vid volumeIdentifier, secret map[string]string) error {
cr, err := util.GetAdminCredentials(secret)
cr, err := util.NewAdminCredentials(secret)
if err != nil {
return err
}
defer cr.DeleteCredentials()
err = volJournal.UndoReservation(volOptions.Monitors, cr, volOptions.MetadataPool,
vid.FsSubvolName, volOptions.RequestName)
@ -105,10 +107,11 @@ func reserveVol(volOptions *volumeOptions, secret map[string]string) (*volumeIde
vid volumeIdentifier
)
cr, err := util.GetAdminCredentials(secret)
cr, err := util.NewAdminCredentials(secret)
if err != nil {
return nil, err
}
defer cr.DeleteCredentials()
imageUUID, err := volJournal.ReserveName(volOptions.Monitors, cr,
volOptions.MetadataPool, volOptions.RequestName, "")

View File

@ -95,20 +95,22 @@ func mountOneCacheEntry(volOptions *volumeOptions, vid *volumeIdentifier, me *vo
volID := vid.VolumeID
if volOptions.ProvisionVolume {
cr, err = util.GetAdminCredentials(decodeCredentials(me.Secrets))
cr, err = util.NewAdminCredentials(decodeCredentials(me.Secrets))
if err != nil {
return err
}
defer cr.DeleteCredentials()
volOptions.RootPath, err = getVolumeRootPathCeph(volOptions, cr, volumeID(vid.FsSubvolName))
if err != nil {
return err
}
} else {
cr, err = util.GetUserCredentials(decodeCredentials(me.Secrets))
cr, err = util.NewUserCredentials(decodeCredentials(me.Secrets))
if err != nil {
return err
}
defer cr.DeleteCredentials()
}
err = cleanupMountPoint(me.StagingPath)

View File

@ -44,30 +44,25 @@ var (
func getCredentialsForVolume(volOptions *volumeOptions, req *csi.NodeStageVolumeRequest) (*util.Credentials, error) {
var (
err error
cr *util.Credentials
secrets = req.GetSecrets()
)
if volOptions.ProvisionVolume {
// The volume is provisioned dynamically, get the credentials directly from Ceph
// The volume is provisioned dynamically, use passed in admin credentials
// First, get admin credentials - those are needed for retrieving the user credentials
adminCr, err := util.GetAdminCredentials(secrets)
cr, err = util.NewAdminCredentials(secrets)
if err != nil {
return nil, fmt.Errorf("failed to get admin credentials from node stage secrets: %v", err)
}
cr = adminCr
} else {
// The volume is pre-made, credentials are in node stage secrets
userCr, err := util.GetUserCredentials(req.GetSecrets())
cr, err = util.NewUserCredentials(req.GetSecrets())
if err != nil {
return nil, fmt.Errorf("failed to get user credentials from node stage secrets: %v", err)
}
cr = userCr
}
return cr, nil
@ -150,6 +145,7 @@ func (*NodeServer) mount(volOptions *volumeOptions, req *csi.NodeStageVolumeRequ
klog.Errorf("failed to get ceph credentials for volume %s: %v", volID, err)
return status.Error(codes.Internal, err.Error())
}
defer cr.DeleteCredentials()
m, err := newMounter(volOptions)
if err != nil {

View File

@ -64,7 +64,7 @@ func getVolumeRootPathCeph(volOptions *volumeOptions, cr *util.Credentials, volI
"-m", volOptions.Monitors,
"-c", util.CephConfigPath,
"-n", cephEntityClientPrefix+cr.ID,
"--key="+cr.Key)
"--keyfile="+cr.KeyFile)
if err != nil {
klog.Errorf("failed to get the rootpath for the vol %s(%s)", string(volID), err)
@ -90,7 +90,7 @@ func createVolume(volOptions *volumeOptions, cr *util.Credentials, volID volumeI
"-m", volOptions.Monitors,
"-c", util.CephConfigPath,
"-n", cephEntityClientPrefix+cr.ID,
"--key="+cr.Key)
"--keyfile="+cr.KeyFile)
if err != nil {
klog.Errorf("failed to create subvolume group csi, for the vol %s(%s)", string(volID), err)
return err
@ -111,7 +111,7 @@ func createVolume(volOptions *volumeOptions, cr *util.Credentials, volID volumeI
"-m", volOptions.Monitors,
"-c", util.CephConfigPath,
"-n", cephEntityClientPrefix+cr.ID,
"--key="+cr.Key)
"--keyfile="+cr.KeyFile)
if err != nil {
klog.Errorf("failed to create subvolume %s(%s) in fs %s", string(volID), err, volOptions.FsName)
return err
@ -198,7 +198,7 @@ func purgeVolume(volID volumeID, cr *util.Credentials, volOptions *volumeOptions
"-m", volOptions.Monitors,
"-c", util.CephConfigPath,
"-n", cephEntityClientPrefix+cr.ID,
"--key="+cr.Key)
"--keyfile="+cr.KeyFile)
if err != nil {
klog.Errorf("failed to purge subvolume %s(%s) in fs %s", string(volID), err, volOptions.FsName)
return err

View File

@ -119,7 +119,7 @@ func mountFuse(mountPoint string, cr *util.Credentials, volOptions *volumeOption
mountPoint,
"-m", volOptions.Monitors,
"-c", util.CephConfigPath,
"-n", cephEntityClientPrefix + cr.ID, "--key=" + cr.Key,
"-n", cephEntityClientPrefix + cr.ID, "--keyfile=" + cr.KeyFile,
"-r", volOptions.RootPath,
"-o", "nonempty",
}
@ -176,7 +176,7 @@ func mountKernel(mountPoint string, cr *util.Credentials, volOptions *volumeOpti
fmt.Sprintf("%s:%s", volOptions.Monitors, volOptions.RootPath),
mountPoint,
}
optionsStr := fmt.Sprintf("name=%s,secret=%s", cr.ID, cr.Key)
optionsStr := fmt.Sprintf("name=%s,secretfile=%s", cr.ID, cr.KeyFile)
if volOptions.FsName != "" {
optionsStr += fmt.Sprintf(",mds_namespace=%s", volOptions.FsName)
}

View File

@ -149,10 +149,11 @@ func newVolumeOptions(requestName string, size int64, volOptions, secret map[str
opts.RequestName = requestName
opts.Size = size
cr, err := util.GetAdminCredentials(secret)
cr, err := util.NewAdminCredentials(secret)
if err != nil {
return nil, err
}
defer cr.DeleteCredentials()
opts.FscID, err = getFscID(opts.Monitors, cr, opts.FsName)
if err != nil {
@ -194,10 +195,11 @@ func newVolumeOptionsFromVolID(volID string, volOpt, secrets map[string]string)
return nil, nil, errors.Wrapf(err, "failed to fetch monitor list using clusterID (%s)", vi.ClusterID)
}
cr, err := util.GetAdminCredentials(secrets)
cr, err := util.NewAdminCredentials(secrets)
if err != nil {
return nil, nil, err
}
defer cr.DeleteCredentials()
volOptions.FsName, err = getFsName(volOptions.Monitors, cr, volOptions.FscID)
if err != nil {

View File

@ -112,10 +112,11 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
return nil, err
}
cr, err := util.GetUserCredentials(req.GetSecrets())
cr, err := util.NewUserCredentials(req.GetSecrets())
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
defer cr.DeleteCredentials()
rbdVol, err := cs.parseVolCreateRequest(req)
if err != nil {
@ -179,10 +180,11 @@ func (cs *ControllerServer) createBackingImage(rbdVol *rbdVolume, req *csi.Creat
return err
}
} else {
cr, err := util.GetUserCredentials(req.GetSecrets())
cr, err := util.NewUserCredentials(req.GetSecrets())
if err != nil {
return status.Error(codes.Internal, err.Error())
}
defer cr.DeleteCredentials()
err = createImage(rbdVol, volSizeMiB, cr)
if err != nil {
@ -206,10 +208,11 @@ func (cs *ControllerServer) checkSnapshot(req *csi.CreateVolumeRequest, rbdVol *
return status.Error(codes.InvalidArgument, "volume Snapshot ID cannot be empty")
}
cr, err := util.GetUserCredentials(req.GetSecrets())
cr, err := util.NewUserCredentials(req.GetSecrets())
if err != nil {
return status.Error(codes.Internal, err.Error())
}
defer cr.DeleteCredentials()
rbdSnap := &rbdSnapshot{}
if err = genSnapFromSnapID(rbdSnap, snapshotID, cr); err != nil {
@ -279,10 +282,11 @@ func (cs *ControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol
return nil, err
}
cr, err := util.GetUserCredentials(req.GetSecrets())
cr, err := util.NewUserCredentials(req.GetSecrets())
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
defer cr.DeleteCredentials()
// For now the image get unconditionally deleted, but here retention policy can be checked
volumeID := req.GetVolumeId()
@ -381,10 +385,11 @@ func (cs *ControllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS
return nil, err
}
cr, err := util.GetUserCredentials(req.GetSecrets())
cr, err := util.NewUserCredentials(req.GetSecrets())
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
defer cr.DeleteCredentials()
// Fetch source volume information
rbdVol := new(rbdVolume)
@ -533,10 +538,11 @@ func (cs *ControllerServer) DeleteSnapshot(ctx context.Context, req *csi.DeleteS
return nil, err
}
cr, err := util.GetUserCredentials(req.GetSecrets())
cr, err := util.NewUserCredentials(req.GetSecrets())
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
defer cr.DeleteCredentials()
snapshotID := req.GetSnapshotId()
if snapshotID == "" {

View File

@ -62,10 +62,11 @@ func (ns *NodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVol
volID := req.GetVolumeId()
cr, err := util.GetUserCredentials(req.GetSecrets())
cr, err := util.NewUserCredentials(req.GetSecrets())
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
defer cr.DeleteCredentials()
isLegacyVolume := false
volName, err := getVolumeName(req.GetVolumeId())

View File

@ -277,7 +277,7 @@ func createPath(volOpt *rbdVolume, cr *util.Credentials) (string, error) {
}
output, err := execCommand(cmdName, []string{
"map", imagePath, "--id", cr.ID, "-m", volOpt.Monitors, "--key=" + cr.Key})
"map", imagePath, "--id", cr.ID, "-m", volOpt.Monitors, "--keyfile=" + cr.KeyFile})
if err != nil {
klog.Warningf("rbd: map error %v, rbd output: %s", err, string(output))
return "", fmt.Errorf("rbd: map failed %v, rbd output: %s", err, string(output))

View File

@ -116,7 +116,7 @@ func createImage(pOpts *rbdVolume, volSz int64, cr *util.Credentials) error {
} else {
klog.V(4).Infof("rbd: create %s size %s format %s using mon %s, pool %s", image, volSzMiB, pOpts.ImageFormat, pOpts.Monitors, pOpts.Pool)
}
args := []string{"create", image, "--size", volSzMiB, "--pool", pOpts.Pool, "--id", cr.ID, "-m", pOpts.Monitors, "--key=" + cr.Key, "--image-format", pOpts.ImageFormat}
args := []string{"create", image, "--size", volSzMiB, "--pool", pOpts.Pool, "--id", cr.ID, "-m", pOpts.Monitors, "--keyfile=" + cr.KeyFile, "--image-format", pOpts.ImageFormat}
if pOpts.ImageFormat == rbdImageFormat2 {
args = append(args, "--image-feature", pOpts.ImageFeatures)
}
@ -138,7 +138,7 @@ func rbdStatus(pOpts *rbdVolume, cr *util.Credentials) (bool, string, error) {
image := pOpts.RbdImageName
klog.V(4).Infof("rbd: status %s using mon %s, pool %s", image, pOpts.Monitors, pOpts.Pool)
args := []string{"status", image, "--pool", pOpts.Pool, "-m", pOpts.Monitors, "--id", cr.ID, "--key=" + cr.Key}
args := []string{"status", image, "--pool", pOpts.Pool, "-m", pOpts.Monitors, "--id", cr.ID, "--keyfile=" + cr.KeyFile}
cmd, err := execCommand("rbd", args)
output = string(cmd)
@ -179,7 +179,7 @@ func deleteImage(pOpts *rbdVolume, cr *util.Credentials) error {
klog.V(4).Infof("rbd: rm %s using mon %s, pool %s", image, pOpts.Monitors, pOpts.Pool)
args := []string{"rm", image, "--pool", pOpts.Pool, "--id", cr.ID, "-m", pOpts.Monitors,
"--key=" + cr.Key}
"--keyfile=" + cr.KeyFile}
output, err = execCommand("rbd", args)
if err != nil {
klog.Errorf("failed to delete rbd image: %v, command output: %s", err, string(output))
@ -486,7 +486,7 @@ func protectSnapshot(pOpts *rbdSnapshot, cr *util.Credentials) error {
klog.V(4).Infof("rbd: snap protect %s using mon %s, pool %s ", image, pOpts.Monitors, pOpts.Pool)
args := []string{"snap", "protect", "--pool", pOpts.Pool, "--snap", snapName, image, "--id",
cr.ID, "-m", pOpts.Monitors, "--key=" + cr.Key}
cr.ID, "-m", pOpts.Monitors, "--keyfile=" + cr.KeyFile}
output, err := execCommand("rbd", args)
@ -505,7 +505,7 @@ func createSnapshot(pOpts *rbdSnapshot, cr *util.Credentials) error {
klog.V(4).Infof("rbd: snap create %s using mon %s, pool %s", image, pOpts.Monitors, pOpts.Pool)
args := []string{"snap", "create", "--pool", pOpts.Pool, "--snap", snapName, image,
"--id", cr.ID, "-m", pOpts.Monitors, "--key=" + cr.Key}
"--id", cr.ID, "-m", pOpts.Monitors, "--keyfile=" + cr.KeyFile}
output, err := execCommand("rbd", args)
@ -524,7 +524,7 @@ func unprotectSnapshot(pOpts *rbdSnapshot, cr *util.Credentials) error {
klog.V(4).Infof("rbd: snap unprotect %s using mon %s, pool %s", image, pOpts.Monitors, pOpts.Pool)
args := []string{"snap", "unprotect", "--pool", pOpts.Pool, "--snap", snapName, image, "--id",
cr.ID, "-m", pOpts.Monitors, "--key=" + cr.Key}
cr.ID, "-m", pOpts.Monitors, "--keyfile=" + cr.KeyFile}
output, err := execCommand("rbd", args)
@ -543,7 +543,7 @@ func deleteSnapshot(pOpts *rbdSnapshot, cr *util.Credentials) error {
klog.V(4).Infof("rbd: snap rm %s using mon %s, pool %s", image, pOpts.Monitors, pOpts.Pool)
args := []string{"snap", "rm", "--pool", pOpts.Pool, "--snap", snapName, image, "--id",
cr.ID, "-m", pOpts.Monitors, "--key=" + cr.Key}
cr.ID, "-m", pOpts.Monitors, "--keyfile=" + cr.KeyFile}
output, err := execCommand("rbd", args)
@ -567,7 +567,7 @@ func restoreSnapshot(pVolOpts *rbdVolume, pSnapOpts *rbdSnapshot, cr *util.Crede
klog.V(4).Infof("rbd: clone %s using mon %s, pool %s", image, pVolOpts.Monitors, pVolOpts.Pool)
args := []string{"clone", pSnapOpts.Pool + "/" + pSnapOpts.RbdImageName + "@" + snapName,
pVolOpts.Pool + "/" + image, "--id", cr.ID, "-m", pVolOpts.Monitors, "--key=" + cr.Key}
pVolOpts.Pool + "/" + image, "--id", cr.ID, "-m", pVolOpts.Monitors, "--keyfile=" + cr.KeyFile}
output, err := execCommand("rbd", args)
@ -624,7 +624,7 @@ func getImageInfo(monitors string, cr *util.Credentials, poolName, imageName str
"rbd",
"-m", monitors,
"--id", cr.ID,
"--key="+cr.Key,
"--keyfile="+cr.KeyFile,
"-c", util.CephConfigPath,
"--format="+"json",
"info", poolName+"/"+imageName)
@ -673,7 +673,7 @@ func getSnapInfo(monitors string, cr *util.Credentials, poolName, imageName, sna
"rbd",
"-m", monitors,
"--id", cr.ID,
"--key="+cr.Key,
"--keyfile="+cr.KeyFile,
"-c", util.CephConfigPath,
"--format="+"json",
"snap", "ls", poolName+"/"+imageName)

View File

@ -63,7 +63,7 @@ func getPools(monitors string, cr *Credentials) ([]cephStoragePoolSummary, error
"ceph",
"-m", monitors,
"--id", cr.ID,
"--key="+cr.Key,
"--keyfile="+cr.KeyFile,
"-c", CephConfigPath,
"-f", "json",
"osd", "lspools")
@ -122,7 +122,7 @@ func SetOMapKeyValue(monitors string, cr *Credentials, poolName, namespace, oMap
args := []string{
"-m", monitors,
"--id", cr.ID,
"--key=" + cr.Key,
"--keyfile=" + cr.KeyFile,
"-c", CephConfigPath,
"-p", poolName,
"setomapval", oMapName, oMapKey, keyValue,
@ -157,7 +157,7 @@ func GetOMapValue(monitors string, cr *Credentials, poolName, namespace, oMapNam
args := []string{
"-m", monitors,
"--id", cr.ID,
"--key=" + cr.Key,
"--keyfile=" + cr.KeyFile,
"-c", CephConfigPath,
"-p", poolName,
"getomapval", oMapName, oMapKey, tmpFile.Name(),
@ -199,7 +199,7 @@ func RemoveOMapKey(monitors string, cr *Credentials, poolName, namespace, oMapNa
args := []string{
"-m", monitors,
"--id", cr.ID,
"--key=" + cr.Key,
"--keyfile=" + cr.KeyFile,
"-c", CephConfigPath,
"-p", poolName,
"rmomapkey", oMapName, oMapKey,
@ -227,7 +227,7 @@ func CreateObject(monitors string, cr *Credentials, poolName, namespace, objectN
args := []string{
"-m", monitors,
"--id", cr.ID,
"--key=" + cr.Key,
"--keyfile=" + cr.KeyFile,
"-c", CephConfigPath,
"-p", poolName,
"create", objectName,
@ -257,7 +257,7 @@ func RemoveObject(monitors string, cr *Credentials, poolName, namespace, oMapNam
args := []string{
"-m", monitors,
"--id", cr.ID,
"--key=" + cr.Key,
"--keyfile=" + cr.KeyFile,
"-c", CephConfigPath,
"-p", poolName,
"rm", oMapName,

View File

@ -18,22 +18,54 @@ package util
import (
"fmt"
"io/ioutil"
"os"
)
const (
credUserID = "userID"
credUserKey = "userKey"
credAdminID = "adminID"
credAdminKey = "adminKey"
credMonitors = "monitors"
credUserID = "userID"
credUserKey = "userKey"
credAdminID = "adminID"
credAdminKey = "adminKey"
credMonitors = "monitors"
tmpKeyFileLocation = "/tmp/csi/keys"
tmpKeyFileNamePrefix = "keyfile-"
)
type Credentials struct {
ID string
Key string
ID string
KeyFile string
}
func getCredentials(idField, keyField string, secrets map[string]string) (*Credentials, error) {
func storeKey(key string) (string, error) {
tmpfile, err := ioutil.TempFile(tmpKeyFileLocation, tmpKeyFileNamePrefix)
if err != nil {
return "", fmt.Errorf("error creating a temporary keyfile (%s)", err)
}
defer func() {
if err != nil {
os.Remove(tmpfile.Name())
}
}()
if _, err = tmpfile.Write([]byte(key)); err != nil {
return "", fmt.Errorf("error writing key to temporary keyfile (%s)", err)
}
keyFile := tmpfile.Name()
if keyFile == "" {
err = fmt.Errorf("error reading temporary filename for key (%s)", err)
return "", err
}
if err = tmpfile.Close(); err != nil {
return "", fmt.Errorf("error closing temporary filename (%s)", err)
}
return keyFile, nil
}
func newCredentialsFromSecret(idField, keyField string, secrets map[string]string) (*Credentials, error) {
var (
c = &Credentials{}
ok bool
@ -43,19 +75,41 @@ func getCredentials(idField, keyField string, secrets map[string]string) (*Crede
return nil, fmt.Errorf("missing ID field '%s' in secrets", idField)
}
if c.Key, ok = secrets[keyField]; !ok {
key := secrets[keyField]
if key == "" {
return nil, fmt.Errorf("missing key field '%s' in secrets", keyField)
}
return c, nil
keyFile, err := storeKey(key)
if err == nil {
c.KeyFile = keyFile
}
return c, err
}
func GetUserCredentials(secrets map[string]string) (*Credentials, error) {
return getCredentials(credUserID, credUserKey, secrets)
func (cr *Credentials) DeleteCredentials() {
os.Remove(cr.KeyFile)
}
func GetAdminCredentials(secrets map[string]string) (*Credentials, error) {
return getCredentials(credAdminID, credAdminKey, secrets)
func NewUserCredentials(secrets map[string]string) (*Credentials, error) {
return newCredentialsFromSecret(credUserID, credUserKey, secrets)
}
func NewAdminCredentials(secrets map[string]string) (*Credentials, error) {
return newCredentialsFromSecret(credAdminID, credAdminKey, secrets)
}
func NewCredentials(id, key string) (*Credentials, error) {
var c = &Credentials{}
c.ID = id
keyFile, err := storeKey(key)
if err == nil {
c.KeyFile = keyFile
}
return c, err
}
func GetMonValFromSecret(secrets map[string]string) (string, error) {
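
The call sites changed above all converge on the same pattern. A hedged sketch of that pattern follows; the surrounding function and the import paths are illustrative, not code from this commit.

```go
package example

import (
	"os/exec"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"

	"github.com/ceph/ceph-csi/pkg/util" // assumed import path for this repo's util package
)

// listFilesystems illustrates the intended usage of the new credentials API:
// build Credentials from the CSI secrets (which writes the key to a temporary
// keyfile), defer its cleanup, and pass --keyfile rather than --key.
func listFilesystems(monitors string, secrets map[string]string) (string, error) {
	cr, err := util.NewAdminCredentials(secrets)
	if err != nil {
		return "", status.Error(codes.InvalidArgument, err.Error())
	}
	// Remove the temporary keyfile as soon as the CLI call is done.
	defer cr.DeleteCredentials()

	out, err := exec.Command("ceph",
		"-m", monitors,
		"--id", cr.ID,
		"--keyfile="+cr.KeyFile,
		"-c", util.CephConfigPath,
		"fs", "ls", "--format=json").CombinedOutput()
	return string(out), err
}
```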

View File

@ -22,15 +22,17 @@ import (
const (
keyArg = "--key="
keyFileArg = "--keyfile="
secretArg = "secret="
optionsArgSeparator = ','
strippedKey = "--key=***stripped***"
strippedKeyFile = "--keyfile=***stripped***"
strippedSecret = "secret=***stripped***"
)
// StripSecretInArgs strips values of either "--key" or "secret=".
// StripSecretInArgs strips values of either "--key"/"--keyfile" or "secret=".
// `args` is left unchanged.
// Expects only one occurrence of either "--key" or "secret=".
// Expects only one occurrence of either "--key"/"--keyfile" or "secret=".
func StripSecretInArgs(args []string) []string {
out := make([]string, len(args))
copy(out, args)
@ -48,6 +50,11 @@ func stripKey(out []string) bool {
out[i] = strippedKey
return true
}
if strings.HasPrefix(out[i], keyFileArg) {
out[i] = strippedKeyFile
return true
}
}
return false
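
For completeness, a small usage sketch of the stripping helper above, assuming it is the exported StripSecretInArgs in the same util package; the keyfile path in the example is made up.

```go
package main

import (
	"fmt"

	"github.com/ceph/ceph-csi/pkg/util" // assumed import path
)

func main() {
	args := []string{
		"map", "pool/image",
		"--id", "admin",
		"-m", "mon1:6789",
		"--keyfile=/tmp/csi/keys/keyfile-123456789", // hypothetical temp keyfile path
	}
	// StripSecretInArgs returns a copy that is safe for logging; args itself
	// is left unchanged.
	fmt.Println(util.StripSecretInArgs(args))
	// The printed slice contains "--keyfile=***stripped***" in place of the real path.
}
```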