Use --keyfile option to pass keys to all Ceph CLIs
Every Ceph CLI that is invoked currently receives the key via the --key option, which leaves the key exposed on the host to anyone who can inspect process arguments (with a ps command or similar means).

This commit addresses the issue by stashing the key in a tmp file, which is created on a tmpfs (or an emptyDir backed by memory), and passing that file to every invoked CLI via the --keyfile option. As a result, the key is no longer visible in the argument list of the invoked program on the system.

Fixes: #318

Signed-off-by: ShyamsundarR <srangana@redhat.com>
Committed by: mergify[bot]
Parent: c2835183e5
Commit: bd204d7d45
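
The helpers that stash the key live in the util package and are not part of this diff. A minimal sketch of the approach the commit message describes, assuming a tmpfs-backed directory such as /tmp/csi/keys and a hypothetical storeKey helper, could look like:

    package util

    import (
        "io/ioutil"
        "os"
    )

    // keyDir is assumed to be backed by tmpfs (or a memory-backed emptyDir),
    // so the stashed key never reaches persistent storage.
    const keyDir = "/tmp/csi/keys"

    // storeKey writes the key to a private tmp file and returns its path;
    // callers then pass the path to Ceph CLIs via --keyfile=<path>.
    func storeKey(key string) (string, error) {
        // TempFile creates the file with 0600 permissions, so only the
        // CSI driver process can read the stashed key.
        f, err := ioutil.TempFile(keyDir, "keyfile-")
        if err != nil {
            return "", err
        }
        defer f.Close()

        if _, err := f.WriteString(key); err != nil {
            os.Remove(f.Name())
            return "", err
        }
        return f.Name(), nil
    }
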
@@ -41,7 +41,7 @@ func getFscID(monitors string, cr *util.Credentials, fsName string) (int64, erro
         "ceph",
         "-m", monitors,
         "--id", cr.ID,
-        "--key="+cr.Key,
+        "--keyfile="+cr.KeyFile,
         "-c", util.CephConfigPath,
         "fs", "get", fsName, "--format=json",
     )
@@ -69,7 +69,7 @@ func getMetadataPool(monitors string, cr *util.Credentials, fsName string) (stri
         "ceph",
         "-m", monitors,
         "--id", cr.ID,
-        "--key="+cr.Key,
+        "--keyfile="+cr.KeyFile,
         "-c", util.CephConfigPath,
         "fs", "ls", "--format=json",
     )
@@ -99,7 +99,7 @@ func getFsName(monitors string, cr *util.Credentials, fscID int64) (string, erro
         "ceph",
         "-m", monitors,
         "--id", cr.ID,
-        "--key="+cr.Key,
+        "--keyfile="+cr.KeyFile,
         "-c", util.CephConfigPath,
         "fs", "dump", "--format=json",
     )
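
With these changes the secret disappears from the spawned process's argument list; anyone running ps or reading /proc/<pid>/cmdline now sees only a path. Illustrative values, not taken from the diff:

    before: ceph -m mon1:6789 --id admin --key=AQD3bv1cq... -c /etc/ceph/ceph.conf fs get myfs --format=json
    after:  ceph -m mon1:6789 --id admin --keyfile=/tmp/csi/keys/keyfile-042 -c /etc/ceph/ceph.conf fs get myfs --format=json
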
@@ -40,7 +40,7 @@ func deleteCephUserDeprecated(volOptions *volumeOptions, adminCr *util.Credentia
     return execCommandErr("ceph",
         "-m", volOptions.Monitors,
         "-n", adminID,
-        "--key="+adminCr.Key,
+        "--keyfile="+adminCr.KeyFile,
         "-c", util.CephConfigPath,
         "auth", "rm", userID,
     )
@@ -46,10 +46,11 @@ var (
 
 // createBackingVolume creates the backing subvolume and on any error cleans up any created entities
 func (cs *ControllerServer) createBackingVolume(volOptions *volumeOptions, vID *volumeIdentifier, secret map[string]string) error {
-    cr, err := util.GetAdminCredentials(secret)
+    cr, err := util.NewAdminCredentials(secret)
     if err != nil {
         return status.Error(codes.InvalidArgument, err.Error())
     }
+    defer cr.DeleteCredentials()
 
     if err = createVolume(volOptions, cr, volumeID(vID.FsSubvolName), volOptions.Size); err != nil {
         klog.Errorf("failed to create volume %s: %v", volOptions.RequestName, err)
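
NewAdminCredentials and the deferred DeleteCredentials seen here are defined in the util package, which this diff does not show. A rough sketch of the intended lifecycle, continuing the hypothetical util package and storeKey helper above (field names and secret key names are assumptions):

    // Credentials carries the entity ID and the path of the stashed keyfile.
    type Credentials struct {
        ID      string
        KeyFile string // passed to Ceph CLIs as --keyfile=<path>
    }

    // NewAdminCredentials stashes the admin key on tmpfs instead of keeping
    // it around for --key arguments.
    func NewAdminCredentials(secrets map[string]string) (*Credentials, error) {
        keyFile, err := storeKey(secrets["adminKey"]) // secret names assumed
        if err != nil {
            return nil, err
        }
        return &Credentials{ID: secrets["adminID"], KeyFile: keyFile}, nil
    }

    // DeleteCredentials removes the stashed keyfile; callers defer it so the
    // key exists on tmpfs only for the duration of the operation.
    func (cr *Credentials) DeleteCredentials() {
        os.Remove(cr.KeyFile)
    }
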
@@ -168,11 +169,12 @@ func (cs *ControllerServer) deleteVolumeDeprecated(req *csi.DeleteVolumeRequest)
 
     // Deleting a volume requires admin credentials
 
-    cr, err := util.GetAdminCredentials(secrets)
+    cr, err := util.NewAdminCredentials(secrets)
     if err != nil {
         klog.Errorf("failed to retrieve admin credentials: %v", err)
         return nil, status.Error(codes.InvalidArgument, err.Error())
     }
+    defer cr.DeleteCredentials()
 
     idLk := volumeIDLocker.Lock(string(volID))
     defer volumeIDLocker.Unlock(idLk, string(volID))
@@ -225,11 +227,12 @@ func (cs *ControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol
     }
 
     // Deleting a volume requires admin credentials
-    cr, err := util.GetAdminCredentials(secrets)
+    cr, err := util.NewAdminCredentials(secrets)
     if err != nil {
         klog.Errorf("failed to retrieve admin credentials: %v", err)
         return nil, status.Error(codes.InvalidArgument, err.Error())
     }
+    defer cr.DeleteCredentials()
 
     // lock out parallel delete and create requests against the same volume name as we
     // cleanup the subvolume and associated omaps for the same
@@ -49,10 +49,11 @@ func checkVolExists(volOptions *volumeOptions, secret map[string]string) (*volum
         vid volumeIdentifier
     )
 
-    cr, err := util.GetAdminCredentials(secret)
+    cr, err := util.NewAdminCredentials(secret)
     if err != nil {
         return nil, err
     }
+    defer cr.DeleteCredentials()
 
     imageUUID, err := volJournal.CheckReservation(volOptions.Monitors, cr,
         volOptions.MetadataPool, volOptions.RequestName, "")
@@ -86,10 +87,11 @@ func checkVolExists(volOptions *volumeOptions, secret map[string]string) (*volum
 
 // undoVolReservation is a helper routine to undo a name reservation for a CSI VolumeName
 func undoVolReservation(volOptions *volumeOptions, vid volumeIdentifier, secret map[string]string) error {
-    cr, err := util.GetAdminCredentials(secret)
+    cr, err := util.NewAdminCredentials(secret)
     if err != nil {
         return err
     }
+    defer cr.DeleteCredentials()
 
     err = volJournal.UndoReservation(volOptions.Monitors, cr, volOptions.MetadataPool,
         vid.FsSubvolName, volOptions.RequestName)
@@ -105,10 +107,11 @@ func reserveVol(volOptions *volumeOptions, secret map[string]string) (*volumeIde
         vid volumeIdentifier
     )
 
-    cr, err := util.GetAdminCredentials(secret)
+    cr, err := util.NewAdminCredentials(secret)
     if err != nil {
         return nil, err
     }
+    defer cr.DeleteCredentials()
 
     imageUUID, err := volJournal.ReserveName(volOptions.Monitors, cr,
         volOptions.MetadataPool, volOptions.RequestName, "")
@@ -95,20 +95,22 @@ func mountOneCacheEntry(volOptions *volumeOptions, vid *volumeIdentifier, me *vo
     volID := vid.VolumeID
 
     if volOptions.ProvisionVolume {
-        cr, err = util.GetAdminCredentials(decodeCredentials(me.Secrets))
+        cr, err = util.NewAdminCredentials(decodeCredentials(me.Secrets))
         if err != nil {
             return err
         }
+        defer cr.DeleteCredentials()
 
         volOptions.RootPath, err = getVolumeRootPathCeph(volOptions, cr, volumeID(vid.FsSubvolName))
         if err != nil {
             return err
         }
     } else {
-        cr, err = util.GetUserCredentials(decodeCredentials(me.Secrets))
+        cr, err = util.NewUserCredentials(decodeCredentials(me.Secrets))
         if err != nil {
             return err
         }
+        defer cr.DeleteCredentials()
     }
 
     err = cleanupMountPoint(me.StagingPath)
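
Note that both DeleteCredentials defers above sit inside if/else branches; in Go a defer registered in a branch still runs only when the enclosing function returns, so the keyfile remains available to the mount logic that follows the branch. A minimal illustration:

    package main

    import "fmt"

    func main() {
        if true {
            defer fmt.Println("second: runs when main returns")
        }
        fmt.Println("first: branch has already ended")
    }
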
@@ -44,30 +44,25 @@ var (
 
 func getCredentialsForVolume(volOptions *volumeOptions, req *csi.NodeStageVolumeRequest) (*util.Credentials, error) {
     var (
         err     error
         cr      *util.Credentials
         secrets = req.GetSecrets()
     )
 
     if volOptions.ProvisionVolume {
-        // The volume is provisioned dynamically, get the credentials directly from Ceph
-
-        // First, get admin credentials - those are needed for retrieving the user credentials
-
-        adminCr, err := util.GetAdminCredentials(secrets)
+        // The volume is provisioned dynamically, use passed in admin credentials
+        cr, err = util.NewAdminCredentials(secrets)
         if err != nil {
             return nil, fmt.Errorf("failed to get admin credentials from node stage secrets: %v", err)
         }
-
-        cr = adminCr
     } else {
         // The volume is pre-made, credentials are in node stage secrets
 
-        userCr, err := util.GetUserCredentials(req.GetSecrets())
+        cr, err = util.NewUserCredentials(req.GetSecrets())
         if err != nil {
             return nil, fmt.Errorf("failed to get user credentials from node stage secrets: %v", err)
         }
-
-        cr = userCr
     }
 
     return cr, nil
@@ -150,6 +145,7 @@ func (*NodeServer) mount(volOptions *volumeOptions, req *csi.NodeStageVolumeRequ
         klog.Errorf("failed to get ceph credentials for volume %s: %v", volID, err)
         return status.Error(codes.Internal, err.Error())
     }
+    defer cr.DeleteCredentials()
 
     m, err := newMounter(volOptions)
     if err != nil {
@@ -64,7 +64,7 @@ func getVolumeRootPathCeph(volOptions *volumeOptions, cr *util.Credentials, volI
         "-m", volOptions.Monitors,
         "-c", util.CephConfigPath,
         "-n", cephEntityClientPrefix+cr.ID,
-        "--key="+cr.Key)
+        "--keyfile="+cr.KeyFile)
 
     if err != nil {
         klog.Errorf("failed to get the rootpath for the vol %s(%s)", string(volID), err)
@@ -90,7 +90,7 @@ func createVolume(volOptions *volumeOptions, cr *util.Credentials, volID volumeI
         "-m", volOptions.Monitors,
         "-c", util.CephConfigPath,
         "-n", cephEntityClientPrefix+cr.ID,
-        "--key="+cr.Key)
+        "--keyfile="+cr.KeyFile)
     if err != nil {
         klog.Errorf("failed to create subvolume group csi, for the vol %s(%s)", string(volID), err)
         return err
@@ -111,7 +111,7 @@ func createVolume(volOptions *volumeOptions, cr *util.Credentials, volID volumeI
         "-m", volOptions.Monitors,
         "-c", util.CephConfigPath,
         "-n", cephEntityClientPrefix+cr.ID,
-        "--key="+cr.Key)
+        "--keyfile="+cr.KeyFile)
     if err != nil {
         klog.Errorf("failed to create subvolume %s(%s) in fs %s", string(volID), err, volOptions.FsName)
         return err
@@ -198,7 +198,7 @@ func purgeVolume(volID volumeID, cr *util.Credentials, volOptions *volumeOptions
         "-m", volOptions.Monitors,
         "-c", util.CephConfigPath,
         "-n", cephEntityClientPrefix+cr.ID,
-        "--key="+cr.Key)
+        "--keyfile="+cr.KeyFile)
     if err != nil {
         klog.Errorf("failed to purge subvolume %s(%s) in fs %s", string(volID), err, volOptions.FsName)
         return err
@@ -119,7 +119,7 @@ func mountFuse(mountPoint string, cr *util.Credentials, volOptions *volumeOption
         mountPoint,
         "-m", volOptions.Monitors,
         "-c", util.CephConfigPath,
-        "-n", cephEntityClientPrefix + cr.ID, "--key=" + cr.Key,
+        "-n", cephEntityClientPrefix + cr.ID, "--keyfile=" + cr.KeyFile,
         "-r", volOptions.RootPath,
         "-o", "nonempty",
     }
@@ -176,7 +176,7 @@ func mountKernel(mountPoint string, cr *util.Credentials, volOptions *volumeOpti
         fmt.Sprintf("%s:%s", volOptions.Monitors, volOptions.RootPath),
         mountPoint,
     }
-    optionsStr := fmt.Sprintf("name=%s,secret=%s", cr.ID, cr.Key)
+    optionsStr := fmt.Sprintf("name=%s,secretfile=%s", cr.ID, cr.KeyFile)
     if volOptions.FsName != "" {
         optionsStr += fmt.Sprintf(",mds_namespace=%s", volOptions.FsName)
     }
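
Both mount paths get the same treatment: ceph-fuse accepts the path through its --keyfile flag, while the kernel client reads it through the long-standing secretfile= mount option, so neither mount exposes the raw secret in its arguments. The resulting kernel mount option string looks like this (illustrative values):

    name=admin,secretfile=/tmp/csi/keys/keyfile-042,mds_namespace=myfs
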
@@ -149,10 +149,11 @@ func newVolumeOptions(requestName string, size int64, volOptions, secret map[str
     opts.RequestName = requestName
     opts.Size = size
 
-    cr, err := util.GetAdminCredentials(secret)
+    cr, err := util.NewAdminCredentials(secret)
     if err != nil {
         return nil, err
     }
+    defer cr.DeleteCredentials()
 
     opts.FscID, err = getFscID(opts.Monitors, cr, opts.FsName)
     if err != nil {
@@ -194,10 +195,11 @@ func newVolumeOptionsFromVolID(volID string, volOpt, secrets map[string]string)
         return nil, nil, errors.Wrapf(err, "failed to fetch monitor list using clusterID (%s)", vi.ClusterID)
     }
 
-    cr, err := util.GetAdminCredentials(secrets)
+    cr, err := util.NewAdminCredentials(secrets)
     if err != nil {
         return nil, nil, err
     }
+    defer cr.DeleteCredentials()
 
     volOptions.FsName, err = getFsName(volOptions.Monitors, cr, volOptions.FscID)
     if err != nil {