Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-13 02:33:34 +00:00)
Addressed using k8s client APIs to fetch secrets

Based on the review comments, addressed the following:
- Moved away from having to update the pod with volumes when a new Ceph cluster is added for provisioning via the CSI driver
- The above now uses k8s APIs to fetch secrets
- TBD: Need to add a watch mechanism such that these secrets can be cached and updated when changed
- Folded the Ceph configuration and ID/key config map and secrets into a single secret
- Provided the ability to read the same config via mapped or created files within the pod

Tests:
- Ran PV creation/deletion/attach/use using the new scheme StorageClass
- Ran PV creation/deletion/attach/use using the older scheme to ensure nothing is broken
- Did not execute snapshot related tests

Signed-off-by: ShyamsundarR <srangana@redhat.com>
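For context on the first two bullets, below is a minimal sketch of what fetching a credential through the Kubernetes client APIs looks like with client-go. The package, function, namespace, secret name, and data key are all illustrative placeholders, not the driver's actual identifiers; signatures follow current client-go, which may differ from what the tree vendored at the time.

// Sketch only: reading a Ceph user key out of a Kubernetes Secret via
// client-go instead of a file mounted into the pod. All names here
// (secrets, fetchCephKey, namespace, secretName, userID) are illustrative.
package secrets

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func fetchCephKey(namespace, secretName, userID string) (string, error) {
	// The CSI pods run in-cluster, so in-cluster config suffices.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		return "", err
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return "", err
	}
	// One secret now folds the Ceph config and the ID/key pairs together.
	secret, err := cs.CoreV1().Secrets(namespace).Get(context.TODO(), secretName, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	key, ok := secret.Data[userID]
	if !ok {
		return "", fmt.Errorf("no key for user %q in secret %q", userID, secretName)
	}
	return string(key), nil
}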
commit 2064e674a4
parent 97f8c4b677
committed by mergify[bot]
@@ -47,9 +47,8 @@ type Driver struct {
 var (
 	version = "1.0.0"
-	// Fc is the global file config type, and stores the top level directory
-	// under which rest of the Ceph config files can be found
-	Fc util.FileConfig
+	// ConfStore is the global config store
+	ConfStore *util.ConfigStore
 )
 
 // NewDriver returns new rbd driver
@@ -94,8 +93,11 @@ func (r *Driver) Run(driverName, nodeID, endpoint string, containerized bool, co
 	var err error
 	klog.Infof("Driver: %v version: %v", driverName, version)
 
-	// Initialize fileconfig base path
-	Fc.BasePath = configroot
+	// Initialize config store
+	ConfStore, err = util.NewConfigStore(configroot)
+	if err != nil {
+		klog.Fatalln("Failed to initialize config store.")
+	}
 
 	// Initialize default library driver
 	r.cd = csicommon.NewCSIDriver(driverName, version, nodeID)
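A rough sketch of how the config-store calls that appear in this change chain together at runtime: one NewConfigStore at driver start, then per-cluster lookups keyed by the cluster fsid. The method names (NewConfigStore, Mons, AdminID, CredentialForUser) are taken from the diff itself; the wrapper function is illustrative only and assumes the same util and klog imports as the file being patched.

// Illustrative wiring of the ConfigStore methods used in this change;
// describeCluster is a sketch, not driver code.
func describeCluster(configroot, fsID string) error {
	store, err := util.NewConfigStore(configroot)
	if err != nil {
		return err
	}
	mons, err := store.Mons(fsID) // monitor list for this cluster
	if err != nil {
		return err
	}
	adminID, err := store.AdminID(fsID) // provisioner user for this cluster
	if err != nil {
		return err
	}
	key, err := store.CredentialForUser(fsID, adminID) // key for that user
	if err != nil {
		return err
	}
	klog.Infof("mons=%s admin=%s key-length=%d", mons, adminID, len(key))
	return nil
}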
@@ -91,9 +91,10 @@ func getRBDKey(fsid string, id string, credentials map[string]string) (string, e
 	var ok bool
 	var err error
 	var key string
 
 	if key, ok = credentials[id]; !ok {
 		if fsid != "" {
-			key, err = Fc.GetCredentialForSubject(fsid, id)
+			key, err = ConfStore.CredentialForUser(fsid, id)
 			if err != nil {
 				klog.Errorf("failed getting credentials (%s)", err)
+				return "", fmt.Errorf("RBD key for ID: %s not found in config store", id)
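The net effect in getRBDKey: the credentials map handed in with the CSI request is consulted first, and only on a miss, and only when a cluster fsid is known, does the lookup fall through to the config store. Condensed, the flow is roughly as below; resolveKey is an illustrative name, not a function in the driver.

// Condensed restatement of the lookup order in the hunk above.
func resolveKey(fsid, id string, credentials map[string]string) (string, error) {
	if key, ok := credentials[id]; ok {
		return key, nil // key arrived with the CSI request secrets
	}
	if fsid == "" {
		return "", fmt.Errorf("RBD key for ID: %s not found", id)
	}
	return ConfStore.CredentialForUser(fsid, id) // fall back to the config store
}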
@@ -240,8 +241,7 @@ func execCommand(command string, args []string) ([]byte, error) {
 	return cmd.CombinedOutput()
 }
 
-func getMonsAndFsID(options map[string]string) (monitors, fsID, monInSecret string, noerr error) {
-	var err error
+func getMonsAndFsID(options map[string]string) (monitors, fsID, monInSecret string, err error) {
 	var ok bool
 
 	monitors, ok = options["monitors"]
@@ -250,11 +250,14 @@ func getMonsAndFsID(options map[string]string) (monitors, fsID, monInSecret stri
 		if monInSecret, ok = options["monValueFromSecret"]; !ok {
 			// if mons are not in secret, check if we have a cluster-fsid
 			if fsID, ok = options["clusterID"]; !ok {
-				return "", "", "", fmt.Errorf("either monitors or monValueFromSecret or clusterID must be set")
+				err = errors.New("either monitors or monValueFromSecret or clusterID must be set")
+				return
 			}
-			if monitors, err = Fc.GetMons(fsID); err != nil {
+
+			if monitors, err = ConfStore.Mons(fsID); err != nil {
 				klog.Errorf("failed getting mons (%s)", err)
-				return "", "", "", fmt.Errorf("failed to fetch monitor list using clusterID (%s)", fsID)
+				err = fmt.Errorf("failed to fetch monitor list using clusterID (%s)", fsID)
+				return
 			}
 		}
 	}
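Taken together, a StorageClass can now point the driver at a cluster in one of three mutually exclusive ways: an explicit monitors list, monitors carried in a secret (monValueFromSecret), or a clusterID resolved through the config store. A hedged sketch of that precedence follows; the option keys come from the diff, while resolveMons and its shape are illustrative and assume the errors import the change itself adds.

// Sketch of the precedence getMonsAndFsID establishes among the
// StorageClass parameters.
func resolveMons(options map[string]string) (string, error) {
	if monitors, ok := options["monitors"]; ok {
		return monitors, nil // explicit monitor list wins
	}
	if _, ok := options["monValueFromSecret"]; ok {
		return "", nil // monitors will be read from the per-volume secret
	}
	if fsID, ok := options["clusterID"]; ok {
		return ConfStore.Mons(fsID) // config store keyed by the cluster fsid
	}
	return "", errors.New("either monitors or monValueFromSecret or clusterID must be set")
}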
@@ -262,35 +265,34 @@ func getMonsAndFsID(options map[string]string) (monitors, fsID, monInSecret stri
 	return
 }
 
-func getIDs(options map[string]string, fsID string) (adminID, userID string, noerr error) {
-	var err error
+func getIDs(options map[string]string, fsID string) (adminID, userID string, err error) {
 	var ok bool
 
 	adminID, ok = options["adminid"]
-	if !ok {
-		if fsID != "" {
-			if adminID, err = Fc.GetProvisionerSubjectID(fsID); err != nil {
-				klog.Errorf("failed getting subject (%s)", err)
-				return "", "", fmt.Errorf("failed to fetch provisioner ID using clusterID (%s)", fsID)
-			}
-		} else {
-			adminID = rbdDefaultAdminID
-		}
+	switch {
+	case ok:
+	case fsID != "":
+		if adminID, err = ConfStore.AdminID(fsID); err != nil {
+			klog.Errorf("failed getting subject (%s)", err)
+			return "", "", fmt.Errorf("failed to fetch provisioner ID using clusterID (%s)", fsID)
+		}
+	default:
+		adminID = rbdDefaultAdminID
 	}
 
 	userID, ok = options["userid"]
-	if !ok {
-		if fsID != "" {
-			if userID, err = Fc.GetPublishSubjectID(fsID); err != nil {
-				klog.Errorf("failed getting subject (%s)", err)
-				return "", "", fmt.Errorf("failed to fetch publisher ID using clusterID (%s)", fsID)
-			}
-		} else {
-			userID = rbdDefaultUserID
-		}
+	switch {
+	case ok:
+	case fsID != "":
+		if userID, err = ConfStore.UserID(fsID); err != nil {
+			klog.Errorf("failed getting subject (%s)", err)
+			return "", "", fmt.Errorf("failed to fetch publisher ID using clusterID (%s)", fsID)
+		}
+	default:
+		userID = rbdDefaultUserID
 	}
 
-	return
+	return adminID, userID, err
 }
 
 func getRBDVolumeOptions(volOptions map[string]string, disableInUseChecks bool) (*rbdVolume, error) {
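The if/else ladders above become switch statements with an intentionally empty case ok: branch: if the option was supplied, nothing more needs to happen; otherwise a non-empty fsID routes the lookup through the config store, and the default falls back to the compiled-in ID. Go switch cases do not fall through, so the empty case is a genuine no-op. A minimal standalone illustration of the idiom (pickID and its arguments are illustrative):

// Minimal, self-contained illustration of the empty-case switch idiom
// used in getIDs above.
package main

import "fmt"

func pickID(options map[string]string, fromStore, fallback string) string {
	id, ok := options["adminid"]
	switch {
	case ok:
		// explicitly provided; keep id as read from the options map
	case fromStore != "":
		id = fromStore // resolved via the config store
	default:
		id = fallback // compiled-in default, e.g. rbdDefaultAdminID
	}
	return id
}

func main() {
	fmt.Println(pickID(map[string]string{"adminid": "ops"}, "", "admin")) // ops
	fmt.Println(pickID(map[string]string{}, "prov", "admin"))             // prov
	fmt.Println(pickID(map[string]string{}, "", "admin"))                 // admin
}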