cephfs: specify monitors explicitly

Authored by gman on 2019-02-13 13:57:16 +01:00, committed by mergify[bot]
parent 890740553f
commit a63b06a620
6 changed files with 54 additions and 58 deletions

View File

@@ -18,6 +18,7 @@ package cephfs
 import (
     "fmt"
     "io/ioutil"
+    "os"
     "path"
     "text/template"
@@ -25,15 +26,14 @@ import (
     "k8s.io/klog"
 )
-const cephConfig = `[global]
-mon_host = {{.Monitors}}
+var cephConfig = []byte(`[global]
 auth_cluster_required = cephx
 auth_service_required = cephx
 auth_client_required = cephx
 # Workaround for http://tracker.ceph.com/issues/23446
 fuse_set_user_groups = false
-`
+`)
 const cephKeyring = `[client.{{.UserID}}]
 key = {{.Key}}
@@ -43,13 +43,12 @@ const cephSecret = `{{.Key}}` // #nosec
 const (
     cephConfigRoot = "/etc/ceph"
-    cephConfigFileNameFmt = "ceph.share.%s.conf"
+    cephConfigPath = "/etc/ceph/ceph.conf"
     cephKeyringFileNameFmt = "ceph.share.%s.client.%s.keyring"
     cephSecretFileNameFmt = "ceph.share.%s.client.%s.secret" // #nosec
 )
 var (
-    cephConfigTempl *template.Template
     cephKeyringTempl *template.Template
     cephSecretTempl *template.Template
 )
@@ -65,19 +64,24 @@ func init() {
         },
     }
-    cephConfigTempl = template.Must(template.New("config").Parse(cephConfig))
     cephKeyringTempl = template.Must(template.New("keyring").Funcs(fm).Parse(cephKeyring))
     cephSecretTempl = template.Must(template.New("secret").Parse(cephSecret))
 }
-type cephConfigData struct {
-    Monitors string
-    VolumeID volumeID
+func createCephConfigRoot() error {
+    return os.MkdirAll(cephConfigRoot, 0755) // #nosec
 }
+func writeCephConfig() error {
+    if err := createCephConfigRoot(); err != nil {
+        return err
+    }
+    return ioutil.WriteFile(cephConfigPath, cephConfig, 0640)
+}
 func writeCephTemplate(fileName string, m os.FileMode, t *template.Template, data interface{}) error {
-    // #nosec
-    if err := os.MkdirAll(cephConfigRoot, 0755); err != nil {
+    if err := createCephConfigRoot(); err != nil {
         return err
     }
@@ -98,10 +102,6 @@ func writeCephTemplate(fileName string, m os.FileMode, t *template.Template, dat
     return t.Execute(f, data)
 }
-func (d *cephConfigData) writeToFile() error {
-    return writeCephTemplate(fmt.Sprintf(cephConfigFileNameFmt, d.VolumeID), 0640, cephConfigTempl, d)
-}
 type cephKeyringData struct {
     UserID, Key string
     VolumeID volumeID
@@ -127,7 +127,3 @@ func getCephSecretPath(volID volumeID, userID string) string {
 func getCephKeyringPath(volID volumeID, userID string) string {
     return path.Join(cephConfigRoot, fmt.Sprintf(cephKeyringFileNameFmt, volID, userID))
 }
-func getCephConfPath(volID volumeID) string {
-    return path.Join(cephConfigRoot, fmt.Sprintf(cephConfigFileNameFmt, volID))
-}
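
With mon_host dropped from the config, nothing in this file is per-volume any more. A self-contained sketch of the remaining config handling, mirroring the diff above (the shortened payload and the main function are the only liberties taken here):

package main

import (
    "io/ioutil"
    "os"
)

const (
    cephConfigRoot = "/etc/ceph"
    cephConfigPath = "/etc/ceph/ceph.conf"
)

// The config is a fixed byte slice now -- no template and no mon_host
// line. Payload shortened here; the full contents are in the diff above.
var cephConfig = []byte("[global]\nauth_cluster_required = cephx\n")

// createCephConfigRoot ensures /etc/ceph exists.
func createCephConfigRoot() error {
    return os.MkdirAll(cephConfigRoot, 0755) // #nosec
}

// writeCephConfig writes the shared, static config; it is called once at
// driver startup (see driver.go below) instead of once per volume.
func writeCephConfig() error {
    if err := createCephConfigRoot(); err != nil {
        return err
    }
    return ioutil.WriteFile(cephConfigPath, cephConfig, 0640)
}

// main is illustrative only; in the driver, Run calls writeCephConfig.
func main() {
    if err := writeCephConfig(); err != nil {
        panic(err)
    }
}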

View File

@@ -53,12 +53,13 @@ func getCephUserName(volID volumeID) string {
     return cephUserPrefix + string(volID)
 }
-func getCephUser(adminCr *credentials, volID volumeID) (*cephEntity, error) {
+func getCephUser(volOptions *volumeOptions, adminCr *credentials, volID volumeID) (*cephEntity, error) {
     entityName := cephEntityClientPrefix + getCephUserName(volID)
     var ents []cephEntity
     args := [...]string{
-        "auth", "-f", "json", "-c", getCephConfPath(volID), "-n", cephEntityClientPrefix + adminCr.id,
+        "-m", volOptions.Monitors,
+        "auth", "-f", "json", "-c", cephConfigPath, "-n", cephEntityClientPrefix + adminCr.id, "--keyring", getCephKeyringPath(volID, adminCr.id),
         "get", entityName,
     }
@@ -91,7 +92,8 @@ func createCephUser(volOptions *volumeOptions, adminCr *credentials, volID volum
     var ents []cephEntity
     args := [...]string{
-        "auth", "-f", "json", "-c", getCephConfPath(volID), "-n", cephEntityClientPrefix + adminCr.id,
+        "-m", volOptions.Monitors,
+        "auth", "-f", "json", "-c", cephConfigPath, "-n", cephEntityClientPrefix + adminCr.id, "--keyring", getCephKeyringPath(volID, adminCr.id),
         "get-or-create", cephEntityClientPrefix + getCephUserName(volID),
         "mds", caps.Mds,
         "mon", caps.Mon,
@@ -105,11 +107,12 @@ func createCephUser(volOptions *volumeOptions, adminCr *credentials, volID volum
     return &ents[0], nil
 }
-func deleteCephUser(adminCr *credentials, volID volumeID) error {
+func deleteCephUser(volOptions *volumeOptions, adminCr *credentials, volID volumeID) error {
     userID := getCephUserName(volID)
     args := [...]string{
-        "-c", getCephConfPath(volID), "-n", cephEntityClientPrefix + adminCr.id,
+        "-m", volOptions.Monitors,
+        "-c", cephConfigPath, "-n", cephEntityClientPrefix + adminCr.id, "--keyring", getCephKeyringPath(volID, adminCr.id),
         "auth", "rm", cephEntityClientPrefix + userID,
     }
@@ -118,12 +121,12 @@ func deleteCephUser(adminCr *credentials, volID volumeID) error {
         return err
     }
-    keyringPath := getCephKeyringPath(volID, userID)
+    keyringPath := getCephKeyringPath(volID, adminCr.id)
     if err = os.Remove(keyringPath); err != nil {
         klog.Errorf("failed to remove keyring file %s with error %s", keyringPath, err)
     }
-    secretPath := getCephSecretPath(volID, userID)
+    secretPath := getCephSecretPath(volID, adminCr.id)
     if err = os.Remove(secretPath); err != nil {
         klog.Errorf("failed to remove secret file %s with error %s", secretPath, err)
     }
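
All three helpers now share the same CLI preamble: explicit monitors via -m, the shared static config via -c, the admin entity via -n, and the admin keyring via --keyring. Cleanup likewise switches to the adminCr.id-keyed keyring and secret paths, the same paths the --keyring argument references. A sketch of that argument pattern, using a hypothetical buildCephArgs helper and example values (neither the helper nor the values are part of the commit):

package main

import "fmt"

// buildCephArgs assembles the flags that every ceph CLI call carries after
// this change. The real code builds the arrays inline in getCephUser,
// createCephUser and deleteCephUser.
func buildCephArgs(monitors, adminID, keyringPath string, cmd ...string) []string {
    args := []string{
        "-m", monitors, // volOptions.Monitors
        "-c", "/etc/ceph/ceph.conf", // cephConfigPath: shared and static
        "-n", "client." + adminID, // cephEntityClientPrefix + adminCr.id
        "--keyring", keyringPath, // getCephKeyringPath(volID, adminCr.id)
    }
    return append(args, cmd...)
}

func main() {
    args := buildCephArgs(
        "192.168.0.1:6789,192.168.0.2:6789", // example monitor list
        "admin",
        "/etc/ceph/ceph.share.csi-example.client.admin.keyring",
        "auth", "get", "client.csi-cephfs-example",
    )
    fmt.Println(args)
}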

View File

@@ -46,7 +46,9 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
         klog.Errorf("CreateVolumeRequest validation failed: %v", err)
         return nil, err
     }
+    // Configuration
+    secret := req.GetSecrets()
     volOptions, err := newVolumeOptions(req.GetParameters(), secret)
     if err != nil {
@@ -55,11 +57,6 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
     }
     volID := makeVolumeID(req.GetName())
-    conf := cephConfigData{Monitors: volOptions.Monitors, VolumeID: volID}
-    if err = conf.writeToFile(); err != nil {
-        klog.Errorf("failed to write ceph config file to %s: %v", getCephConfPath(volID), err)
-        return nil, status.Error(codes.Internal, err.Error())
-    }
     // Create a volume in case the user didn't provide one
@@ -115,6 +112,7 @@ func (cs *ControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol
     var (
         volID = volumeID(req.GetVolumeId())
+        secrets = req.GetSecrets()
         err error
     )
@@ -129,18 +127,17 @@ func (cs *ControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol
         klog.Warningf("volume %s is provisioned statically, aborting delete", volID)
         return &csi.DeleteVolumeResponse{}, nil
     }
     // mons may have changed since create volume,
     // retrieve the latest mons and override old mons
-    secret := req.GetSecrets()
-    mon := ""
-    if mon, err = getMonValFromSecret(secret); err == nil && len(mon) > 0 {
-        klog.Infof("override old mons [%q] with [%q]", ce.VolOptions.Monitors, mon)
+    if mon, secretsErr := getMonValFromSecret(secrets); secretsErr == nil && len(mon) > 0 {
+        klog.Infof("overriding monitors [%q] with [%q] for volume %s", ce.VolOptions.Monitors, mon, volID)
         ce.VolOptions.Monitors = mon
     }
     // Deleting a volume requires admin credentials
-    cr, err := getAdminCredentials(secret)
+    cr, err := getAdminCredentials(secrets)
     if err != nil {
         klog.Errorf("failed to retrieve admin credentials: %v", err)
         return nil, status.Error(codes.InvalidArgument, err.Error())
@@ -151,7 +148,7 @@ func (cs *ControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol
         return nil, status.Error(codes.Internal, err.Error())
     }
-    if err = deleteCephUser(cr, volID); err != nil {
+    if err = deleteCephUser(&ce.VolOptions, cr, volID); err != nil {
         klog.Errorf("failed to delete ceph user for volume %s: %v", volID, err)
         return nil, status.Error(codes.Internal, err.Error())
     }
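
Since monitors are no longer baked into a per-volume config file, DeleteVolume decides at call time which monitor list to use: a fresh one from the delete secrets when present, otherwise the set cached at create time. A sketch of that precedence, where monValFromSecret and the "monitors" secret key stand in for getMonValFromSecret (both are assumptions made for the sketch, not confirmed by the diff):

package main

import (
    "errors"
    "fmt"
)

// monValFromSecret stands in for getMonValFromSecret.
func monValFromSecret(secrets map[string]string) (string, error) {
    if mon, ok := secrets["monitors"]; ok {
        return mon, nil
    }
    return "", errors.New("monitors not found in secrets")
}

func main() {
    // cachedMonitors stands in for ce.VolOptions.Monitors from the cache.
    cachedMonitors := "10.0.0.1:6789"
    secrets := map[string]string{"monitors": "10.0.0.5:6789,10.0.0.6:6789"}

    // Prefer fresh monitors from the secrets; keep the cached ones
    // if the secrets don't carry any.
    if mon, err := monValFromSecret(secrets); err == nil && len(mon) > 0 {
        cachedMonitors = mon
    }
    fmt.Println("using monitors:", cachedMonitors)
}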

View File

@@ -99,11 +99,15 @@ func (fs *Driver) Run(driverName, nodeID, endpoint, volumeMounter string, cacheP
     klog.Infof("cephfs: setting default volume mounter to %s", DefaultVolumeMounter)
+    if err := writeCephConfig(); err != nil {
+        klog.Fatalf("failed to write ceph configuration file: %v", err)
+    }
     // Initialize default library driver
     fs.cd = csicommon.NewCSIDriver(driverName, version, nodeID)
     if fs.cd == nil {
-        klog.Fatalln("Failed to initialize CSI driver")
+        klog.Fatalln("failed to initialize CSI driver")
     }
     fs.cd.AddControllerServiceCapabilities([]csi.ControllerServiceCapability_RPC_Type{
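
Every plugin instance goes through Run, so the shared configuration is written once per pod at startup and any failure is fatal before the CSI servers come up; every later ceph and ceph-fuse invocation points at the file with -c. This is what makes the per-volume config writes removed from CreateVolume and NodeStageVolume safe to drop. Given the cephConfig payload above, the file that lands at /etc/ceph/ceph.conf reads:

[global]
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
# Workaround for http://tracker.ceph.com/issues/23446
fuse_set_user_groups = false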

View File

@@ -37,15 +37,16 @@ type NodeServer struct {
 func getCredentialsForVolume(volOptions *volumeOptions, volID volumeID, req *csi.NodeStageVolumeRequest) (*credentials, error) {
     var (
-        userCr *credentials
+        cr *credentials
+        secrets = req.GetSecrets()
     )
-    secret := req.GetSecrets()
     if volOptions.ProvisionVolume {
         // The volume is provisioned dynamically, get the credentials directly from Ceph
         // First, store admin credentials - those are needed for retrieving the user credentials
-        adminCr, err := getAdminCredentials(secret)
+        adminCr, err := getAdminCredentials(secrets)
         if err != nil {
             return nil, fmt.Errorf("failed to get admin credentials from node stage secrets: %v", err)
         }
@@ -56,27 +57,28 @@ func getCredentialsForVolume(volOptions *volumeOptions, volID volumeID, req *csi
         // Then get the ceph user
-        entity, err := getCephUser(adminCr, volID)
+        entity, err := getCephUser(volOptions, adminCr, volID)
         if err != nil {
             return nil, fmt.Errorf("failed to get ceph user: %v", err)
         }
-        userCr = entity.toCredentials()
+        cr = entity.toCredentials()
     } else {
         // The volume is pre-made, credentials are in node stage secrets
-        uCr, err := getUserCredentials(req.GetSecrets())
+        userCr, err := getUserCredentials(req.GetSecrets())
         if err != nil {
             return nil, fmt.Errorf("failed to get user credentials from node stage secrets: %v", err)
         }
-        userCr = uCr
+        cr = userCr
     }
-    if err := storeCephCredentials(volID, userCr); err != nil {
+    if err := storeCephCredentials(volID, cr); err != nil {
         return nil, fmt.Errorf("failed to store ceph user credentials: %v", err)
     }
-    return userCr, nil
+    return cr, nil
 }
 // NodeStageVolume mounts the volume to a staging path on the node.
@@ -90,8 +92,7 @@ func (ns *NodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVol
     stagingTargetPath := req.GetStagingTargetPath()
     volID := volumeID(req.GetVolumeId())
-    secret := req.GetSecrets()
-    volOptions, err := newVolumeOptions(req.GetVolumeContext(), secret)
+    volOptions, err := newVolumeOptions(req.GetVolumeContext(), req.GetSecrets())
     if err != nil {
         klog.Errorf("error reading volume options for volume %s: %v", volID, err)
         return nil, status.Error(codes.InvalidArgument, err.Error())
@@ -107,12 +108,6 @@ func (ns *NodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVol
         return nil, status.Error(codes.Internal, err.Error())
     }
-    cephConf := cephConfigData{Monitors: volOptions.Monitors, VolumeID: volID}
-    if err = cephConf.writeToFile(); err != nil {
-        klog.Errorf("failed to write ceph config file to %s for volume %s: %v", getCephConfPath(volID), volID, err)
-        return nil, status.Error(codes.Internal, err.Error())
-    }
     // Check if the volume is already mounted
     isMnt, err := isMountPoint(stagingTargetPath)
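
getCredentialsForVolume now funnels both provisioning modes into a single cr value, and NodeStageVolume no longer writes any per-volume config before mounting. A condensed, self-contained sketch of the two credential paths, with simplified stand-in types and helpers (the real functions take a *csi.NodeStageVolumeRequest and resolve the user through getCephUser):

package main

import (
    "errors"
    "fmt"
)

type credentials struct{ id, key string }

// Stand-ins for the real helpers; signatures simplified for the sketch.
func getAdminCredentials(secrets map[string]string) (*credentials, error) {
    if secrets["adminID"] == "" {
        return nil, errors.New("missing admin credentials")
    }
    return &credentials{id: secrets["adminID"], key: secrets["adminKey"]}, nil
}

func getUserCredentials(secrets map[string]string) (*credentials, error) {
    if secrets["userID"] == "" {
        return nil, errors.New("missing user credentials")
    }
    return &credentials{id: secrets["userID"], key: secrets["userKey"]}, nil
}

// fetchCephUser stands in for getCephUser(volOptions, adminCr, volID),
// which resolves the volume user through the Ceph cluster.
func fetchCephUser(adminCr *credentials) (*credentials, error) {
    return &credentials{id: "csi-cephfs-example", key: "resolved-via-ceph"}, nil
}

// credentialsForVolume mirrors the control flow of getCredentialsForVolume.
func credentialsForVolume(provisionVolume bool, secrets map[string]string) (*credentials, error) {
    var cr *credentials
    if provisionVolume {
        // Dynamic provisioning: admin credentials unlock the volume user.
        adminCr, err := getAdminCredentials(secrets)
        if err != nil {
            return nil, fmt.Errorf("failed to get admin credentials: %v", err)
        }
        cr, err = fetchCephUser(adminCr)
        if err != nil {
            return nil, fmt.Errorf("failed to get ceph user: %v", err)
        }
    } else {
        // Pre-provisioned volume: the user is in the node-stage secrets.
        userCr, err := getUserCredentials(secrets)
        if err != nil {
            return nil, fmt.Errorf("failed to get user credentials: %v", err)
        }
        cr = userCr
    }
    return cr, nil
}

func main() {
    cr, err := credentialsForVolume(false, map[string]string{"userID": "u1", "userKey": "k1"})
    if err != nil {
        panic(err)
    }
    fmt.Println(cr.id)
}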

View File

@@ -104,7 +104,8 @@ type fuseMounter struct{}
 func mountFuse(mountPoint string, cr *credentials, volOptions *volumeOptions, volID volumeID) error {
     args := [...]string{
         mountPoint,
-        "-c", getCephConfPath(volID),
+        "-m", volOptions.Monitors,
+        "-c", cephConfigPath,
         "-n", cephEntityClientPrefix + cr.id,
         "--keyring", getCephKeyringPath(volID, cr.id),
         "-r", volOptions.RootPath,