cephfs: don't need to store keyrings anymore

Authored by gman on 2019-02-14 11:48:52 +01:00, committed by mergify[bot]
parent 8e371f62fa
commit 96bf4a98bd
3 changed files with 3 additions and 97 deletions


@@ -17,13 +17,8 @@ limitations under the License.
package cephfs
import (
    "fmt"
    "io/ioutil"
    "os"
    "path"
    "text/template"
    "k8s.io/klog"
)
var cephConfig = []byte(`[global]
@@ -35,39 +30,11 @@ auth_client_required = cephx
fuse_set_user_groups = false
`)
const cephKeyring = `[client.{{.UserID}}]
key = {{.Key}}
`
const cephSecret = `{{.Key}}` // #nosec
const (
    cephConfigRoot = "/etc/ceph"
    cephConfigPath = "/etc/ceph/ceph.conf"
    cephKeyringFileNameFmt = "ceph.share.%s.client.%s.keyring"
    cephSecretFileNameFmt = "ceph.share.%s.client.%s.secret" // #nosec
    cephConfigRoot = "/etc/ceph"
    cephConfigPath = "/etc/ceph/ceph.conf"
)
var (
    cephKeyringTempl *template.Template
    cephSecretTempl *template.Template
)
func init() {
    fm := map[string]interface{}{
        "perms": func(readOnly bool) string {
            if readOnly {
                return "r"
            }
            return "rw"
        },
    }
    cephKeyringTempl = template.Must(template.New("keyring").Funcs(fm).Parse(cephKeyring))
    cephSecretTempl = template.Must(template.New("secret").Parse(cephSecret))
}
func createCephConfigRoot() error {
    return os.MkdirAll(cephConfigRoot, 0755) // #nosec
}
@@ -79,51 +46,3 @@ func writeCephConfig() error {
    return ioutil.WriteFile(cephConfigPath, cephConfig, 0640)
}
func writeCephTemplate(fileName string, m os.FileMode, t *template.Template, data interface{}) error {
    if err := createCephConfigRoot(); err != nil {
        return err
    }
    f, err := os.OpenFile(path.Join(cephConfigRoot, fileName), os.O_CREATE|os.O_EXCL|os.O_WRONLY, m)
    if err != nil {
        if os.IsExist(err) {
            return nil
        }
        return err
    }
    defer func() {
        if err := f.Close(); err != nil {
            klog.Errorf("failed to close file %s with error %s", f.Name(), err)
        }
    }()
    return t.Execute(f, data)
}
type cephKeyringData struct {
    UserID, Key string
    VolumeID volumeID
}
func (d *cephKeyringData) writeToFile() error {
    return writeCephTemplate(fmt.Sprintf(cephKeyringFileNameFmt, d.VolumeID, d.UserID), 0600, cephKeyringTempl, d)
}
type cephSecretData struct {
    UserID, Key string
    VolumeID volumeID
}
func (d *cephSecretData) writeToFile() error {
    return writeCephTemplate(fmt.Sprintf(cephSecretFileNameFmt, d.VolumeID, d.UserID), 0600, cephSecretTempl, d)
}
func getCephSecretPath(volID volumeID, userID string) string {
    return path.Join(cephConfigRoot, fmt.Sprintf(cephSecretFileNameFmt, volID, userID))
}
func getCephKeyringPath(volID volumeID, userID string) string {
    return path.Join(cephConfigRoot, fmt.Sprintf(cephKeyringFileNameFmt, volID, userID))
}
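
With the keyring and secret templates removed, this file shrinks to writing the shared ceph.conf only. A minimal sketch of what remains, assuming writeCephConfig still creates the config root first; any [global] options not visible in the hunks above are elided:

package cephfs

import (
    "io/ioutil"
    "os"
)

// Only the shared ceph.conf survives; per-volume keyring and secret files
// are no longer written under /etc/ceph. [global] options not shown in the
// diff are elided from this sketch.
var cephConfig = []byte(`[global]
auth_client_required = cephx
fuse_set_user_groups = false
`)

const (
    cephConfigRoot = "/etc/ceph"
    cephConfigPath = "/etc/ceph/ceph.conf"
)

func createCephConfigRoot() error {
    return os.MkdirAll(cephConfigRoot, 0755) // #nosec
}

func writeCephConfig() error {
    // Assumption: the config root is still created before ceph.conf is written.
    if err := createCephConfigRoot(); err != nil {
        return err
    }
    return ioutil.WriteFile(cephConfigPath, cephConfig, 0640)
}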


@@ -67,11 +67,6 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
        return nil, status.Error(codes.InvalidArgument, err.Error())
    }
    if err = storeCephCredentials(volID, cr); err != nil {
        klog.Errorf("failed to store admin credentials for '%s': %v", cr.id, err)
        return nil, status.Error(codes.Internal, err.Error())
    }
    if err = createVolume(volOptions, cr, volID, req.GetCapacityRange().GetRequiredBytes()); err != nil {
        klog.Errorf("failed to create volume %s: %v", req.GetName(), err)
        return nil, status.Error(codes.Internal, err.Error())
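
The block removed here was CreateVolume's only call to storeCephCredentials. That helper is defined outside this diff; judging from the cephKeyringData/cephSecretData writers deleted in the first file, it presumably amounted to the following hedged reconstruction (the field wiring and the cr.key field are assumptions):

// Hypothetical reconstruction of the helper whose call sites are removed in
// this commit; the real storeCephCredentials lives in a file not shown here.
func storeCephCredentials(volID volumeID, cr *credentials) error {
    keyring := cephKeyringData{
        UserID:   cr.id,
        Key:      cr.key, // assumption: credentials carries the cephx key as `key`
        VolumeID: volID,
    }
    if err := keyring.writeToFile(); err != nil {
        return err
    }

    secret := cephSecretData{
        UserID:   cr.id,
        Key:      cr.key,
        VolumeID: volID,
    }
    return secret.writeToFile()
}

Per the commit title, this on-disk copy of the credentials is no longer needed, so both the helper's call sites and the file-writing machinery behind it can go.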


@@ -51,10 +51,6 @@ func getCredentialsForVolume(volOptions *volumeOptions, volID volumeID, req *csi
            return nil, fmt.Errorf("failed to get admin credentials from node stage secrets: %v", err)
        }
        if err = storeCephCredentials(volID, adminCr); err != nil {
            return nil, fmt.Errorf("failed to store ceph admin credentials: %v", err)
        }
        // Then get the ceph user
        entity, err := getCephUser(volOptions, adminCr, volID)
@@ -74,10 +70,6 @@ func getCredentialsForVolume(volOptions *volumeOptions, volID volumeID, req *csi
        cr = userCr
    }
    if err := storeCephCredentials(volID, cr); err != nil {
        return nil, fmt.Errorf("failed to store ceph user credentials: %v", err)
    }
    return cr, nil
}
@@ -241,7 +233,7 @@ func (ns *NodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstag
        return nil, status.Error(codes.Internal, err.Error())
    }
    klog.Infof("cephfs: successfully umounted volume %s from %s", req.GetVolumeId(), stagingTargetPath)
    klog.Infof("cephfs: successfully unmounted volume %s from %s", req.GetVolumeId(), stagingTargetPath)
    return &csi.NodeUnstageVolumeResponse{}, nil
}
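
With both store calls gone, getCredentialsForVolume only resolves the appropriate credentials and returns them. A condensed sketch of the resulting flow; the ProvisionVolume branch and the getAdminCredentials/getUserCredentials/toCredentials helpers are inferred from the context lines above, not spelled out in this diff:

// Condensed sketch; helper names and branch structure are assumptions drawn
// from the surrounding context lines, not copied verbatim from the file.
func getCredentialsForVolume(volOptions *volumeOptions, volID volumeID, req *csi.NodeStageVolumeRequest) (*credentials, error) {
    secrets := req.GetSecrets()

    if volOptions.ProvisionVolume {
        // Dynamically provisioned volume: derive the volume's ceph user
        // via the admin credentials from the node stage secrets.
        adminCr, err := getAdminCredentials(secrets)
        if err != nil {
            return nil, fmt.Errorf("failed to get admin credentials from node stage secrets: %v", err)
        }

        // Then get the ceph user
        entity, err := getCephUser(volOptions, adminCr, volID)
        if err != nil {
            return nil, err
        }
        return entity.toCredentials(), nil
    }

    // Pre-provisioned volume: user credentials come straight from the secrets.
    userCr, err := getUserCredentials(secrets)
    if err != nil {
        return nil, err
    }
    return userCr, nil
}

The remaining hunk in this file is unrelated cleanup: the NodeUnstageVolume log message is corrected from "umounted" to "unmounted".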