diff --git a/cephfs/main.go b/cephfs/main.go index 7d53f5515..92f2c0ceb 100644 --- a/cephfs/main.go +++ b/cephfs/main.go @@ -33,7 +33,7 @@ func init() { var ( endpoint = flag.String("endpoint", "unix://tmp/csi.sock", "CSI endpoint") driverName = flag.String("drivername", "csi-cephfsplugin", "name of the driver") - nodeId = flag.String("nodeid", "", "node id") + nodeID = flag.String("nodeid", "", "node id") volumeMounter = flag.String("volumemounter", "", "default volume mounter (possible options are 'kernel', 'fuse')") metadataStorage = flag.String("metadatastorage", "", "metadata persistence method [node|k8s_configmap]") ) @@ -58,7 +58,7 @@ func main() { } driver := cephfs.NewCephFSDriver() - driver.Run(*driverName, *nodeId, *endpoint, *volumeMounter, cp) + driver.Run(*driverName, *nodeID, *endpoint, *volumeMounter, cp) os.Exit(0) } diff --git a/pkg/cephfs/cephconf.go b/pkg/cephfs/cephconf.go index c2e831aae..c340d9f85 100644 --- a/pkg/cephfs/cephconf.go +++ b/pkg/cephfs/cephconf.go @@ -96,31 +96,31 @@ func (d *cephConfigData) writeToFile() error { } type cephKeyringData struct { - UserId, Key string + UserID, Key string VolumeID volumeID } func (d *cephKeyringData) writeToFile() error { - return writeCephTemplate(fmt.Sprintf(cephKeyringFileNameFmt, d.VolumeID, d.UserId), 0600, cephKeyringTempl, d) + return writeCephTemplate(fmt.Sprintf(cephKeyringFileNameFmt, d.VolumeID, d.UserID), 0600, cephKeyringTempl, d) } type cephSecretData struct { - UserId, Key string + UserID, Key string VolumeID volumeID } func (d *cephSecretData) writeToFile() error { - return writeCephTemplate(fmt.Sprintf(cephSecretFileNameFmt, d.VolumeID, d.UserId), 0600, cephSecretTempl, d) + return writeCephTemplate(fmt.Sprintf(cephSecretFileNameFmt, d.VolumeID, d.UserID), 0600, cephSecretTempl, d) } -func getCephSecretPath(volId volumeID, userId string) string { - return path.Join(cephConfigRoot, fmt.Sprintf(cephSecretFileNameFmt, volId, userId)) +func getCephSecretPath(volID volumeID, userID 
string) string { + return path.Join(cephConfigRoot, fmt.Sprintf(cephSecretFileNameFmt, volID, userID)) } -func getCephKeyringPath(volId volumeID, userId string) string { - return path.Join(cephConfigRoot, fmt.Sprintf(cephKeyringFileNameFmt, volId, userId)) +func getCephKeyringPath(volID volumeID, userID string) string { + return path.Join(cephConfigRoot, fmt.Sprintf(cephKeyringFileNameFmt, volID, userID)) } -func getCephConfPath(volId volumeID) string { - return path.Join(cephConfigRoot, fmt.Sprintf(cephConfigFileNameFmt, volId)) +func getCephConfPath(volID volumeID) string { + return path.Join(cephConfigRoot, fmt.Sprintf(cephConfigFileNameFmt, volID)) } diff --git a/pkg/cephfs/cephuser.go b/pkg/cephfs/cephuser.go index 57201cc6e..b85dc4de1 100644 --- a/pkg/cephfs/cephuser.go +++ b/pkg/cephfs/cephuser.go @@ -47,16 +47,16 @@ func (ent *cephEntity) toCredentials() *credentials { } } -func getCephUserName(volId volumeID) string { - return cephUserPrefix + string(volId) +func getCephUserName(volID volumeID) string { + return cephUserPrefix + string(volID) } -func getCephUser(adminCr *credentials, volId volumeID) (*cephEntity, error) { - entityName := cephEntityClientPrefix + getCephUserName(volId) +func getCephUser(adminCr *credentials, volID volumeID) (*cephEntity, error) { + entityName := cephEntityClientPrefix + getCephUserName(volID) var ents []cephEntity args := [...]string{ - "auth", "-f", "json", "-c", getCephConfPath(volId), "-n", cephEntityClientPrefix + adminCr.id, + "auth", "-f", "json", "-c", getCephConfPath(volID), "-n", cephEntityClientPrefix + adminCr.id, "get", entityName, } @@ -80,17 +80,17 @@ func getCephUser(adminCr *credentials, volId volumeID) (*cephEntity, error) { return &ents[0], nil } -func createCephUser(volOptions *volumeOptions, adminCr *credentials, volId volumeID) (*cephEntity, error) { +func createCephUser(volOptions *volumeOptions, adminCr *credentials, volID volumeID) (*cephEntity, error) { caps := cephEntityCaps{ - Mds: 
fmt.Sprintf("allow rw path=%s", getVolumeRootPathCeph(volId)), + Mds: fmt.Sprintf("allow rw path=%s", getVolumeRootPathCeph(volID)), Mon: "allow r", - Osd: fmt.Sprintf("allow rw pool=%s namespace=%s", volOptions.Pool, getVolumeNamespace(volId)), + Osd: fmt.Sprintf("allow rw pool=%s namespace=%s", volOptions.Pool, getVolumeNamespace(volID)), } var ents []cephEntity args := [...]string{ - "auth", "-f", "json", "-c", getCephConfPath(volId), "-n", cephEntityClientPrefix + adminCr.id, - "get-or-create", cephEntityClientPrefix + getCephUserName(volId), + "auth", "-f", "json", "-c", getCephConfPath(volID), "-n", cephEntityClientPrefix + adminCr.id, + "get-or-create", cephEntityClientPrefix + getCephUserName(volID), "mds", caps.Mds, "mon", caps.Mon, "osd", caps.Osd, @@ -103,20 +103,20 @@ func createCephUser(volOptions *volumeOptions, adminCr *credentials, volId volum return &ents[0], nil } -func deleteCephUser(adminCr *credentials, volId volumeID) error { - userId := getCephUserName(volId) +func deleteCephUser(adminCr *credentials, volID volumeID) error { + userID := getCephUserName(volID) args := [...]string{ - "-c", getCephConfPath(volId), "-n", cephEntityClientPrefix + adminCr.id, - "auth", "rm", cephEntityClientPrefix + userId, + "-c", getCephConfPath(volID), "-n", cephEntityClientPrefix + adminCr.id, + "auth", "rm", cephEntityClientPrefix + userID, } if err := execCommandAndValidate("ceph", args[:]...); err != nil { return err } - os.Remove(getCephKeyringPath(volId, userId)) - os.Remove(getCephSecretPath(volId, userId)) + os.Remove(getCephKeyringPath(volID, userID)) + os.Remove(getCephSecretPath(volID, userID)) return nil } diff --git a/pkg/cephfs/controllerserver.go b/pkg/cephfs/controllerserver.go index 7d775ada3..a4129c73c 100644 --- a/pkg/cephfs/controllerserver.go +++ b/pkg/cephfs/controllerserver.go @@ -51,10 +51,10 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol return nil, status.Error(codes.InvalidArgument, err.Error()) } - 
volId := makeVolumeID(req.GetName()) - conf := cephConfigData{Monitors: volOptions.Monitors, VolumeID: volId} + volID := makeVolumeID(req.GetName()) + conf := cephConfigData{Monitors: volOptions.Monitors, VolumeID: volID} if err = conf.writeToFile(); err != nil { - glog.Errorf("failed to write ceph config file to %s: %v", getCephConfPath(volId), err) + glog.Errorf("failed to write ceph config file to %s: %v", getCephConfPath(volID), err) return nil, status.Error(codes.Internal, err.Error()) } @@ -67,35 +67,35 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol return nil, status.Error(codes.InvalidArgument, err.Error()) } - if err = storeCephCredentials(volId, cr); err != nil { + if err = storeCephCredentials(volID, cr); err != nil { glog.Errorf("failed to store admin credentials for '%s': %v", cr.id, err) return nil, status.Error(codes.Internal, err.Error()) } - if err = createVolume(volOptions, cr, volId, req.GetCapacityRange().GetRequiredBytes()); err != nil { + if err = createVolume(volOptions, cr, volID, req.GetCapacityRange().GetRequiredBytes()); err != nil { glog.Errorf("failed to create volume %s: %v", req.GetName(), err) return nil, status.Error(codes.Internal, err.Error()) } - if _, err = createCephUser(volOptions, cr, volId); err != nil { + if _, err = createCephUser(volOptions, cr, volID); err != nil { glog.Errorf("failed to create ceph user for volume %s: %v", req.GetName(), err) return nil, status.Error(codes.Internal, err.Error()) } - glog.Infof("cephfs: successfully created volume %s", volId) + glog.Infof("cephfs: successfully created volume %s", volID) } else { - glog.Infof("cephfs: volume %s is provisioned statically", volId) + glog.Infof("cephfs: volume %s is provisioned statically", volID) } - ce := &controllerCacheEntry{VolOptions: *volOptions, VolumeID: volId} - if err := cs.MetadataStore.Create(string(volId), ce); err != nil { - glog.Errorf("failed to store a cache entry for volume %s: %v", volId, err) + ce := 
&controllerCacheEntry{VolOptions: *volOptions, VolumeID: volID} + if err := cs.MetadataStore.Create(string(volID), ce); err != nil { + glog.Errorf("failed to store a cache entry for volume %s: %v", volID, err) return nil, status.Error(codes.Internal, err.Error()) } return &csi.CreateVolumeResponse{ Volume: &csi.Volume{ - VolumeId: string(volId), + VolumeId: string(volID), CapacityBytes: req.GetCapacityRange().GetRequiredBytes(), VolumeContext: req.GetParameters(), }, @@ -109,19 +109,19 @@ func (cs *controllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol } var ( - volId = volumeID(req.GetVolumeId()) + volID = volumeID(req.GetVolumeId()) err error ) ce := &controllerCacheEntry{} - if err := cs.MetadataStore.Get(string(volId), ce); err != nil { + if err := cs.MetadataStore.Get(string(volID), ce); err != nil { return nil, status.Error(codes.Internal, err.Error()) } if !ce.VolOptions.ProvisionVolume { // DeleteVolume() is forbidden for statically provisioned volumes! - glog.Warningf("volume %s is provisioned statically, aborting delete", volId) + glog.Warningf("volume %s is provisioned statically, aborting delete", volID) return &csi.DeleteVolumeResponse{}, nil } // mons may have changed since create volume, @@ -140,21 +140,21 @@ func (cs *controllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol return nil, status.Error(codes.InvalidArgument, err.Error()) } - if err = purgeVolume(volId, cr, &ce.VolOptions); err != nil { - glog.Errorf("failed to delete volume %s: %v", volId, err) + if err = purgeVolume(volID, cr, &ce.VolOptions); err != nil { + glog.Errorf("failed to delete volume %s: %v", volID, err) return nil, status.Error(codes.Internal, err.Error()) } - if err = deleteCephUser(cr, volId); err != nil { - glog.Errorf("failed to delete ceph user for volume %s: %v", volId, err) + if err = deleteCephUser(cr, volID); err != nil { + glog.Errorf("failed to delete ceph user for volume %s: %v", volID, err) return nil, status.Error(codes.Internal, 
err.Error()) } - if err := cs.MetadataStore.Delete(string(volId)); err != nil { + if err := cs.MetadataStore.Delete(string(volID)); err != nil { return nil, status.Error(codes.Internal, err.Error()) } - glog.Infof("cephfs: successfully deleted volume %s", volId) + glog.Infof("cephfs: successfully deleted volume %s", volID) return &csi.DeleteVolumeResponse{}, nil } diff --git a/pkg/cephfs/credentials.go b/pkg/cephfs/credentials.go index 343e9a12d..0cf866ce5 100644 --- a/pkg/cephfs/credentials.go +++ b/pkg/cephfs/credentials.go @@ -19,9 +19,9 @@ package cephfs import "fmt" const ( - credUserId = "userID" + credUserID = "userID" credUserKey = "userKey" - credAdminId = "adminID" + credAdminID = "adminID" credAdminKey = "adminKey" credMonitors = "monitors" ) @@ -49,11 +49,11 @@ func getCredentials(idField, keyField string, secrets map[string]string) (*crede } func getUserCredentials(secrets map[string]string) (*credentials, error) { - return getCredentials(credUserId, credUserKey, secrets) + return getCredentials(credUserID, credUserKey, secrets) } func getAdminCredentials(secrets map[string]string) (*credentials, error) { - return getCredentials(credAdminId, credAdminKey, secrets) + return getCredentials(credAdminID, credAdminKey, secrets) } func getMonValFromSecret(secrets map[string]string) (string, error) { diff --git a/pkg/cephfs/nodeserver.go b/pkg/cephfs/nodeserver.go index e33f38f8b..08f15a89a 100644 --- a/pkg/cephfs/nodeserver.go +++ b/pkg/cephfs/nodeserver.go @@ -33,7 +33,7 @@ type nodeServer struct { *csicommon.DefaultNodeServer } -func getCredentialsForVolume(volOptions *volumeOptions, volId volumeID, req *csi.NodeStageVolumeRequest) (*credentials, error) { +func getCredentialsForVolume(volOptions *volumeOptions, volID volumeID, req *csi.NodeStageVolumeRequest) (*credentials, error) { var ( userCr *credentials err error @@ -49,13 +49,13 @@ func getCredentialsForVolume(volOptions *volumeOptions, volId volumeID, req *csi return nil, fmt.Errorf("failed to get 
admin credentials from node stage secrets: %v", err) } - if err = storeCephCredentials(volId, adminCr); err != nil { + if err = storeCephCredentials(volID, adminCr); err != nil { return nil, fmt.Errorf("failed to store ceph admin credentials: %v", err) } // Then get the ceph user - entity, err := getCephUser(adminCr, volId) + entity, err := getCephUser(adminCr, volID) if err != nil { return nil, fmt.Errorf("failed to get ceph user: %v", err) } @@ -70,7 +70,7 @@ func getCredentialsForVolume(volOptions *volumeOptions, volId volumeID, req *csi } } - if err = storeCephCredentials(volId, userCr); err != nil { + if err = storeCephCredentials(volID, userCr); err != nil { return nil, fmt.Errorf("failed to store ceph user credentials: %v", err) } @@ -85,28 +85,28 @@ func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVol // Configuration stagingTargetPath := req.GetStagingTargetPath() - volId := volumeID(req.GetVolumeId()) + volID := volumeID(req.GetVolumeId()) secret := req.GetSecrets() volOptions, err := newVolumeOptions(req.GetVolumeContext(), secret) if err != nil { - glog.Errorf("error reading volume options for volume %s: %v", volId, err) + glog.Errorf("error reading volume options for volume %s: %v", volID, err) return nil, status.Error(codes.InvalidArgument, err.Error()) } if volOptions.ProvisionVolume { // Dynamically provisioned volumes don't have their root path set, do it here - volOptions.RootPath = getVolumeRootPathCeph(volId) + volOptions.RootPath = getVolumeRootPathCeph(volID) } if err = createMountPoint(stagingTargetPath); err != nil { - glog.Errorf("failed to create staging mount point at %s for volume %s: %v", stagingTargetPath, volId, err) + glog.Errorf("failed to create staging mount point at %s for volume %s: %v", stagingTargetPath, volID, err) return nil, status.Error(codes.Internal, err.Error()) } - cephConf := cephConfigData{Monitors: volOptions.Monitors, VolumeID: volId} + cephConf := cephConfigData{Monitors: 
volOptions.Monitors, VolumeID: volID} if err = cephConf.writeToFile(); err != nil { - glog.Errorf("failed to write ceph config file to %s for volume %s: %v", getCephConfPath(volId), volId, err) + glog.Errorf("failed to write ceph config file to %s for volume %s: %v", getCephConfPath(volID), volID, err) return nil, status.Error(codes.Internal, err.Error()) } @@ -120,31 +120,31 @@ func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVol } if isMnt { - glog.Infof("cephfs: volume %s is already mounted to %s, skipping", volId, stagingTargetPath) + glog.Infof("cephfs: volume %s is already mounted to %s, skipping", volID, stagingTargetPath) return &csi.NodeStageVolumeResponse{}, nil } // It's not, mount now - cr, err := getCredentialsForVolume(volOptions, volId, req) + cr, err := getCredentialsForVolume(volOptions, volID, req) if err != nil { - glog.Errorf("failed to get ceph credentials for volume %s: %v", volId, err) + glog.Errorf("failed to get ceph credentials for volume %s: %v", volID, err) return nil, status.Error(codes.Internal, err.Error()) } m, err := newMounter(volOptions) if err != nil { - glog.Errorf("failed to create mounter for volume %s: %v", volId, err) + glog.Errorf("failed to create mounter for volume %s: %v", volID, err) } - glog.V(4).Infof("cephfs: mounting volume %s with %s", volId, m.name()) + glog.V(4).Infof("cephfs: mounting volume %s with %s", volID, m.name()) - if err = m.mount(stagingTargetPath, cr, volOptions, volId); err != nil { - glog.Errorf("failed to mount volume %s: %v", volId, err) + if err = m.mount(stagingTargetPath, cr, volOptions, volID); err != nil { + glog.Errorf("failed to mount volume %s: %v", volID, err) return nil, status.Error(codes.Internal, err.Error()) } - glog.Infof("cephfs: successfully mounted volume %s to %s", volId, stagingTargetPath) + glog.Infof("cephfs: successfully mounted volume %s to %s", volID, stagingTargetPath) return &csi.NodeStageVolumeResponse{}, nil } @@ -157,7 +157,7 @@ func (ns 
*nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis // Configuration targetPath := req.GetTargetPath() - volId := req.GetVolumeId() + volID := req.GetVolumeId() if err := createMountPoint(targetPath); err != nil { glog.Errorf("failed to create mount point at %s: %v", targetPath, err) @@ -174,18 +174,18 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis } if isMnt { - glog.Infof("cephfs: volume %s is already bind-mounted to %s", volId, targetPath) + glog.Infof("cephfs: volume %s is already bind-mounted to %s", volID, targetPath) return &csi.NodePublishVolumeResponse{}, nil } // It's not, mount now if err = bindMount(req.GetStagingTargetPath(), req.GetTargetPath(), req.GetReadonly()); err != nil { - glog.Errorf("failed to bind-mount volume %s: %v", volId, err) + glog.Errorf("failed to bind-mount volume %s: %v", volID, err) return nil, status.Error(codes.Internal, err.Error()) } - glog.Infof("cephfs: successfully bind-mounted volume %s to %s", volId, targetPath) + glog.Infof("cephfs: successfully bind-mounted volume %s to %s", volID, targetPath) return &csi.NodePublishVolumeResponse{}, nil } diff --git a/pkg/cephfs/util.go b/pkg/cephfs/util.go index d5b8a1360..5fb418ca6 100644 --- a/pkg/cephfs/util.go +++ b/pkg/cephfs/util.go @@ -75,11 +75,11 @@ func isMountPoint(p string) (bool, error) { return !notMnt, nil } -func storeCephCredentials(volId volumeID, cr *credentials) error { +func storeCephCredentials(volID volumeID, cr *credentials) error { keyringData := cephKeyringData{ - UserId: cr.id, + UserID: cr.id, Key: cr.key, - VolumeID: volId, + VolumeID: volID, } if err := keyringData.writeToFile(); err != nil { @@ -87,9 +87,9 @@ func storeCephCredentials(volId volumeID, cr *credentials) error { } secret := cephSecretData{ - UserId: cr.id, + UserID: cr.id, Key: cr.key, - VolumeID: volId, + VolumeID: volID, } if err := secret.writeToFile(); err != nil { diff --git a/pkg/cephfs/volume.go b/pkg/cephfs/volume.go index 
a92b70ea3..ce6f01a17 100644 --- a/pkg/cephfs/volume.go +++ b/pkg/cephfs/volume.go @@ -29,28 +29,28 @@ const ( namespacePrefix = "ns-" ) -func getCephRootPathLocal(volId volumeID) string { - return cephRootPrefix + string(volId) +func getCephRootPathLocal(volID volumeID) string { + return cephRootPrefix + string(volID) } -func getCephRootVolumePathLocal(volId volumeID) string { - return path.Join(getCephRootPathLocal(volId), cephVolumesRoot, string(volId)) +func getCephRootVolumePathLocal(volID volumeID) string { + return path.Join(getCephRootPathLocal(volID), cephVolumesRoot, string(volID)) } -func getVolumeRootPathCeph(volId volumeID) string { - return path.Join("/", cephVolumesRoot, string(volId)) +func getVolumeRootPathCeph(volID volumeID) string { + return path.Join("/", cephVolumesRoot, string(volID)) } -func getVolumeNamespace(volId volumeID) string { - return namespacePrefix + string(volId) +func getVolumeNamespace(volID volumeID) string { + return namespacePrefix + string(volID) } func setVolumeAttribute(root, attrName, attrValue string) error { return execCommandAndValidate("setfattr", "-n", attrName, "-v", attrValue, root) } -func createVolume(volOptions *volumeOptions, adminCr *credentials, volId volumeID, bytesQuota int64) error { - cephRoot := getCephRootPathLocal(volId) +func createVolume(volOptions *volumeOptions, adminCr *credentials, volID volumeID, bytesQuota int64) error { + cephRoot := getCephRootPathLocal(volID) if err := createMountPoint(cephRoot); err != nil { return err @@ -65,7 +65,7 @@ func createVolume(volOptions *volumeOptions, adminCr *credentials, volId volumeI return fmt.Errorf("failed to create mounter: %v", err) } - if err = m.mount(cephRoot, adminCr, volOptions, volId); err != nil { + if err = m.mount(cephRoot, adminCr, volOptions, volID); err != nil { return fmt.Errorf("error mounting ceph root: %v", err) } @@ -74,8 +74,8 @@ func createVolume(volOptions *volumeOptions, adminCr *credentials, volId volumeI os.Remove(cephRoot) }() - 
volOptions.RootPath = getVolumeRootPathCeph(volId) - localVolRoot := getCephRootVolumePathLocal(volId) + volOptions.RootPath = getVolumeRootPathCeph(volID) + localVolRoot := getCephRootVolumePathLocal(volID) if err := createMountPoint(localVolRoot); err != nil { return err @@ -91,17 +91,17 @@ func createVolume(volOptions *volumeOptions, adminCr *credentials, volId volumeI return fmt.Errorf("%v\ncephfs: Does pool '%s' exist?", err, volOptions.Pool) } - if err := setVolumeAttribute(localVolRoot, "ceph.dir.layout.pool_namespace", getVolumeNamespace(volId)); err != nil { + if err := setVolumeAttribute(localVolRoot, "ceph.dir.layout.pool_namespace", getVolumeNamespace(volID)); err != nil { return err } return nil } -func purgeVolume(volId volumeID, adminCr *credentials, volOptions *volumeOptions) error { +func purgeVolume(volID volumeID, adminCr *credentials, volOptions *volumeOptions) error { var ( - cephRoot = getCephRootPathLocal(volId) - volRoot = getCephRootVolumePathLocal(volId) + cephRoot = getCephRootPathLocal(volID) + volRoot = getCephRootVolumePathLocal(volID) volRootDeleting = volRoot + "-deleting" ) @@ -118,7 +118,7 @@ func purgeVolume(volId volumeID, adminCr *credentials, volOptions *volumeOptions return fmt.Errorf("failed to create mounter: %v", err) } - if err = m.mount(cephRoot, adminCr, volOptions, volId); err != nil { + if err = m.mount(cephRoot, adminCr, volOptions, volID); err != nil { return fmt.Errorf("error mounting ceph root: %v", err) } @@ -128,11 +128,11 @@ func purgeVolume(volId volumeID, adminCr *credentials, volOptions *volumeOptions }() if err := os.Rename(volRoot, volRootDeleting); err != nil { - return fmt.Errorf("coudln't mark volume %s for deletion: %v", volId, err) + return fmt.Errorf("couldn't mark volume %s for deletion: %v", volID, err) } if err := os.RemoveAll(volRootDeleting); err != nil { - return fmt.Errorf("failed to delete volume %s: %v", volId, err) + return fmt.Errorf("failed to delete volume %s: %v", volID, err) } return 
nil diff --git a/pkg/rbd/controllerserver.go b/pkg/rbd/controllerserver.go index fa67152c7..59500fdc9 100644 --- a/pkg/rbd/controllerserver.go +++ b/pkg/rbd/controllerserver.go @@ -133,7 +133,7 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol volSizeGB := int(volSizeBytes / 1024 / 1024 / 1024) // Check if there is already RBD image with requested name - found, _, _ := rbdStatus(rbdVol, rbdVol.UserId, req.GetSecrets()) + found, _, _ := rbdStatus(rbdVol, rbdVol.UserID, req.GetSecrets()) if !found { // if VolumeContentSource is not nil, this request is for snapshot if req.VolumeContentSource != nil { @@ -141,7 +141,7 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol return nil, err } } else { - err := createRBDImage(rbdVol, volSizeGB, rbdVol.AdminId, req.GetSecrets()) + err := createRBDImage(rbdVol, volSizeGB, rbdVol.AdminID, req.GetSecrets()) if err != nil { glog.Warningf("failed to create volume: %v", err) return nil, err @@ -152,7 +152,7 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol } if err := cs.MetadataStore.Create(volumeID, rbdVol); err != nil { glog.Warningf("failed to store volume metadata with error: %v", err) - if err := deleteRBDImage(rbdVol, rbdVol.AdminId, req.GetSecrets()); err != nil { + if err := deleteRBDImage(rbdVol, rbdVol.AdminID, req.GetSecrets()); err != nil { glog.V(3).Infof("failed to delete rbd image: %s/%s with error: %v", rbdVol.Pool, rbdVol.VolName, err) return nil, err } @@ -185,7 +185,7 @@ func (cs *controllerServer) checkSnapshot(req *csi.CreateVolumeRequest, rbdVol * return err } - err := restoreSnapshot(rbdVol, rbdSnap, rbdVol.AdminId, req.GetSecrets()) + err := restoreSnapshot(rbdVol, rbdSnap, rbdVol.AdminID, req.GetSecrets()) if err != nil { return err } @@ -212,7 +212,7 @@ func (cs *controllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol volName := rbdVol.VolName // Deleting rbd image glog.V(4).Infof("deleting 
volume %s", volName) - if err := deleteRBDImage(rbdVol, rbdVol.AdminId, req.GetSecrets()); err != nil { + if err := deleteRBDImage(rbdVol, rbdVol.AdminID, req.GetSecrets()); err != nil { // TODO: can we detect "already deleted" situations here and proceed? glog.V(3).Infof("failed to delete rbd image: %s/%s with error: %v", rbdVol.Pool, volName, err) return nil, err @@ -306,7 +306,7 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS rbdSnap.SourceVolumeID = req.GetSourceVolumeId() rbdSnap.SizeBytes = rbdVolume.VolSize - err = createSnapshot(rbdSnap, rbdSnap.AdminId, req.GetSecrets()) + err = createSnapshot(rbdSnap, rbdSnap.AdminID, req.GetSecrets()) // if we already have the snapshot, return the snapshot if err != nil { if exitErr, ok := err.(*exec.ExitError); ok { @@ -327,10 +327,10 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS } } else { glog.V(4).Infof("create snapshot %s", snapName) - err = protectSnapshot(rbdSnap, rbdSnap.AdminId, req.GetSecrets()) + err = protectSnapshot(rbdSnap, rbdSnap.AdminID, req.GetSecrets()) if err != nil { - err = deleteSnapshot(rbdSnap, rbdSnap.AdminId, req.GetSecrets()) + err = deleteSnapshot(rbdSnap, rbdSnap.AdminID, req.GetSecrets()) if err != nil { return nil, fmt.Errorf("snapshot is created but failed to protect and delete snapshot: %v", err) } @@ -343,13 +343,13 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS if err := cs.MetadataStore.Create(snapshotID, rbdSnap); err != nil { glog.Warningf("rbd: failed to store snapInfo with error: %v", err) // Unprotect snapshot - err := unprotectSnapshot(rbdSnap, rbdSnap.AdminId, req.GetSecrets()) + err := unprotectSnapshot(rbdSnap, rbdSnap.AdminID, req.GetSecrets()) if err != nil { return nil, status.Errorf(codes.Unknown, "This Snapshot should be removed but failed to unprotect snapshot: %s/%s with error: %v", rbdSnap.Pool, rbdSnap.SnapName, err) } // Deleting snapshot 
glog.V(4).Infof("deleting Snaphot %s", rbdSnap.SnapName) - if err := deleteSnapshot(rbdSnap, rbdSnap.AdminId, req.GetSecrets()); err != nil { + if err := deleteSnapshot(rbdSnap, rbdSnap.AdminID, req.GetSecrets()); err != nil { return nil, status.Errorf(codes.Unknown, "This Snapshot should be removed but failed to delete snapshot: %s/%s with error: %v", rbdSnap.Pool, rbdSnap.SnapName, err) } return nil, err @@ -388,14 +388,14 @@ func (cs *controllerServer) DeleteSnapshot(ctx context.Context, req *csi.DeleteS } // Unprotect snapshot - err := unprotectSnapshot(rbdSnap, rbdSnap.AdminId, req.GetSecrets()) + err := unprotectSnapshot(rbdSnap, rbdSnap.AdminID, req.GetSecrets()) if err != nil { return nil, status.Errorf(codes.FailedPrecondition, "failed to unprotect snapshot: %s/%s with error: %v", rbdSnap.Pool, rbdSnap.SnapName, err) } // Deleting snapshot glog.V(4).Infof("deleting Snaphot %s", rbdSnap.SnapName) - if err := deleteSnapshot(rbdSnap, rbdSnap.AdminId, req.GetSecrets()); err != nil { + if err := deleteSnapshot(rbdSnap, rbdSnap.AdminID, req.GetSecrets()); err != nil { return nil, status.Errorf(codes.FailedPrecondition, "failed to delete snapshot: %s/%s with error: %v", rbdSnap.Pool, rbdSnap.SnapName, err) } diff --git a/pkg/rbd/nodeserver.go b/pkg/rbd/nodeserver.go index 2eeaf0530..14ddbbbb2 100644 --- a/pkg/rbd/nodeserver.go +++ b/pkg/rbd/nodeserver.go @@ -97,7 +97,7 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis } volOptions.VolName = volName // Mapping RBD image - devicePath, err := attachRBDImage(volOptions, volOptions.UserId, req.GetSecrets()) + devicePath, err := attachRBDImage(volOptions, volOptions.UserID, req.GetSecrets()) if err != nil { return nil, err } diff --git a/pkg/rbd/rbd.go b/pkg/rbd/rbd.go index 1c20cbcae..e5451a260 100644 --- a/pkg/rbd/rbd.go +++ b/pkg/rbd/rbd.go @@ -31,8 +31,8 @@ import ( // PluginFolder defines the location of rbdplugin const ( PluginFolder = "/var/lib/kubelet/plugins/csi-rbdplugin" - 
rbdDefaultAdminId = "admin" - rbdDefaultUserId = rbdDefaultAdminId + rbdDefaultAdminID = "admin" + rbdDefaultUserID = rbdDefaultAdminID ) type rbd struct { diff --git a/pkg/rbd/rbd_util.go b/pkg/rbd/rbd_util.go index ce6b3721f..4e69fa09f 100644 --- a/pkg/rbd/rbd_util.go +++ b/pkg/rbd/rbd_util.go @@ -48,8 +48,8 @@ type rbdVolume struct { ImageFormat string `json:"imageFormat"` ImageFeatures string `json:"imageFeatures"` VolSize int64 `json:"volSize"` - AdminId string `json:"adminId"` - UserId string `json:"userId"` + AdminID string `json:"adminId"` + UserID string `json:"userId"` Mounter string `json:"mounter"` } @@ -63,8 +63,8 @@ type rbdSnapshot struct { Pool string `json:"pool"` CreatedAt int64 `json:"createdAt"` SizeBytes int64 `json:"sizeBytes"` - AdminId string `json:"adminId"` - UserId string `json:"userId"` + AdminID string `json:"adminId"` + UserID string `json:"userId"` } var ( @@ -111,7 +111,7 @@ func getMon(pOpts *rbdVolume, credentials map[string]string) (string, error) { } // CreateImage creates a new ceph image with provision and volume options. 
-func createRBDImage(pOpts *rbdVolume, volSz int, adminId string, credentials map[string]string) error { +func createRBDImage(pOpts *rbdVolume, volSz int, adminID string, credentials map[string]string) error { var output []byte mon, err := getMon(pOpts, credentials) @@ -122,16 +122,16 @@ func createRBDImage(pOpts *rbdVolume, volSz int, adminId string, credentials map image := pOpts.VolName volSzGB := fmt.Sprintf("%dG", volSz) - key, err := getRBDKey(adminId, credentials) + key, err := getRBDKey(adminID, credentials) if err != nil { return err } if pOpts.ImageFormat == rbdImageFormat2 { - glog.V(4).Infof("rbd: create %s size %s format %s (features: %s) using mon %s, pool %s id %s key %s", image, volSzGB, pOpts.ImageFormat, pOpts.ImageFeatures, mon, pOpts.Pool, adminId, key) + glog.V(4).Infof("rbd: create %s size %s format %s (features: %s) using mon %s, pool %s id %s key %s", image, volSzGB, pOpts.ImageFormat, pOpts.ImageFeatures, mon, pOpts.Pool, adminID, key) } else { - glog.V(4).Infof("rbd: create %s size %s format %s using mon %s, pool %s id %s key %s", image, volSzGB, pOpts.ImageFormat, mon, pOpts.Pool, adminId, key) + glog.V(4).Infof("rbd: create %s size %s format %s using mon %s, pool %s id %s key %s", image, volSzGB, pOpts.ImageFormat, mon, pOpts.Pool, adminID, key) } - args := []string{"create", image, "--size", volSzGB, "--pool", pOpts.Pool, "--id", adminId, "-m", mon, "--key=" + key, "--image-format", pOpts.ImageFormat} + args := []string{"create", image, "--size", volSzGB, "--pool", pOpts.Pool, "--id", adminID, "-m", mon, "--key=" + key, "--image-format", pOpts.ImageFormat} if pOpts.ImageFormat == rbdImageFormat2 { args = append(args, "--image-feature", pOpts.ImageFeatures) } @@ -146,14 +146,14 @@ func createRBDImage(pOpts *rbdVolume, volSz int, adminId string, credentials map // rbdStatus checks if there is watcher on the image. // It returns true if there is a watcher onthe image, otherwise returns false. 
-func rbdStatus(pOpts *rbdVolume, userId string, credentials map[string]string) (bool, string, error) { +func rbdStatus(pOpts *rbdVolume, userID string, credentials map[string]string) (bool, string, error) { var output string var cmd []byte image := pOpts.VolName // If we don't have admin id/secret (e.g. attaching), fallback to user id/secret. - key, err := getRBDKey(userId, credentials) + key, err := getRBDKey(userID, credentials) if err != nil { return false, "", err } @@ -163,8 +163,8 @@ func rbdStatus(pOpts *rbdVolume, userId string, credentials map[string]string) ( return false, "", err } - glog.V(4).Infof("rbd: status %s using mon %s, pool %s id %s key %s", image, mon, pOpts.Pool, userId, key) - args := []string{"status", image, "--pool", pOpts.Pool, "-m", mon, "--id", userId, "--key=" + key} + glog.V(4).Infof("rbd: status %s using mon %s, pool %s id %s key %s", image, mon, pOpts.Pool, userID, key) + args := []string{"status", image, "--pool", pOpts.Pool, "-m", mon, "--id", userID, "--key=" + key} cmd, err = execCommand("rbd", args) output = string(cmd) @@ -190,10 +190,10 @@ func rbdStatus(pOpts *rbdVolume, userId string, credentials map[string]string) ( } // DeleteImage deletes a ceph image with provision and volume options. 
-func deleteRBDImage(pOpts *rbdVolume, adminId string, credentials map[string]string) error { +func deleteRBDImage(pOpts *rbdVolume, adminID string, credentials map[string]string) error { var output []byte image := pOpts.VolName - found, _, err := rbdStatus(pOpts, adminId, credentials) + found, _, err := rbdStatus(pOpts, adminID, credentials) if err != nil { return err } @@ -201,7 +201,7 @@ func deleteRBDImage(pOpts *rbdVolume, adminId string, credentials map[string]str glog.Info("rbd is still being used ", image) return fmt.Errorf("rbd %s is still being used", image) } - key, err := getRBDKey(adminId, credentials) + key, err := getRBDKey(adminID, credentials) if err != nil { return err } @@ -210,8 +210,8 @@ func deleteRBDImage(pOpts *rbdVolume, adminId string, credentials map[string]str return err } - glog.V(4).Infof("rbd: rm %s using mon %s, pool %s id %s key %s", image, mon, pOpts.Pool, adminId, key) - args := []string{"rm", image, "--pool", pOpts.Pool, "--id", adminId, "-m", mon, "--key=" + key} + glog.V(4).Infof("rbd: rm %s using mon %s, pool %s id %s key %s", image, mon, pOpts.Pool, adminID, key) + args := []string{"rm", image, "--pool", pOpts.Pool, "--id", adminID, "-m", mon, "--key=" + key} output, err = execCommand("rbd", args) if err == nil { return nil @@ -258,13 +258,13 @@ func getRBDVolumeOptions(volOptions map[string]string) (*rbdVolume, error) { } } - rbdVol.AdminId, ok = volOptions["adminid"] + rbdVol.AdminID, ok = volOptions["adminid"] if !ok { - rbdVol.AdminId = rbdDefaultAdminId + rbdVol.AdminID = rbdDefaultAdminID } - rbdVol.UserId, ok = volOptions["userid"] + rbdVol.UserID, ok = volOptions["userid"] if !ok { - rbdVol.UserId = rbdDefaultUserId + rbdVol.UserID = rbdDefaultUserID } rbdVol.Mounter, ok = volOptions["mounter"] if !ok { @@ -287,13 +287,13 @@ func getRBDSnapshotOptions(snapOptions map[string]string) (*rbdSnapshot, error) return nil, fmt.Errorf("Either monitors or monValueFromSecret must be set") } } - rbdSnap.AdminId, ok = 
snapOptions["adminid"] + rbdSnap.AdminID, ok = snapOptions["adminid"] if !ok { - rbdSnap.AdminId = rbdDefaultAdminId + rbdSnap.AdminID = rbdDefaultAdminID } - rbdSnap.UserId, ok = snapOptions["userid"] + rbdSnap.UserID, ok = snapOptions["userid"] if !ok { - rbdSnap.UserId = rbdDefaultUserId + rbdSnap.UserID = rbdDefaultUserID } return rbdSnap, nil @@ -351,13 +351,13 @@ func getSnapMon(pOpts *rbdSnapshot, credentials map[string]string) (string, erro return mon, nil } -func protectSnapshot(pOpts *rbdSnapshot, adminId string, credentials map[string]string) error { +func protectSnapshot(pOpts *rbdSnapshot, adminID string, credentials map[string]string) error { var output []byte image := pOpts.VolName snapID := pOpts.SnapID - key, err := getRBDKey(adminId, credentials) + key, err := getRBDKey(adminID, credentials) if err != nil { return err } @@ -366,8 +366,8 @@ func protectSnapshot(pOpts *rbdSnapshot, adminId string, credentials map[string] return err } - glog.V(4).Infof("rbd: snap protect %s using mon %s, pool %s id %s key %s", image, mon, pOpts.Pool, adminId, key) - args := []string{"snap", "protect", "--pool", pOpts.Pool, "--snap", snapID, image, "--id", adminId, "-m", mon, "--key=" + key} + glog.V(4).Infof("rbd: snap protect %s using mon %s, pool %s id %s key %s", image, mon, pOpts.Pool, adminID, key) + args := []string{"snap", "protect", "--pool", pOpts.Pool, "--snap", snapID, image, "--id", adminID, "-m", mon, "--key=" + key} output, err = execCommand("rbd", args) @@ -378,7 +378,7 @@ func protectSnapshot(pOpts *rbdSnapshot, adminId string, credentials map[string] return nil } -func createSnapshot(pOpts *rbdSnapshot, adminId string, credentials map[string]string) error { +func createSnapshot(pOpts *rbdSnapshot, adminID string, credentials map[string]string) error { var output []byte mon, err := getSnapMon(pOpts, credentials) @@ -389,12 +389,12 @@ func createSnapshot(pOpts *rbdSnapshot, adminId string, credentials map[string]s image := pOpts.VolName snapID := 
pOpts.SnapID - key, err := getRBDKey(adminId, credentials) + key, err := getRBDKey(adminID, credentials) if err != nil { return err } - glog.V(4).Infof("rbd: snap create %s using mon %s, pool %s id %s key %s", image, mon, pOpts.Pool, adminId, key) - args := []string{"snap", "create", "--pool", pOpts.Pool, "--snap", snapID, image, "--id", adminId, "-m", mon, "--key=" + key} + glog.V(4).Infof("rbd: snap create %s using mon %s, pool %s id %s key %s", image, mon, pOpts.Pool, adminID, key) + args := []string{"snap", "create", "--pool", pOpts.Pool, "--snap", snapID, image, "--id", adminID, "-m", mon, "--key=" + key} output, err = execCommand("rbd", args) @@ -405,7 +405,7 @@ func createSnapshot(pOpts *rbdSnapshot, adminId string, credentials map[string]s return nil } -func unprotectSnapshot(pOpts *rbdSnapshot, adminId string, credentials map[string]string) error { +func unprotectSnapshot(pOpts *rbdSnapshot, adminID string, credentials map[string]string) error { var output []byte mon, err := getSnapMon(pOpts, credentials) @@ -416,12 +416,12 @@ func unprotectSnapshot(pOpts *rbdSnapshot, adminId string, credentials map[strin image := pOpts.VolName snapID := pOpts.SnapID - key, err := getRBDKey(adminId, credentials) + key, err := getRBDKey(adminID, credentials) if err != nil { return err } - glog.V(4).Infof("rbd: snap unprotect %s using mon %s, pool %s id %s key %s", image, mon, pOpts.Pool, adminId, key) - args := []string{"snap", "unprotect", "--pool", pOpts.Pool, "--snap", snapID, image, "--id", adminId, "-m", mon, "--key=" + key} + glog.V(4).Infof("rbd: snap unprotect %s using mon %s, pool %s id %s key %s", image, mon, pOpts.Pool, adminID, key) + args := []string{"snap", "unprotect", "--pool", pOpts.Pool, "--snap", snapID, image, "--id", adminID, "-m", mon, "--key=" + key} output, err = execCommand("rbd", args) @@ -432,7 +432,7 @@ func unprotectSnapshot(pOpts *rbdSnapshot, adminId string, credentials map[strin return nil } -func deleteSnapshot(pOpts *rbdSnapshot, adminId 
string, credentials map[string]string) error { +func deleteSnapshot(pOpts *rbdSnapshot, adminID string, credentials map[string]string) error { var output []byte mon, err := getSnapMon(pOpts, credentials) @@ -443,12 +443,12 @@ func deleteSnapshot(pOpts *rbdSnapshot, adminId string, credentials map[string]s image := pOpts.VolName snapID := pOpts.SnapID - key, err := getRBDKey(adminId, credentials) + key, err := getRBDKey(adminID, credentials) if err != nil { return err } - glog.V(4).Infof("rbd: snap rm %s using mon %s, pool %s id %s key %s", image, mon, pOpts.Pool, adminId, key) - args := []string{"snap", "rm", "--pool", pOpts.Pool, "--snap", snapID, image, "--id", adminId, "-m", mon, "--key=" + key} + glog.V(4).Infof("rbd: snap rm %s using mon %s, pool %s id %s key %s", image, mon, pOpts.Pool, adminID, key) + args := []string{"snap", "rm", "--pool", pOpts.Pool, "--snap", snapID, image, "--id", adminID, "-m", mon, "--key=" + key} output, err = execCommand("rbd", args) @@ -459,7 +459,7 @@ func deleteSnapshot(pOpts *rbdSnapshot, adminId string, credentials map[string]s return nil } -func restoreSnapshot(pVolOpts *rbdVolume, pSnapOpts *rbdSnapshot, adminId string, credentials map[string]string) error { +func restoreSnapshot(pVolOpts *rbdVolume, pSnapOpts *rbdSnapshot, adminID string, credentials map[string]string) error { var output []byte mon, err := getMon(pVolOpts, credentials) @@ -470,12 +470,12 @@ func restoreSnapshot(pVolOpts *rbdVolume, pSnapOpts *rbdSnapshot, adminId string image := pVolOpts.VolName snapID := pSnapOpts.SnapID - key, err := getRBDKey(adminId, credentials) + key, err := getRBDKey(adminID, credentials) if err != nil { return err } - glog.V(4).Infof("rbd: clone %s using mon %s, pool %s id %s key %s", image, mon, pVolOpts.Pool, adminId, key) - args := []string{"clone", pSnapOpts.Pool + "/" + pSnapOpts.VolName + "@" + snapID, pVolOpts.Pool + "/" + image, "--id", adminId, "-m", mon, "--key=" + key} + glog.V(4).Infof("rbd: clone %s using mon %s, pool %s 
id %s key %s", image, mon, pVolOpts.Pool, adminID, key) + args := []string{"clone", pSnapOpts.Pool + "/" + pSnapOpts.VolName + "@" + snapID, pVolOpts.Pool + "/" + image, "--id", adminID, "-m", mon, "--key=" + key} output, err = execCommand("rbd", args)