cleanup: Remove support for Deleting and Unmounting v1.0.0 PVC

As v1.0.0 is deprecated, we need to remove support
for it in the upcoming (v3.0.0) release. This PR
removes that support.

closes #882

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Madhu Rajanna
2020-07-10 16:14:59 +05:30
committed by mergify[bot]
parent a0fd805a8b
commit d15ded88f5
27 changed files with 81 additions and 934 deletions
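
With the v1.0.0 metadata store gone, the cephfs driver no longer accepts a
CachePersister: both NewControllerServer() and Driver.Run() drop that
parameter (see the driver hunks below). A minimal sketch of how the driver is
started after this change, assuming the existing cephfs.NewDriver()
constructor and a util.Config populated from cephcsi's command-line flags:

package main

import (
	"github.com/ceph/ceph-csi/internal/cephfs"
	"github.com/ceph/ceph-csi/internal/util"
)

func main() {
	// Assumed to be filled in from cephcsi's flags (NodeID, Endpoint,
	// DriverName, ...).
	conf := &util.Config{}

	// Before this change: cephfs.NewDriver().Run(conf, cachePersister)
	// The CachePersister argument is gone because the controller server no
	// longer keeps 1.0.0-style volume metadata.
	cephfs.NewDriver().Run(conf)
}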

View File

@@ -1,49 +0,0 @@
/*
Copyright 2018 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cephfs
import (
"context"
"github.com/ceph/ceph-csi/internal/util"
)
const (
cephUserPrefix = "user-"
cephEntityClientPrefix = "client."
)
func genUserIDs(adminCr *util.Credentials, volID volumeID) (adminID, userID string) {
return cephEntityClientPrefix + adminCr.ID, cephEntityClientPrefix + getCephUserName(volID)
}
func getCephUserName(volID volumeID) string {
return cephUserPrefix + string(volID)
}
func deleteCephUserDeprecated(ctx context.Context, volOptions *volumeOptions, adminCr *util.Credentials, volID volumeID) error {
adminID, userID := genUserIDs(adminCr, volID)
// TODO: Need to return success if userID is not found
return execCommandErr(ctx, "ceph",
"-m", volOptions.Monitors,
"-n", adminID,
"--keyfile="+adminCr.KeyFile,
"-c", util.CephConfigPath,
"auth", "rm", userID,
)
}
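
For context on what this deleted file provided: it composed the Ceph entity
names for the per-volume user that 1.0.0 provisioning created, and
deleteCephUserDeprecated removed that user via "ceph auth rm". A standalone
illustration of the naming scheme (the admin ID and volume ID below are
hypothetical):

package main

import "fmt"

func main() {
	const (
		cephUserPrefix         = "user-"
		cephEntityClientPrefix = "client."
	)
	adminID := "admin"      // hypothetical admin credential ID
	volID := "csi-vol-1234" // hypothetical 1.0.0-style volume ID

	// Mirrors genUserIDs/getCephUserName above.
	fmt.Println(cephEntityClientPrefix + adminID)                // client.admin
	fmt.Println(cephEntityClientPrefix + cephUserPrefix + volID) // client.user-csi-vol-1234
}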

View File

@@ -33,17 +33,11 @@ import (
// controller server spec.
type ControllerServer struct {
*csicommon.DefaultControllerServer
MetadataStore util.CachePersister
// A map storing all volumes with ongoing operations so that additional operations
// for that same volume (as defined by VolumeID/volume name) return an Aborted error
VolumeLocks *util.VolumeLocks
}
type controllerCacheEntry struct {
VolOptions volumeOptions
VolumeID volumeID
}
// createBackingVolume creates the backing subvolume and on any error cleans up any created entities
func (cs *ControllerServer) createBackingVolume(ctx context.Context, volOptions *volumeOptions, vID *volumeIdentifier, secret map[string]string) error {
cr, err := util.NewAdminCredentials(secret)
@@ -156,72 +150,6 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
return &csi.CreateVolumeResponse{Volume: volume}, nil
}
// deleteVolumeDeprecated is used to delete volumes created using version 1.0.0 of the plugin,
// that have state information stored in files or kubernetes config maps
func (cs *ControllerServer) deleteVolumeDeprecated(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
var (
volID = volumeID(req.GetVolumeId())
secrets = req.GetSecrets()
)
ce := &controllerCacheEntry{}
if err := cs.MetadataStore.Get(string(volID), ce); err != nil {
if err, ok := err.(*util.CacheEntryNotFound); ok {
klog.Warningf(util.Log(ctx, "cephfs: metadata for volume %s not found, assuming the volume to be already deleted (%v)"), volID, err)
return &csi.DeleteVolumeResponse{}, nil
}
return nil, status.Error(codes.Internal, err.Error())
}
if !ce.VolOptions.ProvisionVolume {
// DeleteVolume() is forbidden for statically provisioned volumes!
klog.Warningf(util.Log(ctx, "volume %s is provisioned statically, aborting delete"), volID)
return &csi.DeleteVolumeResponse{}, nil
}
// mons may have changed since create volume,
// retrieve the latest mons and override old mons
if mon, secretsErr := util.GetMonValFromSecret(secrets); secretsErr == nil && len(mon) > 0 {
util.ExtendedLog(ctx, "overriding monitors [%q] with [%q] for volume %s", ce.VolOptions.Monitors, mon, volID)
ce.VolOptions.Monitors = mon
}
// Deleting a volume requires admin credentials
cr, err := util.NewAdminCredentials(secrets)
if err != nil {
klog.Errorf(util.Log(ctx, "failed to retrieve admin credentials: %v"), err)
return nil, status.Error(codes.InvalidArgument, err.Error())
}
defer cr.DeleteCredentials()
if acquired := cs.VolumeLocks.TryAcquire(string(volID)); !acquired {
klog.Errorf(util.Log(ctx, util.VolumeOperationAlreadyExistsFmt), volID)
return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, string(volID))
}
defer cs.VolumeLocks.Release(string(volID))
if err = purgeVolumeDeprecated(ctx, volID, cr, &ce.VolOptions); err != nil {
klog.Errorf(util.Log(ctx, "failed to delete volume %s: %v"), volID, err)
return nil, status.Error(codes.Internal, err.Error())
}
if err = deleteCephUserDeprecated(ctx, &ce.VolOptions, cr, volID); err != nil {
klog.Errorf(util.Log(ctx, "failed to delete ceph user for volume %s: %v"), volID, err)
return nil, status.Error(codes.Internal, err.Error())
}
if err = cs.MetadataStore.Delete(string(volID)); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
util.DebugLog(ctx, "cephfs: successfully deleted volume %s", volID)
return &csi.DeleteVolumeResponse{}, nil
}
// DeleteVolume deletes the volume in backend and its reservation
func (cs *ControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
if err := cs.validateDeleteVolumeRequest(); err != nil {
@@ -257,12 +185,6 @@ func (cs *ControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol
return &csi.DeleteVolumeResponse{}, nil
}
// ErrInvalidVolID may mean this is an 1.0.0 version volume
var eivi ErrInvalidVolID
if errors.As(err, &eivi) && cs.MetadataStore != nil {
return cs.deleteVolumeDeprecated(ctx, req)
}
// All errors other than ErrVolumeNotFound should return an error back to the caller
var evnf ErrVolumeNotFound
if !errors.As(err, &evnf) {
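// With the ErrInvalidVolID fallback removed, a volume ID that cannot be
// decoded (i.e. a 1.0.0-style volume) is no longer handed to
// deleteVolumeDeprecated; it is returned to the caller as an error like any
// other lookup failure.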

View File

@@ -37,9 +37,6 @@ const (
radosNamespace = "csi"
)
// PluginFolder defines the location of ceph plugin
var PluginFolder = ""
// Driver contains the default identity,node and controller struct
type Driver struct {
cd *csicommon.CSIDriver
@@ -72,10 +69,9 @@ func NewIdentityServer(d *csicommon.CSIDriver) *IdentityServer {
}
// NewControllerServer initialize a controller server for ceph CSI driver
func NewControllerServer(d *csicommon.CSIDriver, cachePersister util.CachePersister) *ControllerServer {
func NewControllerServer(d *csicommon.CSIDriver) *ControllerServer {
return &ControllerServer{
DefaultControllerServer: csicommon.NewDefaultControllerServer(d),
MetadataStore: cachePersister,
VolumeLocks: util.NewVolumeLocks(),
}
}
@@ -90,13 +86,11 @@ func NewNodeServer(d *csicommon.CSIDriver, t string, topology map[string]string)
// Run start a non-blocking grpc controller,node and identityserver for
// ceph CSI driver which can serve multiple parallel requests
func (fs *Driver) Run(conf *util.Config, cachePersister util.CachePersister) {
func (fs *Driver) Run(conf *util.Config) {
var err error
var topology map[string]string
// Configuration
PluginFolder = conf.PluginPath
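// PluginFolder was only consumed by getCephRootPathLocalDeprecated (removed
// further down in this diff); with the deprecated purge path gone, the
// variable and this assignment go away as well.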
if err = loadAvailableMounters(conf); err != nil {
klog.Fatalf("cephfs: failed to load ceph mounters: %v", err)
}
@@ -142,7 +136,7 @@ func (fs *Driver) Run(conf *util.Config, cachePersister util.CachePersister) {
}
if conf.IsControllerServer {
fs.cs = NewControllerServer(fs.cd, cachePersister)
fs.cs = NewControllerServer(fs.cd)
}
if !conf.IsControllerServer && !conf.IsNodeServer {
topology, err = util.GetTopologyFromDomainLabels(conf.DomainLabels, conf.NodeID, conf.DriverName)
@@ -150,7 +144,7 @@ func (fs *Driver) Run(conf *util.Config, cachePersister util.CachePersister) {
klog.Fatalln(err)
}
fs.ns = NewNodeServer(fs.cd, conf.Vtype, topology)
fs.cs = NewControllerServer(fs.cd, cachePersister)
fs.cs = NewControllerServer(fs.cd)
}
server := csicommon.NewNonBlockingGRPCServer()

View File

@@ -21,7 +21,6 @@ import (
"context"
"encoding/json"
"fmt"
"os"
"os/exec"
"github.com/ceph/ceph-csi/internal/util"
@@ -77,11 +76,6 @@ func execCommandJSON(ctx context.Context, v interface{}, program string, args ..
return nil
}
func pathExists(p string) bool {
_, err := os.Stat(p)
return err == nil
}
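// pathExists was only used by the deprecated purge path; with that gone,
// this helper and the "os" import above are dropped as well.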
// Controller service request validation
func (cs *ControllerServer) validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error {
if err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {

View File

@@ -19,7 +19,6 @@ package cephfs
import (
"context"
"fmt"
"os"
"path"
"strconv"
"strings"
@@ -40,18 +39,14 @@ var (
inValidCommmand = "no valid command found"
)
func getCephRootVolumePathLocalDeprecated(volID volumeID) string {
return path.Join(getCephRootPathLocalDeprecated(volID), "csi-volumes", string(volID))
}
const (
cephEntityClientPrefix = "client."
)
func getVolumeRootPathCephDeprecated(volID volumeID) string {
return path.Join("/", "csi-volumes", string(volID))
}
func getCephRootPathLocalDeprecated(volID volumeID) string {
return fmt.Sprintf("%s/controller/volumes/root-%s", PluginFolder, string(volID))
}
func getVolumeNotFoundErrorString(volID volumeID) string {
return fmt.Sprintf("Error ENOENT: Subvolume '%s' not found", string(volID))
}
@@ -201,70 +196,6 @@ func resizeVolume(ctx context.Context, volOptions *volumeOptions, cr *util.Crede
return createVolume(ctx, volOptions, cr, volID, bytesQuota)
}
func mountCephRoot(ctx context.Context, volID volumeID, volOptions *volumeOptions, adminCr *util.Credentials) error {
cephRoot := getCephRootPathLocalDeprecated(volID)
// Root path is not set for dynamically provisioned volumes
// Access to cephfs's / is required
volOptions.RootPath = "/"
if err := util.CreateMountPoint(cephRoot); err != nil {
return err
}
m, err := newMounter(volOptions)
if err != nil {
return fmt.Errorf("failed to create mounter: %v", err)
}
if err = m.mount(ctx, cephRoot, adminCr, volOptions); err != nil {
return fmt.Errorf("error mounting ceph root: %v", err)
}
return nil
}
func unmountCephRoot(ctx context.Context, volID volumeID) {
cephRoot := getCephRootPathLocalDeprecated(volID)
if err := unmountVolume(ctx, cephRoot); err != nil {
klog.Errorf(util.Log(ctx, "failed to unmount %s with error %s"), cephRoot, err)
} else {
if err := os.Remove(cephRoot); err != nil {
klog.Errorf(util.Log(ctx, "failed to remove %s with error %s"), cephRoot, err)
}
}
}
func purgeVolumeDeprecated(ctx context.Context, volID volumeID, adminCr *util.Credentials, volOptions *volumeOptions) error {
if err := mountCephRoot(ctx, volID, volOptions, adminCr); err != nil {
return err
}
defer unmountCephRoot(ctx, volID)
var (
volRoot = getCephRootVolumePathLocalDeprecated(volID)
volRootDeleting = volRoot + "-deleting"
)
if pathExists(volRoot) {
if err := os.Rename(volRoot, volRootDeleting); err != nil {
return fmt.Errorf("couldn't mark volume %s for deletion: %v", volID, err)
}
} else {
if !pathExists(volRootDeleting) {
util.DebugLog(ctx, "cephfs: volume %s not found, assuming it to be already deleted", volID)
return nil
}
}
if err := os.RemoveAll(volRootDeleting); err != nil {
return fmt.Errorf("failed to delete volume %s: %v", volID, err)
}
return nil
}
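// The deprecated purge above used a rename-then-delete pattern: mount the
// CephFS root, rename <volRoot> to <volRoot>-deleting so an interrupted
// delete can be detected on a retry, then RemoveAll the renamed directory.
// Dynamically provisioned (journal-based) volumes are purged by purgeVolume
// below instead.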
func purgeVolume(ctx context.Context, volID volumeID, cr *util.Credentials, volOptions *volumeOptions) error {
err := execCommandErr(
ctx,