2018-01-09 18:59:50 +00:00
|
|
|
/*
|
2019-04-03 08:46:15 +00:00
|
|
|
Copyright 2018 The Ceph-CSI Authors.
|
2018-01-09 18:59:50 +00:00
|
|
|
|
|
|
|
Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
you may not use this file except in compliance with the License.
|
|
|
|
You may obtain a copy of the License at
|
|
|
|
|
|
|
|
http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
|
|
|
|
Unless required by applicable law or agreed to in writing, software
|
|
|
|
distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
See the License for the specific language governing permissions and
|
|
|
|
limitations under the License.
|
|
|
|
*/
|
|
|
|
|
|
|
|
package rbd
|
|
|
|
|
|
|
|
import (
|
2019-08-24 09:14:15 +00:00
|
|
|
"context"
|
2020-06-25 08:35:19 +00:00
|
|
|
"errors"
|
2021-07-22 10:12:19 +00:00
|
|
|
"fmt"
|
2020-11-09 09:07:47 +00:00
|
|
|
"strconv"
|
2018-01-09 18:59:50 +00:00
|
|
|
|
2020-04-17 09:23:49 +00:00
|
|
|
csicommon "github.com/ceph/ceph-csi/internal/csi-common"
|
|
|
|
"github.com/ceph/ceph-csi/internal/util"
|
2021-08-24 15:03:25 +00:00
|
|
|
"github.com/ceph/ceph-csi/internal/util/log"
|
2019-02-18 11:30:28 +00:00
|
|
|
|
2020-06-24 07:43:24 +00:00
|
|
|
librbd "github.com/ceph/go-ceph/rbd"
|
2019-01-15 16:20:41 +00:00
|
|
|
"github.com/container-storage-interface/spec/lib/go/csi"
|
2019-02-08 07:50:21 +00:00
|
|
|
"github.com/kubernetes-csi/csi-lib-utils/protosanitizer"
|
2018-03-06 22:33:57 +00:00
|
|
|
"google.golang.org/grpc/codes"
|
|
|
|
"google.golang.org/grpc/status"
|
2018-01-09 18:59:50 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
const (
	// oneGB is the fallback volume size in bytes (1 GiB) used by
	// parseVolCreateRequest when the CreateVolumeRequest carries no
	// CapacityRange.
	oneGB = 1073741824
)
|
|
|
|
|
2019-01-28 11:47:06 +00:00
|
|
|
// ControllerServer struct of rbd CSI driver with supported methods of CSI
// controller server spec.
type ControllerServer struct {
	*csicommon.DefaultControllerServer

	// A map storing all volumes with ongoing operations so that additional operations
	// for that same volume (as defined by VolumeID/volume name) return an Aborted error
	VolumeLocks *util.VolumeLocks

	// A map storing all volumes with ongoing operations so that additional operations
	// for that same snapshot (as defined by SnapshotID/snapshot name) return an Aborted error
	SnapshotLocks *util.VolumeLocks

	// A map storing all volumes/snapshots with ongoing operations.
	OperationLocks *util.OperationLock
}
|
|
|
|
|
2019-08-22 16:57:23 +00:00
|
|
|
func (cs *ControllerServer) validateVolumeReq(ctx context.Context, req *csi.CreateVolumeRequest) error {
|
2021-06-25 11:39:42 +00:00
|
|
|
if err := cs.Driver.ValidateControllerServiceRequest(
|
|
|
|
csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
|
2021-08-24 15:03:25 +00:00
|
|
|
log.ErrorLog(ctx, "invalid create volume req: %v", protosanitizer.StripSecrets(req))
|
2021-07-22 05:45:17 +00:00
|
|
|
|
2019-01-17 05:27:55 +00:00
|
|
|
return err
|
2018-01-09 18:59:50 +00:00
|
|
|
}
|
2018-03-06 22:33:57 +00:00
|
|
|
// Check sanity of request Name, Volume Capabilities
|
2019-06-10 06:48:41 +00:00
|
|
|
if req.Name == "" {
|
2019-05-13 04:47:17 +00:00
|
|
|
return status.Error(codes.InvalidArgument, "volume Name cannot be empty")
|
2018-03-06 22:33:57 +00:00
|
|
|
}
|
|
|
|
if req.VolumeCapabilities == nil {
|
2019-05-13 04:47:17 +00:00
|
|
|
return status.Error(codes.InvalidArgument, "volume Capabilities cannot be empty")
|
2018-03-06 22:33:57 +00:00
|
|
|
}
|
2019-04-22 21:35:39 +00:00
|
|
|
options := req.GetParameters()
|
2019-06-10 06:48:41 +00:00
|
|
|
if value, ok := options["clusterID"]; !ok || value == "" {
|
2019-05-13 04:47:17 +00:00
|
|
|
return status.Error(codes.InvalidArgument, "missing or empty cluster ID to provision volume from")
|
2019-04-22 21:35:39 +00:00
|
|
|
}
|
2019-06-10 06:48:41 +00:00
|
|
|
if value, ok := options["pool"]; !ok || value == "" {
|
2019-05-13 04:47:17 +00:00
|
|
|
return status.Error(codes.InvalidArgument, "missing or empty pool name to provision volume from")
|
2019-04-22 21:35:39 +00:00
|
|
|
}
|
2019-09-10 09:56:08 +00:00
|
|
|
|
|
|
|
if value, ok := options["dataPool"]; ok && value == "" {
|
|
|
|
return status.Error(codes.InvalidArgument, "empty datapool name to provision volume from")
|
|
|
|
}
|
2020-08-18 10:42:42 +00:00
|
|
|
if value, ok := options["radosNamespace"]; ok && value == "" {
|
2020-06-01 13:57:51 +00:00
|
|
|
return status.Error(codes.InvalidArgument, "empty namespace name to provision volume from")
|
|
|
|
}
|
2020-02-24 13:19:42 +00:00
|
|
|
if value, ok := options["volumeNamePrefix"]; ok && value == "" {
|
|
|
|
return status.Error(codes.InvalidArgument, "empty volume name prefix to provision volume from")
|
|
|
|
}
|
2020-08-17 08:08:57 +00:00
|
|
|
|
|
|
|
// Allow readonly access mode for volume with content source
|
|
|
|
err := util.CheckReadOnlyManyIsSupported(req)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2019-01-17 05:27:55 +00:00
|
|
|
return nil
|
|
|
|
}
|
2018-01-16 01:52:28 +00:00
|
|
|
|
2021-11-19 04:40:12 +00:00
|
|
|
// parseVolCreateRequest take create volume `request` argument and make use of the
// request arguments for subsequent calls.
//
// It returns an rbdVolume populated from the request parameters (options,
// thick-provisioning flag, request name, rounded size, journal pool and
// topology data). VolID and RbdImageName are intentionally NOT set here;
// they are filled in after reservation/creation.
func (cs *ControllerServer) parseVolCreateRequest(
	ctx context.Context,
	req *csi.CreateVolumeRequest) (*rbdVolume, error) {
	// TODO (sbezverk) Last check for not exceeding total storage capacity

	// RO modes need to be handled independently (ie right now even if access mode is RO, they'll be RW upon attach)
	isBlock, isMultiNode := csicommon.IsBlockMultiNode(req.VolumeCapabilities)

	// We want to fail early if the user is trying to create a RWX on a non-block type device
	if isMultiNode && !isBlock {
		return nil, status.Error(
			codes.InvalidArgument,
			"multi node access modes are only supported on rbd `block` type volumes")
	}

	// imageFeatures is a mandatory parameter; checkImageFeatures reports
	// whether it is missing or invalid.
	if imageFeatures, ok := req.GetParameters()["imageFeatures"]; checkImageFeatures(imageFeatures, ok, true) {
		return nil, status.Error(codes.InvalidArgument, "missing required parameter imageFeatures")
	}

	// if it's NOT SINGLE_NODE_WRITER and it's BLOCK we'll set the parameter to ignore the in-use checks
	rbdVol, err := genVolFromVolumeOptions(
		ctx,
		req.GetParameters(), req.GetSecrets(),
		(isMultiNode && isBlock), false)
	if err != nil {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}

	rbdVol.ThickProvision = isThickProvisionRequest(req.GetParameters())

	rbdVol.RequestName = req.GetName()

	// Volume Size - Default is 1 GiB
	volSizeBytes := int64(oneGB)
	if req.GetCapacityRange() != nil {
		volSizeBytes = req.GetCapacityRange().GetRequiredBytes()
	}

	// always round up the request size in bytes to the nearest MiB/GiB
	rbdVol.VolSize = util.RoundOffBytes(volSizeBytes)

	// start with pool the same as journal pool, in case there is a topology
	// based split, pool for the image will be updated subsequently
	rbdVol.JournalPool = rbdVol.Pool

	// store topology information from the request
	rbdVol.TopologyPools, rbdVol.TopologyRequirement, err = util.GetTopologyFromRequest(req)
	if err != nil {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}

	// NOTE: rbdVol does not contain VolID and RbdImageName populated, everything
	// else is populated post create request parsing
	return rbdVol, nil
}
|
|
|
|
|
2021-04-01 11:18:28 +00:00
|
|
|
func buildCreateVolumeResponse(req *csi.CreateVolumeRequest, rbdVol *rbdVolume) *csi.CreateVolumeResponse {
|
2020-04-21 07:52:26 +00:00
|
|
|
volumeContext := req.GetParameters()
|
|
|
|
volumeContext["pool"] = rbdVol.Pool
|
|
|
|
volumeContext["journalPool"] = rbdVol.JournalPool
|
|
|
|
volumeContext["imageName"] = rbdVol.RbdImageName
|
2020-06-01 13:57:51 +00:00
|
|
|
if rbdVol.RadosNamespace != "" {
|
|
|
|
volumeContext["radosNamespace"] = rbdVol.RadosNamespace
|
|
|
|
}
|
2020-04-21 07:52:26 +00:00
|
|
|
volume := &csi.Volume{
|
|
|
|
VolumeId: rbdVol.VolID,
|
|
|
|
CapacityBytes: rbdVol.VolSize,
|
|
|
|
VolumeContext: volumeContext,
|
|
|
|
ContentSource: req.GetVolumeContentSource(),
|
|
|
|
}
|
|
|
|
if rbdVol.Topology != nil {
|
|
|
|
volume.AccessibleTopology =
|
|
|
|
[]*csi.Topology{
|
|
|
|
{
|
|
|
|
Segments: rbdVol.Topology,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
2021-07-22 05:45:17 +00:00
|
|
|
|
2021-04-01 11:18:28 +00:00
|
|
|
return &csi.CreateVolumeResponse{Volume: volume}
|
2020-04-21 07:52:26 +00:00
|
|
|
}
|
|
|
|
|
2020-07-07 12:14:19 +00:00
|
|
|
// getGRPCErrorForCreateVolume converts the returns the GRPC errors based on
|
|
|
|
// the input error types it expected to use only for CreateVolume as we need to
|
|
|
|
// return different GRPC codes for different functions based on the input.
|
|
|
|
func getGRPCErrorForCreateVolume(err error) error {
|
2020-07-10 01:05:42 +00:00
|
|
|
if errors.Is(err, ErrVolNameConflict) {
|
2020-07-07 12:14:19 +00:00
|
|
|
return status.Error(codes.AlreadyExists, err.Error())
|
|
|
|
}
|
2020-07-10 01:05:42 +00:00
|
|
|
if errors.Is(err, ErrFlattenInProgress) {
|
2020-07-07 12:14:19 +00:00
|
|
|
return status.Error(codes.Aborted, err.Error())
|
|
|
|
}
|
2021-07-22 05:45:17 +00:00
|
|
|
|
2020-07-07 12:14:19 +00:00
|
|
|
return status.Error(codes.Internal, err.Error())
|
|
|
|
}
|
|
|
|
|
2021-12-15 05:29:41 +00:00
|
|
|
func checkValidCreateVolumeRequest(rbdVol, parentVol *rbdVolume, rbdSnap *rbdSnapshot) error {
|
|
|
|
var err error
|
2021-06-01 11:33:29 +00:00
|
|
|
switch {
|
|
|
|
case rbdSnap != nil:
|
|
|
|
err = rbdSnap.isCompatibleEncryption(&rbdVol.rbdImage)
|
|
|
|
if err != nil {
|
|
|
|
return status.Errorf(codes.InvalidArgument, "cannot restore from snapshot %s: %s", rbdSnap, err.Error())
|
|
|
|
}
|
2021-06-17 06:15:29 +00:00
|
|
|
|
|
|
|
err = rbdSnap.isCompatibleThickProvision(rbdVol)
|
|
|
|
if err != nil {
|
|
|
|
return status.Errorf(codes.InvalidArgument, "cannot restore from snapshot %s: %s", rbdSnap, err.Error())
|
|
|
|
}
|
2021-06-01 11:33:29 +00:00
|
|
|
case parentVol != nil:
|
|
|
|
err = parentVol.isCompatibleEncryption(&rbdVol.rbdImage)
|
|
|
|
if err != nil {
|
|
|
|
return status.Errorf(codes.InvalidArgument, "cannot clone from volume %s: %s", parentVol, err.Error())
|
|
|
|
}
|
2021-06-17 06:15:29 +00:00
|
|
|
|
|
|
|
err = parentVol.isCompatibleThickProvision(rbdVol)
|
|
|
|
if err != nil {
|
|
|
|
return status.Errorf(codes.InvalidArgument, "cannot clone from volume %s: %s", parentVol, err.Error())
|
|
|
|
}
|
2021-06-01 11:33:29 +00:00
|
|
|
}
|
2021-07-22 05:45:17 +00:00
|
|
|
|
2021-06-01 11:33:29 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2020-07-19 12:21:03 +00:00
|
|
|
// CreateVolume creates the volume in backend.
//
// High-level flow:
//  1. validate the request and build credentials from the secrets,
//  2. parse the request into an rbdVolume and take the per-name volume lock,
//  3. resolve an optional content source (snapshot or volume),
//  4. if the image already exists, hand off to repairExistingVolume,
//  5. otherwise validate source compatibility, flatten the parent if needed,
//     reserve a journal entry and create the backing image.
//
// The journal reservation is undone on any error except ErrFlattenInProgress,
// where the operation is expected to be retried.
func (cs *ControllerServer) CreateVolume(
	ctx context.Context,
	req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
	err := cs.validateVolumeReq(ctx, req)
	if err != nil {
		return nil, err
	}

	// TODO: create/get a connection from the the ConnPool, and do not pass
	// the credentials to any of the utility functions.

	cr, err := util.NewUserCredentialsWithMigration(req.GetSecrets())
	if err != nil {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}
	defer cr.DeleteCredentials()
	rbdVol, err := cs.parseVolCreateRequest(ctx, req)
	if err != nil {
		return nil, err
	}
	defer rbdVol.Destroy()
	// Existence and conflict checks
	if acquired := cs.VolumeLocks.TryAcquire(req.GetName()); !acquired {
		log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, req.GetName())

		return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, req.GetName())
	}
	defer cs.VolumeLocks.Release(req.GetName())

	err = rbdVol.Connect(cr)
	if err != nil {
		log.ErrorLog(ctx, "failed to connect to volume %v: %v", rbdVol.RbdImageName, err)

		return nil, status.Error(codes.Internal, err.Error())
	}

	// parentVol/rbdSnap are non-nil only when the request carries a
	// VolumeContentSource (clone or restore).
	parentVol, rbdSnap, err := checkContentSource(ctx, req, cr)
	if err != nil {
		return nil, err
	}

	found, err := rbdVol.Exists(ctx, parentVol)
	if err != nil {
		return nil, getGRPCErrorForCreateVolume(err)
	} else if found {
		// idempotent retry: fix up whatever a previous, interrupted
		// CreateVolume left behind
		return cs.repairExistingVolume(ctx, req, cr, rbdVol, parentVol, rbdSnap)
	}

	err = checkValidCreateVolumeRequest(rbdVol, parentVol, rbdSnap)
	if err != nil {
		return nil, err
	}

	err = flattenParentImage(ctx, parentVol, cr)
	if err != nil {
		return nil, err
	}

	err = reserveVol(ctx, rbdVol, rbdSnap, cr)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
	defer func() {
		// roll back the journal reservation on failure, except while a
		// flatten is still in progress (the request will be retried)
		if err != nil {
			if !errors.Is(err, ErrFlattenInProgress) {
				errDefer := undoVolReservation(ctx, rbdVol, cr)
				if errDefer != nil {
					log.WarningLog(ctx, "failed undoing reservation of volume: %s (%s)", req.GetName(), errDefer)
				}
			}
		}
	}()

	err = cs.createBackingImage(ctx, cr, req.GetSecrets(), rbdVol, parentVol, rbdSnap)
	if err != nil {
		if errors.Is(err, ErrFlattenInProgress) {
			return nil, status.Error(codes.Aborted, err.Error())
		}

		return nil, err
	}

	return buildCreateVolumeResponse(req, rbdVol), nil
}
|
|
|
|
|
2020-07-12 04:36:17 +00:00
|
|
|
func flattenParentImage(ctx context.Context, rbdVol *rbdVolume, cr *util.Credentials) error {
|
|
|
|
if rbdVol != nil {
|
|
|
|
// flatten the image or its parent before the reservation to avoid
|
|
|
|
// stale entries in post creation if we return ABORT error and the
|
|
|
|
// delete volume is not called
|
|
|
|
err := rbdVol.flattenCloneImage(ctx)
|
|
|
|
if err != nil {
|
|
|
|
return getGRPCErrorForCreateVolume(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// flatten cloned images if the snapshot count on the parent image
|
|
|
|
// exceeds maxSnapshotsOnImage
|
|
|
|
err = flattenTemporaryClonedImages(ctx, rbdVol, cr)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
2021-07-22 05:45:17 +00:00
|
|
|
|
2020-07-12 04:36:17 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2021-05-26 08:33:14 +00:00
|
|
|
// repairExistingVolume checks the existing volume or snapshot and makes sure
// that the state is corrected to what was requested. It is needed to call this
// when the process of creating a volume was interrupted.
//
// The repair action depends on the content source of the request:
//   - no source: re-run thick-provision allocation if requested,
//   - snapshot source: restart an interrupted thick restore (delete + retry),
//     then flatten if the clone depth limit was hit and repair encryption,
//   - volume source: restart an interrupted thick clone (cleanup + retry).
func (cs *ControllerServer) repairExistingVolume(ctx context.Context, req *csi.CreateVolumeRequest,
	cr *util.Credentials, rbdVol, parentVol *rbdVolume, rbdSnap *rbdSnapshot) (*csi.CreateVolumeResponse, error) {
	vcs := req.GetVolumeContentSource()

	switch {
	// normal CreateVolume without VolumeContentSource
	case vcs == nil:
		// continue/restart allocating the volume in case it
		// should be thick-provisioned
		if isThickProvisionRequest(req.GetParameters()) {
			err := rbdVol.RepairThickProvision()
			if err != nil {
				return nil, status.Error(codes.Internal, err.Error())
			}
		}
	// rbdVol is a restore from snapshot, rbdSnap is passed
	case vcs.GetSnapshot() != nil:
		// When restoring of a thick-provisioned volume was happening,
		// the image should be marked as thick-provisioned, unless it
		// was aborted in flight. In order to restart the
		// thick-restoring, delete the volume and let the caller retry
		// from the start.
		if isThickProvisionRequest(req.GetParameters()) {
			thick, err := rbdVol.isThickProvisioned()
			if err != nil {
				return nil, status.Errorf(
					codes.Aborted,
					"failed to verify thick-provisioned volume %q: %s",
					rbdVol,
					err)
			} else if !thick {
				// partial restore: remove the image and its journal
				// entry, then ask the caller to retry
				err = deleteImage(ctx, rbdVol, cr)
				if err != nil {
					return nil, status.Errorf(codes.Aborted, "failed to remove partially cloned volume %q: %s", rbdVol, err)
				}
				err = undoVolReservation(ctx, rbdVol, cr)
				if err != nil {
					return nil, status.Errorf(codes.Aborted, "failed to remove volume %q from journal: %s", rbdVol, err)
				}

				return nil, status.Errorf(
					codes.Aborted,
					"restoring thick-provisioned volume %q has been interrupted, please retry", rbdVol)
			}
		}
		// restore from snapshot implies rbdSnap != nil
		// check if image depth is reached limit and requires flatten
		err := checkFlatten(ctx, rbdVol, cr)
		if err != nil {
			return nil, err
		}

		err = rbdSnap.repairEncryptionConfig(&rbdVol.rbdImage)
		if err != nil {
			return nil, err
		}

	// rbdVol is a clone from parentVol
	case vcs.GetVolume() != nil:
		// When cloning into a thick-provisioned volume was happening,
		// the image should be marked as thick-provisioned, unless it
		// was aborted in flight. In order to restart the
		// thick-cloning, delete the volume and undo the reservation in
		// the journal to let the caller retry from the start.
		if isThickProvisionRequest(req.GetParameters()) {
			thick, err := rbdVol.isThickProvisioned()
			if err != nil {
				return nil, status.Errorf(
					codes.Internal,
					"failed to verify thick-provisioned volume %q: %s",
					rbdVol,
					err)
			} else if !thick {
				return nil, cleanupThickClone(ctx, parentVol, rbdVol, rbdSnap, cr)
			}
		}
	}

	return buildCreateVolumeResponse(req, rbdVol), nil
}
|
|
|
|
|
2021-12-15 05:40:50 +00:00
|
|
|
// cleanupThickClone will delete the snapshot and volume and undo the reservation.
// It always returns a non-nil Internal error so that the caller retries the
// clone from the start.
//
// NOTE(review): the only caller (repairExistingVolume) invokes this as
// cleanupThickClone(ctx, parentVol, rbdVol, rbdSnap, cr), so the parameter
// named rbdVol actually receives the parent volume and parentVol receives the
// new clone. Verify whether the argument order at the call site, or the
// parameter names here, are intentional before relying on the names below.
func cleanupThickClone(ctx context.Context,
	rbdVol,
	parentVol *rbdVolume,
	rbdSnap *rbdSnapshot,
	cr *util.Credentials) error {
	// remove the intermediate snapshot together with the cloned image
	err := cleanUpSnapshot(ctx, parentVol, rbdSnap, rbdVol, cr)
	if err != nil {
		return status.Errorf(codes.Internal, "failed to remove partially cloned volume %q: %s", rbdVol, err)
	}
	err = undoVolReservation(ctx, rbdVol, cr)
	if err != nil {
		return status.Errorf(codes.Internal, "failed to remove volume %q from journal: %s", rbdVol, err)
	}

	return status.Errorf(
		codes.Internal,
		"cloning thick-provisioned volume %q has been interrupted, please retry", rbdVol)
}
|
|
|
|
|
2020-11-17 03:34:29 +00:00
|
|
|
// check snapshots on the rbd image, as we have limit from krbd that an image
// cannot have more than 510 snapshot at a given point of time. If the
// snapshots are more than the `maxSnapshotsOnImage` Add a task to flatten all
// the temporary cloned images and return ABORT error message. If the snapshots
// are more than the `minSnapshotOnImage` Add a task to flatten all the
// temporary cloned images.
func flattenTemporaryClonedImages(ctx context.Context, rbdVol *rbdVolume, cr *util.Credentials) error {
	if rbdVol.ThickProvision {
		// thick-provisioned images do not need flattening
		return nil
	}

	snaps, err := rbdVol.listSnapshots()
	if err != nil {
		if errors.Is(err, ErrImageNotFound) {
			return status.Error(codes.InvalidArgument, err.Error())
		}

		return status.Error(codes.Internal, err.Error())
	}

	// hard limit: schedule flattening of all temporary clones and abort
	// the current request with ResourceExhausted
	if len(snaps) > int(maxSnapshotsOnImage) {
		log.DebugLog(
			ctx,
			"snapshots count %d on image: %s reached configured hard limit %d",
			len(snaps),
			rbdVol,
			maxSnapshotsOnImage)
		err = flattenClonedRbdImages(
			ctx,
			snaps,
			rbdVol.Pool,
			rbdVol.Monitors,
			rbdVol.RbdImageName,
			cr)
		if err != nil {
			return status.Error(codes.Internal, err.Error())
		}

		return status.Errorf(codes.ResourceExhausted, "rbd image %s has %d snapshots", rbdVol, len(snaps))
	}

	// soft limit: flatten only the excess snapshots in the background and
	// let the current request proceed
	if len(snaps) > int(minSnapshotsOnImageToStartFlatten) {
		log.DebugLog(
			ctx,
			"snapshots count %d on image: %s reached configured soft limit %d",
			len(snaps),
			rbdVol,
			minSnapshotsOnImageToStartFlatten)
		// If we start flattening all the snapshots at one shot the volume
		// creation time will be affected,so we will flatten only the extra
		// snapshots.
		snaps = snaps[minSnapshotsOnImageToStartFlatten-1:]
		err = flattenClonedRbdImages(
			ctx,
			snaps,
			rbdVol.Pool,
			rbdVol.Monitors,
			rbdVol.RbdImageName,
			cr)
		if err != nil {
			return status.Error(codes.Internal, err.Error())
		}
	}

	return nil
}
|
|
|
|
|
2021-08-17 05:39:56 +00:00
|
|
|
// checkFlatten ensures that the image chain depth is not reached
// hardlimit or softlimit. if the softlimit is reached it adds a task and
// return success,the hardlimit is reached it starts a task to flatten the
// image and return Aborted.
//
// On any other flatten failure the image and its journal reservation are
// removed before the Internal error is returned, so a retry starts clean.
func checkFlatten(ctx context.Context, rbdVol *rbdVolume, cr *util.Credentials) error {
	err := rbdVol.flattenRbdImage(ctx, cr, false, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
	if err != nil {
		if errors.Is(err, ErrFlattenInProgress) {
			return status.Error(codes.Aborted, err.Error())
		}
		// flatten failed for another reason: clean up the image first;
		// note the original flatten error (err) is the one reported
		if errDefer := deleteImage(ctx, rbdVol, cr); errDefer != nil {
			log.ErrorLog(ctx, "failed to delete rbd image: %s with error: %v", rbdVol, errDefer)

			return status.Error(codes.Internal, err.Error())
		}
		errDefer := undoVolReservation(ctx, rbdVol, cr)
		if errDefer != nil {
			// reservation cleanup failure is only logged, not returned
			log.WarningLog(ctx, "failed undoing reservation of volume: %s (%s)", rbdVol.RequestName, errDefer)
		}

		return status.Error(codes.Internal, err.Error())
	}

	return nil
}
|
|
|
|
|
2021-06-25 11:39:42 +00:00
|
|
|
// createVolumeFromSnapshot populates rbdVol from the snapshot identified by
// snapshotID. For thick-provisioned volumes the snapshot's parent image is
// deep-copied and the result marked thick-provisioned; otherwise a regular
// rbd clone is created from the snapshot. The per-snapshot lock is held for
// the duration of the operation.
func (cs *ControllerServer) createVolumeFromSnapshot(
	ctx context.Context,
	cr *util.Credentials,
	secrets map[string]string,
	rbdVol *rbdVolume,
	snapshotID string) error {
	rbdSnap := &rbdSnapshot{}
	if acquired := cs.SnapshotLocks.TryAcquire(snapshotID); !acquired {
		log.ErrorLog(ctx, util.SnapshotOperationAlreadyExistsFmt, snapshotID)

		// NOTE(review): the log uses SnapshotOperationAlreadyExistsFmt but the
		// returned error uses VolumeOperationAlreadyExistsFmt — confirm which
		// format string is intended here.
		return status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, snapshotID)
	}
	defer cs.SnapshotLocks.Release(snapshotID)

	err := genSnapFromSnapID(ctx, rbdSnap, snapshotID, cr, secrets)
	if err != nil {
		if errors.Is(err, util.ErrPoolNotFound) {
			log.ErrorLog(ctx, "failed to get backend snapshot for %s: %v", snapshotID, err)

			return status.Error(codes.InvalidArgument, err.Error())
		}

		return status.Error(codes.Internal, err.Error())
	}

	// update parent name(rbd image name in snapshot)
	rbdSnap.RbdImageName = rbdSnap.RbdSnapName
	parentVol := generateVolFromSnap(rbdSnap)
	// as we are operating on single cluster reuse the connection
	parentVol.conn = rbdVol.conn.Copy()

	if rbdVol.ThickProvision {
		// thick volumes get a full copy of the data instead of a clone
		err = parentVol.DeepCopy(rbdVol)
		if err != nil {
			return status.Errorf(codes.Internal, "failed to deep copy %q into %q: %v", parentVol, rbdVol, err)
		}
		err = rbdVol.setThickProvisioned()
		if err != nil {
			return status.Errorf(codes.Internal, "failed to mark %q thick-provisioned: %s", rbdVol, err)
		}
		err = parentVol.copyEncryptionConfig(&rbdVol.rbdImage, true)
		if err != nil {
			return status.Errorf(codes.Internal, err.Error())
		}
	} else {
		// create clone image and delete snapshot
		err = rbdVol.cloneRbdImageFromSnapshot(ctx, rbdSnap, parentVol)
		if err != nil {
			log.ErrorLog(ctx, "failed to clone rbd image %s from snapshot %s: %v", rbdVol, rbdSnap, err)

			return err
		}
	}

	log.DebugLog(ctx, "create volume %s from snapshot %s", rbdVol.RequestName, rbdSnap.RbdSnapName)

	return nil
}
|
|
|
|
|
2021-06-25 11:39:42 +00:00
|
|
|
// createBackingImage creates the backing rbd image for rbdVol. Depending on
// the request the image is (a) restored from a snapshot, (b) cloned from a
// parent volume, or (c) created fresh. After creation the image ID is stored
// in the journal, and for the snapshot-restore case the image may be
// flattened to stay within the configured clone-depth limits.
//
// On any error after image creation (except an in-progress flatten) the
// deferred cleanup deletes the freshly created image again.
func (cs *ControllerServer) createBackingImage(
	ctx context.Context,
	cr *util.Credentials,
	secrets map[string]string,
	rbdVol, parentVol *rbdVolume,
	rbdSnap *rbdSnapshot) error {
	var err error

	// connect to the journal so the image ID can be stored once created
	j, err := volJournal.Connect(rbdVol.Monitors, rbdVol.RadosNamespace, cr)
	if err != nil {
		return status.Error(codes.Internal, err.Error())
	}
	defer j.Destroy()

	switch {
	case rbdSnap != nil:
		// serialize restores from the same snapshot
		if err = cs.OperationLocks.GetRestoreLock(rbdSnap.VolID); err != nil {
			log.ErrorLog(ctx, err.Error())

			return status.Error(codes.Aborted, err.Error())
		}
		defer cs.OperationLocks.ReleaseRestoreLock(rbdSnap.VolID)

		err = cs.createVolumeFromSnapshot(ctx, cr, secrets, rbdVol, rbdSnap.VolID)
		if err != nil {
			return err
		}
		log.DebugLog(ctx, "created volume %s from snapshot %s", rbdVol.RequestName, rbdSnap.RbdSnapName)
	case parentVol != nil:
		// serialize clones from the same parent volume
		if err = cs.OperationLocks.GetCloneLock(parentVol.VolID); err != nil {
			log.ErrorLog(ctx, err.Error())

			return status.Error(codes.Aborted, err.Error())
		}
		defer cs.OperationLocks.ReleaseCloneLock(parentVol.VolID)

		// NOTE: the clone path returns here and does not go through the
		// journal update / flatten steps below.
		return rbdVol.createCloneFromImage(ctx, parentVol)
	default:
		err = createImage(ctx, rbdVol, cr)
		if err != nil {
			log.ErrorLog(ctx, "failed to create volume: %v", err)

			return status.Error(codes.Internal, err.Error())
		}
	}

	log.DebugLog(ctx, "created volume %s backed by image %s", rbdVol.RequestName, rbdVol.RbdImageName)

	// from here on, a failure (other than an in-progress flatten) must remove
	// the image that was just created; err is captured by reference.
	defer func() {
		if err != nil {
			if !errors.Is(err, ErrFlattenInProgress) {
				if deleteErr := deleteImage(ctx, rbdVol, cr); deleteErr != nil {
					log.ErrorLog(ctx, "failed to delete rbd image: %s with error: %v", rbdVol, deleteErr)
				}
			}
		}
	}()
	err = rbdVol.storeImageID(ctx, j)
	if err != nil {
		return status.Error(codes.Internal, err.Error())
	}

	if rbdSnap != nil {
		// keep the clone chain within the configured depth limits
		err = rbdVol.flattenRbdImage(ctx, cr, false, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
		if err != nil {
			log.ErrorLog(ctx, "failed to flatten image %s: %v", rbdVol, err)

			return err
		}
	}

	return nil
}
|
2019-12-13 11:41:32 +00:00
|
|
|
|
2021-06-25 11:39:42 +00:00
|
|
|
func checkContentSource(
|
|
|
|
ctx context.Context,
|
|
|
|
req *csi.CreateVolumeRequest,
|
|
|
|
cr *util.Credentials) (*rbdVolume, *rbdSnapshot, error) {
|
2020-01-24 16:26:56 +00:00
|
|
|
if req.VolumeContentSource == nil {
|
2020-07-07 12:14:19 +00:00
|
|
|
return nil, nil, nil
|
2020-01-24 16:26:56 +00:00
|
|
|
}
|
2020-07-07 12:14:19 +00:00
|
|
|
volumeSource := req.VolumeContentSource
|
|
|
|
switch volumeSource.Type.(type) {
|
|
|
|
case *csi.VolumeContentSource_Snapshot:
|
|
|
|
snapshot := req.VolumeContentSource.GetSnapshot()
|
|
|
|
if snapshot == nil {
|
|
|
|
return nil, nil, status.Error(codes.NotFound, "volume Snapshot cannot be empty")
|
2019-04-22 21:35:39 +00:00
|
|
|
}
|
2020-07-07 12:14:19 +00:00
|
|
|
snapshotID := snapshot.GetSnapshotId()
|
|
|
|
if snapshotID == "" {
|
|
|
|
return nil, nil, status.Errorf(codes.NotFound, "volume Snapshot ID cannot be empty")
|
|
|
|
}
|
|
|
|
rbdSnap := &rbdSnapshot{}
|
2021-03-30 20:08:24 +00:00
|
|
|
if err := genSnapFromSnapID(ctx, rbdSnap, snapshotID, cr, req.GetSecrets()); err != nil {
|
2021-08-24 15:03:25 +00:00
|
|
|
log.ErrorLog(ctx, "failed to get backend snapshot for %s: %v", snapshotID, err)
|
2020-07-10 01:05:42 +00:00
|
|
|
if !errors.Is(err, ErrSnapNotFound) {
|
2020-07-07 12:14:19 +00:00
|
|
|
return nil, nil, status.Error(codes.Internal, err.Error())
|
|
|
|
}
|
2021-07-22 05:45:17 +00:00
|
|
|
|
2020-11-24 11:54:29 +00:00
|
|
|
return nil, nil, status.Errorf(codes.NotFound, "%s snapshot does not exist", snapshotID)
|
2020-01-31 08:49:11 +00:00
|
|
|
}
|
2021-07-22 05:45:17 +00:00
|
|
|
|
2020-07-07 12:14:19 +00:00
|
|
|
return nil, rbdSnap, nil
|
|
|
|
case *csi.VolumeContentSource_Volume:
|
|
|
|
vol := req.VolumeContentSource.GetVolume()
|
|
|
|
if vol == nil {
|
|
|
|
return nil, nil, status.Error(codes.NotFound, "volume cannot be empty")
|
|
|
|
}
|
|
|
|
volID := vol.GetVolumeId()
|
|
|
|
if volID == "" {
|
|
|
|
return nil, nil, status.Errorf(codes.NotFound, "volume ID cannot be empty")
|
|
|
|
}
|
2021-12-09 08:00:52 +00:00
|
|
|
rbdvol, err := GenVolFromVolID(ctx, volID, cr, req.GetSecrets())
|
2020-07-07 12:14:19 +00:00
|
|
|
if err != nil {
|
2021-08-24 15:03:25 +00:00
|
|
|
log.ErrorLog(ctx, "failed to get backend image for %s: %v", volID, err)
|
2020-07-10 01:05:42 +00:00
|
|
|
if !errors.Is(err, ErrImageNotFound) {
|
2020-07-07 12:14:19 +00:00
|
|
|
return nil, nil, status.Error(codes.Internal, err.Error())
|
|
|
|
}
|
2021-07-22 05:45:17 +00:00
|
|
|
|
2020-11-24 11:54:29 +00:00
|
|
|
return nil, nil, status.Errorf(codes.NotFound, "%s image does not exist", volID)
|
2020-07-07 12:14:19 +00:00
|
|
|
}
|
2021-07-22 05:45:17 +00:00
|
|
|
|
2020-07-07 12:14:19 +00:00
|
|
|
return rbdvol, nil, nil
|
2019-01-17 05:27:55 +00:00
|
|
|
}
|
2021-07-22 05:45:17 +00:00
|
|
|
|
2020-07-07 12:14:19 +00:00
|
|
|
return nil, nil, status.Errorf(codes.InvalidArgument, "not a proper volume source")
|
2019-01-17 05:27:55 +00:00
|
|
|
}
|
2019-01-17 06:49:35 +00:00
|
|
|
|
2021-12-09 08:00:52 +00:00
|
|
|
// checkErrAndUndoReserve work on error from GenVolFromVolID() and undo omap reserve.
|
2021-09-02 04:01:55 +00:00
|
|
|
// Even-though volumeID is part of rbdVolume struct we take it as an arg here, the main reason
|
2021-12-09 08:00:52 +00:00
|
|
|
// being, the volume id is getting filled from `GenVolFromVolID->generateVolumeFromVolumeID` call path,
|
2021-09-02 04:01:55 +00:00
|
|
|
// and this function is operating on the error case/scenario of above call chain, so we can not rely
|
|
|
|
// on the 'rbdvol->rbdimage->voldID' field.
|
|
|
|
|
|
|
|
func (cs *ControllerServer) checkErrAndUndoReserve(
|
|
|
|
ctx context.Context,
|
|
|
|
err error,
|
|
|
|
volumeID string,
|
|
|
|
rbdVol *rbdVolume, cr *util.Credentials) (*csi.DeleteVolumeResponse, error) {
|
|
|
|
if errors.Is(err, util.ErrPoolNotFound) {
|
|
|
|
log.WarningLog(ctx, "failed to get backend volume for %s: %v", volumeID, err)
|
|
|
|
|
|
|
|
return &csi.DeleteVolumeResponse{}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// if error is ErrKeyNotFound, then a previous attempt at deletion was complete
|
|
|
|
// or partially complete (image and imageOMap are garbage collected already), hence return
|
|
|
|
// success as deletion is complete
|
|
|
|
if errors.Is(err, util.ErrKeyNotFound) {
|
|
|
|
log.WarningLog(ctx, "failed to volume options for %s: %v", volumeID, err)
|
|
|
|
|
|
|
|
return &csi.DeleteVolumeResponse{}, nil
|
|
|
|
}
|
|
|
|
|
2021-08-23 11:34:29 +00:00
|
|
|
if errors.Is(err, ErrImageNotFound) {
|
|
|
|
err = rbdVol.ensureImageCleanup(ctx)
|
|
|
|
if err != nil {
|
|
|
|
return nil, status.Error(codes.Internal, err.Error())
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// All errors other than ErrImageNotFound should return an error back to the caller
|
2021-09-02 04:01:55 +00:00
|
|
|
return nil, status.Error(codes.Internal, err.Error())
|
|
|
|
}
|
|
|
|
|
|
|
|
// If error is ErrImageNotFound then we failed to find the image, but found the imageOMap
|
|
|
|
// to lead us to the image, hence the imageOMap needs to be garbage collected, by calling
|
|
|
|
// unreserve for the same
|
|
|
|
if acquired := cs.VolumeLocks.TryAcquire(rbdVol.RequestName); !acquired {
|
|
|
|
log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, rbdVol.RequestName)
|
|
|
|
|
|
|
|
return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, rbdVol.RequestName)
|
|
|
|
}
|
|
|
|
defer cs.VolumeLocks.Release(rbdVol.RequestName)
|
|
|
|
|
|
|
|
if err = undoVolReservation(ctx, rbdVol, cr); err != nil {
|
|
|
|
return nil, status.Error(codes.Internal, err.Error())
|
|
|
|
}
|
|
|
|
|
|
|
|
return &csi.DeleteVolumeResponse{}, nil
|
|
|
|
}
|
|
|
|
|
2019-01-28 11:47:06 +00:00
|
|
|
// DeleteVolume deletes the volume in backend and removes the volume metadata
// from store.
func (cs *ControllerServer) DeleteVolume(
	ctx context.Context,
	req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
	var err error
	if err = cs.Driver.ValidateControllerServiceRequest(
		csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
		log.ErrorLog(ctx, "invalid delete volume req: %v", protosanitizer.StripSecrets(req))

		return nil, err
	}

	// For now the image get unconditionally deleted, but here retention policy can be checked
	volumeID := req.GetVolumeId()
	if volumeID == "" {
		return nil, status.Error(codes.InvalidArgument, "empty volume ID in request")
	}

	cr, err := util.NewUserCredentialsWithMigration(req.GetSecrets())
	if err != nil {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}
	defer cr.DeleteCredentials()

	// serialize requests operating on the same volume ID
	if acquired := cs.VolumeLocks.TryAcquire(volumeID); !acquired {
		log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volumeID)

		return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volumeID)
	}
	defer cs.VolumeLocks.Release(volumeID)

	// lock out volumeID for clone and expand operation
	if err = cs.OperationLocks.GetDeleteLock(volumeID); err != nil {
		log.ErrorLog(ctx, err.Error())

		return nil, status.Error(codes.Aborted, err.Error())
	}
	defer cs.OperationLocks.ReleaseDeleteLock(volumeID)

	// if this is a migration request volID, delete the volume in backend
	if isMigrationVolID(volumeID) {
		pmVolID, pErr := parseMigrationVolID(volumeID)
		if pErr != nil {
			return nil, status.Error(codes.InvalidArgument, pErr.Error())
		}
		// a vanished image counts as a successful delete (idempotency)
		pErr = deleteMigratedVolume(ctx, pmVolID, cr)
		if pErr != nil && !errors.Is(pErr, ErrImageNotFound) {
			return nil, status.Error(codes.Internal, pErr.Error())
		}

		return &csi.DeleteVolumeResponse{}, nil
	}

	// NOTE(review): Destroy is deferred before the error check — this
	// presumably relies on GenVolFromVolID returning a non-nil rbdVolume even
	// on error; confirm against GenVolFromVolID before changing.
	rbdVol, err := GenVolFromVolID(ctx, volumeID, cr, req.GetSecrets())
	defer rbdVol.Destroy()
	if err != nil {
		return cs.checkErrAndUndoReserve(ctx, err, volumeID, rbdVol, cr)
	}

	// lock out parallel create requests against the same volume name as we
	// clean up the image and associated omaps for the same
	if acquired := cs.VolumeLocks.TryAcquire(rbdVol.RequestName); !acquired {
		log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, rbdVol.RequestName)

		return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, rbdVol.RequestName)
	}
	defer cs.VolumeLocks.Release(rbdVol.RequestName)

	return cleanupRBDImage(ctx, rbdVol, cr)
}
|
|
|
|
|
|
|
|
// cleanupRBDImage removes the rbd image and OMAP metadata associated with it.
// For a healthy mirrored secondary image only the OMAP data is removed; the
// image itself is deleted by mirroring once the primary side is deleted.
func cleanupRBDImage(ctx context.Context,
	rbdVol *rbdVolume, cr *util.Credentials) (*csi.DeleteVolumeResponse, error) {
	mirroringInfo, err := rbdVol.getImageMirroringInfo()
	if err != nil {
		log.ErrorLog(ctx, err.Error())

		return nil, status.Error(codes.Internal, err.Error())
	}
	// Cleanup only omap data if the following condition is met
	// Mirroring is enabled on the image
	// Local image is secondary
	// Local image is in up+replaying state
	if mirroringInfo.State == librbd.MirrorImageEnabled && !mirroringInfo.Primary {
		// If the image is in a secondary state and its up+replaying means its
		// an healthy secondary and the image is primary somewhere in the
		// remote cluster and the local image is getting replayed. Delete the
		// OMAP data generated as we cannot delete the secondary image. When
		// the image on the primary cluster gets deleted/mirroring disabled,
		// the image on all the remote (secondary) clusters will get
		// auto-deleted. This helps in garbage collecting the OMAP, PVC and PV
		// objects after failback operation.
		localStatus, rErr := rbdVol.getLocalState()
		if rErr != nil {
			return nil, status.Error(codes.Internal, rErr.Error())
		}
		if localStatus.Up && localStatus.State == librbd.MirrorImageStatusStateReplaying {
			if err = undoVolReservation(ctx, rbdVol, cr); err != nil {
				log.ErrorLog(ctx, "failed to remove reservation for volume (%s) with backing image (%s) (%s)",
					rbdVol.RequestName, rbdVol.RbdImageName, err)

				return nil, status.Error(codes.Internal, err.Error())
			}

			return &csi.DeleteVolumeResponse{}, nil
		}
		// secondary but not healthy (up+replaying); log and fall through to
		// the normal delete path below
		log.ErrorLog(ctx,
			"secondary image status is up=%t and state=%s",
			localStatus.Up,
			localStatus.State)
	}

	// refuse to delete an image that is still mapped/open somewhere
	inUse, err := rbdVol.isInUse()
	if err != nil {
		log.ErrorLog(ctx, "failed getting information for image (%s): (%s)", rbdVol, err)

		return nil, status.Error(codes.Internal, err.Error())
	}
	if inUse {
		log.ErrorLog(ctx, "rbd %s is still being used", rbdVol)

		return nil, status.Errorf(codes.Internal, "rbd %s is still being used", rbdVol.RbdImageName)
	}

	// delete the temporary rbd image created as part of volume clone during
	// create volume
	tempClone := rbdVol.generateTempClone()
	err = deleteImage(ctx, tempClone, cr)
	if err != nil {
		if errors.Is(err, ErrImageNotFound) {
			// temp clone already gone; still remove any leftover artifacts
			err = tempClone.ensureImageCleanup(ctx)
			if err != nil {
				return nil, status.Error(codes.Internal, err.Error())
			}
		} else {
			// return error if it is not ErrImageNotFound
			log.ErrorLog(ctx, "failed to delete rbd image: %s with error: %v",
				tempClone, err)

			return nil, status.Error(codes.Internal, err.Error())
		}
	}

	// Deleting rbd image
	log.DebugLog(ctx, "deleting image %s", rbdVol.RbdImageName)
	if err = deleteImage(ctx, rbdVol, cr); err != nil {
		log.ErrorLog(ctx, "failed to delete rbd image: %s with error: %v",
			rbdVol, err)

		return nil, status.Error(codes.Internal, err.Error())
	}

	// finally drop the journal/OMAP reservation for this volume
	if err = undoVolReservation(ctx, rbdVol, cr); err != nil {
		log.ErrorLog(ctx, "failed to remove reservation for volume (%s) with backing image (%s) (%s)",
			rbdVol.RequestName, rbdVol.RbdImageName, err)

		return nil, status.Error(codes.Internal, err.Error())
	}

	return &csi.DeleteVolumeResponse{}, nil
}
|
|
|
|
|
2019-01-28 11:47:06 +00:00
|
|
|
// ValidateVolumeCapabilities checks whether the volume capabilities requested
|
|
|
|
// are supported.
|
2021-06-25 11:39:42 +00:00
|
|
|
func (cs *ControllerServer) ValidateVolumeCapabilities(
|
|
|
|
ctx context.Context,
|
|
|
|
req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) {
|
2019-04-22 21:35:39 +00:00
|
|
|
if req.GetVolumeId() == "" {
|
2019-05-13 04:47:17 +00:00
|
|
|
return nil, status.Error(codes.InvalidArgument, "empty volume ID in request")
|
2019-04-22 21:35:39 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if len(req.VolumeCapabilities) == 0 {
|
2019-05-13 04:47:17 +00:00
|
|
|
return nil, status.Error(codes.InvalidArgument, "empty volume capabilities in request")
|
2019-04-22 21:35:39 +00:00
|
|
|
}
|
|
|
|
|
2020-10-21 07:00:55 +00:00
|
|
|
for _, capability := range req.VolumeCapabilities {
|
|
|
|
if capability.GetAccessMode().GetMode() != csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER {
|
2019-03-13 18:19:14 +00:00
|
|
|
return &csi.ValidateVolumeCapabilitiesResponse{Message: ""}, nil
|
2018-01-18 19:13:08 +00:00
|
|
|
}
|
|
|
|
}
|
2021-07-22 05:45:17 +00:00
|
|
|
|
2018-11-24 19:18:24 +00:00
|
|
|
return &csi.ValidateVolumeCapabilitiesResponse{
|
|
|
|
Confirmed: &csi.ValidateVolumeCapabilitiesResponse_Confirmed{
|
|
|
|
VolumeCapabilities: req.VolumeCapabilities,
|
|
|
|
},
|
|
|
|
}, nil
|
2018-01-18 19:13:08 +00:00
|
|
|
}
|
|
|
|
|
2021-04-08 09:21:44 +00:00
|
|
|
// CreateSnapshot creates the snapshot in backend and stores metadata in store.
|
2021-06-25 11:39:42 +00:00
|
|
|
func (cs *ControllerServer) CreateSnapshot(
|
|
|
|
ctx context.Context,
|
|
|
|
req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) {
|
2019-08-22 16:57:23 +00:00
|
|
|
if err := cs.validateSnapshotReq(ctx, req); err != nil {
|
2019-01-28 19:55:10 +00:00
|
|
|
return nil, err
|
2018-08-08 05:42:17 +00:00
|
|
|
}
|
2019-01-28 13:59:16 +00:00
|
|
|
|
2019-06-25 19:29:17 +00:00
|
|
|
cr, err := util.NewUserCredentials(req.GetSecrets())
|
2019-06-01 21:26:42 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, status.Error(codes.Internal, err.Error())
|
|
|
|
}
|
2019-06-25 19:29:17 +00:00
|
|
|
defer cr.DeleteCredentials()
|
2019-06-01 21:26:42 +00:00
|
|
|
|
2019-04-22 21:35:39 +00:00
|
|
|
// Fetch source volume information
|
2021-12-09 08:00:52 +00:00
|
|
|
rbdVol, err := GenVolFromVolID(ctx, req.GetSourceVolumeId(), cr, req.GetSecrets())
|
2020-10-29 09:48:31 +00:00
|
|
|
defer rbdVol.Destroy()
|
2019-04-22 21:35:39 +00:00
|
|
|
if err != nil {
|
2021-01-26 14:08:59 +00:00
|
|
|
switch {
|
|
|
|
case errors.Is(err, ErrImageNotFound):
|
2020-05-11 08:48:52 +00:00
|
|
|
err = status.Errorf(codes.NotFound, "source Volume ID %s not found", req.GetSourceVolumeId())
|
2021-01-26 14:08:59 +00:00
|
|
|
case errors.Is(err, util.ErrPoolNotFound):
|
2021-08-24 15:03:25 +00:00
|
|
|
log.ErrorLog(ctx, "failed to get backend volume for %s: %v", req.GetSourceVolumeId(), err)
|
2020-05-11 08:48:52 +00:00
|
|
|
err = status.Errorf(codes.NotFound, err.Error())
|
2021-01-26 14:08:59 +00:00
|
|
|
default:
|
2020-05-11 08:48:52 +00:00
|
|
|
err = status.Errorf(codes.Internal, err.Error())
|
2020-01-31 08:49:11 +00:00
|
|
|
}
|
2021-07-22 05:45:17 +00:00
|
|
|
|
2020-05-11 08:48:52 +00:00
|
|
|
return nil, err
|
2018-08-08 05:42:17 +00:00
|
|
|
}
|
|
|
|
|
2019-04-22 21:35:39 +00:00
|
|
|
// Check if source volume was created with required image features for snaps
|
2020-06-18 11:33:06 +00:00
|
|
|
if !rbdVol.hasSnapshotFeature() {
|
2021-06-25 11:39:42 +00:00
|
|
|
return nil, status.Errorf(
|
|
|
|
codes.InvalidArgument,
|
|
|
|
"volume(%s) has not snapshot feature(layering)",
|
|
|
|
req.GetSourceVolumeId())
|
2018-08-08 05:42:17 +00:00
|
|
|
}
|
|
|
|
|
2020-07-30 08:54:15 +00:00
|
|
|
rbdSnap, err := genSnapFromOptions(ctx, rbdVol, req.GetParameters())
|
|
|
|
if err != nil {
|
|
|
|
return nil, status.Error(codes.Internal, err.Error())
|
|
|
|
}
|
2019-04-22 21:35:39 +00:00
|
|
|
rbdSnap.RbdImageName = rbdVol.RbdImageName
|
|
|
|
rbdSnap.SizeBytes = rbdVol.VolSize
|
|
|
|
rbdSnap.SourceVolumeID = req.GetSourceVolumeId()
|
|
|
|
rbdSnap.RequestName = req.GetName()
|
|
|
|
|
2019-09-12 04:53:37 +00:00
|
|
|
if acquired := cs.SnapshotLocks.TryAcquire(req.GetName()); !acquired {
|
2021-08-24 15:03:25 +00:00
|
|
|
log.ErrorLog(ctx, util.SnapshotOperationAlreadyExistsFmt, req.GetName())
|
2021-07-22 05:45:17 +00:00
|
|
|
|
2019-09-12 04:53:37 +00:00
|
|
|
return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, req.GetName())
|
|
|
|
}
|
|
|
|
defer cs.SnapshotLocks.Release(req.GetName())
|
Move locks to more granular locking than CPU count based
As detailed in issue #279, current lock scheme has hash
buckets that are count of CPUs. This causes a lot of contention
when parallel requests are made to the CSI plugin. To reduce
lock contention, this commit introduces granular locks per
identifier.
The commit also changes the timeout for gRPC requests to Create
and Delete volumes, as the current timeout is 10s (kubernetes
documentation says 15s but code defaults are 10s). A virtual
setup takes about 12-15s to complete a request at times, that leads
to unwanted retries of the same request, hence the increased
timeout to enable operation completion with minimal retries.
Tests to create PVCs before and after these changes look like so,
Before:
Default master code + sidecar provisioner --timeout option set
to 30 seconds
20 PVCs
Creation: 3 runs, 396/391/400 seconds
Deletion: 3 runs, 218/271/118 seconds
- Once was stalled for more than 8 minutes and cancelled the run
After:
Current commit + sidecar provisioner --timeout option set to 30 sec
20 PVCs
Creation: 3 runs, 42/59/65 seconds
Deletion: 3 runs, 32/32/31 seconds
Fixes: #279
Signed-off-by: ShyamsundarR <srangana@redhat.com>
2019-06-22 16:43:28 +00:00
|
|
|
|
2020-07-02 08:48:12 +00:00
|
|
|
// Take lock on parent rbd image
|
2020-07-13 05:28:17 +00:00
|
|
|
if err = cs.OperationLocks.GetSnapshotCreateLock(rbdSnap.SourceVolumeID); err != nil {
|
2021-08-24 15:03:25 +00:00
|
|
|
log.ErrorLog(ctx, err.Error())
|
2021-07-22 05:45:17 +00:00
|
|
|
|
2020-07-13 05:28:17 +00:00
|
|
|
return nil, status.Error(codes.Aborted, err.Error())
|
2020-07-02 08:48:12 +00:00
|
|
|
}
|
2020-07-13 05:28:17 +00:00
|
|
|
defer cs.OperationLocks.ReleaseSnapshotCreateLock(rbdSnap.SourceVolumeID)
|
2020-07-02 08:48:12 +00:00
|
|
|
|
2019-04-22 21:35:39 +00:00
|
|
|
// Need to check for already existing snapshot name, and if found
|
|
|
|
// check for the requested source volume id and already allocated source volume id
|
2020-06-24 07:43:24 +00:00
|
|
|
found, err := checkSnapCloneExists(ctx, rbdVol, rbdSnap, cr)
|
2018-08-08 05:42:17 +00:00
|
|
|
if err != nil {
|
2020-07-08 23:00:23 +00:00
|
|
|
if errors.Is(err, util.ErrSnapNameConflict) {
|
2019-04-22 21:35:39 +00:00
|
|
|
return nil, status.Error(codes.AlreadyExists, err.Error())
|
|
|
|
}
|
2021-07-22 05:45:17 +00:00
|
|
|
|
2019-04-22 21:35:39 +00:00
|
|
|
return nil, status.Errorf(codes.Internal, err.Error())
|
2018-08-08 05:42:17 +00:00
|
|
|
}
|
2019-04-22 21:35:39 +00:00
|
|
|
if found {
|
2021-04-08 09:21:44 +00:00
|
|
|
return cloneFromSnapshot(ctx, rbdVol, rbdSnap, cr)
|
2018-08-09 13:07:13 +00:00
|
|
|
}
|
2020-07-12 04:42:19 +00:00
|
|
|
|
|
|
|
err = flattenTemporaryClonedImages(ctx, rbdVol, cr)
|
2020-07-01 07:05:07 +00:00
|
|
|
if err != nil {
|
2020-07-12 04:42:19 +00:00
|
|
|
return nil, err
|
2020-07-01 07:05:07 +00:00
|
|
|
}
|
2018-08-09 13:07:13 +00:00
|
|
|
|
2020-06-24 07:43:24 +00:00
|
|
|
err = reserveSnap(ctx, rbdSnap, rbdVol, cr)
|
2019-01-28 19:55:10 +00:00
|
|
|
if err != nil {
|
2019-03-07 12:56:47 +00:00
|
|
|
return nil, status.Error(codes.Internal, err.Error())
|
2019-01-28 19:55:10 +00:00
|
|
|
}
|
2019-04-22 21:35:39 +00:00
|
|
|
defer func() {
|
2021-07-22 10:12:19 +00:00
|
|
|
if err != nil && !errors.Is(err, ErrFlattenInProgress) {
|
2019-08-22 17:19:06 +00:00
|
|
|
errDefer := undoSnapReservation(ctx, rbdSnap, cr)
|
2019-04-22 21:35:39 +00:00
|
|
|
if errDefer != nil {
|
2021-08-24 15:03:25 +00:00
|
|
|
log.WarningLog(ctx, "failed undoing reservation of snapshot: %s %v", req.GetName(), errDefer)
|
2019-04-22 21:35:39 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}()
|
2019-01-28 19:55:10 +00:00
|
|
|
|
2021-07-22 10:12:19 +00:00
|
|
|
vol, err := cs.doSnapshotClone(ctx, rbdVol, rbdSnap, cr)
|
2019-04-22 21:35:39 +00:00
|
|
|
if err != nil {
|
2021-07-22 10:12:19 +00:00
|
|
|
return nil, status.Error(codes.Internal, err.Error())
|
2019-01-28 19:55:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return &csi.CreateSnapshotResponse{
|
|
|
|
Snapshot: &csi.Snapshot{
|
2020-06-24 07:43:24 +00:00
|
|
|
SizeBytes: vol.VolSize,
|
|
|
|
SnapshotId: vol.VolID,
|
2019-01-28 19:55:10 +00:00
|
|
|
SourceVolumeId: req.GetSourceVolumeId(),
|
2020-06-24 07:43:24 +00:00
|
|
|
CreationTime: vol.CreatedAt,
|
2021-07-22 10:12:19 +00:00
|
|
|
ReadyToUse: true,
|
2019-01-28 19:55:10 +00:00
|
|
|
},
|
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
2021-04-08 09:21:44 +00:00
|
|
|
// cloneFromSnapshot is a helper for CreateSnapshot that continues creating an
|
|
|
|
// RBD image from an RBD snapshot if the process was interrupted at one point.
|
2021-06-25 11:39:42 +00:00
|
|
|
func cloneFromSnapshot(
|
|
|
|
ctx context.Context,
|
|
|
|
rbdVol *rbdVolume,
|
|
|
|
rbdSnap *rbdSnapshot,
|
|
|
|
cr *util.Credentials) (*csi.CreateSnapshotResponse, error) {
|
2021-04-08 09:21:44 +00:00
|
|
|
vol := generateVolFromSnap(rbdSnap)
|
|
|
|
err := vol.Connect(cr)
|
|
|
|
if err != nil {
|
2020-12-01 09:42:53 +00:00
|
|
|
uErr := undoSnapshotCloning(ctx, rbdVol, rbdSnap, vol, cr)
|
2021-04-08 09:21:44 +00:00
|
|
|
if uErr != nil {
|
2021-08-24 15:03:25 +00:00
|
|
|
log.WarningLog(ctx, "failed undoing reservation of snapshot: %s %v", rbdSnap.RequestName, uErr)
|
2021-04-08 09:21:44 +00:00
|
|
|
}
|
2021-07-22 05:45:17 +00:00
|
|
|
|
2021-04-08 09:21:44 +00:00
|
|
|
return nil, status.Errorf(codes.Internal, err.Error())
|
|
|
|
}
|
|
|
|
defer vol.Destroy()
|
|
|
|
|
|
|
|
if rbdVol.isEncrypted() {
|
2021-09-28 05:06:20 +00:00
|
|
|
err = rbdVol.copyEncryptionConfig(&vol.rbdImage, false)
|
2021-04-21 12:31:17 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, status.Error(codes.Internal, err.Error())
|
2021-04-08 09:21:44 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-06-18 04:12:48 +00:00
|
|
|
// The clone image created during CreateSnapshot has to be marked as thick.
|
|
|
|
// As snapshot and volume both are independent we cannot depend on the
|
|
|
|
// parent volume of the clone to check thick provision during CreateVolume
|
|
|
|
// from snapshot operation because the parent volume can be deleted anytime
|
|
|
|
// after snapshot is created.
|
|
|
|
// TODO: copy thick provision config
|
|
|
|
thick, err := rbdVol.isThickProvisioned()
|
|
|
|
if err != nil {
|
|
|
|
return nil, status.Errorf(codes.Internal, "failed checking thick-provisioning of %q: %s", rbdVol, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if thick {
|
|
|
|
// check the thick metadata is already set on the clone image.
|
|
|
|
thick, err = vol.isThickProvisioned()
|
|
|
|
if err != nil {
|
|
|
|
return nil, status.Errorf(codes.Internal, "failed checking thick-provisioning of %q: %s", vol, err)
|
|
|
|
}
|
|
|
|
if !thick {
|
|
|
|
err = vol.setThickProvisioned()
|
|
|
|
if err != nil {
|
|
|
|
return nil, status.Errorf(codes.Internal, "failed mark %q thick-provisioned: %s", vol, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2021-04-08 09:21:44 +00:00
|
|
|
|
|
|
|
err = vol.flattenRbdImage(ctx, cr, false, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
|
|
|
|
if errors.Is(err, ErrFlattenInProgress) {
|
2021-07-22 10:12:19 +00:00
|
|
|
// if flattening is in progress, return error and do not cleanup
|
|
|
|
return nil, status.Errorf(codes.Internal, err.Error())
|
2021-04-08 09:21:44 +00:00
|
|
|
} else if err != nil {
|
2020-12-01 09:42:53 +00:00
|
|
|
uErr := undoSnapshotCloning(ctx, rbdVol, rbdSnap, vol, cr)
|
2021-04-08 09:21:44 +00:00
|
|
|
if uErr != nil {
|
2021-08-24 15:03:25 +00:00
|
|
|
log.WarningLog(ctx, "failed undoing reservation of snapshot: %s %v", rbdSnap.RequestName, uErr)
|
2021-04-08 09:21:44 +00:00
|
|
|
}
|
2021-07-22 05:45:17 +00:00
|
|
|
|
2021-04-08 09:21:44 +00:00
|
|
|
return nil, status.Errorf(codes.Internal, err.Error())
|
|
|
|
}
|
|
|
|
|
|
|
|
return &csi.CreateSnapshotResponse{
|
|
|
|
Snapshot: &csi.Snapshot{
|
|
|
|
SizeBytes: rbdSnap.SizeBytes,
|
|
|
|
SnapshotId: rbdSnap.VolID,
|
|
|
|
SourceVolumeId: rbdSnap.SourceVolumeID,
|
|
|
|
CreationTime: rbdSnap.CreatedAt,
|
2021-07-22 10:12:19 +00:00
|
|
|
ReadyToUse: true,
|
2021-04-08 09:21:44 +00:00
|
|
|
},
|
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
2019-08-22 16:57:23 +00:00
|
|
|
func (cs *ControllerServer) validateSnapshotReq(ctx context.Context, req *csi.CreateSnapshotRequest) error {
|
2021-06-25 11:39:42 +00:00
|
|
|
if err := cs.Driver.ValidateControllerServiceRequest(
|
|
|
|
csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT); err != nil {
|
2021-08-24 15:03:25 +00:00
|
|
|
log.ErrorLog(ctx, "invalid create snapshot req: %v", protosanitizer.StripSecrets(req))
|
2021-07-22 05:45:17 +00:00
|
|
|
|
2019-01-28 19:55:10 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check sanity of request Snapshot Name, Source Volume Id
|
2019-06-10 06:48:41 +00:00
|
|
|
if req.Name == "" {
|
2019-05-13 04:47:17 +00:00
|
|
|
return status.Error(codes.InvalidArgument, "snapshot Name cannot be empty")
|
2019-01-28 19:55:10 +00:00
|
|
|
}
|
2019-06-10 06:48:41 +00:00
|
|
|
if req.SourceVolumeId == "" {
|
2019-05-13 04:47:17 +00:00
|
|
|
return status.Error(codes.InvalidArgument, "source Volume ID cannot be empty")
|
2019-01-28 19:55:10 +00:00
|
|
|
}
|
2019-04-22 21:35:39 +00:00
|
|
|
|
2020-02-24 13:19:42 +00:00
|
|
|
options := req.GetParameters()
|
|
|
|
if value, ok := options["snapshotNamePrefix"]; ok && value == "" {
|
|
|
|
return status.Error(codes.InvalidArgument, "empty snapshot name prefix to provision snapshot from")
|
|
|
|
}
|
2021-03-18 11:28:23 +00:00
|
|
|
if value, ok := options["pool"]; ok && value == "" {
|
|
|
|
return status.Error(codes.InvalidArgument, "empty pool name in which rbd image will be created")
|
|
|
|
}
|
2021-07-22 05:45:17 +00:00
|
|
|
|
2019-01-28 19:55:10 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2021-06-25 11:39:42 +00:00
|
|
|
func (cs *ControllerServer) doSnapshotClone(
|
|
|
|
ctx context.Context,
|
|
|
|
parentVol *rbdVolume,
|
|
|
|
rbdSnap *rbdSnapshot,
|
2021-07-22 10:12:19 +00:00
|
|
|
cr *util.Credentials) (*rbdVolume, error) {
|
2020-06-24 07:43:24 +00:00
|
|
|
// generate cloned volume details from snapshot
|
|
|
|
cloneRbd := generateVolFromSnap(rbdSnap)
|
|
|
|
defer cloneRbd.Destroy()
|
|
|
|
// add image feature for cloneRbd
|
|
|
|
f := []string{librbd.FeatureNameLayering, librbd.FeatureNameDeepFlatten}
|
|
|
|
cloneRbd.imageFeatureSet = librbd.FeatureSetFromNames(f)
|
|
|
|
|
|
|
|
err := cloneRbd.Connect(cr)
|
|
|
|
if err != nil {
|
2021-07-22 10:12:19 +00:00
|
|
|
return cloneRbd, err
|
2020-06-24 07:43:24 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
err = createRBDClone(ctx, parentVol, cloneRbd, rbdSnap, cr)
|
2018-08-08 05:42:17 +00:00
|
|
|
if err != nil {
|
2021-08-24 15:03:25 +00:00
|
|
|
log.ErrorLog(ctx, "failed to create snapshot: %v", err)
|
2021-07-22 05:45:17 +00:00
|
|
|
|
2021-07-22 10:12:19 +00:00
|
|
|
return cloneRbd, err
|
2019-04-22 21:35:39 +00:00
|
|
|
}
|
2020-06-24 07:43:24 +00:00
|
|
|
|
2019-04-22 21:35:39 +00:00
|
|
|
defer func() {
|
|
|
|
if err != nil {
|
2020-07-10 01:05:42 +00:00
|
|
|
if !errors.Is(err, ErrFlattenInProgress) {
|
2020-06-24 07:43:24 +00:00
|
|
|
// cleanup clone and snapshot
|
|
|
|
errCleanUp := cleanUpSnapshot(ctx, cloneRbd, rbdSnap, cloneRbd, cr)
|
|
|
|
if errCleanUp != nil {
|
2021-08-24 15:03:25 +00:00
|
|
|
log.ErrorLog(ctx, "failed to cleanup snapshot and clone: %v", errCleanUp)
|
2020-06-24 07:43:24 +00:00
|
|
|
}
|
2018-08-08 05:42:17 +00:00
|
|
|
}
|
|
|
|
}
|
2019-04-22 21:35:39 +00:00
|
|
|
}()
|
2020-06-24 07:43:24 +00:00
|
|
|
|
2021-03-31 13:22:26 +00:00
|
|
|
if parentVol.isEncrypted() {
|
2021-09-28 05:06:20 +00:00
|
|
|
cryptErr := parentVol.copyEncryptionConfig(&cloneRbd.rbdImage, false)
|
2021-03-31 13:22:26 +00:00
|
|
|
if cryptErr != nil {
|
2021-08-24 15:03:25 +00:00
|
|
|
log.WarningLog(ctx, "failed copy encryption "+
|
2021-05-07 05:30:37 +00:00
|
|
|
"config for %q: %v", cloneRbd, cryptErr)
|
2021-07-22 05:45:17 +00:00
|
|
|
|
2021-07-22 10:12:19 +00:00
|
|
|
return nil, err
|
2021-03-31 13:22:26 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-06-18 04:12:48 +00:00
|
|
|
// The clone image created during CreateSnapshot has to be marked as thick.
|
|
|
|
// As snapshot and volume both are independent we cannot depend on the
|
|
|
|
// parent volume of the clone to check thick provision during CreateVolume
|
|
|
|
// from snapshot operation because the parent volume can be deleted anytime
|
|
|
|
// after snapshot is created.
|
|
|
|
thick, err := parentVol.isThickProvisioned()
|
|
|
|
if err != nil {
|
2021-07-22 10:12:19 +00:00
|
|
|
return nil, fmt.Errorf("failed checking thick-provisioning of %q: %w", parentVol, err)
|
2021-06-18 04:12:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if thick {
|
|
|
|
err = cloneRbd.setThickProvisioned()
|
|
|
|
if err != nil {
|
2021-07-22 10:12:19 +00:00
|
|
|
return nil, fmt.Errorf("failed mark %q thick-provisioned: %w", cloneRbd, err)
|
2021-06-18 04:12:48 +00:00
|
|
|
}
|
2021-06-21 09:05:24 +00:00
|
|
|
} else {
|
|
|
|
err = cloneRbd.createSnapshot(ctx, rbdSnap)
|
|
|
|
if err != nil {
|
|
|
|
// update rbd image name for logging
|
|
|
|
rbdSnap.RbdImageName = cloneRbd.RbdImageName
|
2021-08-24 15:03:25 +00:00
|
|
|
log.ErrorLog(ctx, "failed to create snapshot %s: %v", rbdSnap, err)
|
2021-07-22 05:45:17 +00:00
|
|
|
|
2021-07-22 10:12:19 +00:00
|
|
|
return cloneRbd, err
|
2021-06-21 09:05:24 +00:00
|
|
|
}
|
2020-06-24 07:43:24 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
err = cloneRbd.getImageID()
|
|
|
|
if err != nil {
|
2021-08-24 15:03:25 +00:00
|
|
|
log.ErrorLog(ctx, "failed to get image id: %v", err)
|
2021-07-22 05:45:17 +00:00
|
|
|
|
2021-07-22 10:12:19 +00:00
|
|
|
return cloneRbd, err
|
2020-06-24 07:43:24 +00:00
|
|
|
}
|
|
|
|
// save image ID
|
2021-07-13 12:21:05 +00:00
|
|
|
j, err := snapJournal.Connect(rbdSnap.Monitors, rbdSnap.RadosNamespace, cr)
|
2020-06-24 07:43:24 +00:00
|
|
|
if err != nil {
|
2021-08-24 15:03:25 +00:00
|
|
|
log.ErrorLog(ctx, "failed to connect to cluster: %v", err)
|
2021-07-22 05:45:17 +00:00
|
|
|
|
2021-07-22 10:12:19 +00:00
|
|
|
return cloneRbd, err
|
2020-06-24 07:43:24 +00:00
|
|
|
}
|
|
|
|
defer j.Destroy()
|
|
|
|
|
2020-10-14 16:19:03 +00:00
|
|
|
err = j.StoreImageID(ctx, rbdSnap.JournalPool, rbdSnap.ReservedID, cloneRbd.ImageID)
|
2020-06-24 07:43:24 +00:00
|
|
|
if err != nil {
|
2021-08-24 15:03:25 +00:00
|
|
|
log.ErrorLog(ctx, "failed to reserve volume id: %v", err)
|
2021-07-22 05:45:17 +00:00
|
|
|
|
2021-07-22 10:12:19 +00:00
|
|
|
return cloneRbd, err
|
2020-06-24 07:43:24 +00:00
|
|
|
}
|
|
|
|
|
2020-07-07 12:14:19 +00:00
|
|
|
err = cloneRbd.flattenRbdImage(ctx, cr, false, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
|
2020-06-24 07:43:24 +00:00
|
|
|
if err != nil {
|
2021-07-22 10:12:19 +00:00
|
|
|
return cloneRbd, err
|
2020-06-24 07:43:24 +00:00
|
|
|
}
|
2021-07-22 05:45:17 +00:00
|
|
|
|
2021-07-22 10:12:19 +00:00
|
|
|
return cloneRbd, nil
|
2018-08-08 05:42:17 +00:00
|
|
|
}
|
|
|
|
|
2019-01-28 11:47:06 +00:00
|
|
|
// DeleteSnapshot deletes the snapshot in backend and removes the
// snapshot metadata from store.
//
// The "backend snapshot" consists of a clone image plus (for
// thin-provisioned sources) an RBD snapshot on it; both are removed here,
// followed by the journal/omap reservation. Missing pools, already-removed
// omap entries and missing images are treated as successful deletion so the
// call stays idempotent.
func (cs *ControllerServer) DeleteSnapshot(
	ctx context.Context,
	req *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) {
	if err := cs.Driver.ValidateControllerServiceRequest(
		csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT); err != nil {
		log.ErrorLog(ctx, "invalid delete snapshot req: %v", protosanitizer.StripSecrets(req))

		return nil, err
	}

	cr, err := util.NewUserCredentials(req.GetSecrets())
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
	defer cr.DeleteCredentials()

	snapshotID := req.GetSnapshotId()
	if snapshotID == "" {
		return nil, status.Error(codes.InvalidArgument, "snapshot ID cannot be empty")
	}

	// serialize requests against the same snapshot ID.
	// NOTE(review): the log uses SnapshotOperationAlreadyExistsFmt while the
	// returned error uses VolumeOperationAlreadyExistsFmt — confirm the
	// mismatch is intentional.
	if acquired := cs.SnapshotLocks.TryAcquire(snapshotID); !acquired {
		log.ErrorLog(ctx, util.SnapshotOperationAlreadyExistsFmt, snapshotID)

		return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, snapshotID)
	}
	defer cs.SnapshotLocks.Release(snapshotID)

	// lock out snapshotID for restore operation
	if err = cs.OperationLocks.GetDeleteLock(snapshotID); err != nil {
		log.ErrorLog(ctx, err.Error())

		return nil, status.Error(codes.Aborted, err.Error())
	}
	defer cs.OperationLocks.ReleaseDeleteLock(snapshotID)

	// resolve the snapshot ID back into pool/journal/image details.
	rbdSnap := &rbdSnapshot{}
	if err = genSnapFromSnapID(ctx, rbdSnap, snapshotID, cr, req.GetSecrets()); err != nil {
		// if error is ErrPoolNotFound, the pool is already deleted we don't
		// need to worry about deleting snapshot or omap data, return success
		if errors.Is(err, util.ErrPoolNotFound) {
			log.WarningLog(ctx, "failed to get backend snapshot for %s: %v", snapshotID, err)

			return &csi.DeleteSnapshotResponse{}, nil
		}

		// if error is ErrKeyNotFound, then a previous attempt at deletion was complete
		// or partially complete (snap and snapOMap are garbage collected already), hence return
		// success as deletion is complete
		if errors.Is(err, util.ErrKeyNotFound) {
			return &csi.DeleteSnapshotResponse{}, nil
		}

		return nil, status.Error(codes.Internal, err.Error())
	}

	// safeguard against parallel create or delete requests against the same
	// name
	if acquired := cs.SnapshotLocks.TryAcquire(rbdSnap.RequestName); !acquired {
		log.ErrorLog(ctx, util.SnapshotOperationAlreadyExistsFmt, rbdSnap.RequestName)

		return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, rbdSnap.RequestName)
	}
	defer cs.SnapshotLocks.Release(rbdSnap.RequestName)

	// Deleting snapshot and cloned volume
	log.DebugLog(ctx, "deleting cloned rbd volume %s", rbdSnap.RbdSnapName)

	rbdVol := generateVolFromSnap(rbdSnap)

	err = rbdVol.Connect(cr)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
	defer rbdVol.Destroy()

	err = rbdVol.getImageInfo()
	if err != nil {
		if errors.Is(err, ErrImageNotFound) {
			// image already gone; only stale state (e.g. trash entries) may
			// remain to be cleared.
			err = rbdVol.ensureImageCleanup(ctx)
			if err != nil {
				return nil, status.Error(codes.Internal, err.Error())
			}
		} else {
			log.ErrorLog(ctx, "failed to delete rbd image: %s/%s with error: %v", rbdVol.Pool, rbdVol.VolName, err)

			return nil, status.Error(codes.Internal, err.Error())
		}
	} else {
		rbdVol.ImageID = rbdSnap.ImageID
		// update parent name to delete the snapshot
		rbdSnap.RbdImageName = rbdVol.RbdImageName
		err = cleanUpSnapshot(ctx, rbdVol, rbdSnap, rbdVol, cr)
		if err != nil {
			log.ErrorLog(ctx, "failed to delete image: %v", err)

			return nil, status.Error(codes.Internal, err.Error())
		}
	}
	// drop the journal/omap reservation once the backend images are gone.
	err = undoSnapReservation(ctx, rbdSnap, cr)
	if err != nil {
		log.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s) on image (%s) (%s)",
			rbdSnap.RequestName, rbdSnap.RbdSnapName, rbdSnap.RbdImageName, err)

		return nil, status.Error(codes.Internal, err.Error())
	}

	return &csi.DeleteSnapshotResponse{}, nil
}
|
2019-11-27 12:14:31 +00:00
|
|
|
|
2020-07-19 12:21:03 +00:00
|
|
|
// ControllerExpandVolume expand RBD Volumes on demand based on resizer request.
|
2021-06-25 11:39:42 +00:00
|
|
|
func (cs *ControllerServer) ControllerExpandVolume(
|
|
|
|
ctx context.Context,
|
|
|
|
req *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) {
|
2021-11-19 04:40:12 +00:00
|
|
|
err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_EXPAND_VOLUME)
|
|
|
|
if err != nil {
|
2021-08-24 15:03:25 +00:00
|
|
|
log.ErrorLog(ctx, "invalid expand volume req: %v", protosanitizer.StripSecrets(req))
|
2021-07-22 05:45:17 +00:00
|
|
|
|
2019-11-27 12:14:31 +00:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
volID := req.GetVolumeId()
|
|
|
|
if volID == "" {
|
2019-12-13 10:29:33 +00:00
|
|
|
return nil, status.Error(codes.InvalidArgument, "volume ID cannot be empty")
|
2019-11-27 12:14:31 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
capRange := req.GetCapacityRange()
|
|
|
|
if capRange == nil {
|
2019-12-13 10:29:33 +00:00
|
|
|
return nil, status.Error(codes.InvalidArgument, "capacityRange cannot be empty")
|
|
|
|
}
|
|
|
|
|
|
|
|
// lock out parallel requests against the same volume ID
|
|
|
|
if acquired := cs.VolumeLocks.TryAcquire(volID); !acquired {
|
2021-08-24 15:03:25 +00:00
|
|
|
log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volID)
|
2021-07-22 05:45:17 +00:00
|
|
|
|
2019-12-13 10:29:33 +00:00
|
|
|
return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volID)
|
2019-11-27 12:14:31 +00:00
|
|
|
}
|
2019-12-13 10:29:33 +00:00
|
|
|
defer cs.VolumeLocks.Release(volID)
|
2019-11-27 12:14:31 +00:00
|
|
|
|
2021-11-19 04:40:12 +00:00
|
|
|
cr, err := util.NewUserCredentialsWithMigration(req.GetSecrets())
|
2019-11-27 12:14:31 +00:00
|
|
|
if err != nil {
|
2021-11-19 04:40:12 +00:00
|
|
|
return nil, status.Error(codes.InvalidArgument, err.Error())
|
2019-11-27 12:14:31 +00:00
|
|
|
}
|
|
|
|
defer cr.DeleteCredentials()
|
2021-12-22 05:59:09 +00:00
|
|
|
rbdVol, err := genVolFromVolIDWithMigration(ctx, volID, cr, req.GetSecrets())
|
2019-11-27 12:14:31 +00:00
|
|
|
if err != nil {
|
2021-01-26 14:08:59 +00:00
|
|
|
switch {
|
|
|
|
case errors.Is(err, ErrImageNotFound):
|
2020-05-11 08:48:52 +00:00
|
|
|
err = status.Errorf(codes.NotFound, "volume ID %s not found", volID)
|
2021-01-26 14:08:59 +00:00
|
|
|
case errors.Is(err, util.ErrPoolNotFound):
|
2021-08-24 15:03:25 +00:00
|
|
|
log.ErrorLog(ctx, "failed to get backend volume for %s: %v", volID, err)
|
2020-05-11 08:48:52 +00:00
|
|
|
err = status.Errorf(codes.NotFound, err.Error())
|
2021-01-26 14:08:59 +00:00
|
|
|
default:
|
2020-05-11 08:48:52 +00:00
|
|
|
err = status.Errorf(codes.Internal, err.Error())
|
2020-01-31 08:49:11 +00:00
|
|
|
}
|
2021-07-22 05:45:17 +00:00
|
|
|
|
2020-05-11 08:48:52 +00:00
|
|
|
return nil, err
|
2019-11-27 12:14:31 +00:00
|
|
|
}
|
2021-12-22 05:59:09 +00:00
|
|
|
defer rbdVol.Destroy()
|
2019-11-27 12:14:31 +00:00
|
|
|
|
2021-07-08 15:06:42 +00:00
|
|
|
// NodeExpansion is needed for PersistentVolumes with,
|
|
|
|
// 1. Filesystem VolumeMode with & without Encryption and
|
|
|
|
// 2. Block VolumeMode with Encryption
|
|
|
|
// Hence set nodeExpansion flag based on VolumeMode and Encryption status
|
|
|
|
nodeExpansion := true
|
|
|
|
if req.GetVolumeCapability().GetBlock() != nil && !rbdVol.isEncrypted() {
|
|
|
|
nodeExpansion = false
|
|
|
|
}
|
|
|
|
|
|
|
|
// lock out volumeID for clone and delete operation
|
|
|
|
if err = cs.OperationLocks.GetExpandLock(volID); err != nil {
|
2021-08-24 15:03:25 +00:00
|
|
|
log.ErrorLog(ctx, err.Error())
|
2021-07-08 15:06:42 +00:00
|
|
|
|
|
|
|
return nil, status.Error(codes.Aborted, err.Error())
|
2020-01-29 11:44:45 +00:00
|
|
|
}
|
2021-07-08 15:06:42 +00:00
|
|
|
defer cs.OperationLocks.ReleaseExpandLock(volID)
|
2020-01-29 11:44:45 +00:00
|
|
|
|
2019-11-27 12:14:31 +00:00
|
|
|
// always round up the request size in bytes to the nearest MiB/GiB
|
|
|
|
volSize := util.RoundOffBytes(req.GetCapacityRange().GetRequiredBytes())
|
|
|
|
|
|
|
|
// resize volume if required
|
|
|
|
if rbdVol.VolSize < volSize {
|
2021-08-24 15:03:25 +00:00
|
|
|
log.DebugLog(ctx, "rbd volume %s size is %v,resizing to %v", rbdVol, rbdVol.VolSize, volSize)
|
2020-07-30 06:28:51 +00:00
|
|
|
err = rbdVol.resize(volSize)
|
2019-11-27 12:14:31 +00:00
|
|
|
if err != nil {
|
2021-08-24 15:03:25 +00:00
|
|
|
log.ErrorLog(ctx, "failed to resize rbd image: %s with error: %v", rbdVol, err)
|
2021-07-22 05:45:17 +00:00
|
|
|
|
2019-11-27 12:14:31 +00:00
|
|
|
return nil, status.Error(codes.Internal, err.Error())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return &csi.ControllerExpandVolumeResponse{
|
2019-12-13 10:29:33 +00:00
|
|
|
CapacityBytes: rbdVol.VolSize,
|
2019-11-27 12:14:31 +00:00
|
|
|
NodeExpansionRequired: nodeExpansion,
|
|
|
|
}, nil
|
|
|
|
}
|
2021-05-26 08:29:32 +00:00
|
|
|
|
rbd: note that thick-provisioning is deprecated
Thick-provisioning was introduced to make accounting of assigned space
for volumes easier. When thick-provisioned volumes are the only consumer
of the Ceph cluster, this works fine. However, it is unlikely that this
is the case. Instead, accounting of the requested (thin-provisioned)
size of volumes is much more practical as different types of volumes can
be tracked.
OpenShift already provides cluster-wide quotas, which can combine
accounting of requested volumes by grouping different StorageClasses.
In addition to the difficult practise of allowing only thick-provisioned
RBD backed volumes, the performance makes thick-provisioning
troublesome. As volumes need to be completely allocated, data needs to
be written to the volume. This can take a long time, depending on the
size of the volume. Provisioning, cloning and snapshotting becomes very
much noticeable, and because of the additional time consumption, more
prone to failures.
Signed-off-by: Niels de Vos <ndevos@redhat.com>
2021-10-25 13:05:33 +00:00
|
|
|
// logThickProvisioningDeprecation makes sure the deprecation warning about
// thick-provisioning is logged only once.
//
// NOTE(review): this package-level flag is read and written without
// synchronization; concurrent requests could race on it (worst case: the
// warning is logged more than once) — confirm this is acceptable.
var logThickProvisioningDeprecation = true
|
|
|
|
|
2021-05-26 08:29:32 +00:00
|
|
|
// isThickProvisionRequest returns true in case the request contains the
|
|
|
|
// `thickProvision` option set to `true`.
|
2021-06-15 07:23:47 +00:00
|
|
|
func isThickProvisionRequest(parameters map[string]string) bool {
|
2021-05-26 08:29:32 +00:00
|
|
|
tp := "thickProvision"
|
|
|
|
|
2021-06-15 07:23:47 +00:00
|
|
|
thick, ok := parameters[tp]
|
2021-05-26 08:29:32 +00:00
|
|
|
if !ok || thick == "" {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
thickBool, err := strconv.ParseBool(thick)
|
|
|
|
if err != nil {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
rbd: note that thick-provisioning is deprecated
Thick-provisioning was introduced to make accounting of assigned space
for volumes easier. When thick-provisioned volumes are the only consumer
of the Ceph cluster, this works fine. However, it is unlikely that this
is the case. Instead, accounting of the requested (thin-provisioned)
size of volumes is much more practical as different types of volumes can
be tracked.
OpenShift already provides cluster-wide quotas, which can combine
accounting of requested volumes by grouping different StorageClasses.
In addition to the difficult practise of allowing only thick-provisioned
RBD backed volumes, the performance makes thick-provisioning
troublesome. As volumes need to be completely allocated, data needs to
be written to the volume. This can take a long time, depending on the
size of the volume. Provisioning, cloning and snapshotting becomes very
much noticeable, and because of the additional time consumption, more
prone to failures.
Signed-off-by: Niels de Vos <ndevos@redhat.com>
2021-10-25 13:05:33 +00:00
|
|
|
if logThickProvisioningDeprecation {
|
|
|
|
log.WarningLogMsg("thick-provisioning is deprecated and will " +
|
|
|
|
"be removed in a future release")
|
|
|
|
logThickProvisioningDeprecation = false
|
|
|
|
}
|
|
|
|
|
2021-05-26 08:29:32 +00:00
|
|
|
return thickBool
|
|
|
|
}
|