/*
Copyright 2018 The Ceph-CSI Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package rbd

import (
	"context"
	"errors"
	"fmt"
	"strconv"

	csicommon "github.com/ceph/ceph-csi/internal/csi-common"
	"github.com/ceph/ceph-csi/internal/util"
	"github.com/ceph/ceph-csi/internal/util/k8s"
	"github.com/ceph/ceph-csi/internal/util/log"

	librbd "github.com/ceph/go-ceph/rbd"
	"github.com/container-storage-interface/spec/lib/go/csi"
	"github.com/kubernetes-csi/csi-lib-utils/protosanitizer"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

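// oneGB is the default volume size (1 GiB, in bytes) applied when the
// CreateVolume request does not specify a capacity range.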
const (
	oneGB = 1073741824
)

// ControllerServer struct of rbd CSI driver with supported methods of CSI
// controller server spec.
type ControllerServer struct {
	*csicommon.DefaultControllerServer

	// A map storing all volumes with ongoing operations so that additional operations
	// for that same volume (as defined by VolumeID/volume name) return an Aborted error
	VolumeLocks *util.VolumeLocks

	// A map storing all snapshots with ongoing operations so that additional operations
	// for that same snapshot (as defined by SnapshotID/snapshot name) return an Aborted error
	SnapshotLocks *util.VolumeLocks

	// A map storing all volumes/snapshots with ongoing operations.
	OperationLocks *util.OperationLock

	// A map storing all volume groups with ongoing operations so that additional operations
	// for that same volume group (as defined by volume group ID/volume group name) return an Aborted error
	VolumeGroupLocks *util.VolumeLocks

	// Cluster name
	ClusterName string

	// Set metadata on volume
	SetMetadata bool
}

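// validateVolumeReq checks that the CreateVolume request carries all the
// required fields (name, capabilities, clusterID, pool or topology pools,
// and striping parameters) and returns an InvalidArgument error when
// anything mandatory is missing or empty.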
func (cs *ControllerServer) validateVolumeReq(ctx context.Context, req *csi.CreateVolumeRequest) error {
	if err := cs.Driver.ValidateControllerServiceRequest(
		csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
		log.ErrorLog(ctx, "invalid create volume req: %v", protosanitizer.StripSecrets(req))

		return err
	}
	// Check sanity of request Name, Volume Capabilities
	if req.GetName() == "" {
		return status.Error(codes.InvalidArgument, "volume Name cannot be empty")
	}
	if req.GetVolumeCapabilities() == nil {
		return status.Error(codes.InvalidArgument, "volume Capabilities cannot be empty")
	}
	options := req.GetParameters()
	if value, ok := options["clusterID"]; !ok || value == "" {
		return status.Error(codes.InvalidArgument, "empty cluster ID to provision volume from")
	}

	poolValue, poolOK := options["pool"]
	topologyConstrainedPoolsValue, topologyOK := options["topologyConstrainedPools"]
	if !poolOK {
		if topologyOK && topologyConstrainedPoolsValue == "" {
			return status.Error(codes.InvalidArgument, "empty pool name or topologyConstrainedPools to provision volume")
		} else if !topologyOK {
			return status.Error(codes.InvalidArgument, "missing or empty pool name to provision volume from")
		}
	} else if poolValue == "" {
		return status.Error(codes.InvalidArgument, "missing or empty pool name to provision volume from")
	}

	if value, ok := options["dataPool"]; ok && value == "" {
		return status.Error(codes.InvalidArgument, "empty datapool name to provision volume from")
	}
	if value, ok := options["radosNamespace"]; ok && value == "" {
		return status.Error(codes.InvalidArgument, "empty namespace name to provision volume from")
	}
	if value, ok := options["volumeNamePrefix"]; ok && value == "" {
		return status.Error(codes.InvalidArgument, "empty volume name prefix to provision volume from")
	}

	// Allow readonly access mode for volume with content source
	err := util.CheckReadOnlyManyIsSupported(req)
	if err != nil {
		return err
	}

	err = validateStriping(req.GetParameters())
	if err != nil {
		return status.Error(codes.InvalidArgument, err.Error())
	}

	return nil
}

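// validateStriping verifies that stripeUnit and stripeCount are either both
// set or both unset, and that an optional objectSize parses as an unsigned
// integer that is a power of two.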
func validateStriping(parameters map[string]string) error {
	stripeUnit := parameters["stripeUnit"]
	stripeCount := parameters["stripeCount"]
	if stripeUnit != "" && stripeCount == "" {
		return errors.New("stripeCount must be specified when stripeUnit is specified")
	}

	if stripeUnit == "" && stripeCount != "" {
		return errors.New("stripeUnit must be specified when stripeCount is specified")
	}

	objectSize := parameters["objectSize"]
	if objectSize != "" {
		objSize, err := strconv.ParseUint(objectSize, 10, 64)
		if err != nil {
			return fmt.Errorf("failed to parse objectSize %s: %w", objectSize, err)
		}
		// check objectSize is power of 2
		/*
			Take 2^3=8 for example.
			x & (x-1)
			8 & 7
			1000 & 0111 = 0000
		*/
		if objSize == 0 || (objSize&(objSize-1)) != 0 {
			return fmt.Errorf("objectSize %s is not power of 2", objectSize)
		}
	}

	return nil
}

// parseVolCreateRequest takes the CreateVolume `request` argument and parses
// its fields into an rbdVolume for use in subsequent calls.
func (cs *ControllerServer) parseVolCreateRequest(
	ctx context.Context,
	req *csi.CreateVolumeRequest,
	cr *util.Credentials,
) (*rbdVolume, error) {
	// TODO (sbezverk) Last check for not exceeding total storage capacity

	// the below capability check indicates that we support both {SINGLE_NODE or MULTI_NODE} WRITERs,
	// and the `isMultiWriter` flag is set accordingly.
	isMultiWriter, isBlock := csicommon.IsBlockMultiWriter(req.GetVolumeCapabilities())

	// the below return value is set if this is an RWO-mode file PVC.
	isRWOFile := csicommon.IsFileRWO(req.GetVolumeCapabilities())

	// the below return value is set if this is a ReadOnly capability.
	isROOnly := csicommon.IsReaderOnly(req.GetVolumeCapabilities())
	// We want to fail early if the user is trying to create a RWX on a non-block type device
	if !isRWOFile && !isBlock && !isROOnly {
		return nil, status.Error(
			codes.InvalidArgument,
			"multi node access modes are only supported on rbd `block` type volumes")
	}

	if imageFeatures, ok := req.GetParameters()["imageFeatures"]; !checkValidImageFeatures(imageFeatures, ok) {
		return nil, status.Error(codes.InvalidArgument, "empty imageFeatures parameter")
	}

	// if it's NOT SINGLE_NODE_WRITER, and it's BLOCK, we'll set the parameter to ignore the in-use checks
	rbdVol, err := genVolFromVolumeOptions(
		ctx,
		req.GetParameters(),
		isMultiWriter && isBlock,
		false)
	if err != nil {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}

	// set cluster name on volume
	rbdVol.ClusterName = cs.ClusterName
	// set metadata on volume
	rbdVol.EnableMetadata = cs.SetMetadata

	// if the KMS is of type VaultToken, additional metadata is needed
	// depending on the tenant, the KMS can be configured with other
	// options
	// FIXME: this works only on Kubernetes, how do other CO supply metadata?
	// namespace is derived from the `csi.storage.k8s.io/pvc/namespace`
	// parameter.

	// get the owner of the PVC, which is required for a few encryption-related operations
	rbdVol.Owner = k8s.GetOwner(req.GetParameters())

	err = rbdVol.initKMS(ctx, req.GetParameters(), req.GetSecrets())
	if err != nil {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}

	rbdVol.RequestName = req.GetName()

	// Volume Size - Default is 1 GiB
	volSizeBytes := int64(oneGB)
	if req.GetCapacityRange() != nil {
		volSizeBytes = req.GetCapacityRange().GetRequiredBytes()
	}

	// always round up the request size in bytes to the nearest MiB/GiB
	rbdVol.VolSize = util.RoundOffBytes(volSizeBytes)
	// RequestedVolSize has the size of the volume requested by the user.
	rbdVol.RequestedVolSize = rbdVol.VolSize

	// start with pool the same as journal pool, in case there is a topology
	// based split, pool for the image will be updated subsequently
	rbdVol.JournalPool = rbdVol.Pool

	// store topology information from the request
	rbdVol.TopologyPools, rbdVol.TopologyRequirement, err = util.GetTopologyFromRequest(req)
	if err != nil {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}

	// Get QoS parameters from the SC if a QoS configuration exists in the SC
	err = rbdVol.SetQOS(ctx, req.GetParameters())
	if err != nil {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}

	err = rbdVol.Connect(cr)
	if err != nil {
		log.ErrorLog(ctx, "failed to connect to volume %v: %v", rbdVol.RbdImageName, err)

		return nil, status.Error(codes.Internal, err.Error())
	}

	// NOTE: rbdVol does not have VolID and RbdImageName populated; everything
	// else is populated after parsing the create request
	return rbdVol, nil
}

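// ToCSI converts the rbdVolume into a csi.Volume, filling the volume context
// with the pool, journal pool and image name, plus the optional rados
// namespace, data pool and topology information when present.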
func (rbdVol *rbdVolume) ToCSI(ctx context.Context) (*csi.Volume, error) {
	vol := &csi.Volume{
		VolumeId:      rbdVol.VolID,
		CapacityBytes: rbdVol.VolSize,
		VolumeContext: map[string]string{
			"pool":        rbdVol.Pool,
			"journalPool": rbdVol.JournalPool,
			"imageName":   rbdVol.RbdImageName,
		},
	}

	if rbdVol.RadosNamespace != "" {
		vol.VolumeContext["radosNamespace"] = rbdVol.RadosNamespace
	}

	if rbdVol.DataPool != "" {
		vol.VolumeContext["dataPool"] = rbdVol.DataPool
	}

	if rbdVol.Topology != nil {
		vol.AccessibleTopology = []*csi.Topology{
			{
				Segments: rbdVol.Topology,
			},
		}
	}

	return vol, nil
}

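// buildCreateVolumeResponse wraps the rbdVolume in a CreateVolumeResponse,
// copying the content source and any extra volume context entries from the
// request parameters.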
func buildCreateVolumeResponse(
	ctx context.Context,
	req *csi.CreateVolumeRequest,
	rbdVol *rbdVolume,
) (*csi.CreateVolumeResponse, error) {
	volume, err := rbdVol.ToCSI(ctx)
	if err != nil {
		return nil, status.Errorf(
			codes.Internal,
			"BUG, can not happen: failed to convert volume %q to CSI type: %v",
			rbdVol, err)
	}

	volume.ContentSource = req.GetVolumeContentSource()

	for param, value := range util.GetVolumeContext(req.GetParameters()) {
		volume.VolumeContext[param] = value
	}

	return &csi.CreateVolumeResponse{Volume: volume}, nil
}

// getGRPCErrorForCreateVolume converts the given error into a gRPC error based
// on the error type. It is expected to be used only for CreateVolume, as we
// need to return different gRPC codes for different functions based on the input.
func getGRPCErrorForCreateVolume(err error) error {
	if errors.Is(err, ErrVolNameConflict) {
		return status.Error(codes.AlreadyExists, err.Error())
	}
	if errors.Is(err, ErrFlattenInProgress) {
		return status.Error(codes.Aborted, err.Error())
	}

	return status.Error(codes.Internal, err.Error())
}

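// checkValidCreateVolumeRequest verifies that the new volume is compatible
// with its content source (snapshot or parent volume) with respect to
// encryption and cloning, returning an InvalidArgument error otherwise.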
func checkValidCreateVolumeRequest(rbdVol, parentVol *rbdVolume, rbdSnap *rbdSnapshot) error {
	var err error
	switch {
	case rbdSnap != nil:
		err = rbdSnap.isCompatibleEncryption(&rbdVol.rbdImage)
		if err != nil {
			return status.Errorf(codes.InvalidArgument, "cannot restore from snapshot %s: %s", rbdSnap, err.Error())
		}

		err = rbdSnap.isCompabitableClone(&rbdVol.rbdImage)
		if err != nil {
			return status.Errorf(codes.InvalidArgument, "cannot restore from snapshot %s: %s", rbdSnap, err.Error())
		}

	case parentVol != nil:
		err = parentVol.isCompatibleEncryption(&rbdVol.rbdImage)
		if err != nil {
			return status.Errorf(codes.InvalidArgument, "cannot clone from volume %s: %s", parentVol, err.Error())
		}

		err = parentVol.isCompabitableClone(&rbdVol.rbdImage)
		if err != nil {
			return status.Errorf(codes.InvalidArgument, "cannot clone from volume %s: %s", parentVol, err.Error())
		}
	}

	return nil
}

// CreateVolume creates the volume in backend.
func (cs *ControllerServer) CreateVolume(
	ctx context.Context,
	req *csi.CreateVolumeRequest,
) (*csi.CreateVolumeResponse, error) {
	err := cs.validateVolumeReq(ctx, req)
	if err != nil {
		return nil, err
	}

	// TODO: create/get a connection from the ConnPool, and do not pass the
	// credentials to any of the utility functions.

	cr, err := util.NewUserCredentialsWithMigration(req.GetSecrets())
	if err != nil {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}
	defer cr.DeleteCredentials()
	rbdVol, err := cs.parseVolCreateRequest(ctx, req, cr)
	if err != nil {
		return nil, err
	}
	defer rbdVol.Destroy(ctx)
	// Existence and conflict checks
	if acquired := cs.VolumeLocks.TryAcquire(req.GetName()); !acquired {
		log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, req.GetName())

		return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, req.GetName())
	}
	defer cs.VolumeLocks.Release(req.GetName())

	parentVol, rbdSnap, err := checkContentSource(ctx, req, cr)
	if err != nil {
		return nil, err
	}
	if parentVol != nil {
		defer parentVol.Destroy(ctx)
	}
	if rbdSnap != nil {
		defer rbdSnap.Destroy(ctx)
	}

	err = updateTopologyConstraints(rbdVol, rbdSnap)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}

	found, err := rbdVol.Exists(ctx, parentVol)
	if err != nil {
		return nil, getGRPCErrorForCreateVolume(err)
	} else if found {
		return cs.repairExistingVolume(ctx, req, rbdVol, rbdSnap)
	}

	err = checkValidCreateVolumeRequest(rbdVol, parentVol, rbdSnap)
	if err != nil {
		return nil, err
	}

	err = flattenParentImage(ctx, parentVol, rbdSnap, cr)
	if err != nil {
		return nil, err
	}

	err = reserveVol(ctx, rbdVol, cr)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
	defer func() {
		if err != nil {
			errDefer := undoVolReservation(ctx, rbdVol, cr)
			if errDefer != nil {
				log.WarningLog(ctx, "failed undoing reservation of volume: %s (%s)", req.GetName(), errDefer)
			}
		}
	}()

	err = cs.createBackingImage(ctx, cr, req.GetSecrets(), rbdVol, parentVol, rbdSnap, req.GetParameters())
	if err != nil {
		if errors.Is(err, ErrFlattenInProgress) {
			return nil, status.Error(codes.Aborted, err.Error())
		}

		return nil, err
	}

	// Set Metadata on PV Create
	metadata := k8s.GetVolumeMetadata(req.GetParameters())
	err = rbdVol.setAllMetadata(metadata)
	if err != nil {
		if deleteErr := rbdVol.Delete(ctx); deleteErr != nil {
			log.ErrorLog(ctx, "failed to delete rbd image: %s with error: %v", rbdVol, deleteErr)
		}

		return nil, status.Error(codes.Internal, err.Error())
	}

	return buildCreateVolumeResponse(ctx, req, rbdVol)
}

// flattenParentImage is to be called before proceeding with creating a volume
// from a data source. This function flattens the parent image as needed to
// make sure no flattening is required during or after the new volume creation.
// For a parent volume, its parent (temp clone or snapshot) is flattened.
// For a parent snapshot, the snapshot itself is flattened.
func flattenParentImage(
	ctx context.Context,
	rbdVol *rbdVolume,
	rbdSnap *rbdSnapshot,
	cr *util.Credentials,
) error {
	// Flatten the image's parent before the reservation to avoid stale
	// entries after creation if we return an ABORT error and the
	// DeleteVolume RPC is not called.
	// The limits are reduced for cloned images to keep them in range: if an
	// intermediate clone reaches the depth limit we would have to return an
	// ABORT error because it needs to be flattened before continuing, which
	// may leak omap entries and stale temporary snapshots in corner cases.
	// By reducing the limit and checking the depth of the parent image chain
	// itself, we can flatten the parent images before they are used and
	// avoid the stale omap entries.
	hardLimit := rbdHardMaxCloneDepth
	softLimit := rbdSoftMaxCloneDepth
	if rbdVol != nil {
		// choosing 3, since cloning an image creates a temp clone and a final clone, which
		// add a total depth of 2, and the parent image itself adds one depth.
		const depthToAvoidFlatten = 3
		if rbdHardMaxCloneDepth > depthToAvoidFlatten {
			hardLimit = rbdHardMaxCloneDepth - depthToAvoidFlatten
		}
		if rbdSoftMaxCloneDepth > depthToAvoidFlatten {
			softLimit = rbdSoftMaxCloneDepth - depthToAvoidFlatten
		}
		err := rbdVol.flattenParent(ctx, hardLimit, softLimit)
		if err != nil {
			return getGRPCErrorForCreateVolume(err)
		}

		// flatten cloned images if the snapshot count on the parent image
		// exceeds maxSnapshotsOnImage
		err = flattenTemporaryClonedImages(ctx, rbdVol, cr)
		if err != nil {
			return err
		}
	}
	if rbdSnap != nil {
		err := rbdSnap.Connect(cr)
		if err != nil {
			return getGRPCErrorForCreateVolume(err)
		}
		// in case of any error call Destroy for cleanup.
		defer func() {
			if err != nil {
				rbdSnap.Destroy(ctx)
			}
		}()

		// choosing 1, since restore from snapshot adds one depth.
		const depthToAvoidFlatten = 1
		if rbdHardMaxCloneDepth > depthToAvoidFlatten {
			hardLimit = rbdHardMaxCloneDepth - depthToAvoidFlatten
		}
		if rbdSoftMaxCloneDepth > depthToAvoidFlatten {
			softLimit = rbdSoftMaxCloneDepth - depthToAvoidFlatten
		}

		err = rbdSnap.flattenRbdImage(ctx, false, hardLimit, softLimit)
		if err != nil {
			return getGRPCErrorForCreateVolume(err)
		}
	}

	return nil
}

// repairExistingVolume checks the existing volume or snapshot and makes sure
// that the state is corrected to what was requested. It is needed to call this
// when the process of creating a volume was interrupted.
func (cs *ControllerServer) repairExistingVolume(ctx context.Context, req *csi.CreateVolumeRequest,
	rbdVol *rbdVolume, rbdSnap *rbdSnapshot,
) (*csi.CreateVolumeResponse, error) {
	vcs := req.GetVolumeContentSource()

	switch {
	// rbdVol is a restore from snapshot, rbdSnap is passed
	case vcs.GetSnapshot() != nil:
		err := rbdSnap.repairEncryptionConfig(ctx, &rbdVol.rbdImage)
		if err != nil {
			return nil, err
		}

		// expand the image if the requested size is greater than the current size
		err = rbdVol.expand()
		if err != nil {
			log.ErrorLog(ctx, "failed to resize volume %s: %v", rbdVol, err)

			return nil, err
		}

	// rbdVol is a clone from parentVol
	case vcs.GetVolume() != nil:
		// expand the image if the requested size is greater than the current size
		err := rbdVol.expand()
		if err != nil {
			log.ErrorLog(ctx, "failed to resize volume %s: %v", rbdVol, err)

			return nil, err
		}

	default:
		// setup encryption again to make sure everything is in place.
		if rbdVol.isBlockEncrypted() {
			err := rbdVol.setupBlockEncryption(ctx)
			if err != nil {
				return nil, fmt.Errorf("failed to setup encryption for image %s: %w", rbdVol, err)
			}
		}
	}

	// Set metadata on restart of the provisioner pod when the image exists
	metadata := k8s.GetVolumeMetadata(req.GetParameters())
	err := rbdVol.setAllMetadata(metadata)
	if err != nil {
		return nil, err
	}

	return buildCreateVolumeResponse(ctx, req, rbdVol)
}

// flattenTemporaryClonedImages checks the snapshots on the rbd image, as krbd
// imposes a limit that an image cannot have more than 510 snapshots at a given
// point in time. If the snapshot count exceeds `maxSnapshotsOnImage`, add a
// task to flatten all the temporary cloned images and return an ABORT error.
// If the snapshot count exceeds `minSnapshotsOnImageToStartFlatten`, add a
// task to flatten all the temporary cloned images.
func flattenTemporaryClonedImages(ctx context.Context, rbdVol *rbdVolume, cr *util.Credentials) error {
	snaps, err := rbdVol.listSnapshots()
	if err != nil {
		if errors.Is(err, ErrImageNotFound) {
			return status.Error(codes.InvalidArgument, err.Error())
		}

		return status.Error(codes.Internal, err.Error())
	}

	if len(snaps) > int(maxSnapshotsOnImage) {
		log.DebugLog(
			ctx,
			"snapshots count %d on image: %s reached configured hard limit %d",
			len(snaps),
			rbdVol,
			maxSnapshotsOnImage)
		err = flattenClonedRbdImages(
			ctx,
			snaps,
			rbdVol.Pool,
			rbdVol.Monitors,
			rbdVol.RbdImageName,
			cr)
		if err != nil {
			return status.Error(codes.Internal, err.Error())
		}

		return status.Errorf(codes.ResourceExhausted, "rbd image %s has %d snapshots", rbdVol, len(snaps))
	}

	if len(snaps) > int(minSnapshotsOnImageToStartFlatten) {
		log.DebugLog(
			ctx,
			"snapshots count %d on image: %s reached configured soft limit %d",
			len(snaps),
			rbdVol,
			minSnapshotsOnImageToStartFlatten)
		// If we start flattening all the snapshots in one shot the volume
		// creation time will be affected, so we flatten only the extra
		// snapshots.
		snaps = snaps[minSnapshotsOnImageToStartFlatten-1:]
		err = flattenClonedRbdImages(
			ctx,
			snaps,
			rbdVol.Pool,
			rbdVol.Monitors,
			rbdVol.RbdImageName,
			cr)
		if err != nil {
			return status.Error(codes.Internal, err.Error())
		}
	}

	return nil
}

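// createVolumeFromSnapshot restores a snapshot into the new rbd image: it
// clones the image from the snapshot, copies the encryption configuration
// from the parent and expands the clone to the requested size.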
func (cs *ControllerServer) createVolumeFromSnapshot(
	ctx context.Context,
	cr *util.Credentials,
	secrets map[string]string,
	rbdVol *rbdVolume,
	snapshotID string,
) error {
	if acquired := cs.SnapshotLocks.TryAcquire(snapshotID); !acquired {
		log.ErrorLog(ctx, util.SnapshotOperationAlreadyExistsFmt, snapshotID)

		return status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, snapshotID)
	}
	defer cs.SnapshotLocks.Release(snapshotID)

	rbdSnap, err := genSnapFromSnapID(ctx, snapshotID, cr, secrets)
	if err != nil {
		if errors.Is(err, util.ErrPoolNotFound) {
			log.ErrorLog(ctx, "failed to get backend snapshot for %s: %v", snapshotID, err)

			return status.Error(codes.InvalidArgument, err.Error())
		}

		return status.Error(codes.Internal, err.Error())
	}
	defer rbdSnap.Destroy(ctx)

	// update parent name (rbd image name in snapshot)
	rbdSnap.RbdImageName = rbdSnap.RbdSnapName
	parentVol := rbdSnap.toVolume()
	// as we are operating on a single cluster, reuse the connection
	parentVol.conn = rbdVol.conn.Copy()
	defer parentVol.Destroy(ctx)

	// create clone image and delete snapshot
	err = rbdVol.cloneRbdImageFromSnapshot(ctx, rbdSnap, parentVol)
	if err != nil {
		log.ErrorLog(ctx, "failed to clone rbd image %s from snapshot %s: %v", rbdVol, rbdSnap, err)

		return err
	}

	defer func() {
		if err != nil {
			log.DebugLog(ctx, "Removing clone image %q", rbdVol)
			errDefer := rbdVol.Delete(ctx)
			if errDefer != nil {
				log.ErrorLog(ctx, "failed to delete clone image %q: %v", rbdVol, errDefer)
			}
		}
	}()

	err = rbdVol.unsetAllMetadata(k8s.GetSnapshotMetadataKeys())
	if err != nil {
		log.ErrorLog(ctx, "failed to unset snapshot metadata on rbd image %q: %v", rbdVol, err)

		return err
	}

	log.DebugLog(ctx, "create volume %s from snapshot %s", rbdVol, rbdSnap)

	err = parentVol.copyEncryptionConfig(ctx, &rbdVol.rbdImage, true)
	if err != nil {
		return fmt.Errorf("failed to copy encryption config for %q: %w", rbdVol, err)
	}

	// resize the volume if the size is different;
	// expand the image if the requested size is greater than the current size
	err = rbdVol.expand()
	if err != nil {
		log.ErrorLog(ctx, "failed to resize volume %s: %v", rbdVol, err)

		return err
	}

	return nil
}

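// createBackingImage creates the rbd image that backs the volume, either by
// restoring a snapshot, cloning a parent volume or creating a fresh image,
// and then stores the image ID and QoS settings in the image metadata.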
func (cs *ControllerServer) createBackingImage(
	ctx context.Context,
	cr *util.Credentials,
	secrets map[string]string,
	rbdVol, parentVol *rbdVolume,
	rbdSnap *rbdSnapshot,
	scParams map[string]string,
) error {
	var err error

	j, err := volJournal.Connect(rbdVol.Monitors, rbdVol.RadosNamespace, cr)
	if err != nil {
		return status.Error(codes.Internal, err.Error())
	}
	defer j.Destroy()

	switch {
	case rbdSnap != nil:
		if err = cs.OperationLocks.GetRestoreLock(rbdSnap.VolID); err != nil {
			log.ErrorLog(ctx, err.Error())

			return status.Error(codes.Aborted, err.Error())
		}
		defer cs.OperationLocks.ReleaseRestoreLock(rbdSnap.VolID)

		err = cs.createVolumeFromSnapshot(ctx, cr, secrets, rbdVol, rbdSnap.VolID)
		if err != nil {
			return err
		}
	case parentVol != nil:
		if err = cs.OperationLocks.GetCloneLock(parentVol.VolID); err != nil {
			log.ErrorLog(ctx, err.Error())

			return status.Error(codes.Aborted, err.Error())
		}
		defer cs.OperationLocks.ReleaseCloneLock(parentVol.VolID)

		return rbdVol.createCloneFromImage(ctx, parentVol)
	default:
		err = createImage(ctx, rbdVol, cr)
		if err != nil {
			log.ErrorLog(ctx, "failed to create volume: %v", err)

			return status.Error(codes.Internal, err.Error())
		}
	}

	log.DebugLog(ctx, "created image %s backed for request name %s", rbdVol, rbdVol.RequestName)

	defer func() {
		if err != nil {
			if deleteErr := rbdVol.Delete(ctx); deleteErr != nil {
				log.ErrorLog(ctx, "failed to delete rbd image: %s with error: %v", rbdVol, deleteErr)
			}
		}
	}()
	err = rbdVol.storeImageID(ctx, j)
	if err != nil {
		return status.Error(codes.Internal, err.Error())
	}

	// Apply QoS parameters to the rbd image.
	err = rbdVol.ApplyQOS(ctx)
	if err != nil {
		log.DebugLog(ctx, "failed to apply QoS for rbd image: %v", err)

		return status.Error(codes.Internal, err.Error())
	}
	// Save the QoS parameters from the SC in the image metadata; they are used when resizing the volume.
	err = rbdVol.SaveQOS(ctx, scParams)
	if err != nil {
		log.DebugLog(ctx, "failed to save QoS for rbd image: %v", err)

		return status.Error(codes.Internal, err.Error())
	}

	return nil
}

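// checkContentSource inspects the volume content source of the CreateVolume
// request and returns the backing rbdVolume (for clones) or rbdSnapshot (for
// snapshot restores), or nil for both when no content source is set.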
func checkContentSource(
	ctx context.Context,
	req *csi.CreateVolumeRequest,
	cr *util.Credentials,
) (*rbdVolume, *rbdSnapshot, error) {
	if req.GetVolumeContentSource() == nil {
		return nil, nil, nil
	}
	volumeSource := req.GetVolumeContentSource()
	switch volumeSource.GetType().(type) {
	case *csi.VolumeContentSource_Snapshot:
		snapshot := req.GetVolumeContentSource().GetSnapshot()
		if snapshot == nil {
			return nil, nil, status.Error(codes.NotFound, "volume Snapshot cannot be empty")
		}
		snapshotID := snapshot.GetSnapshotId()
		if snapshotID == "" {
			return nil, nil, status.Errorf(codes.NotFound, "volume Snapshot ID cannot be empty")
		}
		rbdSnap, err := genSnapFromSnapID(ctx, snapshotID, cr, req.GetSecrets())
		if err != nil {
			log.ErrorLog(ctx, "failed to get backend snapshot for %s: %v", snapshotID, err)
			if !errors.Is(err, ErrSnapNotFound) {
				return nil, nil, status.Error(codes.Internal, err.Error())
			}

			return nil, nil, status.Errorf(codes.NotFound, "%s snapshot does not exist", snapshotID)
		}

		return nil, rbdSnap, nil
	case *csi.VolumeContentSource_Volume:
		vol := req.GetVolumeContentSource().GetVolume()
		if vol == nil {
			return nil, nil, status.Error(codes.NotFound, "volume cannot be empty")
		}
		volID := vol.GetVolumeId()
		if volID == "" {
			return nil, nil, status.Errorf(codes.NotFound, "volume ID cannot be empty")
		}
		rbdvol, err := GenVolFromVolID(ctx, volID, cr, req.GetSecrets())
		if err != nil {
			log.ErrorLog(ctx, "failed to get backend image for %s: %v", volID, err)
			if !errors.Is(err, ErrImageNotFound) {
				return nil, nil, status.Error(codes.Internal, err.Error())
			}

			return nil, nil, status.Errorf(codes.NotFound, "%s image does not exist", volID)
		}

		return rbdvol, nil, nil
	}

	return nil, nil, status.Errorf(codes.InvalidArgument, "not a proper volume source")
}

// checkErrAndUndoReserve works on the error from GenVolFromVolID() and undoes the omap reservation.
// Even though volumeID is part of the rbdVolume struct, we take it as an argument here: the volume ID
// is filled in via the `GenVolFromVolID->generateVolumeFromVolumeID` call path, and this function
// operates on the error case of that call chain, so we cannot rely on the 'rbdvol->rbdimage->volID' field.
func (cs *ControllerServer) checkErrAndUndoReserve(
	ctx context.Context,
	err error,
	volumeID string,
	rbdVol *rbdVolume, cr *util.Credentials,
) (*csi.DeleteVolumeResponse, error) {
	if errors.Is(err, util.ErrPoolNotFound) {
		log.WarningLog(ctx, "failed to get backend volume for %s: %v", volumeID, err)

		return &csi.DeleteVolumeResponse{}, nil
	}

	// if error is ErrKeyNotFound, then a previous attempt at deletion was complete
	// or partially complete (image and imageOMap are garbage collected already), hence return
	// success as deletion is complete
	if errors.Is(err, util.ErrKeyNotFound) {
		log.WarningLog(ctx, "failed to get volume options for %s: %v", volumeID, err)

		return &csi.DeleteVolumeResponse{}, nil
	}

	if errors.Is(err, ErrImageNotFound) {
		notFoundErr := rbdVol.ensureImageCleanup(ctx)
		if notFoundErr != nil {
			return nil, status.Errorf(codes.Internal, "failed to cleanup image %q: %v", rbdVol, notFoundErr)
		}
	} else {
		// All errors other than ErrImageNotFound should return an error back to the caller
		return nil, status.Error(codes.Internal, err.Error())
	}

	// If error is ErrImageNotFound then we failed to find the image, but found the imageOMap
	// to lead us to the image, hence the imageOMap needs to be garbage collected, by calling
	// unreserve for the same
	if acquired := cs.VolumeLocks.TryAcquire(rbdVol.RequestName); !acquired {
		log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, rbdVol.RequestName)

		return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, rbdVol.RequestName)
	}
	defer cs.VolumeLocks.Release(rbdVol.RequestName)

	if err = undoVolReservation(ctx, rbdVol, cr); err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}

	return &csi.DeleteVolumeResponse{}, nil
}

// DeleteVolume deletes the volume in backend and removes the volume metadata
// from store.
func (cs *ControllerServer) DeleteVolume(
	ctx context.Context,
	req *csi.DeleteVolumeRequest,
) (*csi.DeleteVolumeResponse, error) {
	var err error
	if err = cs.Driver.ValidateControllerServiceRequest(
		csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
		log.ErrorLog(ctx, "invalid delete volume req: %v", protosanitizer.StripSecrets(req))

		return nil, err
	}

	// For now the image gets unconditionally deleted, but a retention policy could be checked here
	volumeID := req.GetVolumeId()
	if volumeID == "" {
		return nil, status.Error(codes.InvalidArgument, "empty volume ID in request")
	}

	cr, err := util.NewUserCredentialsWithMigration(req.GetSecrets())
	if err != nil {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}
	defer cr.DeleteCredentials()

	if acquired := cs.VolumeLocks.TryAcquire(volumeID); !acquired {
		log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volumeID)

		return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volumeID)
	}
	defer cs.VolumeLocks.Release(volumeID)

	// lock out volumeID for clone and expand operation
	if err = cs.OperationLocks.GetDeleteLock(volumeID); err != nil {
		log.ErrorLog(ctx, err.Error())

		return nil, status.Error(codes.Aborted, err.Error())
	}
	defer cs.OperationLocks.ReleaseDeleteLock(volumeID)

	// if this is a migration request volID, delete the volume in backend
	if isMigrationVolID(volumeID) {
		pmVolID, pErr := parseMigrationVolID(volumeID)
		if pErr != nil {
			return nil, status.Error(codes.InvalidArgument, pErr.Error())
		}
		pErr = deleteMigratedVolume(ctx, pmVolID, cr)
		if pErr != nil && !errors.Is(pErr, ErrImageNotFound) {
			return nil, status.Error(codes.Internal, pErr.Error())
		}

		return &csi.DeleteVolumeResponse{}, nil
	}

	rbdVol, err := GenVolFromVolID(ctx, volumeID, cr, req.GetSecrets())
	defer func() {
		if rbdVol != nil {
			rbdVol.Destroy(ctx)
		}
	}()
	if err != nil {
		return cs.checkErrAndUndoReserve(ctx, err, volumeID, rbdVol, cr)
	}

	// lock out parallel create requests against the same volume name as we
	// clean up the image and associated omaps for the same
	if acquired := cs.VolumeLocks.TryAcquire(rbdVol.RequestName); !acquired {
		log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, rbdVol.RequestName)

		return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, rbdVol.RequestName)
	}
	defer cs.VolumeLocks.Release(rbdVol.RequestName)

	return cleanupRBDImage(ctx, rbdVol, cr)
}

// cleanupRBDImage removes the rbd image and OMAP metadata associated with it.
func cleanupRBDImage(ctx context.Context,
	rbdVol *rbdVolume, cr *util.Credentials,
) (*csi.DeleteVolumeResponse, error) {
	info, err := rbdVol.GetMirroringInfo(ctx)
	if err != nil {
		log.ErrorLog(ctx, err.Error())

		return nil, status.Error(codes.Internal, err.Error())
	}
	// Cleanup only omap data if the following conditions are met:
	// Mirroring is enabled on the image
	// Local image is secondary
	// Local image is in up+replaying state
	if info.GetState() == librbd.MirrorImageEnabled.String() && !info.IsPrimary() {
		// If the image is in a secondary state and is up+replaying, it is a
		// healthy secondary: the image is primary somewhere in the remote
		// cluster and the local image is being replayed. Delete the OMAP
		// data generated, as we cannot delete the secondary image. When the
		// image on the primary cluster gets deleted/mirroring disabled, the
		// image on all the remote (secondary) clusters will get
		// auto-deleted. This helps in garbage collecting the OMAP, PVC and PV
		// objects after a failback operation.
		sts, rErr := rbdVol.GetGlobalMirroringStatus(ctx)
		if rErr != nil {
			return nil, status.Error(codes.Internal, rErr.Error())
		}

		localStatus, rErr := sts.GetLocalSiteStatus()
		if rErr != nil {
			log.ErrorLog(ctx, "failed to get local status for volume %s: %w", rbdVol.RbdImageName, rErr)

			return nil, status.Error(codes.Internal, rErr.Error())
		}
		if localStatus.IsUP() && localStatus.GetState() == librbd.MirrorImageStatusStateReplaying.String() {
			if err = undoVolReservation(ctx, rbdVol, cr); err != nil {
				log.ErrorLog(ctx, "failed to remove reservation for volume (%s) with backing image (%s) (%s)",
					rbdVol.RequestName, rbdVol.RbdImageName, err)

				return nil, status.Error(codes.Internal, err.Error())
			}

			return &csi.DeleteVolumeResponse{}, nil
		}
		log.ErrorLog(ctx,
			"secondary image status is up=%t and state=%s",
			localStatus.IsUP(),
			localStatus.GetState())
	}

	inUse, err := rbdVol.isInUse()
	if err != nil {
		log.ErrorLog(ctx, "failed getting information for image (%s): (%s)", rbdVol, err)

		return nil, status.Error(codes.Internal, err.Error())
	}
	if inUse {
		log.ErrorLog(ctx, "rbd %s is still being used", rbdVol)

		return nil, status.Errorf(codes.Internal, "rbd %s is still being used", rbdVol.RbdImageName)
	}

	// delete the temporary rbd image created as part of volume clone during
	// create volume
	err = rbdVol.DeleteTempImage(ctx)
	if err != nil {
		log.ErrorLog(ctx, "failed to delete temporary rbd image: %v", err)

		return nil, status.Error(codes.Internal, err.Error())
	}

	// Deleting rbd image
	log.DebugLog(ctx, "deleting image %s", rbdVol.RbdImageName)
	if err = rbdVol.Delete(ctx); err != nil {
		log.ErrorLog(ctx, "failed to delete rbd image: %s with error: %v",
			rbdVol, err)

		return nil, status.Error(codes.Internal, err.Error())
	}

	if err = undoVolReservation(ctx, rbdVol, cr); err != nil {
		log.ErrorLog(ctx, "failed to remove reservation for volume (%s) with backing image (%s) (%s)",
			rbdVol.RequestName, rbdVol.RbdImageName, err)

		return nil, status.Error(codes.Internal, err.Error())
	}

	return &csi.DeleteVolumeResponse{}, nil
}

// ValidateVolumeCapabilities checks whether the volume capabilities requested
// are supported.
func (cs *ControllerServer) ValidateVolumeCapabilities(
	ctx context.Context,
	req *csi.ValidateVolumeCapabilitiesRequest,
) (*csi.ValidateVolumeCapabilitiesResponse, error) {
	if req.GetVolumeId() == "" {
		return nil, status.Error(codes.InvalidArgument, "empty volume ID in request")
	}

	if len(req.GetVolumeCapabilities()) == 0 {
		return nil, status.Error(codes.InvalidArgument, "empty volume capabilities in request")
	}

	for _, capability := range req.GetVolumeCapabilities() {
		if capability.GetAccessMode().GetMode() != csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER {
			return &csi.ValidateVolumeCapabilitiesResponse{Message: ""}, nil
		}
	}

	return &csi.ValidateVolumeCapabilitiesResponse{
		Confirmed: &csi.ValidateVolumeCapabilitiesResponse_Confirmed{
			VolumeCapabilities: req.GetVolumeCapabilities(),
		},
	}, nil
}

// CreateSnapshot creates the snapshot in backend and stores metadata in store.
|
2023-06-02 09:49:22 +00:00
|
|
|
//
|
2023-06-02 08:59:52 +00:00
|
|
|
//nolint:gocyclo,cyclop // TODO: reduce complexity.
|
2021-06-25 11:39:42 +00:00
|
|
|
func (cs *ControllerServer) CreateSnapshot(
|
|
|
|
ctx context.Context,
|
2022-06-01 10:17:19 +00:00
|
|
|
req *csi.CreateSnapshotRequest,
|
|
|
|
) (*csi.CreateSnapshotResponse, error) {
|
2019-08-22 16:57:23 +00:00
|
|
|
if err := cs.validateSnapshotReq(ctx, req); err != nil {
|
2019-01-28 19:55:10 +00:00
|
|
|
return nil, err
|
2018-08-08 05:42:17 +00:00
|
|
|
}
|
2019-01-28 13:59:16 +00:00
|
|
|
|
2019-06-25 19:29:17 +00:00
|
|
|
cr, err := util.NewUserCredentials(req.GetSecrets())
|
2019-06-01 21:26:42 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, status.Error(codes.Internal, err.Error())
|
|
|
|
}
|
2019-06-25 19:29:17 +00:00
|
|
|
defer cr.DeleteCredentials()
|
2019-06-01 21:26:42 +00:00
|
|
|
|
2019-04-22 21:35:39 +00:00
|
|
|
// Fetch source volume information
|
2021-12-09 08:00:52 +00:00
|
|
|
rbdVol, err := GenVolFromVolID(ctx, req.GetSourceVolumeId(), cr, req.GetSecrets())
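	// run Destroy() via defer even when GenVolFromVolID returns an error, as a
	// partially initialized rbdVol may still hold a connection to the cluster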
	defer func() {
		if rbdVol != nil {
			rbdVol.Destroy(ctx)
		}
	}()
	if err != nil {
		switch {
		case errors.Is(err, ErrImageNotFound):
			err = status.Errorf(codes.NotFound, "source Volume ID %s not found", req.GetSourceVolumeId())
		case errors.Is(err, util.ErrPoolNotFound):
			log.ErrorLog(ctx, "failed to get backend volume for %s: %v", req.GetSourceVolumeId(), err)
			err = status.Errorf(codes.NotFound, err.Error())
		default:
			err = status.Errorf(codes.Internal, err.Error())
		}

		return nil, err
	}
	rbdVol.EnableMetadata = cs.SetMetadata

	// Check if source volume was created with required image features for snaps
	if !rbdVol.hasSnapshotFeature() {
		return nil, status.Errorf(
			codes.InvalidArgument,
			"volume(%s) does not have snapshot feature(layering)",
			req.GetSourceVolumeId())
	}

	rbdSnap, err := genSnapFromOptions(ctx, rbdVol, req.GetParameters())
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
	rbdSnap.RbdImageName = rbdVol.RbdImageName
	rbdSnap.VolSize = rbdVol.VolSize
	rbdSnap.SourceVolumeID = req.GetSourceVolumeId()
	rbdSnap.RequestName = req.GetName()

	if acquired := cs.SnapshotLocks.TryAcquire(req.GetName()); !acquired {
		log.ErrorLog(ctx, util.SnapshotOperationAlreadyExistsFmt, req.GetName())

		return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, req.GetName())
	}
	defer cs.SnapshotLocks.Release(req.GetName())

	// Take lock on parent rbd image
	if err = cs.OperationLocks.GetSnapshotCreateLock(rbdSnap.SourceVolumeID); err != nil {
		log.ErrorLog(ctx, err.Error())

		return nil, status.Error(codes.Aborted, err.Error())
	}
	defer cs.OperationLocks.ReleaseSnapshotCreateLock(rbdSnap.SourceVolumeID)

	// Need to check for already existing snapshot name, and if found
	// check for the requested source volume id and already allocated source volume id
	found, err := checkSnapCloneExists(ctx, rbdVol, rbdSnap, cr)
	if err != nil {
		if errors.Is(err, util.ErrSnapNameConflict) {
			return nil, status.Error(codes.AlreadyExists, err.Error())
		}

		return nil, status.Errorf(codes.Internal, err.Error())
	}
	if found {
		return cloneFromSnapshot(ctx, rbdVol, rbdSnap, cr, req.GetParameters())
	}
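
	// flatten temporary cloned images of the parent image, if needed, so that
	// the number of snapshots on it stays within the configured limits before
	// a new snapshot is taken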
	err = flattenTemporaryClonedImages(ctx, rbdVol, cr)
	if err != nil {
		return nil, err
	}

	err = reserveSnap(ctx, rbdSnap, rbdVol, cr)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
	defer func() {
		if err != nil && !errors.Is(err, ErrFlattenInProgress) {
			errDefer := undoSnapReservation(ctx, rbdSnap, cr)
			if errDefer != nil {
				log.WarningLog(ctx, "failed undoing reservation of snapshot: %s %v", req.GetName(), errDefer)
			}
		}
	}()

	vol, err := cs.doSnapshotClone(ctx, rbdVol, rbdSnap, cr)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}

	// Update the metadata on snapshot not on the original image
	rbdVol.RbdImageName = rbdSnap.RbdSnapName
	rbdVol.ClusterName = cs.ClusterName
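
	// rbdVol now refers to the snapshot-backed clone image, so the deferred
	// cleanup below removes that clone if any of the later steps fail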
	defer func() {
		if err != nil {
			log.DebugLog(ctx, "Removing clone image %q", rbdVol)
			errDefer := rbdVol.Delete(ctx)
			if errDefer != nil {
				log.ErrorLog(ctx, "failed to delete clone image %q: %v", rbdVol, errDefer)
			}
		}
	}()

	err = rbdVol.unsetAllMetadata(k8s.GetVolumeMetadataKeys())
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
	// Set snapshot-name/snapshot-namespace/snapshotcontent-name details
	// on RBD backend image as metadata on create
	metadata := k8s.GetSnapshotMetadata(req.GetParameters())
	err = rbdVol.setAllMetadata(metadata)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}

	csiSnap, err := vol.toSnapshot().ToCSI(ctx)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}

	return &csi.CreateSnapshotResponse{
		Snapshot: csiSnap,
	}, nil
}

// cloneFromSnapshot is a helper for CreateSnapshot that continues creating an
// RBD image from an RBD snapshot if the process was interrupted at one point.
func cloneFromSnapshot(
	ctx context.Context,
	rbdVol *rbdVolume,
	rbdSnap *rbdSnapshot,
	cr *util.Credentials,
	parameters map[string]string,
) (*csi.CreateSnapshotResponse, error) {
	vol := rbdSnap.toVolume()
	err := vol.Connect(cr)
	if err != nil {
		uErr := undoSnapshotCloning(ctx, rbdVol, rbdSnap, vol, cr)
		if uErr != nil {
			log.WarningLog(ctx, "failed undoing reservation of snapshot: %s %v", rbdSnap.RequestName, uErr)
		}

		return nil, status.Errorf(codes.Internal, err.Error())
	}
	defer vol.Destroy(ctx)
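
	// copy the encryption configuration of the parent volume to the
	// snapshot-backed image so that snapshots of encrypted volumes remain
	// usable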
	err = rbdVol.copyEncryptionConfig(ctx, &vol.rbdImage, false)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}

	err = vol.flattenRbdImage(ctx, false, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
	if errors.Is(err, ErrFlattenInProgress) {
		// if flattening is in progress, return error and do not cleanup
		return nil, status.Errorf(codes.Internal, err.Error())
	} else if err != nil {
		uErr := undoSnapshotCloning(ctx, rbdVol, rbdSnap, vol, cr)
		if uErr != nil {
			log.WarningLog(ctx, "failed undoing reservation of snapshot: %s %v", rbdSnap.RequestName, uErr)
		}

		return nil, status.Errorf(codes.Internal, err.Error())
	}

	// Update snapshot-name/snapshot-namespace/snapshotcontent-name details on
	// RBD backend image as metadata on restart of provisioner pod when image exist
	if len(parameters) != 0 {
		metadata := k8s.GetSnapshotMetadata(parameters)
		err = rbdVol.setAllMetadata(metadata)
		if err != nil {
			return nil, status.Error(codes.Internal, err.Error())
		}
	}

	csiSnap, err := rbdSnap.ToCSI(ctx)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}

	return &csi.CreateSnapshotResponse{
		Snapshot: csiSnap,
	}, nil
}

func (cs *ControllerServer) validateSnapshotReq(ctx context.Context, req *csi.CreateSnapshotRequest) error {
	if err := cs.Driver.ValidateControllerServiceRequest(
		csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT); err != nil {
		log.ErrorLog(ctx, "invalid create snapshot req: %v", protosanitizer.StripSecrets(req))

		return err
	}

	// Check sanity of request Snapshot Name, Source Volume Id
	if req.GetName() == "" {
		return status.Error(codes.InvalidArgument, "snapshot Name cannot be empty")
	}
	if req.GetSourceVolumeId() == "" {
		return status.Error(codes.InvalidArgument, "source Volume ID cannot be empty")
	}

	options := req.GetParameters()
	if value, ok := options["snapshotNamePrefix"]; ok && value == "" {
		return status.Error(codes.InvalidArgument, "empty snapshot name prefix to provision snapshot from")
	}
	if value, ok := options["pool"]; ok && value == "" {
		return status.Error(codes.InvalidArgument, "empty pool name in which rbd image will be created")
	}

	return nil
}

func (cs *ControllerServer) doSnapshotClone(
	ctx context.Context,
	parentVol *rbdVolume,
	rbdSnap *rbdSnapshot,
	cr *util.Credentials,
) (*rbdVolume, error) {
	// generate cloned volume details from snapshot
	cloneRbd := rbdSnap.toVolume()
	defer cloneRbd.Destroy(ctx)
	// add image feature for cloneRbd
	f := []string{librbd.FeatureNameLayering, librbd.FeatureNameDeepFlatten}
	cloneRbd.ImageFeatureSet = librbd.FeatureSetFromNames(f)
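	// layering is required to clone from the snapshot; deep-flatten allows the
	// clone to be flattened completely later on, even when it has snapshots of
	// its own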

	err := cloneRbd.Connect(cr)
	if err != nil {
		return cloneRbd, err
	}

	err = createRBDClone(ctx, parentVol, cloneRbd, rbdSnap)
	if err != nil {
		log.ErrorLog(ctx, "failed to create snapshot: %v", err)

		return cloneRbd, err
	}
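
	// if any of the following steps fail and flattening is not in progress,
	// roll back by removing the cloned image and its snapshot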
	defer func() {
		if err != nil {
			if !errors.Is(err, ErrFlattenInProgress) {
				// cleanup clone and snapshot
				errCleanUp := cleanUpSnapshot(ctx, cloneRbd, rbdSnap, cloneRbd)
				if errCleanUp != nil {
					log.ErrorLog(ctx, "failed to cleanup snapshot and clone: %v", errCleanUp)
				}
			}
		}
	}()

	err = parentVol.copyEncryptionConfig(ctx, &cloneRbd.rbdImage, false)
	if err != nil {
		log.ErrorLog(ctx, "failed to copy encryption "+
			"config for %q: %v", cloneRbd, err)

		return nil, err
	}

	err = cloneRbd.createSnapshot(ctx, rbdSnap)
	if err != nil {
		log.ErrorLog(ctx, "failed to create snapshot %s: %v", rbdSnap, err)

		return cloneRbd, err
	}

	err = cloneRbd.getImageID()
	if err != nil {
		log.ErrorLog(ctx, "failed to get image id: %v", err)

		return cloneRbd, err
	}
	// save image ID
	j, err := snapJournal.Connect(rbdSnap.Monitors, rbdSnap.RadosNamespace, cr)
	if err != nil {
		log.ErrorLog(ctx, "failed to connect to cluster: %v", err)

		return cloneRbd, err
	}
	defer j.Destroy()
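
	// store the image ID of the clone in the snapshot journal so that later
	// operations (for example trash cleanup during DeleteSnapshot) can locate
	// the image again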
	err = j.StoreImageID(ctx, rbdSnap.JournalPool, rbdSnap.ReservedID, cloneRbd.ImageID)
	if err != nil {
		log.ErrorLog(ctx, "failed to reserve volume id: %v", err)

		return cloneRbd, err
	}

	err = cloneRbd.flattenRbdImage(ctx, false, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
	if err != nil {
		return cloneRbd, err
	}

	return cloneRbd, nil
}

// DeleteSnapshot deletes the snapshot in backend and removes the
// snapshot metadata from store.
func (cs *ControllerServer) DeleteSnapshot(
	ctx context.Context,
	req *csi.DeleteSnapshotRequest,
) (*csi.DeleteSnapshotResponse, error) {
	if err := cs.Driver.ValidateControllerServiceRequest(
		csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT); err != nil {
		log.ErrorLog(ctx, "invalid delete snapshot req: %v", protosanitizer.StripSecrets(req))

		return nil, err
	}

	cr, err := util.NewUserCredentials(req.GetSecrets())
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
	defer cr.DeleteCredentials()

	snapshotID := req.GetSnapshotId()
	if snapshotID == "" {
		return nil, status.Error(codes.InvalidArgument, "snapshot ID cannot be empty")
	}

	if acquired := cs.SnapshotLocks.TryAcquire(snapshotID); !acquired {
		log.ErrorLog(ctx, util.SnapshotOperationAlreadyExistsFmt, snapshotID)

		return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, snapshotID)
	}
	defer cs.SnapshotLocks.Release(snapshotID)

	// lock out snapshotID for restore operation
	if err = cs.OperationLocks.GetDeleteLock(snapshotID); err != nil {
		log.ErrorLog(ctx, err.Error())

		return nil, status.Error(codes.Aborted, err.Error())
	}
	defer cs.OperationLocks.ReleaseDeleteLock(snapshotID)

	rbdSnap, err := genSnapFromSnapID(ctx, snapshotID, cr, req.GetSecrets())
	if err != nil {
		// if error is ErrPoolNotFound, the pool is already deleted and we don't
		// need to worry about deleting snapshot or omap data, return success
		if errors.Is(err, util.ErrPoolNotFound) {
			log.WarningLog(ctx, "failed to get backend snapshot for %s: %v", snapshotID, err)

			return &csi.DeleteSnapshotResponse{}, nil
		}

		// if error is ErrKeyNotFound, then a previous attempt at deletion was complete
		// or partially complete (snap and snapOMap are garbage collected already), hence return
		// success as deletion is complete
		if errors.Is(err, util.ErrKeyNotFound) {
			log.UsefulLog(ctx, "snapshot %s has been deleted already: %v", snapshotID, err)

			return &csi.DeleteSnapshotResponse{}, nil
		}

		// if the error is ErrImageNotFound, we need to cleanup the image from
		// trash and remove the metadata in OMAP.
		if errors.Is(err, ErrImageNotFound) {
			log.UsefulLog(ctx, "cleaning up leftovers of snapshot %s: %v", snapshotID, err)

			err = cleanUpImageAndSnapReservation(ctx, rbdSnap, cr)
			if err != nil {
				return nil, status.Error(codes.Internal, err.Error())
			}

			return &csi.DeleteSnapshotResponse{}, nil
		}

		return nil, status.Error(codes.Internal, err.Error())
	}
	defer rbdSnap.Destroy(ctx)

	// safeguard against parallel create or delete requests against the same
	// name
	if acquired := cs.SnapshotLocks.TryAcquire(rbdSnap.RequestName); !acquired {
		log.ErrorLog(ctx, util.SnapshotOperationAlreadyExistsFmt, rbdSnap.RequestName)

		return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, rbdSnap.RequestName)
	}
	defer cs.SnapshotLocks.Release(rbdSnap.RequestName)

	// Deleting snapshot and cloned volume
	log.DebugLog(ctx, "deleting cloned rbd volume %s", rbdSnap.RbdSnapName)
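
	// the snapshot is backed by a cloned rbd image; construct an rbdVolume for
	// that clone so that both the rbd snapshot and its backing image get
	// removed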
	rbdVol := rbdSnap.toVolume()

	err = rbdVol.Connect(cr)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
	defer rbdVol.Destroy(ctx)

	rbdVol.ImageID = rbdSnap.ImageID
	// update parent name to delete the snapshot
	rbdSnap.RbdImageName = rbdVol.RbdImageName
	err = cleanUpSnapshot(ctx, rbdVol, rbdSnap, rbdVol)
	if err != nil {
		log.ErrorLog(ctx, "failed to delete image: %v", err)

		return nil, status.Error(codes.Internal, err.Error())
	}
	err = undoSnapReservation(ctx, rbdSnap, cr)
	if err != nil {
		log.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s) on image (%s) (%s)",
			rbdSnap.RequestName, rbdSnap.RbdSnapName, rbdSnap.RbdImageName, err)

		return nil, status.Error(codes.Internal, err.Error())
	}

	return &csi.DeleteSnapshotResponse{}, nil
}

// cleanUpImageAndSnapReservation cleans up the image from the trash and
// snapshot reservation in rados OMAP.
func cleanUpImageAndSnapReservation(ctx context.Context, rbdSnap *rbdSnapshot, cr *util.Credentials) error {
	rbdVol := rbdSnap.toVolume()
	err := rbdVol.Connect(cr)
	if err != nil {
		return status.Error(codes.Internal, err.Error())
	}
	defer rbdVol.Destroy(ctx)

	// cleanup the image from trash if the error is image not found.
	err = rbdVol.ensureImageCleanup(ctx)
	if err != nil {
		log.ErrorLog(ctx, "failed to delete rbd image: %s/%s with error: %v", rbdVol.Pool, rbdVol.VolName, err)

		return status.Error(codes.Internal, err.Error())
	}
	err = undoSnapReservation(ctx, rbdSnap, cr)
	if err != nil {
		log.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap %q: %v",
			rbdSnap.RequestName, rbdSnap, err)

		return status.Error(codes.Internal, err.Error())
	}

	return nil
}

// ControllerExpandVolume expands RBD Volumes on demand based on resizer request.
func (cs *ControllerServer) ControllerExpandVolume(
	ctx context.Context,
	req *csi.ControllerExpandVolumeRequest,
) (*csi.ControllerExpandVolumeResponse, error) {
	err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_EXPAND_VOLUME)
	if err != nil {
		log.ErrorLog(ctx, "invalid expand volume req: %v", protosanitizer.StripSecrets(req))

		return nil, err
	}

	volID := req.GetVolumeId()
	if volID == "" {
		return nil, status.Error(codes.InvalidArgument, "volume ID cannot be empty")
	}

	capRange := req.GetCapacityRange()
	if capRange == nil {
		return nil, status.Error(codes.InvalidArgument, "capacityRange cannot be empty")
	}

	// lock out parallel requests against the same volume ID
	if acquired := cs.VolumeLocks.TryAcquire(volID); !acquired {
		log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volID)

		return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volID)
	}
	defer cs.VolumeLocks.Release(volID)
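
	// the migration-aware credential helper also understands the secret format
	// used by volumes that were migrated from the in-tree RBD plugin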
	cr, err := util.NewUserCredentialsWithMigration(req.GetSecrets())
	if err != nil {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}
	defer cr.DeleteCredentials()
	rbdVol, err := genVolFromVolIDWithMigration(ctx, volID, cr, req.GetSecrets())
	if err != nil {
		switch {
		case errors.Is(err, ErrImageNotFound):
			err = status.Errorf(codes.NotFound, "volume ID %s not found", volID)
		case errors.Is(err, util.ErrPoolNotFound):
			log.ErrorLog(ctx, "failed to get backend volume for %s: %v", volID, err)
			err = status.Errorf(codes.NotFound, err.Error())
		default:
			err = status.Errorf(codes.Internal, err.Error())
		}

		return nil, err
	}
	defer rbdVol.Destroy(ctx)

	// NodeExpansion is needed for PersistentVolumes with,
	// 1. Filesystem VolumeMode with & without Encryption and
	// 2. Block VolumeMode with Encryption
	// Hence set nodeExpansion flag based on VolumeMode and Encryption status
	nodeExpansion := true
	if req.GetVolumeCapability().GetBlock() != nil && !rbdVol.isBlockEncrypted() {
		nodeExpansion = false
	}

	// lock out volumeID for clone and delete operation
	if err = cs.OperationLocks.GetExpandLock(volID); err != nil {
		log.ErrorLog(ctx, err.Error())

		return nil, status.Error(codes.Aborted, err.Error())
	}
	defer cs.OperationLocks.ReleaseExpandLock(volID)

	// always round up the request size in bytes to the nearest MiB/GiB
	volSize := util.RoundOffBytes(req.GetCapacityRange().GetRequiredBytes())

	// resize volume if required
	if rbdVol.VolSize < volSize {
		log.DebugLog(ctx, "rbd volume %s size is %v, resizing to %v", rbdVol, rbdVol.VolSize, volSize)
		err = rbdVol.resize(volSize)
		if err != nil {
			log.ErrorLog(ctx, "failed to resize rbd image: %s with error: %v", rbdVol, err)

			return nil, status.Error(codes.Internal, err.Error())
		}
		// adjust rbd qos after resize volume.
		err = rbdVol.AdjustQOS(ctx)
		if err != nil {
			log.DebugLog(ctx, "failed to adjust QOS for rbd image")

			return nil, status.Error(codes.Internal, err.Error())
		}
	}

	return &csi.ControllerExpandVolumeResponse{
		CapacityBytes:         rbdVol.VolSize,
		NodeExpansionRequired: nodeExpansion,
	}, nil
}

// ControllerPublishVolume is a dummy publish implementation to mimic a successful attach operation being a NOOP.
func (cs *ControllerServer) ControllerPublishVolume(
	ctx context.Context,
	req *csi.ControllerPublishVolumeRequest,
) (*csi.ControllerPublishVolumeResponse, error) {
	if req.GetVolumeId() == "" {
		return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty")
	}
	if req.GetNodeId() == "" {
		return nil, status.Error(codes.InvalidArgument, "Node ID cannot be empty")
	}
	if req.GetVolumeCapability() == nil {
		return nil, status.Error(codes.InvalidArgument, "Volume Capabilities cannot be empty")
	}

	return &csi.ControllerPublishVolumeResponse{
		// the dummy response carries an empty publish context
		PublishContext: map[string]string{},
	}, nil
}

// ControllerUnpublishVolume is a dummy unpublish implementation to mimic a successful detach operation being a NOOP.
func (cs *ControllerServer) ControllerUnpublishVolume(
	ctx context.Context,
	req *csi.ControllerUnpublishVolumeRequest,
) (*csi.ControllerUnpublishVolumeResponse, error) {
	if req.GetVolumeId() == "" {
		return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty")
	}

	return &csi.ControllerUnpublishVolumeResponse{}, nil
}