diff --git a/internal/rbd/controllerserver.go b/internal/rbd/controllerserver.go
index 1b8c387bb..7735d8306 100644
--- a/internal/rbd/controllerserver.go
+++ b/internal/rbd/controllerserver.go
@@ -808,25 +808,34 @@ func (cs *ControllerServer) checkErrAndUndoReserve(
 func (cs *ControllerServer) DeleteVolume(
 	ctx context.Context,
 	req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
-	if err := cs.Driver.ValidateControllerServiceRequest(
+	var err error
+	if err = cs.Driver.ValidateControllerServiceRequest(
 		csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
 		log.ErrorLog(ctx, "invalid delete volume req: %v", protosanitizer.StripSecrets(req))
 
 		return nil, err
 	}
 
-	cr, err := util.NewUserCredentials(req.GetSecrets())
-	if err != nil {
-		return nil, status.Error(codes.Internal, err.Error())
-	}
-	defer cr.DeleteCredentials()
-
 	// For now the image get unconditionally deleted, but here retention policy can be checked
 	volumeID := req.GetVolumeId()
 	if volumeID == "" {
 		return nil, status.Error(codes.InvalidArgument, "empty volume ID in request")
 	}
 
+	secrets := req.GetSecrets()
+	if util.IsMigrationSecret(secrets) {
+		secrets, err = util.ParseAndSetSecretMapFromMigSecret(secrets)
+		if err != nil {
+			return nil, status.Error(codes.InvalidArgument, err.Error())
+		}
+	}
+
+	cr, err := util.NewUserCredentials(secrets)
+	if err != nil {
+		return nil, status.Error(codes.Internal, err.Error())
+	}
+	defer cr.DeleteCredentials()
+
 	if acquired := cs.VolumeLocks.TryAcquire(volumeID); !acquired {
 		log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volumeID)
 
@@ -852,7 +861,7 @@ func (cs *ControllerServer) DeleteVolume(
 		return &csi.DeleteVolumeResponse{}, nil
 	}
 
-	rbdVol, err := genVolFromVolID(ctx, volumeID, cr, req.GetSecrets())
+	rbdVol, err := genVolFromVolID(ctx, volumeID, cr, secrets)
 	defer rbdVol.Destroy()
 	if err != nil {
 		return cs.checkErrAndUndoReserve(ctx, err, volumeID, rbdVol, cr)
diff --git a/internal/util/credentials.go b/internal/util/credentials.go
index 542354760..122c24f69 100644
--- a/internal/util/credentials.go
+++ b/internal/util/credentials.go
@@ -31,6 +31,9 @@ const (
 	credMonitors         = "monitors"
 	tmpKeyFileLocation   = "/tmp/csi/keys"
 	tmpKeyFileNamePrefix = "keyfile-"
+	migUserName          = "admin"
+	migUserID            = "adminId"
+	migUserKey           = "key"
 )
 
 // Credentials struct represents credentials to access the ceph cluster.
@@ -119,3 +122,34 @@ func GetMonValFromSecret(secrets map[string]string) (string, error) {
 
 	return "", fmt.Errorf("missing %q", credMonitors)
 }
+
+// ParseAndSetSecretMapFromMigSecret parses the secret map from a migration
+// request and returns a new secret map with the userID and userKey fields set.
+func ParseAndSetSecretMapFromMigSecret(secretmap map[string]string) (map[string]string, error) {
+	newSecretMap := make(map[string]string)
+	// parse and set userKey
+	if !IsMigrationSecret(secretmap) {
+		return nil, errors.New("passed secret map does not contain user key or it is nil")
+	}
+	newSecretMap[credUserKey] = secretmap[migUserKey]
+	// parse and set the userID
+	newSecretMap[credUserID] = migUserName
+	if secretmap[migUserID] != "" {
+		newSecretMap[credUserID] = secretmap[migUserID]
+	}
+
+	return newSecretMap, nil
+}
+
+// IsMigrationSecret validates whether the passed-in secret map belongs to a
+// migration volume request. A migration secret carries a field called `key`,
+// the equivalent of `userKey`, which is what we check here to identify the
+// secret.
+func IsMigrationSecret(passedSecretMap map[string]string) bool {
+	// The nil/empty check below is an extra measure: request validators like
+	// ValidateNodeStageVolumeRequest() already perform it. However, as this
+	// function can be called independently with a map of secret values, it is
+	// good to have the check in place; it also gives a clear error when it is
+	// hit on a migration request compared to a general one.
+	return len(passedSecretMap) != 0 && passedSecretMap[migUserKey] != ""
+}
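
To illustrate what the new helpers do end to end, here is a minimal, self-contained sketch (not part of the change) that mirrors their logic. It assumes the existing credUserID and credUserKey constants in internal/util/credentials.go resolve to the secret keys "userID" and "userKey"; the input map imitates a migration secret, which carries `key` and optionally `adminId` instead of the regular userID/userKey pair.

package main

import (
	"errors"
	"fmt"
)

// Mirrors of the constants in internal/util/credentials.go. The values of
// credUserID and credUserKey are assumptions made for this sketch.
const (
	credUserID  = "userID"
	credUserKey = "userKey"
	migUserName = "admin"
	migUserID   = "adminId"
	migUserKey  = "key"
)

// isMigrationSecret mirrors util.IsMigrationSecret: a migration secret is
// identified by a non-empty `key` field.
func isMigrationSecret(m map[string]string) bool {
	return len(m) != 0 && m[migUserKey] != ""
}

// parseMigSecret mirrors util.ParseAndSetSecretMapFromMigSecret: `key`
// becomes userKey, and `adminId` (defaulting to "admin") becomes userID.
func parseMigSecret(m map[string]string) (map[string]string, error) {
	if !isMigrationSecret(m) {
		return nil, errors.New("passed secret map does not contain user key or it is nil")
	}
	out := map[string]string{credUserKey: m[migUserKey], credUserID: migUserName}
	if m[migUserID] != "" {
		out[credUserID] = m[migUserID]
	}
	return out, nil
}

func main() {
	// A migration-style secret; the key value is a placeholder.
	secrets := map[string]string{"key": "placeholder-ceph-key", "adminId": "kube"}
	// The same normalization step DeleteVolume now performs up front.
	if isMigrationSecret(secrets) {
		normalized, err := parseMigSecret(secrets)
		if err != nil {
			panic(err)
		}
		fmt.Println(normalized) // map[userID:kube userKey:placeholder-ceph-key]
	}
}

Once the map is normalized this way, DeleteVolume can hand it to util.NewUserCredentials exactly as it would a regular CSI secret, which is also why the later genVolFromVolID call switches from req.GetSecrets() to the local secrets variable.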