From b132696e54b1eb2398b1b16815599644543f55e7 Mon Sep 17 00:00:00 2001
From: Niels de Vos
Date: Mon, 25 Oct 2021 15:05:33 +0200
Subject: [PATCH 1/7] rbd: note that thick-provisioning is deprecated

Thick-provisioning was introduced to make accounting of assigned space
for volumes easier. When thick-provisioned volumes are the only
consumer of the Ceph cluster, this works fine. However, it is unlikely
that this is the case. Instead, accounting of the requested
(thin-provisioned) size of volumes is much more practical, as different
types of volumes can be tracked.

OpenShift already provides cluster-wide quotas, which can combine
accounting of requested volumes by grouping different StorageClasses.

In addition to the difficult practice of allowing only
thick-provisioned RBD-backed volumes, performance makes
thick-provisioning troublesome. Because volumes need to be completely
allocated, data needs to be written to the whole volume. This can take
a long time, depending on the size of the volume. Provisioning, cloning
and snapshotting become very noticeable and, because of the additional
time consumption, more prone to failures.

Signed-off-by: Niels de Vos
---
 docs/deploy-rbd.md               |  2 +-
 examples/rbd/storageclass.yaml   |  7 ++++---
 internal/rbd/controllerserver.go | 10 ++++++++++
 3 files changed, 15 insertions(+), 4 deletions(-)

diff --git a/docs/deploy-rbd.md b/docs/deploy-rbd.md
index b80a5aa28..f8fa2dea1 100644
--- a/docs/deploy-rbd.md
+++ b/docs/deploy-rbd.md
@@ -64,7 +64,7 @@ make image-cephcsi
 | `mounter` | no | if set to `rbd-nbd`, use `rbd-nbd` on nodes that have `rbd-nbd` and `nbd` kernel modules to map rbd images |
 | `encrypted` | no | disabled by default, use `"true"` to enable LUKS encryption on PVC and `"false"` to disable it. **Do not change for existing storageclasses** |
 | `encryptionKMSID` | no | required if encryption is enabled and a kms is used to store passphrases |
-| `thickProvision` | no | if set to `"true"`, newly created RBD images will be completely allocated by writing zeros to it |
+| `thickProvision` | no | if set to `"true"`, newly created RBD images will be completely allocated by writing zeros to them (**DEPRECATED**: the recommended alternative is to use accounting/quotas for created volumes) |
 
 **NOTE:** An accompanying CSI configuration file, needs to be provided to the
 running pods. Refer to [Creating CSI configuration](../examples/README.md#creating-csi-configuration)

diff --git a/examples/rbd/storageclass.yaml b/examples/rbd/storageclass.yaml
index 61182eb10..a53aace7c 100644
--- a/examples/rbd/storageclass.yaml
+++ b/examples/rbd/storageclass.yaml
@@ -29,9 +29,10 @@ parameters:
   # eg: pool: rbdpool
   pool:
 
-  # Set thickProvision to true if you want RBD images to be fully allocated on
-  # creation (thin provisioning is the default).
-  thickProvision: "false"
+  # Deprecated: Set thickProvision to true if you want RBD images to be fully
+  # allocated on creation (thin provisioning is the default).
+  # thickProvision: "false"
+
   # (required) RBD image features, CSI creates image with image-format 2
   # CSI RBD currently supports `layering`, `journaling`, `exclusive-lock`
   # features. If `journaling` is enabled, must enable `exclusive-lock` too.
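The controller-side hunk that follows implements the one-time deprecation
warning with a package-level bool flag. For comparison, the same log-once
behaviour is often written with sync.Once; a minimal sketch under that
assumption (not the patch's code, and log.Println stands in for the driver's
WarningLogMsg helper):

    package main

    import (
        "log"
        "sync"
    )

    // warnOnce guards the deprecation message so that repeated provisioning
    // requests do not emit the same warning over and over.
    var warnOnce sync.Once

    func warnThickProvisionDeprecated() {
        warnOnce.Do(func() {
            log.Println("thick-provisioning is deprecated and will be removed in a future release")
        })
    }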
diff --git a/internal/rbd/controllerserver.go b/internal/rbd/controllerserver.go
index 813634592..1b8c387bb 100644
--- a/internal/rbd/controllerserver.go
+++ b/internal/rbd/controllerserver.go
@@ -1502,6 +1502,10 @@ func (cs *ControllerServer) ControllerExpandVolume(
 	}, nil
 }
 
+// logThickProvisioningDeprecation makes sure the deprecation warning about
+// thick-provisioning is logged only once.
+var logThickProvisioningDeprecation = true
+
 // isThickProvisionRequest returns true in case the request contains the
 // `thickProvision` option set to `true`.
 func isThickProvisionRequest(parameters map[string]string) bool {
@@ -1517,5 +1521,11 @@ func isThickProvisionRequest(parameters map[string]string) bool {
 		return false
 	}
 
+	if logThickProvisioningDeprecation {
+		log.WarningLogMsg("thick-provisioning is deprecated and will " +
+			"be removed in a future release")
+		logThickProvisioningDeprecation = false
+	}
+
 	return thickBool
 }

From b49bf4b9875663ee432ba1b61691952ebb112260 Mon Sep 17 00:00:00 2001
From: Humble Chirammal
Date: Sun, 24 Oct 2021 14:18:28 +0530
Subject: [PATCH 2/7] rbd: parse migration secret and set it for controller
 server operations

This commit adds a couple of helper functions to parse the migration
request secret and set it for further CSI driver operations.

More details: The in-tree secret has a data field called "key" which is
the base64 admin secret key. The Ceph CSI driver currently expects the
secret to contain the data field "UserKey" as the equivalent. The CSI
driver also expects a "UserID" field, which is not available in the
in-tree secret by default. This missing userID will be filled in (if
the username differs from 'admin') via the 'adminId' field of the
migration secret in the migration request. This commit adds the logic
to parse the migration secret as follows:

The "key" field value will be picked up from the migration secret into
the "UserKey" field. The "adminId" field value will be picked up from
the migration secret into the "UserID" field; if the `adminId` field is
nil or not set, the `UserID` field will be filled with the default
value, i.e. `admin`. The above logic is activated only when the secret
is a migration secret; otherwise it is skipped and the normal workflow
continues as today.
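Illustratively, a minimal sketch of the mapping described above (names and
values here are for illustration only; the actual helper is
ParseAndSetSecretMapFromMigSecret in internal/util/credentials.go below):

    package main

    import "fmt"

    // migSecretToCSISecret converts the in-tree migration secret fields
    // into the fields the CSI driver expects.
    func migSecretToCSISecret(in map[string]string) map[string]string {
        out := map[string]string{
            "userKey": in["key"], // migration "key" becomes the CSI "userKey"
            "userID":  "admin",   // default user when "adminId" is not set
        }
        if id := in["adminId"]; id != "" {
            out["userID"] = id
        }
        return out
    }

    func main() {
        fmt.Println(migSecretToCSISecret(map[string]string{"key": "****", "adminId": "pooladmin"}))
        // map[userID:pooladmin userKey:****]
    }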
Signed-off-by: Humble Chirammal
---
 internal/rbd/controllerserver.go | 25 +++++++++++++++--------
 internal/util/credentials.go     | 34 ++++++++++++++++++++++++++++++++
 2 files changed, 51 insertions(+), 8 deletions(-)

diff --git a/internal/rbd/controllerserver.go b/internal/rbd/controllerserver.go
index 1b8c387bb..7735d8306 100644
--- a/internal/rbd/controllerserver.go
+++ b/internal/rbd/controllerserver.go
@@ -808,25 +808,34 @@ func (cs *ControllerServer) checkErrAndUndoReserve(
 func (cs *ControllerServer) DeleteVolume(
 	ctx context.Context,
 	req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
-	if err := cs.Driver.ValidateControllerServiceRequest(
+	var err error
+	if err = cs.Driver.ValidateControllerServiceRequest(
 		csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
 		log.ErrorLog(ctx, "invalid delete volume req: %v", protosanitizer.StripSecrets(req))
 
 		return nil, err
 	}
 
-	cr, err := util.NewUserCredentials(req.GetSecrets())
-	if err != nil {
-		return nil, status.Error(codes.Internal, err.Error())
-	}
-	defer cr.DeleteCredentials()
-
 	// For now the image get unconditionally deleted, but here retention policy can be checked
 	volumeID := req.GetVolumeId()
 	if volumeID == "" {
 		return nil, status.Error(codes.InvalidArgument, "empty volume ID in request")
 	}
 
+	secrets := req.GetSecrets()
+	if util.IsMigrationSecret(secrets) {
+		secrets, err = util.ParseAndSetSecretMapFromMigSecret(secrets)
+		if err != nil {
+			return nil, status.Error(codes.InvalidArgument, err.Error())
+		}
+	}
+
+	cr, err := util.NewUserCredentials(secrets)
+	if err != nil {
+		return nil, status.Error(codes.Internal, err.Error())
+	}
+	defer cr.DeleteCredentials()
+
 	if acquired := cs.VolumeLocks.TryAcquire(volumeID); !acquired {
 		log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volumeID)
@@ -852,7 +861,7 @@ func (cs *ControllerServer) DeleteVolume(
 		return &csi.DeleteVolumeResponse{}, nil
 	}
 
-	rbdVol, err := genVolFromVolID(ctx, volumeID, cr, req.GetSecrets())
+	rbdVol, err := genVolFromVolID(ctx, volumeID, cr, secrets)
 	defer rbdVol.Destroy()
 	if err != nil {
 		return cs.checkErrAndUndoReserve(ctx, err, volumeID, rbdVol, cr)

diff --git a/internal/util/credentials.go b/internal/util/credentials.go
index 542354760..122c24f69 100644
--- a/internal/util/credentials.go
+++ b/internal/util/credentials.go
@@ -31,6 +31,9 @@ const (
 	credMonitors         = "monitors"
 	tmpKeyFileLocation   = "/tmp/csi/keys"
 	tmpKeyFileNamePrefix = "keyfile-"
+	migUserName          = "admin"
+	migUserID            = "adminId"
+	migUserKey           = "key"
 )
 
 // Credentials struct represents credentials to access the ceph cluster.
@@ -119,3 +122,34 @@ func GetMonValFromSecret(secrets map[string]string) (string, error) {
 
 	return "", fmt.Errorf("missing %q", credMonitors)
 }
+
+// ParseAndSetSecretMapFromMigSecret parses the secret map from the migration
+// request and returns a new secret map with the userID and userKey fields set.
+func ParseAndSetSecretMapFromMigSecret(secretmap map[string]string) (map[string]string, error) {
+	newSecretMap := make(map[string]string)
+	// parse and set the userKey
+	if !IsMigrationSecret(secretmap) {
+		return nil, errors.New("passed secret map does not contain user key or it is nil")
+	}
+	newSecretMap[credUserKey] = secretmap[migUserKey]
+	// parse and set the userID
+	newSecretMap[credUserID] = migUserName
+	if secretmap[migUserID] != "" {
+		newSecretMap[credUserID] = secretmap[migUserID]
+	}
+
+	return newSecretMap, nil
+}
+
+// IsMigrationSecret validates whether the passed-in secret map is the secret
+// of a migration volume request.
+// The migration secret carries a field called `key`, the equivalent of
+// `userKey`, which is what we check here to identify the secret.
+func IsMigrationSecret(passedSecretMap map[string]string) bool {
+	// The below length check is an extra measure; request validators like
+	// ValidateNodeStageVolumeRequest() already do the nil check. However, as
+	// this function can be called independently with a map of secret values,
+	// it is good to have this check in place; it also gives a clearer error
+	// for a migration request compared to a general one.
+	return len(passedSecretMap) != 0 && passedSecretMap[migUserKey] != ""
+}

From ff0911fb6acc104ea29fc470318016545ffd95ac Mon Sep 17 00:00:00 2001
From: Humble Chirammal
Date: Mon, 25 Oct 2021 14:45:16 +0530
Subject: [PATCH 3/7] rbd: add unit tests for IsMigrationSecret and
 ParseAndSetSecretMapFromMigSecret

This commit adds unit tests for the newly introduced migration-specific
functions.

Signed-off-by: Humble Chirammal
---
 internal/util/credentials_test.go | 100 ++++++++++++++++++++++++++++++
 1 file changed, 100 insertions(+)
 create mode 100644 internal/util/credentials_test.go

diff --git a/internal/util/credentials_test.go b/internal/util/credentials_test.go
new file mode 100644
index 000000000..735042f08
--- /dev/null
+++ b/internal/util/credentials_test.go
@@ -0,0 +1,100 @@
+/*
+Copyright 2021 The Ceph-CSI Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package util
+
+import (
+	"reflect"
+	"testing"
+)
+
+func TestIsMigrationSecret(t *testing.T) {
+	t.Parallel()
+	tests := []struct {
+		name string
+		vc   map[string]string
+		want bool
+	}{
+		{
+			"proper migration secret key set",
+			map[string]string{"key": "QVFBOFF2SlZheUJQRVJBQWgvS2cwT1laQUhPQno3akZwekxxdGc9PQ=="},
+			true,
+		},
+		{
+			"no key set",
+			map[string]string{"": ""},
+			false,
+		},
+	}
+	for _, tt := range tests {
+		newtt := tt
+		t.Run(newtt.name, func(t *testing.T) {
+			t.Parallel()
+			if got := IsMigrationSecret(newtt.vc); got != newtt.want {
+				t.Errorf("IsMigrationSecret() = %v, want %v", got, newtt.want)
+			}
+		})
+	}
+}
+
+func TestParseAndSetSecretMapFromMigSecret(t *testing.T) {
+	t.Parallel()
+	tests := []struct {
+		name      string
+		secretmap map[string]string
+		want      map[string]string
+		wantErr   bool
+	}{
+		{
+			"valid migration secret key set",
+			map[string]string{"key": "QVFBOFF2SlZheUJQRVJBQWgvS2cwT1laQUhPQno3akZwekxxdGc9PQ=="},
+			map[string]string{"userKey": "QVFBOFF2SlZheUJQRVJBQWgvS2cwT1laQUhPQno3akZwekxxdGc9PQ==", "userID": "admin"},
+			false,
+		},
+		{
+			"migration secret key value nil",
+			map[string]string{"key": ""},
+			nil,
+			true,
+		},
+		{
+			"migration secret key field nil",
+			map[string]string{"": ""},
+			nil,
+			true,
+		},
+		{
+			"valid migration secret key and userID set",
+			map[string]string{"key": "QVFBOFF2SlZheUJQRVJBQWgvS2cwT1laQUhPQno3akZwekxxdGc9PQ==", "adminId": "pooladmin"},
+			map[string]string{"userKey": "QVFBOFF2SlZheUJQRVJBQWgvS2cwT1laQUhPQno3akZwekxxdGc9PQ==", "userID": "pooladmin"},
+			false,
+		},
+	}
+	for _, tt := range tests {
+		newtt := tt
+		t.Run(newtt.name, func(t *testing.T) {
+			t.Parallel()
+			got, err := ParseAndSetSecretMapFromMigSecret(newtt.secretmap)
+			if (err != nil) != newtt.wantErr {
+				t.Errorf("ParseAndSetSecretMapFromMigSecret() error = %v, wantErr %v", err, newtt.wantErr)
+
+				return
+			}
+			if !reflect.DeepEqual(got, newtt.want) {
+				t.Errorf("ParseAndSetSecretMapFromMigSecret() got = %v, want %v", got, newtt.want)
+			}
+		})
+	}
+}

From 5621f2cfcaed03249097b4fae35f597fcc5a2dce Mon Sep 17 00:00:00 2001
From: Humble Chirammal
Date: Mon, 25 Oct 2021 14:47:12 +0530
Subject: [PATCH 4/7] rbd: split the parsing and deletion logic into separate
 functions

parseAndDeleteMigratedVolume() previously clubbed together the parsing
of the migration volume handle and the subsequent deletion of the
volume. This commit splits that logic in two: the parsing is done in
parseMigrationVolID() and deleteMigratedVolume() deletes the backend
volume.
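After the split, the call sequence in DeleteVolume() becomes (a sketch of the
controllerserver.go hunk below; surrounding declarations and error naming
simplified):

    // parse the migration volume handle first ...
    pmVolID, err := parseMigrationVolID(volumeID)
    if err != nil {
        return nil, status.Error(codes.InvalidArgument, err.Error())
    }
    // ... then delete the backend volume it refers to
    err = deleteMigratedVolume(ctx, pmVolID, cr)
    if err != nil && !errors.Is(err, ErrImageNotFound) {
        return nil, status.Error(codes.Internal, err.Error())
    }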
Signed-off-by: Humble Chirammal
---
 internal/rbd/controllerserver.go | 12 ++++++++----
 internal/rbd/migration.go        | 11 +++--------
 2 files changed, 11 insertions(+), 12 deletions(-)

diff --git a/internal/rbd/controllerserver.go b/internal/rbd/controllerserver.go
index 7735d8306..50d7c4f73 100644
--- a/internal/rbd/controllerserver.go
+++ b/internal/rbd/controllerserver.go
@@ -851,11 +851,15 @@ func (cs *ControllerServer) DeleteVolume(
 	}
 	defer cs.OperationLocks.ReleaseDeleteLock(volumeID)
 
+	// if this is a migration request volID, delete the volume in backend
 	if isMigrationVolID(volumeID) {
-		log.DebugLog(ctx, "migration volume ID : %s", volumeID)
-		err = parseAndDeleteMigratedVolume(ctx, volumeID, cr)
-		if err != nil && !errors.Is(err, ErrImageNotFound) {
-			return nil, status.Error(codes.Internal, err.Error())
+		pmVolID, pErr := parseMigrationVolID(volumeID)
+		if pErr != nil {
+			return nil, status.Error(codes.InvalidArgument, pErr.Error())
+		}
+		pErr = deleteMigratedVolume(ctx, pmVolID, cr)
+		if pErr != nil && !errors.Is(pErr, ErrImageNotFound) {
+			return nil, status.Error(codes.Internal, pErr.Error())
 		}
 
 		return &csi.DeleteVolumeResponse{}, nil

diff --git a/internal/rbd/migration.go b/internal/rbd/migration.go
index d76fab28e..ca8c48e4c 100644
--- a/internal/rbd/migration.go
+++ b/internal/rbd/migration.go
@@ -74,15 +74,10 @@ func parseMigrationVolID(vh string) (*migrationVolID, error) {
 	return mh, nil
 }
 
-// parseAndDeleteMigratedVolume get rbd volume details from the migration volID
+// deleteMigratedVolume get rbd volume details from the migration volID
 // and delete the volume from the cluster, return err if there was an error on the process.
-func parseAndDeleteMigratedVolume(ctx context.Context, volumeID string, cr *util.Credentials) error {
-	parsedMigHandle, err := parseMigrationVolID(volumeID)
-	if err != nil {
-		log.ErrorLog(ctx, "failed to parse migration volumeID: %s , err: %v", volumeID, err)
-
-		return err
-	}
+func deleteMigratedVolume(ctx context.Context, parsedMigHandle *migrationVolID, cr *util.Credentials) error {
+	var err error
 	rv := &rbdVolume{}
 
 	// fill details to rv struct from parsed migration handle

From 6aec858cbaae1dbf5332f18823b71d44c3710408 Mon Sep 17 00:00:00 2001
From: Humble Chirammal
Date: Mon, 25 Oct 2021 14:48:43 +0530
Subject: [PATCH 5/7] rbd: parse migration secret and set fields for nodestage
 operations

This commit makes use of the migration request secret parsing and sets
the required fields for further nodestage operations.

Signed-off-by: Humble Chirammal
---
 internal/rbd/nodeserver.go | 33 +++++++++++++++++++--------------
 1 file changed, 19 insertions(+), 14 deletions(-)

diff --git a/internal/rbd/nodeserver.go b/internal/rbd/nodeserver.go
index 846668e6f..052deca24 100644
--- a/internal/rbd/nodeserver.go
+++ b/internal/rbd/nodeserver.go
@@ -151,10 +151,14 @@ func healerStageTransaction(ctx context.Context, cr *util.Credentials, volOps *r
 }
 
 // populateRbdVol update the fields in rbdVolume struct based on the request it received.
+// This function also receives the credentials and secrets args, as they carry different data.
+// The credentials are used directly by functions like voljournal.Connect(), while other
+// functions like genVolFromVolumeOptions() make use of the secrets.
 func populateRbdVol(
 	ctx context.Context,
 	req *csi.NodeStageVolumeRequest,
-	cr *util.Credentials) (*rbdVolume, error) {
+	cr *util.Credentials,
+	secrets map[string]string) (*rbdVolume, error) {
 	var err error
 	var j *journal.Connection
 	volID := req.GetVolumeId()
@@ -179,7 +183,7 @@ func populateRbdVol(
 		disableInUseChecks = true
 	}
 
-	rv, err := genVolFromVolumeOptions(ctx, req.GetVolumeContext(), req.GetSecrets(), disableInUseChecks, true)
+	rv, err := genVolFromVolumeOptions(ctx, req.GetVolumeContext(), secrets, disableInUseChecks, true)
 	if err != nil {
 		return nil, status.Error(codes.Internal, err.Error())
 	}
@@ -254,13 +258,20 @@ func populateRbdVol(
 func (ns *NodeServer) NodeStageVolume(
 	ctx context.Context,
 	req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
-	if err := util.ValidateNodeStageVolumeRequest(req); err != nil {
+	var err error
+	if err = util.ValidateNodeStageVolumeRequest(req); err != nil {
 		return nil, err
 	}
 
 	volID := req.GetVolumeId()
-
-	cr, err := util.NewUserCredentials(req.GetSecrets())
+	secrets := req.GetSecrets()
+	if util.IsMigrationSecret(secrets) {
+		secrets, err = util.ParseAndSetSecretMapFromMigSecret(secrets)
+		if err != nil {
+			return nil, status.Error(codes.InvalidArgument, err.Error())
+		}
+	}
+	cr, err := util.NewUserCredentials(secrets)
 	if err != nil {
 		return nil, status.Error(codes.Internal, err.Error())
 	}
@@ -298,7 +309,7 @@ func (ns *NodeServer) NodeStageVolume(
 		return nil, status.Error(codes.InvalidArgument, "missing required parameter imageFeatures")
 	}
 
-	rv, err := populateRbdVol(ctx, req, cr)
+	rv, err := populateRbdVol(ctx, req, cr, secrets)
 	if err != nil {
 		return nil, err
 	}
@@ -335,7 +346,7 @@ func (ns *NodeServer) NodeStageVolume(
 
 	// perform the actual staging and if this fails, have undoStagingTransaction
 	// cleans up for us
-	transaction, err = ns.stageTransaction(ctx, req, rv, isStaticVol)
+	transaction, err = ns.stageTransaction(ctx, req, cr, rv, isStaticVol)
 	if err != nil {
 		return nil, status.Error(codes.Internal, err.Error())
 	}
@@ -352,6 +363,7 @@ func (ns *NodeServer) NodeStageVolume(
 func (ns *NodeServer) stageTransaction(
 	ctx context.Context,
 	req *csi.NodeStageVolumeRequest,
+	cr *util.Credentials,
 	volOptions *rbdVolume,
 	staticVol bool) (stageTransaction, error) {
 	transaction := stageTransaction{}
@@ -359,13 +371,6 @@ func (ns *NodeServer) stageTransaction(
 	var err error
 	var readOnly bool
 
-	var cr *util.Credentials
-	cr, err = util.NewUserCredentials(req.GetSecrets())
-	if err != nil {
-		return transaction, err
-	}
-	defer cr.DeleteCredentials()
-
 	// Allow image to be mounted on multiple nodes if it is ROX
 	if req.VolumeCapability.AccessMode.Mode == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY {
 		log.ExtendedLog(ctx, "setting disableInUseChecks on rbd volume to: %v", req.GetVolumeId)

From de57fa1804f4d15a03c4a6962d0672485b22ffa3 Mon Sep 17 00:00:00 2001
From: Humble Chirammal
Date: Fri, 15 Oct 2021 12:14:10 +0530
Subject: [PATCH 6/7] e2e: adjust deletion, filesystem and block tests for
 migration volume

This commit creates and makes use of the migration secret in the
requests and validates various CSI operations.

Signed-off-by: Humble Chirammal
---
 e2e/ceph_user.go |   2 +
 e2e/migration.go | 136 +++++++++++++++++++++++++++++++++++++++++++++--
 e2e/rbd.go       |  41 ++++++++++++--
 e2e/staticpvc.go |   7 ++-
 4 files changed, 178 insertions(+), 8 deletions(-)

diff --git a/e2e/ceph_user.go b/e2e/ceph_user.go
index 7d21ae473..00b273d6b 100644
--- a/e2e/ceph_user.go
+++ b/e2e/ceph_user.go
@@ -21,6 +21,8 @@ const (
 	rbdProvisionerSecretName          = "cephcsi-rbd-provisioner"
 	rbdNamespaceNodePluginSecretName  = "cephcsi-rbd-ns-node"
 	rbdNamespaceProvisionerSecretName = "cephcsi-rbd-ns-provisioner"
+	rbdMigrationNodePluginSecretName  = "cephcsi-rbd-mig-node"
+	rbdMigrationProvisionerSecretName = "cephcsi-rbd-mig-provisioner"
 	cephFSNodePluginSecretName        = "cephcsi-cephfs-node"
 	cephFSProvisionerSecretName       = "cephcsi-cephfs-provisioner"
 )

diff --git a/e2e/migration.go b/e2e/migration.go
index ddeaeaf8b..04390fee3 100644
--- a/e2e/migration.go
+++ b/e2e/migration.go
@@ -96,7 +96,7 @@ func validateRBDStaticMigrationPVDeletion(f *framework.Framework, appPath, scNam
 
 	err = deletePVCAndApp("", f, pvc, app)
 	if err != nil {
-		return fmt.Errorf("failed to delete PVC and application with error %w", err)
+		return fmt.Errorf("failed to delete PVC and application: %w", err)
 	}
 
 	return err
@@ -134,12 +134,142 @@ func generateClusterIDConfigMapForMigration(f *framework.Framework, c kubernetes
 	// create custom configmap
 	err = createCustomConfigMap(f.ClientSet, rbdDirPath, clusterInfo)
 	if err != nil {
-		return fmt.Errorf("failed to create configmap with error %w", err)
+		return fmt.Errorf("failed to create configmap: %w", err)
 	}
 	// restart csi pods for the configmap to take effect.
 	err = recreateCSIRBDPods(f)
 	if err != nil {
-		return fmt.Errorf("failed to recreate rbd csi pods with error %w", err)
+		return fmt.Errorf("failed to recreate rbd csi pods: %w", err)
 	}
 
 	return nil
 }
+
+// createRBDMigrationSecret creates a migration secret with the passed-in user name.
+// This secret differs from the CSI secret data in the following aspects:
+// instead of the `UserKey` field, the migration secret has a `key` field.
+// If 'userName' is passed and it is not admin, the passed-in userName will be
+// set as the `adminId` field in the secret.
+func createRBDMigrationSecret(f *framework.Framework, secretName, userName, userKey string) error {
+	secPath := fmt.Sprintf("%s/%s", rbdExamplePath, "secret.yaml")
+	sec, err := getSecret(secPath)
+	if err != nil {
+		return err
+	}
+	if secretName != "" {
+		sec.Name = secretName
+	}
+	// if it's admin, we don't need to change anything in the migration secret;
+	// the CSI driver will use the key from the existing secret and continue.
+	if userName != "admin" {
+		sec.StringData["adminId"] = userName
+	}
+	sec.StringData["key"] = userKey
+	sec.Namespace = cephCSINamespace
+	_, err = f.ClientSet.CoreV1().Secrets(cephCSINamespace).Create(context.TODO(), &sec, metav1.CreateOptions{})
+
+	return err
+}
+
+// createMigrationUserSecretAndSC creates the migration user and a secret
+// associated with this user first, then creates an SC based on the same.
+func createMigrationUserSecretAndSC(f *framework.Framework, scName string) error {
+	if scName == "" {
+		scName = defaultSCName
+	}
+	err := createProvNodeCephUserAndSecret(f, true, true)
+	if err != nil {
+		return err
+	}
+
+	err = createMigrationSC(f, scName)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func createMigrationSC(f *framework.Framework, scName string) error {
+	err := deleteResource(rbdExamplePath + "storageclass.yaml")
+	if err != nil {
+		return fmt.Errorf("failed to delete storageclass: %w", err)
+	}
+	param := make(map[string]string)
+	// add new secrets to the SC parameters
+	param["csi.storage.k8s.io/provisioner-secret-namespace"] = cephCSINamespace
+	param["csi.storage.k8s.io/provisioner-secret-name"] = rbdMigrationProvisionerSecretName
+	param["csi.storage.k8s.io/controller-expand-secret-namespace"] = cephCSINamespace
+	param["csi.storage.k8s.io/controller-expand-secret-name"] = rbdMigrationProvisionerSecretName
+	param["csi.storage.k8s.io/node-stage-secret-namespace"] = cephCSINamespace
+	param["csi.storage.k8s.io/node-stage-secret-name"] = rbdMigrationNodePluginSecretName
+	err = createRBDStorageClass(f.ClientSet, f, scName, nil, param, deletePolicy)
+	if err != nil {
+		return fmt.Errorf("failed to create storageclass: %w", err)
+	}
+
+	return nil
+}
+
+// createProvNodeCephUserAndSecret fetches the Ceph migration user's key and
+// creates a migration secret with it, based on the arg values of
+// 'provisionerSecret' and 'nodeSecret'.
+func createProvNodeCephUserAndSecret(f *framework.Framework, provisionerSecret, nodeSecret bool) error {
+	if provisionerSecret {
+		// Fetch the key.
+		key, err := createCephUser(
+			f,
+			keyringRBDProvisionerUsername,
+			rbdProvisionerCaps(defaultRBDPool, radosNamespace),
+		)
+		if err != nil {
+			return fmt.Errorf("failed to create user %q: %w", keyringRBDProvisionerUsername, err)
+		}
+		err = createRBDMigrationSecret(f, rbdMigrationProvisionerSecretName, keyringRBDProvisionerUsername, key)
+		if err != nil {
+			return fmt.Errorf("failed to create provisioner secret: %w", err)
+		}
+	}
+
+	if nodeSecret {
+		// Fetch the key.
+		key, err := createCephUser(
+			f,
+			keyringRBDNodePluginUsername,
+			rbdNodePluginCaps(defaultRBDPool, radosNamespace))
+		if err != nil {
+			return fmt.Errorf("failed to create user %q: %w", keyringRBDNodePluginUsername, err)
+		}
+		err = createRBDMigrationSecret(f, rbdMigrationNodePluginSecretName, keyringRBDNodePluginUsername, key)
+		if err != nil {
+			return fmt.Errorf("failed to create node secret: %w", err)
+		}
+	}
+
+	return nil
+}
+
+// deleteProvNodeMigrationSecret deletes the Ceph migration secrets based on
+// the arg values of 'provisionerSecret' and 'nodeSecret'.
+func deleteProvNodeMigrationSecret(f *framework.Framework, provisionerSecret, nodeSecret bool) error {
+	c := f.ClientSet
+	if provisionerSecret {
+		// delete RBD provisioner secret.
+		err := c.CoreV1().
+			Secrets(cephCSINamespace).
+			Delete(context.TODO(), rbdMigrationProvisionerSecretName, metav1.DeleteOptions{})
+		if err != nil {
+			return fmt.Errorf("failed to delete provisioner secret: %w", err)
+		}
+	}
+
+	if nodeSecret {
+		// delete RBD node secret.
+		err := c.CoreV1().
+			Secrets(cephCSINamespace).
+			Delete(context.TODO(), rbdMigrationNodePluginSecretName, metav1.DeleteOptions{})
+		if err != nil {
+			return fmt.Errorf("failed to delete node secret: %w", err)
+		}
 	}
 
 	return nil

diff --git a/e2e/rbd.go b/e2e/rbd.go
index 629975503..54815c6c8 100644
--- a/e2e/rbd.go
+++ b/e2e/rbd.go
@@ -374,7 +374,9 @@ var _ = Describe("RBD", func() {
 			if err != nil {
 				e2elog.Failf("failed to generate clusterID configmap with error %v", err)
 			}
-			err = createRBDStorageClass(f.ClientSet, f, "migrationsc", nil, nil, deletePolicy)
+
+			// create a sc with different migration secret
+			err = createMigrationUserSecretAndSC(f, "migrationsc")
 			if err != nil {
 				e2elog.Failf("failed to create storageclass with error %v", err)
 			}
@@ -392,6 +394,15 @@ var _ = Describe("RBD", func() {
 			if err != nil {
 				e2elog.Failf("failed to create configmap with error %v", err)
 			}
+
+			err = deleteProvNodeMigrationSecret(f, true, true)
+			if err != nil {
+				e2elog.Failf("failed to delete migration users and associated secrets with error %v", err)
+			}
+			err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy)
+			if err != nil {
+				e2elog.Failf("failed to create storageclass with error %v", err)
+			}
 		})
 
 		By("create a PVC and validate owner", func() {
@@ -1606,12 +1617,24 @@ var _ = Describe("RBD", func() {
 			if err != nil {
 				e2elog.Failf("failed to generate clusterID configmap with error %v", err)
 			}
-			err = validateRBDStaticMigrationPV(f, appPath, false)
+			// create node user and migration secret.
+			err = createProvNodeCephUserAndSecret(f, false, true)
+			if err != nil {
+				e2elog.Failf("failed to create users and secret with error %v", err)
+			}
+
+			err = validateRBDStaticMigrationPV(f, appPath, rbdMigrationNodePluginSecretName, false)
 			if err != nil {
 				e2elog.Failf("failed to validate rbd migrated static pv with error %v", err)
 			}
 			// validate created backend rbd images
 			validateRBDImageCount(f, 0, defaultRBDPool)
+
+			err = deleteProvNodeMigrationSecret(f, false, true)
+			if err != nil {
+				e2elog.Failf("failed to delete users and secret with error %v", err)
+			}
+
 			err = deleteConfigMap(rbdDirPath)
 			if err != nil {
 				e2elog.Failf("failed to delete configmap with error %v", err)
@@ -1627,12 +1650,24 @@ var _ = Describe("RBD", func() {
 			if err != nil {
 				e2elog.Failf("failed to generate clusterID configmap with error %v", err)
 			}
-			err = validateRBDStaticMigrationPV(f, rawAppPath, true)
+			// create node user and migration secret.
+			err = createProvNodeCephUserAndSecret(f, false, true)
+			if err != nil {
+				e2elog.Failf("failed to create users and secret with error %v", err)
+			}
+
+			err = validateRBDStaticMigrationPV(f, rawAppPath, rbdMigrationNodePluginSecretName, true)
 			if err != nil {
 				e2elog.Failf("failed to validate rbd migrated static block pv with error %v", err)
 			}
 			// validate created backend rbd images
 			validateRBDImageCount(f, 0, defaultRBDPool)
+
+			err = deleteProvNodeMigrationSecret(f, false, true)
+			if err != nil {
+				e2elog.Failf("failed to delete users and secret with error %v", err)
+			}
+
 			err = deleteConfigMap(rbdDirPath)
 			if err != nil {
 				e2elog.Failf("failed to delete configmap with error %v", err)

diff --git a/e2e/staticpvc.go b/e2e/staticpvc.go
index 3ef2c96c5..3bcf6d261 100644
--- a/e2e/staticpvc.go
+++ b/e2e/staticpvc.go
@@ -221,7 +221,7 @@ func validateRBDStaticPV(f *framework.Framework, appPath string, isBlock, checkI
 	return err
 }
 
-func validateRBDStaticMigrationPV(f *framework.Framework, appPath string, isBlock bool) error {
+func validateRBDStaticMigrationPV(f *framework.Framework, appPath, nodeSecretName string, isBlock bool) error {
 	opt := make(map[string]string)
 	var (
 		rbdImageName = "test-static-pv"
@@ -254,6 +254,9 @@ func validateRBDStaticMigrationPV(f *framework.Framework, appPath string, isBloc
 	if e != "" {
 		return fmt.Errorf("failed to create rbd image %s", e)
 	}
+	if nodeSecretName == "" {
+		nodeSecretName = rbdNodePluginSecretName
+	}
 
 	opt["migration"] = "true"
 	opt["clusterID"] = getMonsHash(mon)
@@ -265,7 +268,7 @@ func validateRBDStaticMigrationPV(f *framework.Framework, appPath string, isBloc
 		pvName,
 		rbdImageName,
 		size,
-		rbdNodePluginSecretName,
+		nodeSecretName,
 		cephCSINamespace,
 		sc,
 		"rbd.csi.ceph.com",

From c852f487a5e37cc532a636b0187a4bff4e61b83c Mon Sep 17 00:00:00 2001
From: Niels de Vos
Date: Thu, 28 Oct 2021 12:21:31 +0200
Subject: [PATCH 7/7] util: set defaults for Vault config before converting

When using UPPER_CASE formatting for the HashiCorp Vault KMS
configuration, a missing `VAULT_DESTROY_KEYS` will cause the option to
be set to "false". The default for the option is intended to be "true".
This is a difference in behaviour between the `vaultDestroyKeys` and
`VAULT_DESTROY_KEYS` options. Both should use a default of "true" when
the configuration does not set the option explicitly.

By setting the default options in the `standardVault` struct before
unmarshalling the configuration into it, the default values will be
retained for the missing configuration options.
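The underlying behaviour this relies on: json.Unmarshal() only overwrites
struct fields that are present in the input, so pre-populated defaults
survive a partial configuration. A minimal self-contained sketch (field and
struct names illustrative, not the actual standardVault):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type vaultConf struct {
        DestroyKeys string `json:"VAULT_DESTROY_KEYS"`
    }

    func main() {
        // pre-populate the default before unmarshalling; a missing
        // VAULT_DESTROY_KEYS key in the JSON keeps the value "true"
        conf := vaultConf{DestroyKeys: "true"}
        _ = json.Unmarshal([]byte(`{}`), &conf)
        fmt.Println(conf.DestroyKeys) // prints: true
    }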
Reported-by: Rachael George
Signed-off-by: Niels de Vos
---
 internal/kms/vault.go             |  4 ++--
 internal/kms/vault_tokens.go      | 11 ++++++++---
 internal/kms/vault_tokens_test.go | 13 +++++++++++++
 3 files changed, 23 insertions(+), 5 deletions(-)

diff --git a/internal/kms/vault.go b/internal/kms/vault.go
index d1e146538..6a2f2c744 100644
--- a/internal/kms/vault.go
+++ b/internal/kms/vault.go
@@ -43,7 +43,7 @@ const (
 	vaultDefaultRole           = "csi-kubernetes"
 	vaultDefaultNamespace      = ""
 	vaultDefaultPassphrasePath = ""
-	vaultDefaultCAVerify       = "true"
+	vaultDefaultCAVerify       = true
 	vaultDefaultDestroyKeys    = "true"
 )
 
@@ -208,7 +208,7 @@ func (vc *vaultConnection) initConnection(config map[string]interface{}) error {
 		keyContext[loss.KeyVaultNamespace] = vaultNamespace
 	}
 
-	verifyCA := vaultDefaultCAVerify // optional
+	verifyCA := strconv.FormatBool(vaultDefaultCAVerify) // optional
 	err = setConfigString(&verifyCA, config, "vaultCAVerify")
 	if errors.Is(err, errConfigOptionInvalid) {
 		return err

diff --git a/internal/kms/vault_tokens.go b/internal/kms/vault_tokens.go
index 46d7f1a50..d9eee0816 100644
--- a/internal/kms/vault_tokens.go
+++ b/internal/kms/vault_tokens.go
@@ -101,7 +101,6 @@ func (v *vaultTokenConf) convertStdVaultToCSIConfig(s *standardVault) {
 
 	// by default the CA should get verified, only when VaultSkipVerify is
 	// set, verification should be disabled
-	v.VaultCAVerify = vaultDefaultCAVerify
 	verify, err := strconv.ParseBool(s.VaultSkipVerify)
 	if err == nil {
 		v.VaultCAVerify = strconv.FormatBool(!verify)
@@ -124,8 +123,14 @@ func transformConfig(svMap map[string]interface{}) (map[string]interface{}, erro
 		return nil, fmt.Errorf("failed to convert config %T to JSON: %w", svMap, err)
 	}
 
-	// convert the JSON back to a standardVault struct
-	sv := &standardVault{}
+	// convert the JSON back to a standardVault struct, default values are
+	// set in case the configuration does not provide all options
+	sv := &standardVault{
+		VaultDestroyKeys: vaultDefaultDestroyKeys,
+		VaultNamespace:   vaultDefaultNamespace,
+		VaultSkipVerify:  strconv.FormatBool(!vaultDefaultCAVerify),
+	}
+
 	err = json.Unmarshal(data, sv)
 	if err != nil {
 		return nil, fmt.Errorf("failed to Unmarshal the vault configuration: %w", err)

diff --git a/internal/kms/vault_tokens_test.go b/internal/kms/vault_tokens_test.go
index df1e105f0..a398b7e80 100644
--- a/internal/kms/vault_tokens_test.go
+++ b/internal/kms/vault_tokens_test.go
@@ -19,6 +19,7 @@ package kms
 import (
 	"encoding/json"
 	"errors"
+	"strconv"
 	"strings"
 	"testing"
 
@@ -208,6 +209,18 @@ func TestTransformConfig(t *testing.T) {
 	assert.Equal(t, config["vaultCAVerify"], "false")
 }
 
+func TestTransformConfigDefaults(t *testing.T) {
+	t.Parallel()
+	cm := make(map[string]interface{})
+	cm["KMS_PROVIDER"] = kmsTypeVaultTokens
+
+	config, err := transformConfig(cm)
+	require.NoError(t, err)
+	assert.Equal(t, config["encryptionKMSType"], cm["KMS_PROVIDER"])
+	assert.Equal(t, config["vaultDestroyKeys"], vaultDefaultDestroyKeys)
+	assert.Equal(t, config["vaultCAVerify"], strconv.FormatBool(vaultDefaultCAVerify))
+}
+
 func TestVaultTokensKMSRegistered(t *testing.T) {
 	t.Parallel()
 	_, ok := kmsManager.providers[kmsTypeVaultTokens]