From 694672b154b84149f22accd3ad98a58958ef0e73 Mon Sep 17 00:00:00 2001 From: Niels de Vos Date: Thu, 21 Mar 2024 10:48:50 +0100 Subject: [PATCH 1/8] cleanup: do not pass an empty snapshot to genSnapFromSnapID() Just like GenVolFromVolID() the genSnapFromSnapID() function can return a snapshot. There is no need to allocate an empty snapshot and pass that to the genSnapFromSnapID() function. Signed-off-by: Niels de Vos --- internal/rbd/controllerserver.go | 11 ++++---- internal/rbd/rbd_util.go | 45 +++++++++++++++++--------------- 2 files changed, 29 insertions(+), 27 deletions(-) diff --git a/internal/rbd/controllerserver.go b/internal/rbd/controllerserver.go index 75197c8bb..5f5e70ecd 100644 --- a/internal/rbd/controllerserver.go +++ b/internal/rbd/controllerserver.go @@ -638,7 +638,6 @@ func (cs *ControllerServer) createVolumeFromSnapshot( rbdVol *rbdVolume, snapshotID string, ) error { - rbdSnap := &rbdSnapshot{} if acquired := cs.SnapshotLocks.TryAcquire(snapshotID); !acquired { log.ErrorLog(ctx, util.SnapshotOperationAlreadyExistsFmt, snapshotID) @@ -646,7 +645,7 @@ func (cs *ControllerServer) createVolumeFromSnapshot( } defer cs.SnapshotLocks.Release(snapshotID) - err := genSnapFromSnapID(ctx, rbdSnap, snapshotID, cr, secrets) + rbdSnap, err := genSnapFromSnapID(ctx, snapshotID, cr, secrets) if err != nil { if errors.Is(err, util.ErrPoolNotFound) { log.ErrorLog(ctx, "failed to get backend snapshot for %s: %v", snapshotID, err) @@ -789,8 +788,8 @@ func checkContentSource( if snapshotID == "" { return nil, nil, status.Errorf(codes.NotFound, "volume Snapshot ID cannot be empty") } - rbdSnap := &rbdSnapshot{} - if err := genSnapFromSnapID(ctx, rbdSnap, snapshotID, cr, req.GetSecrets()); err != nil { + rbdSnap, err := genSnapFromSnapID(ctx, snapshotID, cr, req.GetSecrets()) + if err != nil { log.ErrorLog(ctx, "failed to get backend snapshot for %s: %v", snapshotID, err) if !errors.Is(err, ErrSnapNotFound) { return nil, nil, status.Error(codes.Internal, 
err.Error()) @@ -1429,8 +1428,8 @@ func (cs *ControllerServer) DeleteSnapshot( } defer cs.OperationLocks.ReleaseDeleteLock(snapshotID) - rbdSnap := &rbdSnapshot{} - if err = genSnapFromSnapID(ctx, rbdSnap, snapshotID, cr, req.GetSecrets()); err != nil { + rbdSnap, err := genSnapFromSnapID(ctx, snapshotID, cr, req.GetSecrets()) + if err != nil { // if error is ErrPoolNotFound, the pool is already deleted we don't // need to worry about deleting snapshot or omap data, return success if errors.Is(err, util.ErrPoolNotFound) { diff --git a/internal/rbd/rbd_util.go b/internal/rbd/rbd_util.go index b318d0f62..13e226bf4 100644 --- a/internal/rbd/rbd_util.go +++ b/internal/rbd/rbd_util.go @@ -949,54 +949,57 @@ func (ri *rbdImage) checkImageChainHasFeature(ctx context.Context, feature uint6 // genSnapFromSnapID generates a rbdSnapshot structure from the provided identifier, updating // the structure with elements from on-disk snapshot metadata as well. +// +// NOTE: The returned rbdSnapshot can be non-nil in case of an error. That +// seems to be required for the DeleteSnapshot procedure, so that OMAP +// attributes can be cleaned-up. 
func genSnapFromSnapID( ctx context.Context, - rbdSnap *rbdSnapshot, snapshotID string, cr *util.Credentials, secrets map[string]string, -) error { +) (*rbdSnapshot, error) { var vi util.CSIIdentifier - rbdSnap.VolID = snapshotID - - err := vi.DecomposeCSIID(rbdSnap.VolID) + err := vi.DecomposeCSIID(snapshotID) if err != nil { - log.ErrorLog(ctx, "error decoding snapshot ID (%s) (%s)", err, rbdSnap.VolID) + log.ErrorLog(ctx, "error decoding snapshot ID (%s) (%s)", err, snapshotID) - return err + return nil, err } + rbdSnap := &rbdSnapshot{} + rbdSnap.VolID = snapshotID rbdSnap.ClusterID = vi.ClusterID rbdSnap.Monitors, _, err = util.GetMonsAndClusterID(ctx, rbdSnap.ClusterID, false) if err != nil { log.ErrorLog(ctx, "failed getting mons (%s)", err) - return err + return nil, err } rbdSnap.Pool, err = util.GetPoolName(rbdSnap.Monitors, cr, vi.LocationID) if err != nil { - return err + return nil, err } rbdSnap.JournalPool = rbdSnap.Pool rbdSnap.RadosNamespace, err = util.GetRadosNamespace(util.CsiConfigFile, rbdSnap.ClusterID) if err != nil { - return err + return nil, err } j, err := snapJournal.Connect(rbdSnap.Monitors, rbdSnap.RadosNamespace, cr) if err != nil { - return err + return nil, err } defer j.Destroy() imageAttributes, err := j.GetImageAttributes( ctx, rbdSnap.Pool, vi.ObjectUUID, true) if err != nil { - return err + return rbdSnap, err } rbdSnap.ImageID = imageAttributes.ImageID rbdSnap.RequestName = imageAttributes.RequestName @@ -1009,42 +1012,42 @@ func genSnapFromSnapID( rbdSnap.JournalPool, err = util.GetPoolName(rbdSnap.Monitors, cr, imageAttributes.JournalPoolID) if err != nil { // TODO: If pool is not found we may leak the image (as DeleteSnapshot will return success) - return err + return rbdSnap, err } } err = rbdSnap.Connect(cr) + if err != nil { + return rbdSnap, fmt.Errorf("failed to connect to %q: %w", + rbdSnap, err) + } defer func() { if err != nil { rbdSnap.Destroy() } }() - if err != nil { - return fmt.Errorf("failed to connect to %q: 
%w", - rbdSnap, err) - } if imageAttributes.KmsID != "" && imageAttributes.EncryptionType == util.EncryptionTypeBlock { err = rbdSnap.configureBlockEncryption(imageAttributes.KmsID, secrets) if err != nil { - return fmt.Errorf("failed to configure block encryption for "+ + return rbdSnap, fmt.Errorf("failed to configure block encryption for "+ "%q: %w", rbdSnap, err) } } if imageAttributes.KmsID != "" && imageAttributes.EncryptionType == util.EncryptionTypeFile { err = rbdSnap.configureFileEncryption(ctx, imageAttributes.KmsID, secrets) if err != nil { - return fmt.Errorf("failed to configure file encryption for "+ + return rbdSnap, fmt.Errorf("failed to configure file encryption for "+ "%q: %w", rbdSnap, err) } } err = updateSnapshotDetails(rbdSnap) if err != nil { - return fmt.Errorf("failed to update snapshot details for %q: %w", rbdSnap, err) + return rbdSnap, fmt.Errorf("failed to update snapshot details for %q: %w", rbdSnap, err) } - return err + return rbdSnap, err } // updateSnapshotDetails will copy the details from the rbdVolume to the From 8b959f2b6d2f0b86f27486a1469a0b270000875e Mon Sep 17 00:00:00 2001 From: Niels de Vos Date: Thu, 21 Mar 2024 10:59:50 +0100 Subject: [PATCH 2/8] rbd: free snapshot resources after allocation Not all snapshot objects are free'd correctly after they were allocated. It is possible that some connections to the Ceph cluster were never closed. This does not need to be a noticeable problem, as connections are re-used where possible, but it isn't clean either. 
Signed-off-by: Niels de Vos --- internal/rbd/controllerserver.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/internal/rbd/controllerserver.go b/internal/rbd/controllerserver.go index 5f5e70ecd..cb2782851 100644 --- a/internal/rbd/controllerserver.go +++ b/internal/rbd/controllerserver.go @@ -348,6 +348,12 @@ func (cs *ControllerServer) CreateVolume( if err != nil { return nil, err } + if parentVol != nil { + defer parentVol.Destroy() + } + if rbdSnap != nil { + defer rbdSnap.Destroy() + } err = updateTopologyConstraints(rbdVol, rbdSnap) if err != nil { @@ -655,6 +661,7 @@ func (cs *ControllerServer) createVolumeFromSnapshot( return status.Error(codes.Internal, err.Error()) } + defer rbdSnap.Destroy() // update parent name(rbd image name in snapshot) rbdSnap.RbdImageName = rbdSnap.RbdSnapName @@ -1458,6 +1465,7 @@ func (cs *ControllerServer) DeleteSnapshot( return nil, status.Error(codes.Internal, err.Error()) } + defer rbdSnap.Destroy() // safeguard against parallel create or delete requests against the same // name From 7eb8861479b92286cba138287ee73a5e3f526583 Mon Sep 17 00:00:00 2001 From: Niels de Vos Date: Fri, 22 Mar 2024 14:43:25 +0100 Subject: [PATCH 3/8] rbd: let parseVolCreateRequest() return a connected rbdVolume By returning a connected rbdVolume in parseVolCreateRequest(), the CreateVolume() function can be simplified a little. There is no need to call the additional Connect() and detect failures with it. 
Signed-off-by: Niels de Vos --- internal/rbd/controllerserver.go | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/internal/rbd/controllerserver.go b/internal/rbd/controllerserver.go index cb2782851..89f0191ec 100644 --- a/internal/rbd/controllerserver.go +++ b/internal/rbd/controllerserver.go @@ -150,6 +150,7 @@ func validateStriping(parameters map[string]string) error { func (cs *ControllerServer) parseVolCreateRequest( ctx context.Context, req *csi.CreateVolumeRequest, + cr *util.Credentials, ) (*rbdVolume, error) { // TODO (sbezverk) Last check for not exceeding total storage capacity @@ -226,6 +227,13 @@ func (cs *ControllerServer) parseVolCreateRequest( return nil, status.Error(codes.InvalidArgument, err.Error()) } + err = rbdVol.Connect(cr) + if err != nil { + log.ErrorLog(ctx, "failed to connect to volume %v: %v", rbdVol.RbdImageName, err) + + return nil, status.Error(codes.Internal, err.Error()) + } + // NOTE: rbdVol does not contain VolID and RbdImageName populated, everything // else is populated post create request parsing return rbdVol, nil @@ -324,7 +332,7 @@ func (cs *ControllerServer) CreateVolume( return nil, status.Error(codes.InvalidArgument, err.Error()) } defer cr.DeleteCredentials() - rbdVol, err := cs.parseVolCreateRequest(ctx, req) + rbdVol, err := cs.parseVolCreateRequest(ctx, req, cr) if err != nil { return nil, err } @@ -337,13 +345,6 @@ func (cs *ControllerServer) CreateVolume( } defer cs.VolumeLocks.Release(req.GetName()) - err = rbdVol.Connect(cr) - if err != nil { - log.ErrorLog(ctx, "failed to connect to volume %v: %v", rbdVol.RbdImageName, err) - - return nil, status.Error(codes.Internal, err.Error()) - } - parentVol, rbdSnap, err := checkContentSource(ctx, req, cr) if err != nil { return nil, err From 3576a8897adee60a557f06d00bba1e1a90cb4a2a Mon Sep 17 00:00:00 2001 From: Niels de Vos Date: Tue, 26 Mar 2024 09:51:19 +0100 Subject: [PATCH 4/8] cleanup: reformat generateVolFromSnap() to 
rbdSnapshot.toVolume() Signed-off-by: Niels de Vos --- internal/rbd/controllerserver.go | 10 ++++----- internal/rbd/rbd_journal.go | 2 +- internal/rbd/rbd_util.go | 2 +- internal/rbd/snapshot.go | 35 ++++++++++++++++---------------- 4 files changed, 25 insertions(+), 24 deletions(-) diff --git a/internal/rbd/controllerserver.go b/internal/rbd/controllerserver.go index 89f0191ec..01d4b6c58 100644 --- a/internal/rbd/controllerserver.go +++ b/internal/rbd/controllerserver.go @@ -666,7 +666,7 @@ func (cs *ControllerServer) createVolumeFromSnapshot( // update parent name(rbd image name in snapshot) rbdSnap.RbdImageName = rbdSnap.RbdSnapName - parentVol := generateVolFromSnap(rbdSnap) + parentVol := rbdSnap.toVolume() // as we are operating on single cluster reuse the connection parentVol.conn = rbdVol.conn.Copy() @@ -1237,7 +1237,7 @@ func cloneFromSnapshot( cr *util.Credentials, parameters map[string]string, ) (*csi.CreateSnapshotResponse, error) { - vol := generateVolFromSnap(rbdSnap) + vol := rbdSnap.toVolume() err := vol.Connect(cr) if err != nil { uErr := undoSnapshotCloning(ctx, rbdVol, rbdSnap, vol, cr) @@ -1322,7 +1322,7 @@ func (cs *ControllerServer) doSnapshotClone( cr *util.Credentials, ) (*rbdVolume, error) { // generate cloned volume details from snapshot - cloneRbd := generateVolFromSnap(rbdSnap) + cloneRbd := rbdSnap.toVolume() defer cloneRbd.Destroy() // add image feature for cloneRbd f := []string{librbd.FeatureNameLayering, librbd.FeatureNameDeepFlatten} @@ -1480,7 +1480,7 @@ func (cs *ControllerServer) DeleteSnapshot( // Deleting snapshot and cloned volume log.DebugLog(ctx, "deleting cloned rbd volume %s", rbdSnap.RbdSnapName) - rbdVol := generateVolFromSnap(rbdSnap) + rbdVol := rbdSnap.toVolume() err = rbdVol.Connect(cr) if err != nil { @@ -1511,7 +1511,7 @@ func (cs *ControllerServer) DeleteSnapshot( // cleanUpImageAndSnapReservation cleans up the image from the trash and // snapshot reservation in rados OMAP. 
func cleanUpImageAndSnapReservation(ctx context.Context, rbdSnap *rbdSnapshot, cr *util.Credentials) error { - rbdVol := generateVolFromSnap(rbdSnap) + rbdVol := rbdSnap.toVolume() err := rbdVol.Connect(cr) if err != nil { return status.Error(codes.Internal, err.Error()) diff --git a/internal/rbd/rbd_journal.go b/internal/rbd/rbd_journal.go index f7c2f86c7..04ad9138e 100644 --- a/internal/rbd/rbd_journal.go +++ b/internal/rbd/rbd_journal.go @@ -162,7 +162,7 @@ func checkSnapCloneExists( snapData.ImagePool, rbdSnap.Pool) } - vol := generateVolFromSnap(rbdSnap) + vol := rbdSnap.toVolume() defer vol.Destroy() err = vol.Connect(cr) if err != nil { diff --git a/internal/rbd/rbd_util.go b/internal/rbd/rbd_util.go index 13e226bf4..31b330b4d 100644 --- a/internal/rbd/rbd_util.go +++ b/internal/rbd/rbd_util.go @@ -1053,7 +1053,7 @@ func genSnapFromSnapID( // updateSnapshotDetails will copy the details from the rbdVolume to the // rbdSnapshot. example copying size from rbdVolume to rbdSnapshot. func updateSnapshotDetails(rbdSnap *rbdSnapshot) error { - vol := generateVolFromSnap(rbdSnap) + vol := rbdSnap.toVolume() err := vol.Connect(rbdSnap.conn.Creds) if err != nil { return err diff --git a/internal/rbd/snapshot.go b/internal/rbd/snapshot.go index bb420475c..d6dcd48c2 100644 --- a/internal/rbd/snapshot.go +++ b/internal/rbd/snapshot.go @@ -98,23 +98,24 @@ func cleanUpSnapshot( return nil } -func generateVolFromSnap(rbdSnap *rbdSnapshot) *rbdVolume { - vol := new(rbdVolume) - vol.ClusterID = rbdSnap.ClusterID - vol.VolID = rbdSnap.VolID - vol.Monitors = rbdSnap.Monitors - vol.Pool = rbdSnap.Pool - vol.JournalPool = rbdSnap.JournalPool - vol.RadosNamespace = rbdSnap.RadosNamespace - vol.RbdImageName = rbdSnap.RbdSnapName - vol.ImageID = rbdSnap.ImageID - // copyEncryptionConfig cannot be used here because the volume and the - // snapshot will have the same volumeID which cases the panic in - // copyEncryptionConfig function. 
- vol.blockEncryption = rbdSnap.blockEncryption - vol.fileEncryption = rbdSnap.fileEncryption - - return vol +func (rbdSnap *rbdSnapshot) toVolume() *rbdVolume { + return &rbdVolume{ + rbdImage: rbdImage{ + ClusterID: rbdSnap.ClusterID, + VolID: rbdSnap.VolID, + Monitors: rbdSnap.Monitors, + Pool: rbdSnap.Pool, + JournalPool: rbdSnap.JournalPool, + RadosNamespace: rbdSnap.RadosNamespace, + RbdImageName: rbdSnap.RbdSnapName, + ImageID: rbdSnap.ImageID, + // copyEncryptionConfig cannot be used here because the volume and the + // snapshot will have the same volumeID which cases the panic in + // copyEncryptionConfig function. + blockEncryption: rbdSnap.blockEncryption, + fileEncryption: rbdSnap.fileEncryption, + }, + } } func undoSnapshotCloning( From 3128593e6adf6d285f48957ee80d7582d46c7a9c Mon Sep 17 00:00:00 2001 From: Niels de Vos Date: Tue, 26 Mar 2024 15:38:20 +0100 Subject: [PATCH 5/8] rbd: add extra logging while cleaning up snapshots Signed-off-by: Niels de Vos --- internal/rbd/controllerserver.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/internal/rbd/controllerserver.go b/internal/rbd/controllerserver.go index 01d4b6c58..4f07ce5f0 100644 --- a/internal/rbd/controllerserver.go +++ b/internal/rbd/controllerserver.go @@ -1450,12 +1450,16 @@ func (cs *ControllerServer) DeleteSnapshot( // or partially complete (snap and snapOMap are garbage collected already), hence return // success as deletion is complete if errors.Is(err, util.ErrKeyNotFound) { + log.UsefulLog(ctx, "snapshot %s was been deleted already: %v", snapshotID, err) + return &csi.DeleteSnapshotResponse{}, nil } // if the error is ErrImageNotFound, We need to cleanup the image from // trash and remove the metadata in OMAP. 
if errors.Is(err, ErrImageNotFound) { + log.UsefulLog(ctx, "cleaning up leftovers of snapshot %s: %v", snapshotID, err) + err = cleanUpImageAndSnapReservation(ctx, rbdSnap, cr) if err != nil { return nil, status.Error(codes.Internal, err.Error()) From 6471de8d1c1389f6335d36fc83b86b7dccb1b49c Mon Sep 17 00:00:00 2001 From: Praveen M Date: Thu, 28 Mar 2024 17:03:41 +0530 Subject: [PATCH 6/8] helm: fix seLinuxMount option for csi driver This commit fixes the typo from `.Values.seLinuxMount` to `.Values.CSIDriver.seLinuxMount` used in helm charts. Signed-off-by: Praveen M --- charts/ceph-csi-cephfs/templates/csidriver-crd.yaml | 2 +- charts/ceph-csi-rbd/templates/csidriver-crd.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/charts/ceph-csi-cephfs/templates/csidriver-crd.yaml b/charts/ceph-csi-cephfs/templates/csidriver-crd.yaml index 5aacbe271..821bd1925 100644 --- a/charts/ceph-csi-cephfs/templates/csidriver-crd.yaml +++ b/charts/ceph-csi-cephfs/templates/csidriver-crd.yaml @@ -12,6 +12,6 @@ spec: attachRequired: false podInfoOnMount: false fsGroupPolicy: {{ .Values.CSIDriver.fsGroupPolicy }} -{{- if and (semverCompare ">= 1.25.x" .Capabilities.KubeVersion.Version) .Values.seLinuxMount }} +{{- if and (semverCompare ">= 1.25.x" .Capabilities.KubeVersion.Version) .Values.CSIDriver.seLinuxMount }} seLinuxMount: true {{- end }} diff --git a/charts/ceph-csi-rbd/templates/csidriver-crd.yaml b/charts/ceph-csi-rbd/templates/csidriver-crd.yaml index d1524527e..f3f1b2e08 100644 --- a/charts/ceph-csi-rbd/templates/csidriver-crd.yaml +++ b/charts/ceph-csi-rbd/templates/csidriver-crd.yaml @@ -12,6 +12,6 @@ spec: attachRequired: true podInfoOnMount: false fsGroupPolicy: {{ .Values.CSIDriver.fsGroupPolicy }} -{{- if and (semverCompare ">= 1.25.x" .Capabilities.KubeVersion.Version) .Values.seLinuxMount }} +{{- if and (semverCompare ">= 1.25.x" .Capabilities.KubeVersion.Version) .Values.CSIDriver.seLinuxMount }} seLinuxMount: true {{- end }} From 
8f2200ba518d92b85592593672bbcf94d0d92f65 Mon Sep 17 00:00:00 2001 From: Niels de Vos Date: Fri, 29 Mar 2024 10:04:42 +0100 Subject: [PATCH 7/8] cleanup: use standard Golang "slices" instead of k8s package The "slices" package has been introduced in Go 1.21 and can be used instead of the Kubernetes package that will be replaced by the standard package at one point too. Signed-off-by: Niels de Vos --- internal/cephfs/groupcontrollerserver.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/cephfs/groupcontrollerserver.go b/internal/cephfs/groupcontrollerserver.go index 42ab7aec1..2e2a5b7d6 100644 --- a/internal/cephfs/groupcontrollerserver.go +++ b/internal/cephfs/groupcontrollerserver.go @@ -20,6 +20,7 @@ import ( "context" "errors" "fmt" + "slices" "sort" "time" @@ -36,7 +37,6 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "google.golang.org/protobuf/types/known/timestamppb" - "k8s.io/utils/strings/slices" ) // validateCreateVolumeGroupSnapshotRequest validates the request for creating From 4e26bf9d8f8f89ab166b8051d265cbfa3e1123e6 Mon Sep 17 00:00:00 2001 From: Praveen M Date: Mon, 1 Apr 2024 08:53:36 +0530 Subject: [PATCH 8/8] doc: csi driver object options Signed-off-by: Praveen M --- charts/ceph-csi-cephfs/README.md | 2 ++ charts/ceph-csi-rbd/README.md | 2 ++ 2 files changed, 4 insertions(+) diff --git a/charts/ceph-csi-cephfs/README.md b/charts/ceph-csi-cephfs/README.md index 3de321687..5ee44bde0 100644 --- a/charts/ceph-csi-cephfs/README.md +++ b/charts/ceph-csi-cephfs/README.md @@ -196,6 +196,8 @@ charts and their default values. 
| `secret.adminID` | Specifies the admin ID of the cephFS secret | `` | | `secret.adminKey` | Specifies the key that corresponds to the adminID | `<Ceph auth key corresponding to ID above>` | | `selinuxMount` | Mount the host /etc/selinux inside pods to support selinux-enabled filesystems | `true` | +| `CSIDriver.fsGroupPolicy` | Specifies the fsGroupPolicy for the CSI driver object | `File` | +| `CSIDriver.seLinuxMount` | Specify for efficient SELinux volume relabeling | `true` | ### Command Line diff --git a/charts/ceph-csi-rbd/README.md b/charts/ceph-csi-rbd/README.md index 6ab8b7a36..def55d6f4 100644 --- a/charts/ceph-csi-rbd/README.md +++ b/charts/ceph-csi-rbd/README.md @@ -225,6 +225,8 @@ charts and their default values. | `secret.userKey` | Specifies the key that corresponds to the userID | `<Ceph auth key corresponding to ID above>` | | `secret.encryptionPassphrase` | Specifies the encryption passphrase of the secret | `test_passphrase` | | `selinuxMount` | Mount the host /etc/selinux inside pods to support selinux-enabled filesystems | `true` | +| `CSIDriver.fsGroupPolicy` | Specifies the fsGroupPolicy for the CSI driver object | `File` | +| `CSIDriver.seLinuxMount` | Specify for efficient SELinux volume relabeling | `true` | ### Command Line