Merge pull request #286 from red-hat-storage/sync_ds--devel

Syncing latest changes from devel for ceph-csi
openshift-merge-bot[bot] 2024-04-04 11:15:49 +00:00 committed by GitHub
commit e24107e158
9 changed files with 82 additions and 62 deletions

@@ -196,6 +196,8 @@ charts and their default values.
| `secret.adminID` | Specifies the admin ID of the cephFS secret | `<plaintext ID>` |
| `secret.adminKey` | Specifies the key that corresponds to the adminID | `<Ceph auth key corresponding to ID above>` |
| `selinuxMount` | Mount the host /etc/selinux inside pods to support selinux-enabled filesystems | `true` |
| `CSIDriver.fsGroupPolicy` | Specifies the fsGroupPolicy for the CSI driver object | `File` |
| `CSIDriver.seLinuxMount` | Enables the seLinuxMount option on the CSIDriver object for efficient SELinux volume relabeling | `true` |
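
For reference, a minimal sketch of a values override exercising the new options (the file name `my-values.yaml` is hypothetical; the keys and defaults come from the table above):

```yaml
# my-values.yaml (hypothetical) -- overrides for the ceph-csi Helm chart
selinuxMount: true      # mount the host /etc/selinux inside pods
CSIDriver:
  fsGroupPolicy: File   # fsGroupPolicy on the CSIDriver object
  seLinuxMount: true    # efficient SELinux relabeling, Kubernetes >= 1.25 only
```
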
### Command Line

@@ -12,6 +12,6 @@ spec:
attachRequired: false
podInfoOnMount: false
fsGroupPolicy: {{ .Values.CSIDriver.fsGroupPolicy }}
{{- if and (semverCompare ">= 1.25.x" .Capabilities.KubeVersion.Version) .Values.seLinuxMount }}
{{- if and (semverCompare ">= 1.25.x" .Capabilities.KubeVersion.Version) .Values.CSIDriver.seLinuxMount }}
seLinuxMount: true
{{- end }}
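
With the corrected `.Values.CSIDriver.seLinuxMount` lookup, the template renders roughly the following CSIDriver object on Kubernetes 1.25+ with the default values (a sketch of the expected output, not copied from the chart):

```yaml
apiVersion: storage.k8s.io/v1
kind: CSIDriver
spec:
  attachRequired: false
  podInfoOnMount: false
  fsGroupPolicy: File
  # emitted only when KubeVersion >= 1.25 and CSIDriver.seLinuxMount is true
  seLinuxMount: true
```
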

@@ -225,6 +225,8 @@ charts and their default values.
| `secret.userKey` | Specifies the key that corresponds to the userID | `<Ceph auth key corresponding to ID above>` |
| `secret.encryptionPassphrase` | Specifies the encryption passphrase of the secret | `test_passphrase` |
| `selinuxMount` | Mount the host /etc/selinux inside pods to support selinux-enabled filesystems | `true` |
| `CSIDriver.fsGroupPolicy` | Specifies the fsGroupPolicy for the CSI driver object | `File` |
| `CSIDriver.seLinuxMount` | Enables the seLinuxMount option on the CSIDriver object for efficient SELinux volume relabeling | `true` |
### Command Line

@@ -12,6 +12,6 @@ spec:
attachRequired: true
podInfoOnMount: false
fsGroupPolicy: {{ .Values.CSIDriver.fsGroupPolicy }}
{{- if and (semverCompare ">= 1.25.x" .Capabilities.KubeVersion.Version) .Values.seLinuxMount }}
{{- if and (semverCompare ">= 1.25.x" .Capabilities.KubeVersion.Version) .Values.CSIDriver.seLinuxMount }}
seLinuxMount: true
{{- end }}

@@ -20,6 +20,7 @@ import (
"context"
"errors"
"fmt"
"slices"
"sort"
"time"
@@ -36,7 +37,6 @@ import (
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/timestamppb"
"k8s.io/utils/strings/slices"
)
// validateCreateVolumeGroupSnapshotRequest validates the request for creating
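
The import hunks above replace `k8s.io/utils/strings/slices` with the standard library `slices` package (part of Go since 1.21). For string slices the two are drop-in compatible; a minimal standalone sketch:

```go
package main

import (
	"fmt"
	"slices" // stdlib replacement for k8s.io/utils/strings/slices
)

func main() {
	snapIDs := []string{"snap-a", "snap-b"}
	// slices.Contains behaves like the old helper's Contains for []string,
	// and being generic it also works for other comparable element types.
	fmt.Println(slices.Contains(snapIDs, "snap-a")) // true
}
```
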

@@ -150,6 +150,7 @@ func validateStriping(parameters map[string]string) error {
func (cs *ControllerServer) parseVolCreateRequest(
ctx context.Context,
req *csi.CreateVolumeRequest,
cr *util.Credentials,
) (*rbdVolume, error) {
// TODO (sbezverk) Last check for not exceeding total storage capacity
@@ -226,6 +227,13 @@ func (cs *ControllerServer) parseVolCreateRequest(
return nil, status.Error(codes.InvalidArgument, err.Error())
}
err = rbdVol.Connect(cr)
if err != nil {
log.ErrorLog(ctx, "failed to connect to volume %v: %v", rbdVol.RbdImageName, err)
return nil, status.Error(codes.Internal, err.Error())
}
// NOTE: rbdVol does not have VolID and RbdImageName populated here;
// everything else is filled in while parsing the create request.
return rbdVol, nil
@@ -324,7 +332,7 @@ func (cs *ControllerServer) CreateVolume(
return nil, status.Error(codes.InvalidArgument, err.Error())
}
defer cr.DeleteCredentials()
rbdVol, err := cs.parseVolCreateRequest(ctx, req)
rbdVol, err := cs.parseVolCreateRequest(ctx, req, cr)
if err != nil {
return nil, err
}
@@ -337,17 +345,16 @@ func (cs *ControllerServer) CreateVolume(
}
defer cs.VolumeLocks.Release(req.GetName())
err = rbdVol.Connect(cr)
if err != nil {
log.ErrorLog(ctx, "failed to connect to volume %v: %v", rbdVol.RbdImageName, err)
return nil, status.Error(codes.Internal, err.Error())
}
parentVol, rbdSnap, err := checkContentSource(ctx, req, cr)
if err != nil {
return nil, err
}
if parentVol != nil {
defer parentVol.Destroy()
}
if rbdSnap != nil {
defer rbdSnap.Destroy()
}
err = updateTopologyConstraints(rbdVol, rbdSnap)
if err != nil {
@@ -638,7 +645,6 @@ func (cs *ControllerServer) createVolumeFromSnapshot(
rbdVol *rbdVolume,
snapshotID string,
) error {
rbdSnap := &rbdSnapshot{}
if acquired := cs.SnapshotLocks.TryAcquire(snapshotID); !acquired {
log.ErrorLog(ctx, util.SnapshotOperationAlreadyExistsFmt, snapshotID)
@@ -646,7 +652,7 @@ func (cs *ControllerServer) createVolumeFromSnapshot(
}
defer cs.SnapshotLocks.Release(snapshotID)
err := genSnapFromSnapID(ctx, rbdSnap, snapshotID, cr, secrets)
rbdSnap, err := genSnapFromSnapID(ctx, snapshotID, cr, secrets)
if err != nil {
if errors.Is(err, util.ErrPoolNotFound) {
log.ErrorLog(ctx, "failed to get backend snapshot for %s: %v", snapshotID, err)
@@ -656,10 +662,11 @@ func (cs *ControllerServer) createVolumeFromSnapshot(
return status.Error(codes.Internal, err.Error())
}
defer rbdSnap.Destroy()
// update parent name (rbd image name in snapshot)
rbdSnap.RbdImageName = rbdSnap.RbdSnapName
parentVol := generateVolFromSnap(rbdSnap)
parentVol := rbdSnap.toVolume()
// as we are operating on a single cluster, reuse the connection
parentVol.conn = rbdVol.conn.Copy()
@@ -789,8 +796,8 @@ func checkContentSource(
if snapshotID == "" {
return nil, nil, status.Errorf(codes.NotFound, "volume Snapshot ID cannot be empty")
}
rbdSnap := &rbdSnapshot{}
if err := genSnapFromSnapID(ctx, rbdSnap, snapshotID, cr, req.GetSecrets()); err != nil {
rbdSnap, err := genSnapFromSnapID(ctx, snapshotID, cr, req.GetSecrets())
if err != nil {
log.ErrorLog(ctx, "failed to get backend snapshot for %s: %v", snapshotID, err)
if !errors.Is(err, ErrSnapNotFound) {
return nil, nil, status.Error(codes.Internal, err.Error())
@@ -1230,7 +1237,7 @@ func cloneFromSnapshot(
cr *util.Credentials,
parameters map[string]string,
) (*csi.CreateSnapshotResponse, error) {
vol := generateVolFromSnap(rbdSnap)
vol := rbdSnap.toVolume()
err := vol.Connect(cr)
if err != nil {
uErr := undoSnapshotCloning(ctx, rbdVol, rbdSnap, vol, cr)
@@ -1315,7 +1322,7 @@ func (cs *ControllerServer) doSnapshotClone(
cr *util.Credentials,
) (*rbdVolume, error) {
// generate cloned volume details from snapshot
cloneRbd := generateVolFromSnap(rbdSnap)
cloneRbd := rbdSnap.toVolume()
defer cloneRbd.Destroy()
// add image feature for cloneRbd
f := []string{librbd.FeatureNameLayering, librbd.FeatureNameDeepFlatten}
@@ -1429,8 +1436,8 @@ func (cs *ControllerServer) DeleteSnapshot(
}
defer cs.OperationLocks.ReleaseDeleteLock(snapshotID)
rbdSnap := &rbdSnapshot{}
if err = genSnapFromSnapID(ctx, rbdSnap, snapshotID, cr, req.GetSecrets()); err != nil {
rbdSnap, err := genSnapFromSnapID(ctx, snapshotID, cr, req.GetSecrets())
if err != nil {
// if the error is ErrPoolNotFound, the pool is already deleted and we
// don't need to worry about deleting the snapshot or omap data; return success
if errors.Is(err, util.ErrPoolNotFound) {
@@ -1443,12 +1450,16 @@ func (cs *ControllerServer) DeleteSnapshot(
// or partially complete (snap and snapOMap are garbage collected already), hence return
// success as deletion is complete
if errors.Is(err, util.ErrKeyNotFound) {
log.UsefulLog(ctx, "snapshot %s was been deleted already: %v", snapshotID, err)
return &csi.DeleteSnapshotResponse{}, nil
}
// if the error is ErrImageNotFound, we need to clean up the image from
// the trash and remove the metadata in OMAP.
if errors.Is(err, ErrImageNotFound) {
log.UsefulLog(ctx, "cleaning up leftovers of snapshot %s: %v", snapshotID, err)
err = cleanUpImageAndSnapReservation(ctx, rbdSnap, cr)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
@@ -1459,6 +1470,7 @@ func (cs *ControllerServer) DeleteSnapshot(
return nil, status.Error(codes.Internal, err.Error())
}
defer rbdSnap.Destroy()
// safeguard against parallel create or delete requests against the same
// name
@@ -1472,7 +1484,7 @@ func (cs *ControllerServer) DeleteSnapshot(
// Deleting snapshot and cloned volume
log.DebugLog(ctx, "deleting cloned rbd volume %s", rbdSnap.RbdSnapName)
rbdVol := generateVolFromSnap(rbdSnap)
rbdVol := rbdSnap.toVolume()
err = rbdVol.Connect(cr)
if err != nil {
@@ -1503,7 +1515,7 @@ func (cs *ControllerServer) DeleteSnapshot(
// cleanUpImageAndSnapReservation cleans up the image from the trash and
// snapshot reservation in rados OMAP.
func cleanUpImageAndSnapReservation(ctx context.Context, rbdSnap *rbdSnapshot, cr *util.Credentials) error {
rbdVol := generateVolFromSnap(rbdSnap)
rbdVol := rbdSnap.toVolume()
err := rbdVol.Connect(cr)
if err != nil {
return status.Error(codes.Internal, err.Error())

@@ -162,7 +162,7 @@ func checkSnapCloneExists(
snapData.ImagePool, rbdSnap.Pool)
}
vol := generateVolFromSnap(rbdSnap)
vol := rbdSnap.toVolume()
defer vol.Destroy()
err = vol.Connect(cr)
if err != nil {

@@ -949,54 +949,57 @@ func (ri *rbdImage) checkImageChainHasFeature(ctx context.Context, feature uint6
// genSnapFromSnapID generates a rbdSnapshot structure from the provided identifier, updating
// the structure with elements from on-disk snapshot metadata as well.
//
// NOTE: The returned rbdSnapshot can be non-nil in case of an error. That
// seems to be required for the DeleteSnapshot procedure, so that OMAP
// attributes can be cleaned up.
func genSnapFromSnapID(
ctx context.Context,
rbdSnap *rbdSnapshot,
snapshotID string,
cr *util.Credentials,
secrets map[string]string,
) error {
) (*rbdSnapshot, error) {
var vi util.CSIIdentifier
rbdSnap.VolID = snapshotID
err := vi.DecomposeCSIID(rbdSnap.VolID)
err := vi.DecomposeCSIID(snapshotID)
if err != nil {
log.ErrorLog(ctx, "error decoding snapshot ID (%s) (%s)", err, rbdSnap.VolID)
log.ErrorLog(ctx, "error decoding snapshot ID (%s) (%s)", err, snapshotID)
return err
return nil, err
}
rbdSnap := &rbdSnapshot{}
rbdSnap.VolID = snapshotID
rbdSnap.ClusterID = vi.ClusterID
rbdSnap.Monitors, _, err = util.GetMonsAndClusterID(ctx, rbdSnap.ClusterID, false)
if err != nil {
log.ErrorLog(ctx, "failed getting mons (%s)", err)
return err
return nil, err
}
rbdSnap.Pool, err = util.GetPoolName(rbdSnap.Monitors, cr, vi.LocationID)
if err != nil {
return err
return nil, err
}
rbdSnap.JournalPool = rbdSnap.Pool
rbdSnap.RadosNamespace, err = util.GetRadosNamespace(util.CsiConfigFile, rbdSnap.ClusterID)
if err != nil {
return err
return nil, err
}
j, err := snapJournal.Connect(rbdSnap.Monitors, rbdSnap.RadosNamespace, cr)
if err != nil {
return err
return nil, err
}
defer j.Destroy()
imageAttributes, err := j.GetImageAttributes(
ctx, rbdSnap.Pool, vi.ObjectUUID, true)
if err != nil {
return err
return rbdSnap, err
}
rbdSnap.ImageID = imageAttributes.ImageID
rbdSnap.RequestName = imageAttributes.RequestName
@@ -1009,48 +1012,48 @@ func genSnapFromSnapID(
rbdSnap.JournalPool, err = util.GetPoolName(rbdSnap.Monitors, cr, imageAttributes.JournalPoolID)
if err != nil {
// TODO: If pool is not found we may leak the image (as DeleteSnapshot will return success)
return err
return rbdSnap, err
}
}
err = rbdSnap.Connect(cr)
if err != nil {
return rbdSnap, fmt.Errorf("failed to connect to %q: %w",
rbdSnap, err)
}
defer func() {
if err != nil {
rbdSnap.Destroy()
}
}()
if err != nil {
return fmt.Errorf("failed to connect to %q: %w",
rbdSnap, err)
}
if imageAttributes.KmsID != "" && imageAttributes.EncryptionType == util.EncryptionTypeBlock {
err = rbdSnap.configureBlockEncryption(imageAttributes.KmsID, secrets)
if err != nil {
return fmt.Errorf("failed to configure block encryption for "+
return rbdSnap, fmt.Errorf("failed to configure block encryption for "+
"%q: %w", rbdSnap, err)
}
}
if imageAttributes.KmsID != "" && imageAttributes.EncryptionType == util.EncryptionTypeFile {
err = rbdSnap.configureFileEncryption(ctx, imageAttributes.KmsID, secrets)
if err != nil {
return fmt.Errorf("failed to configure file encryption for "+
return rbdSnap, fmt.Errorf("failed to configure file encryption for "+
"%q: %w", rbdSnap, err)
}
}
err = updateSnapshotDetails(rbdSnap)
if err != nil {
return fmt.Errorf("failed to update snapshot details for %q: %w", rbdSnap, err)
return rbdSnap, fmt.Errorf("failed to update snapshot details for %q: %w", rbdSnap, err)
}
return err
return rbdSnap, err
}
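
Taken together, callers of the new `genSnapFromSnapID` follow the pattern sketched below (a hedged reconstruction based on the DeleteSnapshot hunk above; note the doc comment's caveat that the returned `rbdSnapshot` may be non-nil even when an error is returned, precisely so OMAP cleanup can still happen):

```go
rbdSnap, err := genSnapFromSnapID(ctx, snapshotID, cr, secrets)
if err != nil {
	// rbdSnap can be non-nil here; DeleteSnapshot uses it to clean up the
	// image and the snapshot reservation when the image itself is gone.
	if errors.Is(err, ErrImageNotFound) && rbdSnap != nil {
		return cleanUpImageAndSnapReservation(ctx, rbdSnap, cr)
	}
	return err // hypothetical caller; the real callers wrap this in gRPC status errors
}
defer rbdSnap.Destroy()
```
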
// updateSnapshotDetails will copy the details from the rbdVolume to the
// rbdSnapshot, for example copying the size from rbdVolume to rbdSnapshot.
func updateSnapshotDetails(rbdSnap *rbdSnapshot) error {
vol := generateVolFromSnap(rbdSnap)
vol := rbdSnap.toVolume()
err := vol.Connect(rbdSnap.conn.Creds)
if err != nil {
return err

@@ -98,23 +98,24 @@ func cleanUpSnapshot(
return nil
}
func generateVolFromSnap(rbdSnap *rbdSnapshot) *rbdVolume {
vol := new(rbdVolume)
vol.ClusterID = rbdSnap.ClusterID
vol.VolID = rbdSnap.VolID
vol.Monitors = rbdSnap.Monitors
vol.Pool = rbdSnap.Pool
vol.JournalPool = rbdSnap.JournalPool
vol.RadosNamespace = rbdSnap.RadosNamespace
vol.RbdImageName = rbdSnap.RbdSnapName
vol.ImageID = rbdSnap.ImageID
// copyEncryptionConfig cannot be used here because the volume and the
// snapshot will have the same volumeID, which causes a panic in the
// copyEncryptionConfig function.
vol.blockEncryption = rbdSnap.blockEncryption
vol.fileEncryption = rbdSnap.fileEncryption
return vol
func (rbdSnap *rbdSnapshot) toVolume() *rbdVolume {
return &rbdVolume{
rbdImage: rbdImage{
ClusterID: rbdSnap.ClusterID,
VolID: rbdSnap.VolID,
Monitors: rbdSnap.Monitors,
Pool: rbdSnap.Pool,
JournalPool: rbdSnap.JournalPool,
RadosNamespace: rbdSnap.RadosNamespace,
RbdImageName: rbdSnap.RbdSnapName,
ImageID: rbdSnap.ImageID,
// copyEncryptionConfig cannot be used here because the volume and the
// snapshot will have the same volumeID, which causes a panic in the
// copyEncryptionConfig function.
blockEncryption: rbdSnap.blockEncryption,
fileEncryption: rbdSnap.fileEncryption,
},
}
}
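
Since `rbdVolume` embeds `rbdImage`, the helper can populate the embedded struct in one composite literal instead of field-by-field assignments. Call sites keep the connect/destroy pattern already visible in `checkSnapCloneExists` above; a condensed sketch:

```go
vol := rbdSnap.toVolume()
defer vol.Destroy()
if err := vol.Connect(cr); err != nil {
	return err
}
// vol now carries the snapshot's identity (pool, namespace, image ID) plus
// its encryption handles, without calling copyEncryptionConfig.
```
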
func undoSnapshotCloning(