Merge pull request #188 from ceph/devel

Sync the upstream changes from `ceph/ceph-csi:devel` into the `devel` branch.
Commit cf3f32f999 authored by OpenShift Merge Robot on 2023-09-04 08:17:12 +02:00, committed by GitHub
4 changed files with 199 additions and 15 deletions
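
For readers who want to reproduce such a sync locally, a minimal sketch of the usual workflow follows; the remote names `upstream` (ceph/ceph-csi) and `origin` (the fork) are illustrative assumptions and are not taken from this PR:

    # fetch the latest upstream changes and merge them into the local devel branch
    git fetch upstream
    git checkout devel
    git merge upstream/devel
    # push the synced branch to the fork so a pull request like this one can be opened
    git push origin devel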


@@ -186,7 +186,7 @@ run-e2e: NAMESPACE ?= cephcsi-e2e-$(shell uuidgen | cut -d- -f1)
run-e2e:
@test -e e2e.test || $(MAKE) e2e.test
cd e2e && \
../e2e.test -test.v -ginkgo.timeout="${E2E_TIMEOUT}" --deploy-timeout="${DEPLOY_TIMEOUT}" --cephcsi-namespace=$(NAMESPACE) $(E2E_ARGS)
../e2e.test -test.v -ginkgo.v -ginkgo.timeout="${E2E_TIMEOUT}" --deploy-timeout="${DEPLOY_TIMEOUT}" --cephcsi-namespace=$(NAMESPACE) $(E2E_ARGS)
.container-cmd:
@test -n "$(shell which $(CONTAINER_CMD) 2>/dev/null)" || { echo "Missing container support, install Podman or Docker"; exit 1; }
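
The added `-ginkgo.v` flag makes the Ginkgo runner verbose on every run. A hedged usage sketch of the target, assuming it is invoked from the repository root; the namespace and timeout values are illustrative, and any extra test flags can still be passed through the existing E2E_ARGS variable:

    # build e2e.test if needed, then run the suite with verbose Ginkgo output
    make run-e2e NAMESPACE=cephcsi-e2e-demo E2E_TIMEOUT=120m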


@@ -2030,6 +2030,167 @@ var _ = Describe(cephfsType, func() {
}
})
By("create RWX clone from ROX PVC", func() {
pvc, err := loadPVC(pvcPath)
if err != nil {
framework.Failf("failed to load PVC: %v", err)
}
pvc.Namespace = f.UniqueName
err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
if err != nil {
framework.Failf("failed to create PVC: %v", err)
}
_, pv, err := getPVCAndPV(f.ClientSet, pvc.Name, pvc.Namespace)
if err != nil {
framework.Failf("failed to get PV object for %s: %v", pvc.Name, err)
}
app, err := loadApp(appPath)
if err != nil {
framework.Failf("failed to load application: %v", err)
}
app.Namespace = f.UniqueName
app.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvc.Name
appLabels := map[string]string{
appKey: appLabel,
}
app.Labels = appLabels
optApp := metav1.ListOptions{
LabelSelector: fmt.Sprintf("%s=%s", appKey, appLabels[appKey]),
}
err = writeDataInPod(app, &optApp, f)
if err != nil {
framework.Failf("failed to write data: %v", err)
}
appTestFilePath := app.Spec.Containers[0].VolumeMounts[0].MountPath + "/test"
err = appendToFileInContainer(f, app, appTestFilePath, "hello", &optApp)
if err != nil {
framework.Failf("failed to append data: %v", err)
}
parentFileSum, err := calculateSHA512sum(f, app, appTestFilePath, &optApp)
if err != nil {
framework.Failf("failed to get SHA512 sum for file: %v", err)
}
snap := getSnapshot(snapshotPath)
snap.Namespace = f.UniqueName
snap.Spec.Source.PersistentVolumeClaimName = &pvc.Name
err = createSnapshot(&snap, deployTimeout)
if err != nil {
framework.Failf("failed to create snapshot: %v", err)
}
validateCephFSSnapshotCount(f, 1, subvolumegroup, pv)
pvcClone, err := loadPVC(pvcClonePath)
if err != nil {
framework.Failf("failed to load PVC: %v", err)
}
// Snapshot-backed volumes support read-only access modes only.
pvcClone.Spec.DataSource.Name = snap.Name
pvcClone.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}
pvcClone.Namespace = f.UniqueName
err = createPVCAndvalidatePV(c, pvcClone, deployTimeout)
if err != nil {
framework.Failf("failed to create PVC: %v", err)
}
validateSubvolumeCount(f, 1, fileSystemName, subvolumegroup)
// create RWX clone from ROX PVC
pvcRWXClone, err := loadPVC(pvcSmartClonePath)
if err != nil {
framework.Failf("failed to load PVC: %v", err)
}
pvcRWXClone.Spec.DataSource.Name = pvcClone.Name
pvcRWXClone.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadWriteMany}
pvcRWXClone.Namespace = f.UniqueName
appClone, err := loadApp(appPath)
if err != nil {
framework.Failf("failed to load application: %v", err)
}
appCloneLabels := map[string]string{
appKey: appCloneLabel,
}
appClone.Name = f.UniqueName + "-app"
appClone.Namespace = f.UniqueName
appClone.Labels = appCloneLabels
appClone.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvcRWXClone.Name
optAppClone := metav1.ListOptions{
LabelSelector: fmt.Sprintf("%s=%s", appKey, appCloneLabels[appKey]),
}
err = createPVCAndApp("", f, pvcRWXClone, appClone, deployTimeout)
if err != nil {
framework.Failf("failed to create PVC and app: %v", err)
}
// 2 subvolumes should be created: 1 for the parent PVC and 1 for the
// RWX clone PVC.
validateSubvolumeCount(f, 2, fileSystemName, subvolumegroup)
appCloneTestFilePath := appClone.Spec.Containers[0].VolumeMounts[0].MountPath + "/test"
cloneFileSum, err := calculateSHA512sum(f, appClone, appCloneTestFilePath, &optAppClone)
if err != nil {
framework.Failf("failed to get SHA512 sum for file: %v", err)
}
if parentFileSum != cloneFileSum {
framework.Failf(
"SHA512 sums of files in parent and ROX should not differ. parentFileSum: %s cloneFileSum: %s",
parentFileSum,
cloneFileSum)
}
// Now try to write to the PVC as it's an RWX PVC
err = appendToFileInContainer(f, app, appCloneTestFilePath, "testing", &optApp)
if err != nil {
framework.Failf("failed to append data: %v", err)
}
// Deleting the snapshot before deleting pvcClone should succeed. It will be
// deleted once all volumes that are backed by this snapshot are gone.
err = deleteSnapshot(&snap, deployTimeout)
if err != nil {
framework.Failf("failed to delete snapshot: %v", err)
}
// delete parent pvc and app
err = deletePVCAndApp("", f, pvc, app)
if err != nil {
framework.Failf("failed to delete PVC or application: %v", err)
}
// delete ROX clone PVC
err = deletePVCAndValidatePV(c, pvcClone, deployTimeout)
if err != nil {
framework.Failf("failed to delete PVC or application: %v", err)
}
// delete RWX clone PVC and app
err = deletePVCAndApp("", f, pvcRWXClone, appClone)
if err != nil {
framework.Failf("failed to delete PVC or application: %v", err)
}
validateSubvolumeCount(f, 0, fileSystemName, subvolumegroup)
validateOmapCount(f, 0, cephfsType, metadataPool, volumesType)
err = deleteResource(cephFSExamplePath + "storageclass.yaml")
if err != nil {
framework.Failf("failed to delete CephFS storageclass: %v", err)
}
err = createCephfsStorageClass(f.ClientSet, f, false, nil)
if err != nil {
framework.Failf("failed to create CephFS storageclass: %v", err)
}
})
if testCephFSFscrypt {
kmsToTest := map[string]kmsConfig{
"secrets-metadata-test": secretsMetadataKMS,

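At the Kubernetes level, the new test exercises a claim chain of parent PVC -> snapshot -> ROX snapshot-backed clone -> RWX clone. A hedged manifest sketch of the final step follows; the storage class and PVC names are illustrative and do not come from the test YAMLs:

    # create an RWX clone from an existing ROX, snapshot-backed PVC
    kubectl apply -f - <<EOF
    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: cephfs-rwx-clone
    spec:
      storageClassName: csi-cephfs-sc   # illustrative storage class name
      accessModes:
        - ReadWriteMany
      resources:
        requests:
          storage: 1Gi
      dataSource:
        kind: PersistentVolumeClaim
        name: cephfs-rox-clone          # the ROX, snapshot-backed PVC
    EOF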

@@ -214,6 +214,7 @@ func checkValidCreateVolumeRequest(
sID *store.SnapshotIdentifier,
req *csi.CreateVolumeRequest,
) error {
volCaps := req.GetVolumeCapabilities()
switch {
case pvID != nil:
if vol.Size < parentVol.Size {
@@ -224,12 +225,12 @@ func checkValidCreateVolumeRequest(
vol.Size)
}
if vol.BackingSnapshot {
return errors.New("cloning snapshot-backed volumes is currently not supported")
if parentVol.BackingSnapshot && store.IsVolumeCreateRO(volCaps) {
return errors.New("creating read-only clone from a snapshot-backed volume is not supported")
}
case sID != nil:
if vol.BackingSnapshot {
volCaps := req.GetVolumeCapabilities()
isRO := store.IsVolumeCreateRO(volCaps)
if !isRO {
return errors.New("backingSnapshot may be used only with read-only access modes")
@@ -298,6 +299,23 @@ func (cs *ControllerServer) CreateVolume(
return nil, status.Error(codes.InvalidArgument, err.Error())
}
// As we are trying to create an RWX volume from a backing snapshot, we need to
// retrieve the snapshot details from the backing snapshot and create a
// subvolume clone from the snapshot.
if parentVol != nil && parentVol.BackingSnapshot && !store.IsVolumeCreateRO(req.VolumeCapabilities) {
// unset pvID as we don't have a real subvolume for the parent volumeID, since it's a backing snapshot
pvID = nil
parentVol, _, sID, err = store.NewSnapshotOptionsFromID(ctx, parentVol.BackingSnapshotID, cr,
req.GetSecrets(), cs.ClusterName, cs.SetMetadata)
if err != nil {
if errors.Is(err, cerrors.ErrSnapNotFound) {
return nil, status.Error(codes.NotFound, err.Error())
}
return nil, status.Error(codes.Internal, err.Error())
}
}
vID, err := store.CheckVolExists(ctx, volOptions, parentVol, pvID, sID, cr, cs.ClusterName, cs.SetMetadata)
if err != nil {
if cerrors.IsCloneRetryError(err) {


@@ -513,6 +513,7 @@ func (rs *ReplicationServer) DemoteVolume(ctx context.Context,
// store the image creation time for resync
_, err = rbdVol.GetMetadata(imageCreationTimeKey)
if err != nil && errors.Is(err, librbd.ErrNotFound) {
log.DebugLog(ctx, "setting image creation time %s for %s", creationTime, rbdVol)
err = rbdVol.SetMetadata(imageCreationTimeKey, timestampToString(creationTime))
}
if err != nil {
@@ -670,8 +671,11 @@ func (rs *ReplicationServer) ResyncVolume(ctx context.Context,
// The image creation time is stored in the image metadata. It looks like
// `"seconds:1692879841 nanos:631526669"`.
// If the image gets resynced, the local image creation time will be
// lost; if the key is not present in the image metadata, then we can
// assume that the image has already been resynced.
savedImageTime, err := rbdVol.GetMetadata(imageCreationTimeKey)
if err != nil {
if err != nil && !errors.Is(err, librbd.ErrNotFound) {
return nil, status.Errorf(codes.Internal,
"failed to get %s key from image metadata for %s: %s",
imageCreationTimeKey,
@@ -679,18 +683,19 @@ func (rs *ReplicationServer) ResyncVolume(ctx context.Context,
err.Error())
}
st, err := timestampFromString(savedImageTime)
if err != nil {
return nil, status.Errorf(codes.Internal, "failed to parse image creation time: %s", err.Error())
if savedImageTime != "" {
st, sErr := timestampFromString(savedImageTime)
if sErr != nil {
return nil, status.Errorf(codes.Internal, "failed to parse image creation time: %s", sErr.Error())
}
log.DebugLog(ctx, "image %s, savedImageTime=%v, currentImageTime=%v", rbdVol, st, creationTime.AsTime())
if req.Force && st.Equal(creationTime.AsTime()) {
err = rbdVol.ResyncVol(localStatus)
if err != nil {
return nil, getGRPCError(err)
}
}
}
if !ready {
err = checkVolumeResyncStatus(localStatus)