rbd: Add support for rbd ROX PVC mounting
If the PVC access mode is ReadOnlyMany or single-node read-only, mount the rbd device path to the staging path as read-only to prevent write operations. If the PVC access mode is read-only, also map the rbd image as read-only. Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
This commit is contained in:
parent da40d8e05e
commit 649aeb7aaf

e2e/rbd.go (118 lines changed)
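The hunks below touch the e2e suite, the csicommon mount-option helpers, and the RBD node server (stageTransaction, mountVolumeToStagePath, createPath, and the rbdVolume struct). As a rough orientation before the diff, the decision this change introduces can be sketched as follows. This is an illustrative standalone example using the CSI spec Go bindings, not the actual ceph-csi code; isReaderOnly is a hypothetical helper name.

package main

import (
	"fmt"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

// isReaderOnly mirrors the check this commit adds: both reader-only access
// modes should lead to a read-only map and a read-only mount.
func isReaderOnly(mode csi.VolumeCapability_AccessMode_Mode) bool {
	return mode == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY ||
		mode == csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY
}

func main() {
	volCap := &csi.VolumeCapability{
		AccessMode: &csi.VolumeCapability_AccessMode{
			Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY,
		},
	}
	if isReaderOnly(volCap.GetAccessMode().GetMode()) {
		// For such volumes the commit maps the image read-only and mounts
		// the staging path with the "ro" mount option.
		fmt.Println("read-only map and mount")
	}
}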
@@ -5,6 +5,8 @@ import (
 	"strings"
 
 	. "github.com/onsi/ginkgo" // nolint
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
@@ -602,6 +604,122 @@ var _ = Describe("RBD", func() {
 				createRBDStorageClass(f.ClientSet, f, nil, nil)
 			})
 
+			By("create ROX PVC clone and mount it to multiple pods", func() {
+				v, err := f.ClientSet.Discovery().ServerVersion()
+				if err != nil {
+					e2elog.Logf("failed to get server version with error %v", err)
+					Fail(err.Error())
+				}
+				// snapshot beta is only supported from v1.17+
+				if v.Major > "1" || (v.Major == "1" && v.Minor >= "17") {
+					// create pvc and bind it to an app
+					pvc, err := loadPVC(pvcPath)
+					if err != nil {
+						Fail(err.Error())
+					}
+
+					pvc.Namespace = f.UniqueName
+					app, err := loadApp(appPath)
+					if err != nil {
+						Fail(err.Error())
+					}
+					app.Namespace = f.UniqueName
+					err = createPVCAndApp("", f, pvc, app, deployTimeout)
+					if err != nil {
+						Fail(err.Error())
+					}
+
+					// delete pod as we should not create snapshot for in-use pvc
+					err = deletePod(app.Name, app.Namespace, f.ClientSet, deployTimeout)
+					if err != nil {
+						Fail(err.Error())
+					}
+
+					snap := getSnapshot(snapshotPath)
+					snap.Namespace = f.UniqueName
+					snap.Spec.Source.PersistentVolumeClaimName = &pvc.Name
+
+					err = createSnapshot(&snap, deployTimeout)
+					if err != nil {
+						Fail(err.Error())
+					}
+
+					pvcClone, err := loadPVC(pvcClonePath)
+					if err != nil {
+						Fail(err.Error())
+					}
+
+					// create clone PVC as ROX
+					pvcClone.Namespace = f.UniqueName
+					pvcClone.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}
+					err = createPVCAndvalidatePV(f.ClientSet, pvcClone, deployTimeout)
+					if err != nil {
+						Fail(err.Error())
+					}
+					appClone, err := loadApp(appClonePath)
+					if err != nil {
+						Fail(err.Error())
+					}
+
+					totalCount := 4
+					appClone.Namespace = f.UniqueName
+					appClone.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvcClone.Name
+
+					// create pvc and app
+					for i := 0; i < totalCount; i++ {
+						name := fmt.Sprintf("%s%d", f.UniqueName, i)
+						label := map[string]string{
+							"app": name,
+						}
+						appClone.Labels = label
+						appClone.Name = name
+						err = createApp(f.ClientSet, appClone, deployTimeout)
+						if err != nil {
+							Fail(err.Error())
+						}
+					}
+
+					for i := 0; i < totalCount; i++ {
+						name := fmt.Sprintf("%s%d", f.UniqueName, i)
+						opt := metav1.ListOptions{
+							LabelSelector: fmt.Sprintf("app=%s", name),
+						}
+
+						filePath := appClone.Spec.Containers[0].VolumeMounts[0].MountPath + "/test"
+						_, stdErr := execCommandInPodAndAllowFail(f, fmt.Sprintf("echo 'Hello World' > %s", filePath), appClone.Namespace, &opt)
+						readOnlyErr := fmt.Sprintf("cannot create %s: Read-only file system", filePath)
+						if !strings.Contains(stdErr, readOnlyErr) {
+							Fail(stdErr)
+						}
+					}
+
+					// delete app
+					for i := 0; i < totalCount; i++ {
+						name := fmt.Sprintf("%s%d", f.UniqueName, i)
+						appClone.Name = name
+						err = deletePod(appClone.Name, appClone.Namespace, f.ClientSet, deployTimeout)
+						if err != nil {
+							Fail(err.Error())
+						}
+					}
+					// delete pvc clone
+					err = deletePVCAndValidatePV(f.ClientSet, pvcClone, deployTimeout)
+					if err != nil {
+						Fail(err.Error())
+					}
+					// delete snapshot
+					err = deleteSnapshot(&snap, deployTimeout)
+					if err != nil {
+						Fail(err.Error())
+					}
+					// delete parent pvc
+					err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout)
+					if err != nil {
+						Fail(err.Error())
+					}
+				}
+			})
+
 			// Make sure this should be last testcase in this file, because
 			// it deletes pool
 			By("Create a PVC and Delete PVC when backend pool deleted", func() {
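The write check in the test above relies on the kernel rejecting writes on a read-only mount. Outside the e2e harness, the same expectation looks roughly like this; the mount point path is made up for illustration.

package main

import (
	"errors"
	"fmt"
	"os"
	"syscall"
)

func main() {
	// "/mnt/rox-pvc" stands in for the mount point of a ReadOnlyMany RBD volume.
	_, err := os.OpenFile("/mnt/rox-pvc/test", os.O_CREATE|os.O_WRONLY, 0o644)
	if errors.Is(err, syscall.EROFS) {
		fmt.Println("write rejected: read-only file system, as the e2e test expects")
	}
}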
@@ -194,3 +194,13 @@ func ConstructMountOptions(mountOptions []string, volCap *csi.VolumeCapability)
 	}
 	return mountOptions
 }
+
+// MountOptionContains checks the opt is present in mountOptions
+func MountOptionContains(mountOptions []string, opt string) bool {
+	for _, mnt := range mountOptions {
+		if mnt == opt {
+			return true
+		}
+	}
+	return false
+}
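MountOptionContains is a plain membership check; in this change it is used to avoid appending a duplicate "ro" flag and to detect whether the final option set is read-only. A minimal usage sketch, with a local copy of the helper for illustration since it lives inside this repository:

package main

import "fmt"

// mountOptionContains is a local copy of csicommon.MountOptionContains.
func mountOptionContains(mountOptions []string, opt string) bool {
	for _, mnt := range mountOptions {
		if mnt == opt {
			return true
		}
	}
	return false
}

func main() {
	opt := []string{"_netdev", "ro"}
	// The node server only appends "ro" when it is not already present, and
	// treats the mount as read-only whenever "ro" ends up in the list.
	fmt.Println(mountOptionContains(opt, "ro")) // true
}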
|
@ -204,7 +204,7 @@ func (ns *NodeServer) stageTransaction(ctx context.Context, req *csi.NodeStageVo
|
|||||||
transaction := stageTransaction{}
|
transaction := stageTransaction{}
|
||||||
|
|
||||||
var err error
|
var err error
|
||||||
|
var readOnly bool
|
||||||
var cr *util.Credentials
|
var cr *util.Credentials
|
||||||
cr, err = util.NewUserCredentials(req.GetSecrets())
|
cr, err = util.NewUserCredentials(req.GetSecrets())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -219,6 +219,13 @@ func (ns *NodeServer) stageTransaction(ctx context.Context, req *csi.NodeStageVo
 	}
 	defer volOptions.Destroy()
 
+	// Allow image to be mounted on multiple nodes if it is ROX
+	if req.VolumeCapability.AccessMode.Mode == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY {
+		klog.V(3).Infof(util.Log(ctx, "setting disableInUseChecks on rbd volume to: %v"), req.GetVolumeId)
+		volOptions.DisableInUseChecks = true
+		volOptions.readOnly = true
+	}
+
 	// Mapping RBD image
 	var devicePath string
 	devicePath, err = attachRBDImage(ctx, volOptions, cr)
@@ -248,15 +255,16 @@ func (ns *NodeServer) stageTransaction(ctx context.Context, req *csi.NodeStageVo
 	transaction.isStagePathCreated = true
 
 	// nodeStage Path
-	err = ns.mountVolumeToStagePath(ctx, req, staticVol, stagingTargetPath, devicePath)
+	readOnly, err = ns.mountVolumeToStagePath(ctx, req, staticVol, stagingTargetPath, devicePath)
 	if err != nil {
 		return transaction, err
 	}
 	transaction.isMounted = true
 
-	// #nosec - allow anyone to write inside the target path
-	err = os.Chmod(stagingTargetPath, 0777)
+	if !readOnly {
+		// #nosec - allow anyone to write inside the target path
+		err = os.Chmod(stagingTargetPath, 0777)
+	}
 	return transaction, err
 }
 
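The !readOnly guard presumably exists because chmod(2) on a read-only filesystem fails with EROFS, which would otherwise fail NodeStageVolume right after a successful ROX mount. A tiny illustration with a made-up path:

package main

import (
	"fmt"
	"os"
)

func main() {
	// On a path that sits on a mount carrying the "ro" option, Chmod fails
	// with "read-only file system", which is why the driver now skips it.
	if err := os.Chmod("/mnt/rox-staging", 0o777); err != nil {
		fmt.Println(err)
	}
}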
@@ -388,7 +396,8 @@ func getLegacyVolumeName(mountPath string) (string, error) {
 	return volName, nil
 }
 
-func (ns *NodeServer) mountVolumeToStagePath(ctx context.Context, req *csi.NodeStageVolumeRequest, staticVol bool, stagingPath, devicePath string) error {
+func (ns *NodeServer) mountVolumeToStagePath(ctx context.Context, req *csi.NodeStageVolumeRequest, staticVol bool, stagingPath, devicePath string) (bool, error) {
+	readOnly := false
 	fsType := req.GetVolumeCapability().GetMount().GetFsType()
 	diskMounter := &mount.SafeFormatAndMount{Interface: ns.mounter, Exec: utilexec.New()}
 	// rbd images are thin-provisioned and return zeros for unwritten areas. A freshly created
@@ -404,10 +413,29 @@ func (ns *NodeServer) mountVolumeToStagePath(ctx context.Context, req *csi.NodeS
 	existingFormat, err := diskMounter.GetDiskFormat(devicePath)
 	if err != nil {
 		klog.Errorf(util.Log(ctx, "failed to get disk format for path %s, error: %v"), devicePath, err)
-		return err
+		return readOnly, err
 	}
 
-	if existingFormat == "" && !staticVol {
+	opt := []string{"_netdev"}
+	opt = csicommon.ConstructMountOptions(opt, req.GetVolumeCapability())
+	isBlock := req.GetVolumeCapability().GetBlock() != nil
+	rOnly := "ro"
+
+	if req.VolumeCapability.AccessMode.Mode == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY ||
+		req.VolumeCapability.AccessMode.Mode == csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY {
+		if !csicommon.MountOptionContains(opt, rOnly) {
+			opt = append(opt, rOnly)
+		}
+	}
+	if csicommon.MountOptionContains(opt, rOnly) {
+		readOnly = true
+	}
+
+	if fsType == "xfs" {
+		opt = append(opt, "nouuid")
+	}
+
+	if existingFormat == "" && !staticVol && !readOnly {
 		args := []string{}
 		if fsType == "ext4" {
 			args = []string{"-m0", "-Enodiscard,lazy_itable_init=1,lazy_journal_init=1", devicePath}
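To see what the rearranged option handling produces, here is a self-contained sketch of the same logic for a ReadOnlyMany xfs volume. It uses local stand-ins for the csicommon helpers and plain booleans for the CSI request fields; it is not the driver code itself.

package main

import "fmt"

func contains(opts []string, opt string) bool {
	for _, o := range opts {
		if o == opt {
			return true
		}
	}
	return false
}

func main() {
	// Stand-in inputs: a reader-only (ReadOnlyMany) request for an xfs volume.
	readerOnly := true
	fsType := "xfs"

	opt := []string{"_netdev"}
	if readerOnly && !contains(opt, "ro") {
		opt = append(opt, "ro")
	}
	readOnly := contains(opt, "ro")
	if fsType == "xfs" {
		opt = append(opt, "nouuid")
	}

	// Prints "[_netdev ro nouuid] true"; with readOnly true the mkfs step is skipped.
	fmt.Println(opt, readOnly)
}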
@@ -417,19 +445,12 @@ func (ns *NodeServer) mountVolumeToStagePath(ctx context.Context, req *csi.NodeS
 		if len(args) > 0 {
 			cmdOut, cmdErr := diskMounter.Exec.Command("mkfs."+fsType, args...).CombinedOutput()
 			if cmdErr != nil {
-				klog.Errorf(util.Log(ctx, "failed to run mkfs error: %v, output: %v"), cmdErr, cmdOut)
-				return cmdErr
+				klog.Errorf(util.Log(ctx, "failed to run mkfs error: %v, output: %v"), cmdErr, string(cmdOut))
+				return readOnly, cmdErr
 			}
 		}
 	}
 
-	opt := []string{"_netdev"}
-	if fsType == "xfs" {
-		opt = append(opt, "nouuid")
-	}
-	opt = csicommon.ConstructMountOptions(opt, req.GetVolumeCapability())
-	isBlock := req.GetVolumeCapability().GetBlock() != nil
-
 	if isBlock {
 		opt = append(opt, "bind")
 		err = diskMounter.Mount(devicePath, stagingPath, fsType, opt)
@@ -445,7 +466,7 @@ func (ns *NodeServer) mountVolumeToStagePath(ctx context.Context, req *csi.NodeS
 			req.GetVolumeId(),
 			err)
 	}
-	return err
+	return readOnly, err
 }
 
 func (ns *NodeServer) mountVolume(ctx context.Context, stagingPath string, req *csi.NodePublishVolumeRequest) error {
@@ -225,6 +225,9 @@ func createPath(ctx context.Context, volOpt *rbdVolume, cr *util.Credentials) (s
 	// Update options with device type selection
 	mapOptions = append(mapOptions, "--device-type", accessType)
 
+	if volOpt.readOnly {
+		mapOptions = append(mapOptions, "--read-only")
+	}
 	// Execute map
 	output, err := execCommand(rbd, mapOptions)
 	if err != nil {
@@ -100,6 +100,7 @@ type rbdVolume struct {
 	VolSize            int64 `json:"volSize"`
 	DisableInUseChecks bool  `json:"disableInUseChecks"`
 	Encrypted          bool
+	readOnly           bool
 	KMS                util.EncryptionKMS
 
 	// conn is a connection to the Ceph cluster obtained from a ConnPool