rbd: add skipForceFlatten flag
Added a skipForceFlatten flag to skip the image depth check and skip image flattening. This is useful when the running kernel supports mapping rbd images with the deep-flatten feature but is not listed in cephcsi's table of kernels known to support it.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
parent e3a63029a3
commit b085577a4f
@@ -116,6 +116,9 @@ spec:
             - "--drivername=$(DRIVER_NAME)"
             - "--rbdhardmaxclonedepth={{ .Values.provisioner.hardMaxCloneDepth }}"
             - "--rbdsoftmaxclonedepth={{ .Values.provisioner.softMaxCloneDepth }}"
+{{- if .Values.provisioner.skipForceFlatten }}
+            - "--skipforceflatten={{ .Values.provisioner.skipForceFlatten }}"
+{{- end }}
           env:
             - name: POD_IP
               valueFrom:
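Note that the new argument is wrapped in a `{{- if }}` guard, so `--skipforceflatten` is only rendered into the provisioner args when the chart value is actually set; the two clone-depth flags above it are always emitted.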
@@ -113,6 +113,9 @@ provisioner:
   # Soft limit for maximum number of nested volume clones that are taken before
   # a flatten occurs
   softMaxCloneDepth: 4
+  # skip image flattening if the kernel supports mapping of rbd images
+  # which have the deep-flatten feature
+  # skipForceFlatten: false
 
 httpMetrics:
   # Metrics only available for cephcsi/cephcsi => 1.2.0
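With the chart defaults the key ships commented out, so the template guard above evaluates to false and no extra argument is passed. Uncommenting it as `skipForceFlatten: true` (or passing the equivalent `--set provisioner.skipForceFlatten=true` at install time) is what activates the flag.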
@@ -79,6 +79,10 @@ func init() {
 
 	flag.UintVar(&conf.RbdHardMaxCloneDepth, "rbdhardmaxclonedepth", 8, "Hard limit for maximum number of nested volume clones that are taken before a flatten occurs")
 	flag.UintVar(&conf.RbdSoftMaxCloneDepth, "rbdsoftmaxclonedepth", 4, "Soft limit for maximum number of nested volume clones that are taken before a flatten occurs")
+
+	flag.BoolVar(&conf.SkipForceFlatten, "skipforceflatten", false,
+		"skip image flattening if the kernel supports mapping of rbd images that have the deep-flatten feature")
+
 	flag.BoolVar(&conf.Version, "version", false, "Print cephcsi version information")
 
 	klog.InitFlags(nil)
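For readers less familiar with the pattern, the new option follows the same stdlib flag plumbing as the existing clone-depth flags: a field on the shared config struct bound to a CLI flag at init time. Below is a minimal self-contained sketch of that pattern; the names mirror the diff, but this is an illustration, not the cephcsi source.

package main

import (
	"flag"
	"fmt"
)

// Config mirrors, in miniature, the cephcsi util.Config field added in this
// commit; only the one field is shown here.
type Config struct {
	SkipForceFlatten bool
}

func main() {
	var conf Config
	// Same pattern as the diff above: bind the CLI flag to the struct field.
	flag.BoolVar(&conf.SkipForceFlatten, "skipforceflatten", false,
		"skip image flattening if the kernel supports mapping of rbd images that have the deep-flatten feature")
	flag.Parse()
	fmt.Println("SkipForceFlatten:", conf.SkipForceFlatten)
}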
@@ -44,6 +44,7 @@ make image-cephcsi
 | `--domainlabels` | _empty_ | Kubernetes node labels to use as CSI domain labels for topology aware provisioning, should be a comma separated value (ex:= "failure-domain/region,failure-domain/zone") |
 | `--rbdhardmaxclonedepth` | `8` | Hard limit for maximum number of nested volume clones that are taken before a flatten occurs |
 | `--rbdsoftmaxclonedepth` | `4` | Soft limit for maximum number of nested volume clones that are taken before a flatten occurs |
+| `--skipforceflatten` | `false` | Skip image flattening on kernels < 5.2 that nevertheless support mapping of rbd images with the deep-flatten feature |
 
 **Available volume parameters:**
 
@@ -58,6 +58,8 @@ var (
 
 	// rbdSoftMaxCloneDepth is the soft limit for maximum number of nested volume clones that are taken before a flatten occurs
 	rbdSoftMaxCloneDepth uint
+
+	skipForceFlatten bool
 )
 
 // NewDriver returns new rbd driver
@@ -111,6 +113,7 @@ func (r *Driver) Run(conf *util.Config, cachePersister util.CachePersister) {
 	// update clone soft and hard limit
 	rbdHardMaxCloneDepth = conf.RbdHardMaxCloneDepth
 	rbdSoftMaxCloneDepth = conf.RbdSoftMaxCloneDepth
+	skipForceFlatten = conf.SkipForceFlatten
 	// Create instances of the volume and snapshot journal
 	volJournal = journal.NewCSIVolumeJournal(CSIInstanceID)
 	snapJournal = journal.NewCSISnapshotJournal(CSIInstanceID)
@@ -27,6 +27,7 @@ import (
 	"github.com/ceph/ceph-csi/internal/journal"
 	"github.com/ceph/ceph-csi/internal/util"
 
+	librbd "github.com/ceph/go-ceph/rbd"
 	"github.com/container-storage-interface/spec/lib/go/csi"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
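The new librbd import alias matters later in this file: it supplies the `librbd.FeatureDeepFlatten` feature bit that the staging path checks the image chain for, as shown in the hunks below.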
@@ -60,6 +61,22 @@ type stageTransaction struct {
 	devicePath string
 }
 
+var (
+	kernelRelease = ""
+	// deepFlattenSupport holds the list of kernels that support mapping rbd
+	// images with the deep-flatten image feature
+	deepFlattenSupport = []util.KernelVersion{
+		{
+			Version:      5,
+			PatchLevel:   2,
+			SubLevel:     0,
+			ExtraVersion: 0,
+			Distribution: "",
+			Backport:     false,
+		}, // standard 5.2+ versions
+	}
+)
+
 // NodeStageVolume mounts the volume to a staging path on the node.
 // Implementation notes:
 // - stagingTargetPath is the directory passed in the request where the volume needs to be staged
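The gate expressed by deepFlattenSupport is a plain kernel-version comparison against 5.2.0. The sketch below shows an illustrative, self-contained version of such a check; it is not cephcsi's `util.CheckKernelSupport`, which additionally understands distribution-specific backports (hence the `Distribution` and `Backport` fields above).

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// minDeepFlattenKernel is the 5.2.0 threshold from the diff above.
var minDeepFlattenKernel = [3]int{5, 2, 0}

// supportsDeepFlatten is an illustrative stand-in for util.CheckKernelSupport:
// it parses "major.minor.patch" out of a release string such as
// "5.4.0-42-generic" and compares it to the threshold. The real helper also
// handles distribution backports.
func supportsDeepFlatten(release string) bool {
	fields := strings.FieldsFunc(release, func(r rune) bool {
		return r == '.' || r == '-'
	})
	var v [3]int
	for i := 0; i < 3 && i < len(fields); i++ {
		n, err := strconv.Atoi(fields[i])
		if err != nil {
			return false // unparsable release: assume no support
		}
		v[i] = n
	}
	for i := range v {
		if v[i] != minDeepFlattenKernel[i] {
			return v[i] > minDeepFlattenKernel[i]
		}
	}
	return true // exactly 5.2.0
}

func main() {
	for _, r := range []string{"4.18.0-193.el8.x86_64", "5.2.0", "5.8.1-arch1-1"} {
		fmt.Printf("%-25s deep-flatten mapping: %v\n", r, supportsDeepFlatten(r))
	}
}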
@@ -205,6 +222,8 @@ func (ns *NodeServer) stageTransaction(ctx context.Context, req *csi.NodeStageVo
 
 	var err error
 	var readOnly bool
+	var feature bool
+
 	var cr *util.Credentials
 	cr, err = util.NewUserCredentials(req.GetSecrets())
 	if err != nil {
@@ -226,6 +245,27 @@ func (ns *NodeServer) stageTransaction(ctx context.Context, req *csi.NodeStageVo
 		volOptions.readOnly = true
 	}
 
+	if kernelRelease == "" {
+		// fetch the current running kernel info
+		kernelRelease, err = util.GetKernelVersion()
+		if err != nil {
+			return transaction, err
+		}
+	}
+	if !util.CheckKernelSupport(kernelRelease, deepFlattenSupport) {
+		if !skipForceFlatten {
+			feature, err = volOptions.checkImageChainHasFeature(ctx, librbd.FeatureDeepFlatten)
+			if err != nil {
+				return transaction, err
+			}
+			if feature {
+				err = volOptions.flattenRbdImage(ctx, cr, true)
+				if err != nil {
+					return transaction, err
+				}
+			}
+		}
+	}
 	// Mapping RBD image
 	var devicePath string
 	devicePath, err = attachRBDImage(ctx, volOptions, cr)
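Taken together, the hunk implements a three-way decision before the image is mapped: new-enough kernels change nothing, old kernels can be opted out with the flag, and otherwise the image chain is force-flattened when deep-flatten is present. A condensed sketch of that logic (the booleans stand in for the real calls shown above):

package main

import "fmt"

// shouldForceFlatten distills the decision flow from the hunk above; the
// three booleans stand in for the real cephcsi checks.
func shouldForceFlatten(kernelHasDeepFlatten, skipForceFlatten, chainHasDeepFlatten bool) bool {
	if kernelHasDeepFlatten {
		return false // kernel can map the image chain as-is
	}
	if skipForceFlatten {
		return false // operator opted out via --skipforceflatten
	}
	return chainHasDeepFlatten // flatten only when deep-flatten is present
}

func main() {
	fmt.Println(shouldForceFlatten(false, false, true)) // old kernel, no opt-out: true
}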
@@ -95,6 +95,10 @@ type Config struct {
 	IsNodeServer bool // if set to true start node server
 	Version      bool // cephcsi version
 
+	// SkipForceFlatten is set to false if the kernel supports mounting of
+	// rbd images or the image chain has the deep-flatten feature.
+	SkipForceFlatten bool
+
 	// cephfs related flags
 	ForceKernelCephFS bool // force to use the ceph kernel client even if the kernel is < 4.17
 