rbd: add hardlimit and softlimit flags

Added hardlimit and softlimit flags for cephcsi
arguments. When the softlimit is reached, cephcsi
starts a background task to flatten the rbd
image and returns success; if the hardlimit
is reached, it starts a background task to
flatten the rbd image and returns ready-to-use
as false, to make sure that the image is not
used until it has been flattened.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Authored by Madhu Rajanna on 2020-06-24 12:14:02 +05:30; committed by mergify[bot]
parent 3e7fa93256
commit 9b518726ab
7 changed files with 56 additions and 17 deletions
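
The soft/hard limit behavior described in the commit message, as a minimal
sketch in Go (the names checkCloneDepth, startBackgroundFlatten, and response
are hypothetical illustrations, not the actual cephcsi code path):

package main

import "fmt"

// response mirrors the idea of returning success plus a ready-to-use
// indicator (hypothetical type, for illustration only).
type response struct {
    Success    bool
    ReadyToUse bool
}

// startBackgroundFlatten stands in for the asynchronous flatten task
// that cephcsi schedules for the rbd image.
func startBackgroundFlatten(image string) {
    fmt.Printf("flattening %s in the background\n", image)
}

// checkCloneDepth applies the soft/hard limit policy described above.
func checkCloneDepth(image string, depth, soft, hard uint) response {
    switch {
    case depth >= hard:
        // Hard limit reached: flatten in the background and report the
        // image as not ready, so it is not used until flattened.
        startBackgroundFlatten(image)
        return response{Success: true, ReadyToUse: false}
    case depth >= soft:
        // Soft limit reached: flatten in the background but still return
        // success; the image remains usable in the meantime.
        startBackgroundFlatten(image)
        return response{Success: true, ReadyToUse: true}
    default:
        return response{Success: true, ReadyToUse: true}
    }
}

func main() {
    fmt.Printf("%+v\n", checkCloneDepth("pool/image", 5, 4, 8)) // soft limit hit
    fmt.Printf("%+v\n", checkCloneDepth("pool/image", 9, 4, 8)) // hard limit hit
}

The only difference between the two limits is the ready-to-use flag: at the
soft limit the image stays usable while flattening proceeds in the
background; at the hard limit callers must wait for the flatten to finish.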


@@ -114,6 +114,8 @@ spec:
 - "--endpoint=$(CSI_ENDPOINT)"
 - "--v=5"
 - "--drivername=$(DRIVER_NAME)"
+- "--rbdhardmaxclonedepth={{ .Values.provisioner.hardMaxCloneDepth }}"
+- "--rbdsoftmaxclonedepth={{ .Values.provisioner.softMaxCloneDepth }}"
 env:
 - name: POD_IP
 valueFrom:


@@ -107,6 +107,12 @@ provisioner:
   replicaCount: 3
   # Timeout for waiting for creation or deletion of a volume
   timeout: 60s
+  # Hard limit for maximum number of nested volume clones that are taken before
+  # a flatten occurs
+  hardMaxCloneDepth: 8
+  # Soft limit for maximum number of nested volume clones that are taken before
+  # a flatten occurs
+  softMaxCloneDepth: 4
   httpMetrics:
     # Metrics only available for cephcsi/cephcsi => 1.2.0
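
These chart defaults can be overridden per deployment; for example, a custom
helm values file could tighten both limits (the numbers below are
illustrative, not recommendations):

provisioner:
  hardMaxCloneDepth: 6
  softMaxCloneDepth: 3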


@@ -77,6 +77,8 @@ func init() {
     flag.StringVar(&conf.HistogramOption, "histogramoption", "0.5,2,6",
         "[DEPRECATED] Histogram option for grpc metrics, should be comma separated value, ex:= 0.5,2,6 where start=0.5 factor=2, count=6")
+    flag.UintVar(&conf.RbdHardMaxCloneDepth, "rbdhardmaxclonedepth", 8, "Hard limit for maximum number of nested volume clones that are taken before a flatten occurs")
+    flag.UintVar(&conf.RbdSoftMaxCloneDepth, "rbdsoftmaxclonedepth", 4, "Soft limit for maximum number of nested volume clones that are taken before a flatten occurs")
     flag.BoolVar(&conf.Version, "version", false, "Print cephcsi version information")

     klog.InitFlags(nil)
@@ -175,6 +177,7 @@ func main() {
     klog.V(1).Infof("Starting driver type: %v with name: %v", conf.Vtype, dname)
     switch conf.Vtype {
     case rbdType:
+        validateCloneDepthFlag(&conf)
         driver := rbd.NewDriver()
         driver.Run(&conf, cp)
@@ -194,3 +197,14 @@ func main() {
     os.Exit(0)
 }
+
+func validateCloneDepthFlag(conf *util.Config) {
+    // cap the hard limit at 14 to avoid hitting the maximum image depth
+    if conf.RbdHardMaxCloneDepth == 0 || conf.RbdHardMaxCloneDepth > 14 {
+        klog.Fatalln("rbdhardmaxclonedepth flag value should be between 1 and 14")
+    }
+    if conf.RbdSoftMaxCloneDepth > conf.RbdHardMaxCloneDepth {
+        klog.Fatalln("rbdsoftmaxclonedepth flag value should not be greater than rbdhardmaxclonedepth")
+    }
+}
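
Concretely, the validation above accepts or rejects flag combinations like
these (illustrative invocations, assuming the cephcsi binary and the defaults
registered in init()):

# accepted: the registered defaults
cephcsi --type=rbd --rbdhardmaxclonedepth=8 --rbdsoftmaxclonedepth=4
# rejected: the hard limit must be between 1 and 14
cephcsi --type=rbd --rbdhardmaxclonedepth=0
cephcsi --type=rbd --rbdhardmaxclonedepth=15
# rejected: the soft limit must not exceed the hard limit
cephcsi --type=rbd --rbdhardmaxclonedepth=4 --rbdsoftmaxclonedepth=8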


@@ -112,6 +112,8 @@ spec:
 - "--v=5"
 - "--drivername=rbd.csi.ceph.com"
 - "--pidlimit=-1"
+- "--rbdhardmaxclonedepth=8"
+- "--rbdsoftmaxclonedepth=4"
 env:
 - name: POD_IP
 valueFrom:


@@ -27,7 +27,7 @@ make image-cephcsi
 **Available command line arguments:**
 | Option | Default value | Description |
-|-----------------------|-----------------------|-----------------|
+| ------------------------ | --------------------- | --------------- |
 | `--endpoint` | `unix://tmp/csi.sock` | CSI endpoint, must be a UNIX socket |
 | `--drivername` | `rbd.csi.ceph.com` | Name of the driver (Kubernetes: `provisioner` field in StorageClass must correspond to this value) |
 | `--nodeid` | _empty_ | This node's ID |
@@ -42,6 +42,8 @@ make image-cephcsi
 | `--timeout` | `"3s"` | Probe timeout in seconds |
 | `--histogramoption` | `0.5,2,6` | [Deprecated] Histogram option for grpc metrics, should be comma separated value (ex:= "0.5,2,6" where start=0.5 factor=2, count=6) |
 | `--domainlabels` | _empty_ | Kubernetes node labels to use as CSI domain labels for topology aware provisioning, should be a comma separated value (ex:= "failure-domain/region,failure-domain/zone") |
+| `--rbdhardmaxclonedepth` | `8` | Hard limit for maximum number of nested volume clones that are taken before a flatten occurs |
+| `--rbdsoftmaxclonedepth` | `4` | Soft limit for maximum number of nested volume clones that are taken before a flatten occurs |

 **Available volume parameters:**


@@ -53,6 +53,11 @@ var (
     // VolumeName to backing RBD images
     volJournal *journal.Config
     snapJournal *journal.Config
+
+    // rbdHardMaxCloneDepth is the hard limit for maximum number of nested volume clones that are taken before a flatten occurs
+    rbdHardMaxCloneDepth uint
+    // rbdSoftMaxCloneDepth is the soft limit for maximum number of nested volume clones that are taken before a flatten occurs
+    rbdSoftMaxCloneDepth uint
 )

 // NewDriver returns new rbd driver
@@ -103,6 +108,9 @@ func (r *Driver) Run(conf *util.Config, cachePersister util.CachePersister) {
         CSIInstanceID = conf.InstanceID
     }

+    // update clone soft and hard limit
+    rbdHardMaxCloneDepth = conf.RbdHardMaxCloneDepth
+    rbdSoftMaxCloneDepth = conf.RbdSoftMaxCloneDepth
     // Create instances of the volume and snapshot journal
     volJournal = journal.NewCSIVolumeJournal(CSIInstanceID)
     snapJournal = journal.NewCSISnapshotJournal(CSIInstanceID)


@@ -98,6 +98,11 @@ type Config struct {
     // cephfs related flags
     ForceKernelCephFS bool // force to use the ceph kernel client even if the kernel is < 4.17
+
+    // RbdHardMaxCloneDepth is the hard limit for maximum number of nested volume clones that are taken before a flatten occurs
+    RbdHardMaxCloneDepth uint
+    // RbdSoftMaxCloneDepth is the soft limit for maximum number of nested volume clones that are taken before a flatten occurs
+    RbdSoftMaxCloneDepth uint
 }

 // CreatePersistanceStorage creates storage path and initializes new cache