2019-03-20 19:14:20 +00:00
|
|
|
/*
|
|
|
|
Copyright 2019 The Ceph-CSI Authors.
|
|
|
|
|
|
|
|
Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
you may not use this file except in compliance with the License.
|
|
|
|
You may obtain a copy of the License at
|
|
|
|
|
|
|
|
http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
|
|
|
|
Unless required by applicable law or agreed to in writing, software
|
|
|
|
distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
See the License for the specific language governing permissions and
|
|
|
|
limitations under the License.
|
|
|
|
*/
|
|
|
|
|
|
|
|
package main
|
|
|
|
|
|
|
|
import (
|
|
|
|
"flag"
|
2019-11-06 04:52:07 +00:00
|
|
|
"fmt"
|
2019-03-20 19:14:20 +00:00
|
|
|
"os"
|
2019-11-06 04:52:07 +00:00
|
|
|
"runtime"
|
2019-06-20 19:30:40 +00:00
|
|
|
"time"
|
2019-03-20 19:14:20 +00:00
|
|
|
|
2020-04-17 09:23:49 +00:00
|
|
|
"github.com/ceph/ceph-csi/internal/cephfs"
|
2020-11-17 07:38:54 +00:00
|
|
|
"github.com/ceph/ceph-csi/internal/controller"
|
|
|
|
"github.com/ceph/ceph-csi/internal/controller/persistentvolume"
|
2024-08-05 12:04:31 +00:00
|
|
|
"github.com/ceph/ceph-csi/internal/controller/volumegroup"
|
2020-04-17 09:23:49 +00:00
|
|
|
"github.com/ceph/ceph-csi/internal/liveness"
|
2022-03-17 09:26:17 +00:00
|
|
|
nfsdriver "github.com/ceph/ceph-csi/internal/nfs/driver"
|
2021-12-09 08:18:39 +00:00
|
|
|
rbddriver "github.com/ceph/ceph-csi/internal/rbd/driver"
|
2020-04-17 09:23:49 +00:00
|
|
|
"github.com/ceph/ceph-csi/internal/util"
|
2021-08-24 15:03:25 +00:00
|
|
|
"github.com/ceph/ceph-csi/internal/util/log"
|
2020-04-15 03:38:16 +00:00
|
|
|
|
2021-03-24 13:13:56 +00:00
|
|
|
"k8s.io/klog/v2"
|
2019-03-20 19:14:20 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
const (
	// supported values for the "-type" command line option; selects which
	// CSI service this binary runs as.
	rbdType        = "rbd"
	cephFSType     = "cephfs"
	nfsType        = "nfs"
	livenessType   = "liveness"
	controllerType = "controller"

	// default CSI driver names, used when "-drivername" is not passed.
	rbdDefaultName      = "rbd.csi.ceph.com"
	cephFSDefaultName   = "cephfs.csi.ceph.com"
	nfsDefaultName      = "nfs.csi.ceph.com"
	livenessDefaultName = "liveness.csi.ceph.com"

	// defaults for the liveness poller.
	pollTime     = 60 // seconds
	probeTimeout = 3  // seconds

	// use default namespace if namespace is not set.
	defaultNS = "default"

	// default kubelet paths for the node plugin.
	defaultPluginPath  = "/var/lib/kubelet/plugins"
	defaultStagingPath = defaultPluginPath + "/kubernetes.io/csi/"
)
|
|
|
|
|
2021-07-13 13:15:25 +00:00
|
|
|
var conf util.Config
|
2019-08-14 05:57:45 +00:00
|
|
|
|
|
|
|
// init registers all command line flags into the package-level conf,
// wires in klog's logging flags and parses the arguments. Flags are
// grouped by the component they configure.
func init() {
	// common flags
	flag.StringVar(&conf.Vtype, "type", "", "driver type [rbd|cephfs|nfs|liveness|controller]")
	flag.StringVar(&conf.Endpoint, "endpoint", "unix:///tmp/csi.sock", "CSI endpoint")
	flag.StringVar(&conf.DriverName, "drivername", "", "name of the driver")
	flag.StringVar(&conf.DriverNamespace, "drivernamespace", defaultNS, "namespace in which driver is deployed")
	flag.StringVar(&conf.NodeID, "nodeid", "", "node id")
	flag.StringVar(&conf.PluginPath, "pluginpath", defaultPluginPath, "plugin path")
	flag.StringVar(&conf.StagingPath, "stagingpath", defaultStagingPath, "staging path")
	flag.StringVar(&conf.ClusterName, "clustername", "", "name of the cluster")
	flag.BoolVar(&conf.SetMetadata, "setmetadata", false, "set metadata on the volume")
	flag.StringVar(&conf.InstanceID, "instanceid", "default", "Unique ID distinguishing this instance of Ceph-CSI"+
		" among other instances, when sharing Ceph clusters across CSI instances for provisioning")
	flag.IntVar(&conf.PidLimit, "pidlimit", 0, "the PID limit to configure through cgroups")
	flag.BoolVar(&conf.IsControllerServer, "controllerserver", false, "start cephcsi controller server")
	flag.BoolVar(&conf.IsNodeServer, "nodeserver", false, "start cephcsi node server")
	flag.StringVar(
		&conf.DomainLabels,
		"domainlabels",
		"",
		"list of Kubernetes node labels, that determines the topology"+
			" domain the node belongs to, separated by ','")
	flag.BoolVar(&conf.EnableReadAffinity, "enable-read-affinity", false, "enable read affinity")
	flag.StringVar(
		&conf.CrushLocationLabels,
		"crush-location-labels",
		"",
		"list of Kubernetes node labels, that determines the"+
			" CRUSH location the node belongs to, separated by ','")

	// cephfs related flags
	flag.BoolVar(
		&conf.ForceKernelCephFS,
		"forcecephkernelclient",
		false,
		"enable Ceph Kernel clients on kernel < 4.17 which support quotas")
	flag.StringVar(
		&conf.KernelMountOptions,
		"kernelmountoptions",
		"",
		"Comma separated string of mount options accepted by cephfs kernel mounter")
	flag.StringVar(
		&conf.RadosNamespaceCephFS,
		"radosnamespacecephfs",
		"",
		"CephFS RadosNamespace used to store CSI specific objects and keys.")
	flag.StringVar(
		&conf.FuseMountOptions,
		"fusemountoptions",
		"",
		"Comma separated string of mount options accepted by ceph-fuse mounter")

	// liveness/profile metrics related flags
	flag.IntVar(&conf.MetricsPort, "metricsport", 8080, "TCP port for liveness/profile metrics requests")
	flag.StringVar(
		&conf.MetricsPath,
		"metricspath",
		"/metrics",
		"path of prometheus endpoint where metrics will be available")
	flag.DurationVar(&conf.PollTime, "polltime", time.Second*pollTime, "time interval in seconds between each poll")
	flag.DurationVar(&conf.PoolTimeout, "timeout", time.Second*probeTimeout, "probe timeout in seconds")
	flag.DurationVar(
		&conf.LogSlowOpInterval,
		"logslowopinterval",
		time.Second*30,
		"how often to inform about slow gRPC calls")

	// rbd clone-depth / snapshot flattening related flags
	flag.UintVar(
		&conf.RbdHardMaxCloneDepth,
		"rbdhardmaxclonedepth",
		8,
		"Hard limit for maximum number of nested volume clones that are taken before a flatten occurs")
	flag.UintVar(
		&conf.RbdSoftMaxCloneDepth,
		"rbdsoftmaxclonedepth",
		4,
		"Soft limit for maximum number of nested volume clones that are taken before a flatten occurs")
	flag.UintVar(
		&conf.MaxSnapshotsOnImage,
		"maxsnapshotsonimage",
		450,
		"Maximum number of snapshots allowed on rbd image without flattening")
	flag.UintVar(
		&conf.MinSnapshotsOnImage,
		"minsnapshotsonimage",
		250,
		"Minimum number of snapshots required on rbd image to start flattening")
	flag.BoolVar(&conf.SkipForceFlatten, "skipforceflatten", false,
		"skip image flattening if kernel support mapping of rbd images which has the deep-flatten feature")

	flag.BoolVar(&conf.Version, "version", false, "Print cephcsi version information")
	flag.BoolVar(&conf.EnableProfiling, "enableprofiling", false, "enable go profiling")

	// CSI-Addons configuration
	flag.StringVar(&conf.CSIAddonsEndpoint, "csi-addons-endpoint", "unix:///tmp/csi-addons.sock", "CSI-Addons endpoint")

	klog.InitFlags(nil)
	if err := flag.Set("logtostderr", "true"); err != nil {
		klog.Exitf("failed to set logtostderr flag: %v", err)
	}
	flag.Parse()
}
|
|
|
|
|
|
|
|
func getDriverName() string {
|
|
|
|
// was explicitly passed a driver name
|
2019-08-14 05:57:45 +00:00
|
|
|
if conf.DriverName != "" {
|
|
|
|
return conf.DriverName
|
2019-03-20 19:14:20 +00:00
|
|
|
}
|
|
|
|
// select driver name based on volume type
|
2019-08-14 05:57:45 +00:00
|
|
|
switch conf.Vtype {
|
2019-03-20 19:14:20 +00:00
|
|
|
case rbdType:
|
|
|
|
return rbdDefaultName
|
2021-09-20 10:16:55 +00:00
|
|
|
case cephFSType:
|
|
|
|
return cephFSDefaultName
|
2022-03-17 09:26:17 +00:00
|
|
|
case nfsType:
|
|
|
|
return nfsDefaultName
|
2019-06-20 19:30:40 +00:00
|
|
|
case livenessType:
|
|
|
|
return livenessDefaultName
|
2019-03-20 19:14:20 +00:00
|
|
|
default:
|
|
|
|
return ""
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-03-17 09:21:30 +00:00
|
|
|
func printVersion() {
|
|
|
|
fmt.Println("Cephcsi Version:", util.DriverVersion)
|
|
|
|
fmt.Println("Git Commit:", util.GitCommit)
|
|
|
|
fmt.Println("Go Version:", runtime.Version())
|
|
|
|
fmt.Println("Compiler:", runtime.Compiler)
|
|
|
|
fmt.Printf("Platform: %s/%s\n", runtime.GOOS, runtime.GOARCH)
|
|
|
|
if kv, err := util.GetKernelVersion(); err == nil {
|
|
|
|
fmt.Println("Kernel:", kv)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-03-20 19:14:20 +00:00
|
|
|
// main validates the parsed configuration and starts the requested
// cephcsi service (rbd/cephfs/nfs node+controller driver, liveness
// prober, or the Kubernetes controllers). Driver Run() calls block
// for the lifetime of the process.
func main() {
	// -version short-circuits everything else.
	if conf.Version {
		printVersion()
		os.Exit(0)
	}
	log.DefaultLog("Driver version: %s and Git version: %s", util.DriverVersion, util.GitCommit)

	if conf.Vtype == "" {
		// log-and-exit instead of klog.Fatal to avoid a useless Go stack trace
		logAndExit("driver type not specified")
	}

	dname := getDriverName()
	err := util.ValidateDriverName(dname)
	if err != nil {
		logAndExit(err.Error())
	}

	setPIDLimit(&conf)

	// metrics endpoint is only needed when profiling or liveness is enabled
	if conf.EnableProfiling || conf.Vtype == livenessType {
		// validate metrics endpoint
		conf.MetricsIP = os.Getenv("POD_IP")

		if conf.MetricsIP == "" {
			klog.Warning("missing POD_IP env var defaulting to 0.0.0.0")
			conf.MetricsIP = "0.0.0.0"
		}
		err = util.ValidateURL(&conf)
		if err != nil {
			logAndExit(err.Error())
		}
	}

	if err = util.WriteCephConfig(); err != nil {
		log.FatalLogMsg("failed to write ceph configuration file (%v)", err)
	}

	log.DefaultLog("Starting driver type: %v with name: %v", conf.Vtype, dname)
	// dispatch on the validated driver type; each branch blocks until the
	// service terminates.
	switch conf.Vtype {
	case rbdType:
		validateCloneDepthFlag(&conf)
		validateMaxSnapshotFlag(&conf)
		driver := rbddriver.NewDriver()
		driver.Run(&conf)

	case cephFSType:
		driver := cephfs.NewDriver()
		driver.Run(&conf)

	case nfsType:
		driver := nfsdriver.NewDriver()
		driver.Run(&conf)

	case livenessType:
		liveness.Run(&conf)

	case controllerType:
		cfg := controller.Config{
			DriverName:  dname,
			Namespace:   conf.DriverNamespace,
			ClusterName: conf.ClusterName,
			InstanceID:  conf.InstanceID,
			SetMetadata: conf.SetMetadata,
		}
		// initialize all controllers before starting.
		initControllers()
		err = controller.Start(cfg)
		if err != nil {
			logAndExit(err.Error())
		}
	}

	os.Exit(0)
}
|
2020-06-24 06:44:02 +00:00
|
|
|
|
2023-04-25 07:38:03 +00:00
|
|
|
func setPIDLimit(conf *util.Config) {
|
|
|
|
// set pidLimit only for NodeServer
|
|
|
|
// the driver may need a higher PID limit for handling all concurrent requests
|
|
|
|
if conf.IsNodeServer && conf.PidLimit != 0 {
|
|
|
|
currentLimit, pidErr := util.GetPIDLimit()
|
|
|
|
if pidErr != nil {
|
|
|
|
klog.Errorf("Failed to get the PID limit, can not reconfigure: %v", pidErr)
|
|
|
|
} else {
|
|
|
|
log.DefaultLog("Initial PID limit is set to %d", currentLimit)
|
|
|
|
err := util.SetPIDLimit(conf.PidLimit)
|
|
|
|
switch {
|
|
|
|
case err != nil:
|
|
|
|
klog.Errorf("Failed to set new PID limit to %d: %v", conf.PidLimit, err)
|
|
|
|
case conf.PidLimit == -1:
|
|
|
|
log.DefaultLog("Reconfigured PID limit to %d (max)", conf.PidLimit)
|
|
|
|
default:
|
|
|
|
log.DefaultLog("Reconfigured PID limit to %d", conf.PidLimit)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-11-17 07:38:54 +00:00
|
|
|
// initControllers will initialize all the controllers that are started
// later by controller.Start() in the controllerType branch of main().
func initControllers() {
	// Add list of controller here.
	persistentvolume.Init()
	volumegroup.Init()
}
|
|
|
|
|
2020-06-24 06:44:02 +00:00
|
|
|
func validateCloneDepthFlag(conf *util.Config) {
|
|
|
|
// keeping hardlimit to 14 as max to avoid max image depth
|
|
|
|
if conf.RbdHardMaxCloneDepth == 0 || conf.RbdHardMaxCloneDepth > 14 {
|
2020-10-26 13:18:48 +00:00
|
|
|
logAndExit("rbdhardmaxclonedepth flag value should be between 1 and 14")
|
2020-06-24 06:44:02 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if conf.RbdSoftMaxCloneDepth > conf.RbdHardMaxCloneDepth {
|
2020-10-26 13:18:48 +00:00
|
|
|
logAndExit("rbdsoftmaxclonedepth flag value should not be greater than rbdhardmaxclonedepth")
|
2020-06-24 06:44:02 +00:00
|
|
|
}
|
|
|
|
}
|
2020-07-01 05:27:11 +00:00
|
|
|
|
2023-09-02 07:15:08 +00:00
|
|
|
func validateMaxSnapshotFlag(conf *util.Config) {
|
2020-07-01 05:27:11 +00:00
|
|
|
// maximum number of snapshots on an image are 510 [1] and 16 images in
|
|
|
|
// a parent/child chain [2],keeping snapshot limit to 500 to avoid issues.
|
|
|
|
// [1] https://github.com/torvalds/linux/blob/master/drivers/block/rbd.c#L98
|
|
|
|
// [2] https://github.com/torvalds/linux/blob/master/drivers/block/rbd.c#L92
|
|
|
|
if conf.MaxSnapshotsOnImage == 0 || conf.MaxSnapshotsOnImage > 500 {
|
2020-10-26 13:18:48 +00:00
|
|
|
logAndExit("maxsnapshotsonimage flag value should be between 1 and 500")
|
2020-07-01 05:27:11 +00:00
|
|
|
}
|
2020-11-17 03:34:29 +00:00
|
|
|
|
|
|
|
if conf.MinSnapshotsOnImage > conf.MaxSnapshotsOnImage {
|
|
|
|
logAndExit("minsnapshotsonimage flag value should be less than maxsnapshotsonimage")
|
|
|
|
}
|
2020-07-01 05:27:11 +00:00
|
|
|
}
|
2020-10-27 07:13:30 +00:00
|
|
|
|
|
|
|
// logAndExit logs msg as an error via klog and terminates the process
// with exit status 1. It is used instead of klog.Fatal* so that fatal
// configuration errors do not print an unhelpful goroutine stack trace.
func logAndExit(msg string) {
	klog.Errorln(msg)
	os.Exit(1)
}
|