Added forcecephkernelclient as startup parameter to force enabling the Ceph kernel client

Signed-off-by: Stefan Haas <shaas@suse.com>
Stefan Haas 2019-10-10 12:15:44 +02:00 committed by mergify[bot]
parent 50268d5947
commit 6a2717ce20
5 changed files with 31 additions and 20 deletions


@@ -63,6 +63,7 @@ func init() {
 	// cephfs related flags
 	flag.StringVar(&conf.MountCacheDir, "mountcachedir", "", "mount info cache save dir")
+	flag.BoolVar(&conf.ForceKernelCephFS, "forcecephkernelclient", false, "enable Ceph Kernel clients on kernel < 4.17 which support quotas")
 	// liveness/grpc metrics related flags
 	flag.IntVar(&conf.MetricsPort, "metricsport", 8080, "TCP port for liveness/grpc metrics requests")
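
For context, here is a minimal, runnable sketch of how such a boolean startup parameter ends up in the driver configuration. The trimmed-down `Config` struct and the `main` function below are stand-ins for illustration only, not the driver's actual entry point:

```go
package main

import (
	"flag"
	"fmt"
)

// Config is a stand-in for the relevant part of util.Config.
type Config struct {
	ForceKernelCephFS bool
}

func main() {
	var conf Config
	// Same registration pattern as in the commit: the flag defaults to false,
	// so the kernel client is only forced when the operator explicitly opts in.
	flag.BoolVar(&conf.ForceKernelCephFS, "forcecephkernelclient", false,
		"enable Ceph Kernel clients on kernel < 4.17 which support quotas")
	flag.Parse()

	// Go's flag package accepts both -forcecephkernelclient and
	// --forcecephkernelclient on the command line.
	fmt.Println("force kernel CephFS client:", conf.ForceKernelCephFS)
}
```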


@@ -43,23 +43,28 @@ that should be resolved in v14.2.3.

**Available command line arguments:**

| Option                    | Default value               | Description |
| ------------------------- | --------------------------- | ----------- |
| `--endpoint`              | `unix://tmp/csi.sock`       | CSI endpoint, must be a UNIX socket |
| `--drivername`            | `cephfs.csi.ceph.com`       | Name of the driver (Kubernetes: `provisioner` field in StorageClass must correspond to this value) |
| `--nodeid`                | _empty_                     | This node's ID |
| `--type`                  | _empty_                     | Driver type `[rbd \| cephfs]`. If the driver type is set to `rbd` it acts as an `rbd plugin`; if it is set to `cephfs` it acts as a `cephfs plugin` |
| `--mountcachedir`         | _empty_                     | Volume mount cache info save dir. If left unspecified, the driver will not record mount info; otherwise it saves mount info and remounts the cached volumes when the driver restarts |
| `--instanceid`            | "default"                   | Unique ID distinguishing this instance of Ceph CSI among other instances, when sharing Ceph clusters across CSI instances for provisioning |
| `--pluginpath`            | "/var/lib/kubelet/plugins/" | The location of the cephcsi plugin on the host |
| `--metadatastorage`       | _empty_                     | Points to where metadata about volumes provisioned by older plugin versions (1.0.0 or older) is kept, as a file or as a k8s configmap (`node` or `k8s_configmap` respectively) |
| `--pidlimit`              | _0_                         | Configure the PID limit in cgroups. The container runtime can restrict the number of processes/tasks, which can cause problems while provisioning (or deleting) a large number of volumes. A value of `-1` configures the limit to the maximum, `0` does not configure limits at all |
| `--metricsport`           | `8080`                      | TCP port for liveness/grpc metrics requests |
| `--metricspath`           | `/metrics`                  | Path of the prometheus endpoint where metrics will be available |
| `--enablegrpcmetrics`     | `false`                     | Enable grpc metrics collection and start prometheus server |
| `--polltime`              | `60s`                       | Time interval between each poll |
| `--timeout`               | `3s`                        | Probe timeout in seconds |
| `--histogramoption`       | `0.5,2,6`                   | Histogram option for grpc metrics, should be a comma-separated value (ex: "0.5,2,6" where start=0.5, factor=2, count=6) |
| `--forcecephkernelclient` | `false`                     | Force enabling the Ceph kernel client for mounting on kernels < 4.17 |

**NOTE:** The parameter `-forcecephkernelclient` enables the kernel
CephFS mounter on kernels < 4.17.
**This is not recommended/supported if the kernel does not support quota.**
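
The version threshold the note refers to can be written down as a small predicate; the helper name below is hypothetical and is shown only to make the documented `>= 4.17` rule explicit:

```go
// kernelSupportsCephFSQuota reports whether a kernel of the given version
// (>= 4.17) can enforce CephFS quotas with the in-kernel client.
// Hypothetical helper, not part of the driver.
func kernelSupportsCephFSQuota(major, minor int) bool {
	return major > 4 || (major == 4 && minor >= 17)
}
```

With `--forcecephkernelclient` the kernel mounter is selected even when this predicate is false, which is why forcing it is unsupported on kernels without quota support.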
**Available environmental variables:**


@@ -95,7 +95,7 @@ func (fs *Driver) Run(conf *util.Config, cachePersister util.CachePersister) {
 	// Configuration
 	PluginFolder = conf.PluginPath
-	if err := loadAvailableMounters(); err != nil {
+	if err := loadAvailableMounters(conf); err != nil {
 		klog.Fatalf("cephfs: failed to load ceph mounters: %v", err)
 	}


@@ -49,7 +49,7 @@ var (
 // Load available ceph mounters installed on system into availableMounters
 // Called from driver.go's Run()
-func loadAvailableMounters() error {
+func loadAvailableMounters(conf *util.Config) error {
 	// #nosec
 	fuseMounterProbe := exec.Command("ceph-fuse", "--version")
 	// #nosec
@@ -70,7 +70,8 @@ func loadAvailableMounters() error {
 	if err != nil {
 		return err
 	}
-	if majorVers > 4 || (majorVers >= 4 && minorVers >= 17) {
+	if conf.ForceKernelCephFS || majorVers > 4 || (majorVers >= 4 && minorVers >= 17) {
 		klog.Infof("loaded mounter: %s", volumeMounterKernel)
 		availableMounters = append(availableMounters, volumeMounterKernel)
 	} else {
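
Putting the pieces together, the mounter-selection flow after this change can be approximated as below. The `kernelVersion` helper and the probing details are assumptions for illustration, not the driver's exact code:

```go
package main

import (
	"fmt"
	"os/exec"
	"strconv"
	"strings"
)

// kernelVersion extracts "major.minor" from `uname -r` output such as
// "4.15.0-88-generic".
func kernelVersion() (major, minor int, err error) {
	out, err := exec.Command("uname", "-r").Output()
	if err != nil {
		return 0, 0, err
	}
	parts := strings.SplitN(strings.TrimSpace(string(out)), ".", 3)
	if len(parts) < 2 {
		return 0, 0, fmt.Errorf("unexpected kernel release %q", out)
	}
	if major, err = strconv.Atoi(parts[0]); err != nil {
		return 0, 0, err
	}
	if minor, err = strconv.Atoi(parts[1]); err != nil {
		return 0, 0, err
	}
	return major, minor, nil
}

func main() {
	forceKernelCephFS := true // would come from --forcecephkernelclient

	var mounters []string
	// ceph-fuse is usable whenever the binary can be executed.
	if exec.Command("ceph-fuse", "--version").Run() == nil {
		mounters = append(mounters, "fuse")
	}
	// The kernel mounter is picked on >= 4.17 kernels, or unconditionally
	// when the operator forces it.
	if major, minor, err := kernelVersion(); err == nil {
		if forceKernelCephFS || major > 4 || (major == 4 && minor >= 17) {
			mounters = append(mounters, "kernel")
		}
	}
	fmt.Println("available mounters:", mounters)
}
```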


@@ -97,9 +97,13 @@ type Config struct {
 	IsControllerServer bool // if set to true start provisioner server
 	IsNodeServer       bool // if set to true start node server
 	// rbd related flag
 	Containerized bool // whether run as containerized
+	// cephfs related flags
+	ForceKernelCephFS bool // force to use the ceph kernel client even if the kernel is < 4.17
 }
// CreatePersistanceStorage creates storage path and initializes new cache // CreatePersistanceStorage creates storage path and initializes new cache