move flag configuration variable to util

remove unwanted checks
remove deriving the driver type from the binary name

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
This commit is contained in:
Madhu Rajanna
2019-08-14 11:27:45 +05:30
committed by mergify[bot]
parent 2b1355061e
commit 89732d923f
5 changed files with 95 additions and 80 deletions

View File

@ -19,9 +19,7 @@ package main
import (
"flag"
"os"
"path"
"path/filepath"
"strings"
"time"
"github.com/ceph/ceph-csi/pkg/cephfs"
@ -42,32 +40,34 @@ const (
)
var (
// common flags
vtype = flag.String("type", "", "driver type [rbd|cephfs|liveness]")
endpoint = flag.String("endpoint", "unix://tmp/csi.sock", "CSI endpoint")
driverName = flag.String("drivername", "", "name of the driver")
nodeID = flag.String("nodeid", "", "node id")
instanceID = flag.String("instanceid", "", "Unique ID distinguishing this instance of Ceph CSI among other"+
" instances, when sharing Ceph clusters across CSI instances for provisioning")
metadataStorage = flag.String("metadatastorage", "", "metadata persistence method [node|k8s_configmap]")
pluginPath = flag.String("pluginpath", "/var/lib/kubelet/plugins/", "the location of cephcsi plugin")
pidLimit = flag.Int("pidlimit", 0, "the PID limit to configure through cgroups")
// rbd related flags
containerized = flag.Bool("containerized", true, "whether run as containerized")
// cephfs related flags
volumeMounter = flag.String("volumemounter", "", "default volume mounter (possible options are 'kernel', 'fuse')")
mountCacheDir = flag.String("mountcachedir", "", "mount info cache save dir")
// livenes related flags
livenessport = flag.Int("livenessport", 8080, "TCP port for liveness requests")
livenesspath = flag.String("livenesspath", "/metrics", "path of prometheus endpoint where metrics will be available")
pollTime = flag.Duration("polltime", time.Second*60, "time interval in seconds between each poll")
timeout = flag.Duration("timeout", time.Second*3, "probe timeout in seconds")
conf util.Config
)
func init() {
// common flags
flag.StringVar(&conf.Vtype, "type", "", "driver type [rbd|cephfs|liveness]")
flag.StringVar(&conf.Endpoint, "endpoint", "unix://tmp/csi.sock", "CSI endpoint")
flag.StringVar(&conf.DriverName, "drivername", "", "name of the driver")
flag.StringVar(&conf.NodeID, "nodeid", "", "node id")
flag.StringVar(&conf.InstanceID, "instanceid", "", "Unique ID distinguishing this instance of Ceph CSI among other"+
" instances, when sharing Ceph clusters across CSI instances for provisioning")
flag.StringVar(&conf.MetadataStorage, "metadatastorage", "", "metadata persistence method [node|k8s_configmap]")
flag.StringVar(&conf.PluginPath, "pluginpath", "/var/lib/kubelet/plugins/", "the location of cephcsi plugin")
flag.IntVar(&conf.PidLimit, "pidlimit", 0, "the PID limit to configure through cgroups")
// rbd related flags
flag.BoolVar(&conf.Containerized, "containerized", true, "whether run as containerized")
// cephfs related flags
flag.StringVar(&conf.VolumeMounter, "volumemounter", "", "default volume mounter (possible options are 'kernel', 'fuse')")
flag.StringVar(&conf.MountCacheDir, "mountcachedir", "", "mount info cache save dir")
// livenes related flags
flag.IntVar(&conf.LivenessPort, "livenessport", 8080, "TCP port for liveness requests")
flag.StringVar(&conf.LivenessPath, "livenesspath", "/metrics", "path of prometheus endpoint where metrics will be available")
flag.DurationVar(&conf.PollTime, "polltime", time.Second*60, "time interval in seconds between each poll")
flag.DurationVar(&conf.PoolTimeout, "timeout", time.Second*3, "probe timeout in seconds")
klog.InitFlags(nil)
if err := flag.Set("logtostderr", "true"); err != nil {
klog.Exitf("failed to set logtostderr flag: %v", err)
@ -75,27 +75,13 @@ func init() {
flag.Parse()
}
// getType resolves the driver type for this process. It prefers the
// explicit -type flag; when the flag is unset it falls back to
// inferring the type from the executable's base name in os.Args[0].
// NOTE(review): this is the old helper being deleted by this commit —
// the new code reads conf.Vtype directly and drops the binary-name
// fallback.
func getType() string {
// -type flag not provided: infer from the binary name.
if vtype == nil || *vtype == "" {
a0 := path.Base(os.Args[0])
if strings.Contains(a0, rbdType) {
return rbdType
}
if strings.Contains(a0, cephfsType) {
return cephfsType
}
// Neither "rbd" nor "cephfs" appears in the binary name;
// the caller treats "" as "type not specified" and exits.
return ""
}
return *vtype
}
func getDriverName() string {
// was explicitly passed a driver name
if driverName != nil && *driverName != "" {
return *driverName
if conf.DriverName != "" {
return conf.DriverName
}
// select driver name based on volume type
switch getType() {
switch conf.Vtype {
case rbdType:
return rbdDefaultName
case cephfsType:
@ -111,8 +97,7 @@ func main() {
klog.Infof("Driver version: %s and Git version: %s", util.DriverVersion, util.GitCommit)
var cp util.CachePersister
driverType := getType()
if driverType == "" {
if conf.Vtype == "" {
klog.Fatalln("driver type not specified")
}
@ -121,50 +106,50 @@ func main() {
if err != nil {
klog.Fatalln(err) // calls exit
}
csipluginPath := filepath.Join(*pluginPath, dname)
if *metadataStorage != "" {
csipluginPath := filepath.Join(conf.PluginPath, dname)
if conf.MetadataStorage != "" {
cp, err = util.CreatePersistanceStorage(
csipluginPath, *metadataStorage, *pluginPath)
csipluginPath, conf.MetadataStorage, conf.PluginPath)
if err != nil {
os.Exit(1)
}
}
// the driver may need a higher PID limit for handling all concurrent requests
if pidLimit != nil && *pidLimit != 0 {
if conf.PidLimit != 0 {
currentLimit, err := util.GetPIDLimit()
if err != nil {
klog.Errorf("Failed to get the PID limit, can not reconfigure: %v", err)
} else {
klog.Infof("Initial PID limit is set to %d", currentLimit)
err = util.SetPIDLimit(*pidLimit)
err = util.SetPIDLimit(conf.PidLimit)
if err != nil {
klog.Errorf("Failed to set new PID limit to %d: %v", *pidLimit, err)
klog.Errorf("Failed to set new PID limit to %d: %v", conf.PidLimit, err)
} else {
s := ""
if *pidLimit == -1 {
if conf.PidLimit == -1 {
s = " (max)"
}
klog.Infof("Reconfigured PID limit to %d%s", *pidLimit, s)
klog.Infof("Reconfigured PID limit to %d%s", conf.PidLimit, s)
}
}
}
klog.Infof("Starting driver type: %v with name: %v", driverType, dname)
switch driverType {
klog.Infof("Starting driver type: %v with name: %v", conf.Vtype, dname)
switch conf.Vtype {
case rbdType:
driver := rbd.NewDriver()
driver.Run(dname, *nodeID, *endpoint, *instanceID, *containerized, cp, driverType)
driver.Run(&conf, cp)
case cephfsType:
driver := cephfs.NewDriver()
driver.Run(dname, *nodeID, *endpoint, *volumeMounter, *mountCacheDir, *instanceID, csipluginPath, cp, driverType)
driver.Run(&conf, cp)
case livenessType:
liveness.Run(*endpoint, *livenesspath, *livenessport, *pollTime, *timeout)
liveness.Run(&conf)
default:
klog.Fatalln("invalid volume type", vtype) // calls exit
klog.Fatalln("invalid volume type", conf.Vtype) // calls exit
}
os.Exit(0)