move flag configuration variables to util

remove unwanted checks
remove deriving the driver type from the binary name

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Madhu Rajanna
2019-08-14 11:27:45 +05:30
committed by mergify[bot]
parent 2b1355061e
commit 89732d923f
5 changed files with 95 additions and 80 deletions
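
Of the five changed files, four diffs appear below: the cephfs driver's Run, the liveness server's Run, the rbd driver's Run, and the new Config struct in the util package. For orientation, here is a minimal sketch of how an entry point might populate util.Config from command-line flags; the flag names, defaults, and the main() wiring are assumptions for illustration, not part of this commit:

package main

import (
	"flag"
	"time"

	"github.com/ceph/ceph-csi/pkg/util"
)

var conf util.Config

func init() {
	// common flags; only the Config field names come from this commit,
	// the flag names and defaults here are hypothetical
	flag.StringVar(&conf.Vtype, "type", "", "driver type [rbd|cephfs|liveness]")
	flag.StringVar(&conf.Endpoint, "endpoint", "unix:///csi/csi.sock", "CSI endpoint")
	flag.StringVar(&conf.DriverName, "drivername", "", "name of the driver")
	flag.StringVar(&conf.NodeID, "nodeid", "", "node id")

	// liveness related flags, also hypothetical
	flag.IntVar(&conf.LivenessPort, "metricsport", 8080, "TCP port for liveness requests")
	flag.DurationVar(&conf.PollTime, "polltime", 60*time.Second, "time interval between each poll")
	flag.DurationVar(&conf.PoolTimeout, "timeout", 3*time.Second, "probe timeout")
}

func main() {
	flag.Parse()
	// conf is now ready to hand to the selected driver's Run()
}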


@@ -91,20 +91,20 @@ func NewNodeServer(d *csicommon.CSIDriver, t string) *NodeServer {
 // Run starts a non-blocking gRPC controller, node, and identity server for
 // the ceph CSI driver, which can serve multiple parallel requests
-func (fs *Driver) Run(driverName, nodeID, endpoint, volumeMounter, mountCacheDir, instanceID, pluginPath string, cachePersister util.CachePersister, t string) {
+func (fs *Driver) Run(conf *util.Config, cachePersister util.CachePersister) {
 	// Configuration
-	PluginFolder = pluginPath
+	PluginFolder = conf.PluginPath
 
 	if err := loadAvailableMounters(); err != nil {
 		klog.Fatalf("cephfs: failed to load ceph mounters: %v", err)
 	}
 
-	if volumeMounter != "" {
-		if err := validateMounter(volumeMounter); err != nil {
+	if conf.VolumeMounter != "" {
+		if err := validateMounter(conf.VolumeMounter); err != nil {
 			klog.Fatalln(err)
 		} else {
-			DefaultVolumeMounter = volumeMounter
+			DefaultVolumeMounter = conf.VolumeMounter
 		}
 	} else {
 		// Pick the first available mounter as the default one.
@@ -120,8 +120,8 @@ func (fs *Driver) Run(driverName, nodeID, endpoint, volumeMounter, mountCacheDir
 	}
 
 	// Use passed-in instance ID, if provided, for omap suffix naming
-	if instanceID != "" {
-		CSIInstanceID = instanceID
+	if conf.InstanceID != "" {
+		CSIInstanceID = conf.InstanceID
 	}
 	// Get an instance of the volume journal
 	volJournal = util.NewCSIVolumeJournal()
@@ -133,8 +133,8 @@ func (fs *Driver) Run(driverName, nodeID, endpoint, volumeMounter, mountCacheDir
 	// metadata pool
 	volJournal.SetNamespace(radosNamespace)
 
-	initVolumeMountCache(driverName, mountCacheDir)
-	if mountCacheDir != "" {
+	initVolumeMountCache(conf.DriverName, conf.MountCacheDir)
+	if conf.MountCacheDir != "" {
 		if err := remountCachedVolumes(); err != nil {
 			klog.Warningf("failed to remount cached volumes: %v", err)
 			// ignore remount fail
@@ -142,7 +142,7 @@ func (fs *Driver) Run(driverName, nodeID, endpoint, volumeMounter, mountCacheDir
 	}
 
 	// Initialize default library driver
-	fs.cd = csicommon.NewCSIDriver(driverName, util.DriverVersion, nodeID)
+	fs.cd = csicommon.NewCSIDriver(conf.DriverName, util.DriverVersion, conf.NodeID)
 	if fs.cd == nil {
 		klog.Fatalln("failed to initialize CSI driver")
 	}
@@ -158,11 +158,11 @@ func (fs *Driver) Run(driverName, nodeID, endpoint, volumeMounter, mountCacheDir
 
 	// Create gRPC servers
 	fs.is = NewIdentityServer(fs.cd)
-	fs.ns = NewNodeServer(fs.cd, t)
+	fs.ns = NewNodeServer(fs.cd, conf.Vtype)
 	fs.cs = NewControllerServer(fs.cd, cachePersister)
 
 	server := csicommon.NewNonBlockingGRPCServer()
-	server.Start(endpoint, fs.is, fs.cs, fs.ns)
+	server.Start(conf.Endpoint, fs.is, fs.cs, fs.ns)
 	server.Wait()
 }

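With the new signature, callers build one Config instead of threading nine positional arguments through Run. A hedged usage sketch: the field values are placeholders, and cephfs.NewDriver and cachePersister are assumed from surrounding code rather than shown in this diff:

driver := cephfs.NewDriver()
driver.Run(&util.Config{
	Vtype:         "cephfs",
	DriverName:    "cephfs.csi.ceph.com",
	NodeID:        "worker-1",
	Endpoint:      "unix:///csi/csi.sock",
	VolumeMounter: "kernel",
	PluginPath:    "/var/lib/kubelet/plugins/",
}, cachePersister)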

@@ -24,6 +24,8 @@ import (
 	"strconv"
 	"time"
 
+	"github.com/ceph/ceph-csi/pkg/util"
+
 	connlib "github.com/kubernetes-csi/csi-lib-utils/connection"
 	"github.com/kubernetes-csi/csi-lib-utils/rpc"
 	"github.com/prometheus/client_golang/prometheus"
@@ -83,7 +85,7 @@ func recordLiveness(endpoint string, pollTime, timeout time.Duration) {
 	}
 }
 
-func Run(endpoint, livenessendpoint string, port int, pollTime, timeout time.Duration) {
+func Run(conf *util.Config) {
 	klog.Infof("Liveness Running")
 	ip := os.Getenv("POD_IP")
@@ -94,11 +96,11 @@ func Run(endpoint, livenessendpoint string, port int, pollTime, timeout time.Dur
 	}
 
 	// start liveness collection
-	go recordLiveness(endpoint, pollTime, timeout)
+	go recordLiveness(conf.Endpoint, conf.PollTime, conf.PoolTimeout)
 
 	// start up prometheus endpoint
-	addr := net.JoinHostPort(ip, strconv.Itoa(port))
-	http.Handle(livenessendpoint, promhttp.Handler())
+	addr := net.JoinHostPort(ip, strconv.Itoa(conf.LivenessPort))
+	http.Handle(conf.LivenessPath, promhttp.Handler())
 	err := http.ListenAndServe(addr, nil)
 	if err != nil {
 		klog.Fatalln(err)

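The liveness server now pulls its endpoint, poll interval, probe timeout, and metrics address from the same struct. Note the field really is spelled PoolTimeout in this commit, not PollTimeout. An equivalent call with placeholder values might look like:

liveness.Run(&util.Config{
	Endpoint:     "unix:///csi/csi.sock",
	LivenessPort: 8080,
	LivenessPath: "/metrics",
	PollTime:     60 * time.Second,
	PoolTimeout:  3 * time.Second,
})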

@@ -95,7 +95,7 @@ func NewNodeServer(d *csicommon.CSIDriver, containerized bool, t string) (*NodeS
 // Run starts a non-blocking gRPC controller, node, and identity server for
 // the rbd CSI driver, which can serve multiple parallel requests
-func (r *Driver) Run(driverName, nodeID, endpoint, instanceID string, containerized bool, cachePersister util.CachePersister, t string) {
+func (r *Driver) Run(conf *util.Config, cachePersister util.CachePersister) {
 	var err error
 
 	// Create ceph.conf for use with CLI commands
@@ -104,8 +104,8 @@ func (r *Driver) Run(driverName, nodeID, endpoint, instanceID string, containeri
 	}
 
 	// Use passed-in instance ID, if provided, for omap suffix naming
-	if instanceID != "" {
-		CSIInstanceID = instanceID
+	if conf.InstanceID != "" {
+		CSIInstanceID = conf.InstanceID
 	}
 
 	// Get an instance of the volume and snapshot journal keys
@@ -117,7 +117,7 @@ func (r *Driver) Run(driverName, nodeID, endpoint, instanceID string, containeri
 	snapJournal.SetCSIDirectorySuffix(CSIInstanceID)
 
 	// Initialize default library driver
-	r.cd = csicommon.NewCSIDriver(driverName, util.DriverVersion, nodeID)
+	r.cd = csicommon.NewCSIDriver(conf.DriverName, util.DriverVersion, conf.NodeID)
 	if r.cd == nil {
 		klog.Fatalln("Failed to initialize CSI Driver.")
 	}
@@ -137,7 +137,7 @@ func (r *Driver) Run(driverName, nodeID, endpoint, instanceID string, containeri
 
 	// Create GRPC servers
 	r.ids = NewIdentityServer(r.cd)
-	r.ns, err = NewNodeServer(r.cd, containerized, t)
+	r.ns, err = NewNodeServer(r.cd, conf.Containerized, conf.Vtype)
 	if err != nil {
 		klog.Fatalf("failed to start node server, err %v\n", err)
 	}
@@ -145,6 +145,6 @@ func (r *Driver) Run(driverName, nodeID, endpoint, instanceID string, containeri
 	r.cs = NewControllerServer(r.cd, cachePersister)
 
 	s := csicommon.NewNonBlockingGRPCServer()
-	s.Start(endpoint, r.ids, r.cs, r.ns)
+	s.Start(conf.Endpoint, r.ids, r.cs, r.ns)
 	s.Wait()
 }

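Because the commit also stops deriving the driver type from the binary name, a single cephcsi binary presumably selects the driver from conf.Vtype. A hypothetical dispatch sketch; the switch itself is not part of this diff, and only the [rbd|cephfs|liveness] values come from the Config comments:

switch conf.Vtype {
case "rbd":
	driver := rbd.NewDriver()
	driver.Run(&conf, cp)
case "cephfs":
	driver := cephfs.NewDriver()
	driver.Run(&conf, cp)
case "liveness":
	liveness.Run(&conf)
default:
	klog.Fatalf("unknown driver type %q", conf.Vtype)
}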

@@ -20,6 +20,7 @@ import (
 	"os"
 	"path"
 	"strings"
+	"time"
 
 	"github.com/pkg/errors"
 	"google.golang.org/grpc/codes"
@@ -50,6 +51,33 @@
 	DriverVersion string
 )
 
+// Config holds the parameters list which can be configured
+type Config struct {
+	// common flags
+	Vtype           string // driver type [rbd|cephfs|liveness]
+	Endpoint        string // CSI endpoint
+	DriverName      string // name of the driver
+	NodeID          string // node id
+	InstanceID      string // unique ID distinguishing this instance of Ceph CSI
+	MetadataStorage string // metadata persistence method [node|k8s_configmap]
+	PluginPath      string // location of cephcsi plugin
+	PidLimit        int    // PID limit to configure through cgroups
+
+	// rbd related flags
+	Containerized bool // whether run as containerized
+
+	// cephfs related flags
+	VolumeMounter string // default volume mounter (possible options are 'kernel', 'fuse')
+	MountCacheDir string // mount info cache save dir
+
+	// liveness related flags
+	LivenessPort int           // TCP port for liveness requests
+	LivenessPath string        // path of prometheus endpoint where metrics will be available
+	PollTime     time.Duration // time interval in seconds between each poll
+	PoolTimeout  time.Duration // probe timeout in seconds
+}
+
 func roundUpSize(volumeSizeBytes, allocationUnitBytes int64) int64 {
 	roundedUp := volumeSizeBytes / allocationUnitBytes
 	if volumeSizeBytes%allocationUnitBytes > 0 {
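
The trailing context lines show roundUpSize, a ceiling-division helper cut off at the hunk boundary. Assuming the truncated body increments roundedUp when a remainder exists and then returns it, it rounds byte counts up to whole allocation units:

roundUpSize(1, 4096)    // 1: a single byte still occupies one 4 KiB unit
roundUpSize(8192, 4096) // 2: exact multiples are not rounded up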