/*
Copyright 2018 The Ceph-CSI Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package rbddriver

import (
	"errors"
	"fmt"
	"os"

	casrbd "github.com/ceph/ceph-csi/internal/csi-addons/rbd"
	csiaddons "github.com/ceph/ceph-csi/internal/csi-addons/server"
	csicommon "github.com/ceph/ceph-csi/internal/csi-common"
	"github.com/ceph/ceph-csi/internal/rbd"
	"github.com/ceph/ceph-csi/internal/rbd/features"
	"github.com/ceph/ceph-csi/internal/util"
	"github.com/ceph/ceph-csi/internal/util/k8s"
	"github.com/ceph/ceph-csi/internal/util/log"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

// Driver contains the default identity, node and controller servers.
type Driver struct {
	cd  *csicommon.CSIDriver
	ids *rbd.IdentityServer
	ns  *rbd.NodeServer
	cs  *rbd.ControllerServer

	// cas is the CSIAddonsServer where CSI-Addons services are handled.
	cas *csiaddons.CSIAddonsServer
}

// NewDriver returns a new rbd driver.
func NewDriver() *Driver {
	return &Driver{}
}
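
// A minimal usage sketch (an assumption: this mirrors how the rbdplugin
// command wires the driver up; only NewDriver, Run and the util.Config
// fields referenced in this file are taken as given, the concrete values
// are hypothetical):
//
//	driver := rbddriver.NewDriver()
//	driver.Run(&util.Config{
//		DriverName: "rbd.csi.ceph.com",
//		NodeID:     "worker-1",
//		Endpoint:   "unix:///csi/csi.sock",
//		Vtype:      "rbd",
//	})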

// NewIdentityServer initializes an identity server for the rbd CSI driver.
func NewIdentityServer(d *csicommon.CSIDriver) *rbd.IdentityServer {
	return &rbd.IdentityServer{
		DefaultIdentityServer: csicommon.NewDefaultIdentityServer(d),
	}
}

// NewControllerServer initializes a controller server for the rbd CSI driver.
func NewControllerServer(d *csicommon.CSIDriver) *rbd.ControllerServer {
	return &rbd.ControllerServer{
		DefaultControllerServer: csicommon.NewDefaultControllerServer(d),
		VolumeLocks:             util.NewVolumeLocks(),
		SnapshotLocks:           util.NewVolumeLocks(),
		VolumeGroupLocks:        util.NewVolumeLocks(),
		OperationLocks:          util.NewOperationLock(),
	}
}
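
// The controller keeps separate per-name lock sets for volumes, snapshots
// and volume groups, so unrelated objects never contend for the same lock.
// OperationLocks is assumed (per the upstream util godoc) to additionally
// serialize conflicting long-running operations, such as clone, expand and
// snapshot delete, on the same image.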

// NewNodeServer initializes a node server for the rbd CSI driver.
func NewNodeServer(
	d *csicommon.CSIDriver,
	t string,
	nodeLabels, topology, crushLocationMap map[string]string,
) *rbd.NodeServer {
	cliReadAffinityMapOptions := util.ConstructReadAffinityMapOption(crushLocationMap)
	ns := rbd.NodeServer{
		DefaultNodeServer: csicommon.NewDefaultNodeServer(d, t, cliReadAffinityMapOptions, topology, nodeLabels),
		VolumeLocks:       util.NewVolumeLocks(),
	}

	return &ns
}

// Run starts a non-blocking gRPC server with the controller, node and
// identity services for the rbd CSI driver, which can serve multiple
// parallel requests.
//
// This also configures and starts a new CSI-Addons service, by calling
// setupCSIAddonsServer().
func (r *Driver) Run(conf *util.Config) {
	var (
		err                                    error
		nodeLabels, topology, crushLocationMap map[string]string
	)
	// update the clone soft and hard limits
	rbd.SetGlobalInt("rbdHardMaxCloneDepth", conf.RbdHardMaxCloneDepth)
	rbd.SetGlobalInt("rbdSoftMaxCloneDepth", conf.RbdSoftMaxCloneDepth)
	rbd.SetGlobalBool("skipForceFlatten", conf.SkipForceFlatten)
	rbd.SetGlobalInt("maxSnapshotsOnImage", conf.MaxSnapshotsOnImage)
	rbd.SetGlobalInt("minSnapshotsOnImageToStartFlatten", conf.MinSnapshotsOnImage)

	// Create instances of the volume and snapshot journal
	rbd.InitJournals(conf.InstanceID)

	// Initialize the default library driver
	r.cd = csicommon.NewCSIDriver(conf.DriverName, util.DriverVersion, conf.NodeID, conf.InstanceID)
	if r.cd == nil {
		log.FatalLogMsg("Failed to initialize CSI Driver.")
	}

	if conf.IsControllerServer || !conf.IsNodeServer {
		r.cd.AddControllerServiceCapabilities([]csi.ControllerServiceCapability_RPC_Type{
			csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME,
			csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT,
			csi.ControllerServiceCapability_RPC_CLONE_VOLUME,
			csi.ControllerServiceCapability_RPC_EXPAND_VOLUME,
		})
		// We only support the multi-writer option when using block, but it is
		// a supported capability for the plugin in general.
		// In addition, we want to add the remaining modes like
		// MULTI_NODE_READER_ONLY and MULTI_NODE_SINGLE_WRITER, but need to do
		// some verification of RO modes first; those will be worked on as
		// follow-up features.
		r.cd.AddVolumeCapabilityAccessModes(
			[]csi.VolumeCapability_AccessMode_Mode{
				csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
				csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
				csi.VolumeCapability_AccessMode_SINGLE_NODE_SINGLE_WRITER,
				csi.VolumeCapability_AccessMode_SINGLE_NODE_MULTI_WRITER,
			})

		// GroupSnapGetInfo is used within the VolumeGroupSnapshot implementation
		vgsSupported, vgsErr := features.SupportsGroupSnapGetInfo()
		if vgsSupported {
			r.cd.AddGroupControllerServiceCapabilities([]csi.GroupControllerServiceCapability_RPC_Type{
				csi.GroupControllerServiceCapability_RPC_CREATE_DELETE_GET_VOLUME_GROUP_SNAPSHOT,
			})
		} else {
			log.DefaultLog("not enabling VolumeGroupSnapshot service capability")
		}
		if vgsErr != nil {
			log.ErrorLogMsg("failed detecting VolumeGroupSnapshot support: %v", vgsErr)
		}
	}

	if k8s.RunsOnKubernetes() && conf.IsNodeServer {
		nodeLabels, err = k8s.GetNodeLabels(conf.NodeID)
		if err != nil {
			log.FatalLogMsg(err.Error())
		}
	}

	if conf.EnableReadAffinity {
		crushLocationMap = util.GetCrushLocationMap(conf.CrushLocationLabels, nodeLabels)
	}
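
	// crushLocationMap drives read affinity: the upstream docs describe it
	// being translated into krbd/librbd map options (read_from_replica=localize
	// with a matching crush_location), so reads are served by the nearest
	// OSD replica.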

	// Create GRPC servers
	r.ids = NewIdentityServer(r.cd)

	if conf.IsNodeServer {
		topology, err = util.GetTopologyFromDomainLabels(conf.DomainLabels, conf.NodeID, conf.DriverName)
		if err != nil {
			log.FatalLogMsg(err.Error())
		}
		r.ns = NewNodeServer(r.cd, conf.Vtype, nodeLabels, topology, crushLocationMap)
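
		// Query the rbd features supported by the kernel client; the
		// sysfs attribute may be absent on older kernels, hence
		// os.ErrNotExist is tolerated below.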
		var attr string
		attr, err = rbd.GetKrbdSupportedFeatures()
		if err != nil && !errors.Is(err, os.ErrNotExist) {
			log.FatalLogMsg(err.Error())
		}
		var krbdFeatures uint
		krbdFeatures, err = rbd.HexStringToInteger(attr)
		if err != nil {
			log.FatalLogMsg(err.Error())
		}
		rbd.SetGlobalInt("krbdFeatures", krbdFeatures)

		rbd.SetRbdNbdToolFeatures()
	}

	if conf.IsControllerServer {
		r.cs = NewControllerServer(r.cd)
		r.cs.ClusterName = conf.ClusterName
		r.cs.SetMetadata = conf.SetMetadata
	}

	// configure the CSI-Addons server and components
	err = r.setupCSIAddonsServer(conf)
	if err != nil {
		log.FatalLogMsg(err.Error())
	}

	s := csicommon.NewNonBlockingGRPCServer()
	srv := csicommon.Servers{
		IS: r.ids,
		CS: r.cs,
		NS: r.ns,
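		// the ControllerServer also implements the CSI group controller
		// service, so it backs GS as well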
		GS: r.cs,
	}
	s.Start(conf.Endpoint, srv, csicommon.MiddlewareServerOptionConfig{
		LogSlowOpInterval: conf.LogSlowOpInterval,
	})

	r.startProfiling(conf)
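
	// The volume healer runs once at nodeplugin startup: rbd-nbd mounts
	// depend on a per-volume userspace daemon that dies with the plugin
	// pod, so the healer walks this node's VolumeAttachments and re-issues
	// NodeStageVolume for attached nbd-backed volumes to bring the daemons
	// back and avoid IO errors after a restart.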
	if conf.IsNodeServer {
		go func() {
			// TODO: move the healer to csi-addons
			err := rbd.RunVolumeHealer(r.ns, conf)
			if err != nil {
				log.ErrorLogMsg("healer had failures, err %v\n", err)
			}
		}()
	}
	s.Wait()
}

// setupCSIAddonsServer creates a new CSI-Addons server on the given (URL)
// endpoint. The supported CSI-Addons operations get registered as their own
// services.
func (r *Driver) setupCSIAddonsServer(conf *util.Config) error {
	var err error

	r.cas, err = csiaddons.NewCSIAddonsServer(conf.CSIAddonsEndpoint)
	if err != nil {
		return fmt.Errorf("failed to create CSI-Addons server: %w", err)
	}

	// register services
	is := casrbd.NewIdentityServer(conf)
	r.cas.RegisterService(is)

	if conf.IsControllerServer {
		rs := casrbd.NewReclaimSpaceControllerServer(r.cs.VolumeLocks)
		r.cas.RegisterService(rs)

		fcs := casrbd.NewFenceControllerServer()
		r.cas.RegisterService(fcs)

		rcs := casrbd.NewReplicationServer(conf.InstanceID, NewControllerServer(r.cd))
		r.cas.RegisterService(rcs)

		vgcs := casrbd.NewVolumeGroupServer(conf.InstanceID)
		r.cas.RegisterService(vgcs)
	}

	if conf.IsNodeServer {
		fcs := casrbd.NewFenceControllerServer()
		r.cas.RegisterService(fcs)

		rs := casrbd.NewReclaimSpaceNodeServer(r.ns.VolumeLocks)
		r.cas.RegisterService(rs)

		ekr := casrbd.NewEncryptionKeyRotationServer(r.ns.VolumeLocks)
		r.cas.RegisterService(ekr)
	}

	// start the server; this does not block, it runs in a new go-routine
	err = r.cas.Start(csicommon.MiddlewareServerOptionConfig{
		LogSlowOpInterval: conf.LogSlowOpInterval,
	})
	if err != nil {
		return fmt.Errorf("failed to start CSI-Addons server: %w", err)
	}

	return nil
}

// startProfiling checks which profiling options are enabled in the config and
// starts the required profiling services.
func (r *Driver) startProfiling(conf *util.Config) {
	if conf.EnableProfiling {
		go util.StartMetricsServer(conf)
		log.DebugLogMsg("Registering profiling handler")
		go util.EnableProfiling()
	}
}