Mirror of https://github.com/ceph/ceph-csi.git
Correct static errors and source code comments.
Signed-off-by: Humble Chirammal <hchiramm@redhat.com>
parent 399f0b0d89
commit b1dfcb4d7e
@@ -21,7 +21,6 @@ import (
 
 	csicommon "github.com/ceph/ceph-csi/pkg/csi-common"
 	"github.com/ceph/ceph-csi/pkg/util"
-
 	"github.com/container-storage-interface/spec/lib/go/csi"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
@@ -311,7 +310,7 @@ func (cs *ControllerServer) ValidateVolumeCapabilities(
 	}, nil
 }
 
-// ExpandVolume expand CephFS Volumes on demand based on resizer request
+// ControllerExpandVolume expands CephFS Volumes on demand based on resizer request
 func (cs *ControllerServer) ControllerExpandVolume(ctx context.Context, req *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) {
 	if err := cs.validateExpandVolumeRequest(req); err != nil {
 		klog.Errorf(util.Log(ctx, "ControllerExpandVolumeRequest validation failed: %v"), err)
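For reference, a CSI ControllerExpandVolume handler generally follows the shape sketched below. This is a minimal illustration built on the CSI spec types already imported in the hunk above (csi, codes, status); it is not the ceph-csi implementation, and resizeVolume is a hypothetical placeholder for the driver's backend resize call.

	// resizeVolume is a hypothetical placeholder for the backend resize call.
	func resizeVolume(ctx context.Context, volumeID string, sizeBytes int64) error { return nil }

	// Sketch only: validate the request, grow the backing volume, report the new size.
	func (cs *ControllerServer) ControllerExpandVolume(ctx context.Context, req *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) {
		if req.GetVolumeId() == "" {
			return nil, status.Error(codes.InvalidArgument, "volume ID cannot be empty")
		}
		if req.GetCapacityRange() == nil {
			return nil, status.Error(codes.InvalidArgument, "capacity range cannot be empty")
		}
		requested := req.GetCapacityRange().GetRequiredBytes()

		// Grow the backing volume to the requested size (backend-specific).
		if err := resizeVolume(ctx, req.GetVolumeId(), requested); err != nil {
			return nil, status.Error(codes.Internal, err.Error())
		}

		return &csi.ControllerExpandVolumeResponse{
			CapacityBytes:         requested,
			NodeExpansionRequired: false,
		}, nil
	}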
@@ -21,7 +21,6 @@ import (
 	"time"
 
 	"github.com/ceph/ceph-csi/pkg/util"
-
 	connlib "github.com/kubernetes-csi/csi-lib-utils/connection"
 	"github.com/kubernetes-csi/csi-lib-utils/rpc"
 	"github.com/prometheus/client_golang/prometheus"
@@ -80,6 +79,7 @@ func recordLiveness(endpoint string, pollTime, timeout time.Duration) {
 	}
 }
 
+// Run starts liveness collection and prometheus endpoint
 func Run(conf *util.Config) {
 	klog.V(3).Infof("Liveness Running")
 
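The liveness pattern that Run and recordLiveness implement can be summarized as: poll the CSI driver periodically, record the result in a prometheus Gauge, and serve it on a metrics endpoint. The sketch below shows only that general pattern, not the ceph-csi code; it assumes the prometheus/promhttp and net/http packages in addition to the imports above, and probeDriver is a hypothetical stand-in for the Probe RPC that ceph-csi issues through csi-lib-utils.

	// Gauge reporting the last probe result: 1 on success, 0 on failure.
	var liveness = prometheus.NewGauge(prometheus.GaugeOpts{
		Namespace: "csi",
		Name:      "liveness",
		Help:      "1 if the last probe of the CSI driver succeeded, 0 otherwise.",
	})

	// probeDriver is a hypothetical stand-in for dialing the CSI socket and
	// calling the Probe RPC.
	func probeDriver(endpoint string) error { return nil }

	// Poll the driver on a ticker and record the outcome in the gauge.
	func recordLivenessSketch(endpoint string, pollTime time.Duration) {
		ticker := time.NewTicker(pollTime)
		defer ticker.Stop()
		for range ticker.C {
			if err := probeDriver(endpoint); err != nil {
				liveness.Set(0)
				continue
			}
			liveness.Set(1)
		}
	}

	// Register the gauge, start polling, and expose /metrics.
	func runSketch(metricsAddr, endpoint string, pollTime time.Duration) {
		prometheus.MustRegister(liveness)
		go recordLivenessSketch(endpoint, pollTime)
		http.Handle("/metrics", promhttp.Handler())
		_ = http.ListenAndServe(metricsAddr, nil)
	}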
@@ -32,6 +32,7 @@ type connEntry struct {
 	users int
 }
 
+// ConnPool is the struct which contains details of connection entries in the pool and gc controlled params.
 type ConnPool struct {
 	// interval to run the garbage collector
 	interval time.Duration
@@ -45,7 +46,7 @@ type ConnPool struct {
 	conns map[string]*connEntry
 }
 
-// Create a new ConnPool instance and start the garbage collector running
+// NewConnPool creates a new connection pool instance and start the garbage collector running
 // every @interval.
 func NewConnPool(interval, expiry time.Duration) *ConnPool {
 	cp := ConnPool{
@@ -76,7 +77,7 @@ func (cp *ConnPool) gc() {
 	cp.timer.Reset(cp.interval)
 }
 
-// Stop the garbage collector and destroy all connections in the pool.
+// Destroy stops the garbage collector and destroys all connections in the pool.
 func (cp *ConnPool) Destroy() {
 	cp.timer.Stop()
 	// wait until gc() has finished, in case it is running
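The gc() and Destroy() lines above show a timer-driven garbage collector: gc re-arms the timer after every pass and Destroy stops it. A generic sketch of that pattern (illustrative only, with hypothetical names; not the ceph-csi code) looks like this:

	// sketchPool illustrates the timer-driven GC pattern.
	type sketchPool struct {
		interval time.Duration
		timer    *time.Timer
	}

	func newSketchPool(interval time.Duration) *sketchPool {
		sp := &sketchPool{interval: interval}
		// time.AfterFunc arms the timer once; gc re-arms it on every run.
		sp.timer = time.AfterFunc(interval, sp.gc)
		return sp
	}

	func (sp *sketchPool) gc() {
		// ... destroy expired, unused entries here ...
		sp.timer.Reset(sp.interval)
	}

	func (sp *sketchPool) destroy() {
		sp.timer.Stop()
	}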
@@ -116,7 +117,7 @@ func (cp *ConnPool) getConn(unique string) *rados.Conn {
 	return nil
 }
 
-// Return a rados.Conn for the given arguments. Creates a new rados.Conn in
+// Get returns a rados.Conn for the given arguments. Creates a new rados.Conn in
 // case there is none in the pool. Use the returned unique string to reduce the
 // reference count with ConnPool.Put(unique).
 func (cp *ConnPool) Get(pool, monitors, keyfile string) (*rados.Conn, error) {
@@ -168,7 +169,7 @@ func (cp *ConnPool) Get(pool, monitors, keyfile string) (*rados.Conn, error) {
 	return conn, nil
 }
 
-// Reduce the reference count of the rados.Conn object that was returned with
+// ConnPool.Get().
+// Put reduces the reference count of the rados.Conn object that was returned with
 // ConnPool.Get().
 func (cp *ConnPool) Put(conn *rados.Conn) {
 	cp.lock.Lock()
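Taken together, Get and Put give callers a reference-counted rados connection. A hypothetical caller, using only the signatures shown above plus go-ceph's rados.Conn.GetFSID, might look like the sketch below (clusterFSID is an illustrative name, not a ceph-csi function):

	// Acquire a pooled connection, use it, and drop the reference when done.
	func clusterFSID(cp *util.ConnPool, pool, monitors, keyfile string) (string, error) {
		conn, err := cp.Get(pool, monitors, keyfile)
		if err != nil {
			return "", err
		}
		// Release the reference once done; the pool's garbage collector
		// destroys idle connections after they expire.
		defer cp.Put(conn)

		return conn.GetFSID()
	}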
@@ -46,7 +46,7 @@ const (
 )
 
 /*
-kmsKMS represents a Hashicorp Vault KMS configuration
+VaultKMS represents a Hashicorp Vault KMS configuration
 
 Example JSON structure in the KMS config is,
 {