diff --git a/pkg/cephfs/controllerserver.go b/pkg/cephfs/controllerserver.go
index 97d456b25..71c825a41 100644
--- a/pkg/cephfs/controllerserver.go
+++ b/pkg/cephfs/controllerserver.go
@@ -21,7 +21,6 @@ import (
 
 	csicommon "github.com/ceph/ceph-csi/pkg/csi-common"
 	"github.com/ceph/ceph-csi/pkg/util"
-
 	"github.com/container-storage-interface/spec/lib/go/csi"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
@@ -311,7 +310,7 @@ func (cs *ControllerServer) ValidateVolumeCapabilities(
 	}, nil
 }
 
-// ExpandVolume expand CephFS Volumes on demand based on resizer request
+// ControllerExpandVolume expands CephFS Volumes on demand based on resizer request
 func (cs *ControllerServer) ControllerExpandVolume(ctx context.Context, req *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) {
 	if err := cs.validateExpandVolumeRequest(req); err != nil {
 		klog.Errorf(util.Log(ctx, "ControllerExpandVolumeRequest validation failed: %v"), err)
diff --git a/pkg/liveness/liveness.go b/pkg/liveness/liveness.go
index 5e9055be5..324c118a3 100644
--- a/pkg/liveness/liveness.go
+++ b/pkg/liveness/liveness.go
@@ -21,7 +21,6 @@ import (
 	"time"
 
 	"github.com/ceph/ceph-csi/pkg/util"
-
 	connlib "github.com/kubernetes-csi/csi-lib-utils/connection"
 	"github.com/kubernetes-csi/csi-lib-utils/rpc"
 	"github.com/prometheus/client_golang/prometheus"
@@ -80,6 +79,7 @@ func recordLiveness(endpoint string, pollTime, timeout time.Duration) {
 	}
 }
 
+// Run starts liveness collection and prometheus endpoint
 func Run(conf *util.Config) {
 	klog.V(3).Infof("Liveness Running")
 
diff --git a/pkg/util/conn_pool.go b/pkg/util/conn_pool.go
index ba68c156b..9953d660e 100644
--- a/pkg/util/conn_pool.go
+++ b/pkg/util/conn_pool.go
@@ -32,6 +32,7 @@ type connEntry struct {
 	users int
 }
 
+// ConnPool is the struct which contains details of connection entries in the pool and gc controlled params.
 type ConnPool struct {
 	// interval to run the garbage collector
 	interval time.Duration
@@ -45,7 +46,7 @@ type ConnPool struct {
 	conns map[string]*connEntry
 }
 
-// Create a new ConnPool instance and start the garbage collector running
+// NewConnPool creates a new connection pool instance and start the garbage collector running
 // every @interval.
 func NewConnPool(interval, expiry time.Duration) *ConnPool {
 	cp := ConnPool{
@@ -76,7 +77,7 @@ func (cp *ConnPool) gc() {
 	cp.timer.Reset(cp.interval)
 }
 
-// Stop the garbage collector and destroy all connections in the pool.
+// Destroy stops the garbage collector and destroys all connections in the pool.
 func (cp *ConnPool) Destroy() {
 	cp.timer.Stop()
 	// wait until gc() has finished, in case it is running
@@ -116,7 +117,7 @@ func (cp *ConnPool) getConn(unique string) *rados.Conn {
 	return nil
 }
 
-// Return a rados.Conn for the given arguments. Creates a new rados.Conn in
+// Get returns a rados.Conn for the given arguments. Creates a new rados.Conn in
 // case there is none in the pool. Use the returned unique string to reduce the
 // reference count with ConnPool.Put(unique).
 func (cp *ConnPool) Get(pool, monitors, keyfile string) (*rados.Conn, error) {
@@ -168,7 +169,7 @@ func (cp *ConnPool) Get(pool, monitors, keyfile string) (*rados.Conn, error) {
 	return conn, nil
 }
 
-// Reduce the reference count of the rados.Conn object that was returned with
+// Put reduces the reference count of the rados.Conn object that was returned with
 // ConnPool.Get().
 func (cp *ConnPool) Put(conn *rados.Conn) {
 	cp.lock.Lock()
diff --git a/pkg/util/vault.go b/pkg/util/vault.go
index 11303a632..35b0c01d3 100644
--- a/pkg/util/vault.go
+++ b/pkg/util/vault.go
@@ -46,7 +46,7 @@ const (
 )
 
 /*
-kmsKMS represents a Hashicorp Vault KMS configuration
+VaultKMS represents a Hashicorp Vault KMS configuration
 
 Example JSON structure in the KMS config is,
 {