Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 02:33:34 +00:00.
cleanup: address golangci 'funcorder' linter problems
The new 'funcorder' linter expects all public functions to be placed before private functions of a struct. Many private functions needed moving further down into their files. Some files had many issues reported. To reduce the churn in those files, they have been annotated with a `//nolint:funcorder` comment. Signed-off-by: Niels de Vos <ndevos@ibm.com>
This commit is contained in this repository; it was committed by mergify[bot].
parent
0907f39d95
commit
0a22e3a186
@ -59,23 +59,6 @@ func NewConnPool(interval, expiry time.Duration) *ConnPool {
|
||||
return &cp
|
||||
}
|
||||
|
||||
// loop through all cp.conns and destroy objects that have not been used for cp.expiry.
|
||||
func (cp *ConnPool) gc() {
|
||||
cp.lock.Lock()
|
||||
defer cp.lock.Unlock()
|
||||
|
||||
now := time.Now()
|
||||
for key, ce := range cp.conns {
|
||||
if ce.users == 0 && (now.Sub(ce.lastUsed)) > cp.expiry {
|
||||
ce.destroy()
|
||||
delete(cp.conns, key)
|
||||
}
|
||||
}
|
||||
|
||||
// schedule the next gc() run
|
||||
cp.timer.Reset(cp.interval)
|
||||
}
|
||||
|
||||
// Destroy stops the garbage collector and destroys all connections in the pool.
|
||||
func (cp *ConnPool) Destroy() {
|
||||
cp.timer.Stop()
|
||||
@ -94,30 +77,6 @@ func (cp *ConnPool) Destroy() {
|
||||
}
|
||||
}
|
||||
|
||||
func (cp *ConnPool) generateUniqueKey(monitors, user, keyfile string) (string, error) {
|
||||
// the keyfile can be unique for operations, contents will be the same
|
||||
key, err := os.ReadFile(keyfile) // #nosec:G304, file inclusion via variable.
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("could not open keyfile %s: %w", keyfile, err)
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s|%s|%s", monitors, user, string(key)), nil
|
||||
}
|
||||
|
||||
// getExisting returns the existing rados.Conn associated with the unique key.
|
||||
//
|
||||
// Requires: locked cp.lock because of ce.get().
|
||||
func (cp *ConnPool) getConn(unique string) *rados.Conn {
|
||||
ce, exists := cp.conns[unique]
|
||||
if exists {
|
||||
ce.get()
|
||||
|
||||
return ce.conn
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get returns a rados.Conn for the given arguments. Creates a new rados.Conn in
|
||||
// case there is none. Use the returned rados.Conn to reduce the reference
|
||||
// count with ConnPool.Put(unique).
|
||||
@ -206,6 +165,47 @@ func (cp *ConnPool) Put(conn *rados.Conn) {
|
||||
}
|
||||
}
|
||||
|
||||
// loop through all cp.conns and destroy objects that have not been used for cp.expiry.
|
||||
func (cp *ConnPool) gc() {
|
||||
cp.lock.Lock()
|
||||
defer cp.lock.Unlock()
|
||||
|
||||
now := time.Now()
|
||||
for key, ce := range cp.conns {
|
||||
if ce.users == 0 && (now.Sub(ce.lastUsed)) > cp.expiry {
|
||||
ce.destroy()
|
||||
delete(cp.conns, key)
|
||||
}
|
||||
}
|
||||
|
||||
// schedule the next gc() run
|
||||
cp.timer.Reset(cp.interval)
|
||||
}
|
||||
|
||||
func (cp *ConnPool) generateUniqueKey(monitors, user, keyfile string) (string, error) {
|
||||
// the keyfile can be unique for operations, contents will be the same
|
||||
key, err := os.ReadFile(keyfile) // #nosec:G304, file inclusion via variable.
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("could not open keyfile %s: %w", keyfile, err)
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s|%s|%s", monitors, user, string(key)), nil
|
||||
}
|
||||
|
||||
// getExisting returns the existing rados.Conn associated with the unique key.
|
||||
//
|
||||
// Requires: locked cp.lock because of ce.get().
|
||||
func (cp *ConnPool) getConn(unique string) *rados.Conn {
|
||||
ce, exists := cp.conns[unique]
|
||||
if exists {
|
||||
ce.get()
|
||||
|
||||
return ce.conn
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add a reference to the connEntry.
|
||||
// /!\ Only call this while holding the ConnPool.lock.
|
||||
func (ce *connEntry) get() {
|
||||
|
Reference in New Issue
Block a user