util/conn_pool: open a connection with requested user

Use the Credentials.ID in combination with the keyfile to connect to the
Ceph cluster. This makes it possible to use different users for
different tasks on the cluster.

Fixes: #904
Signed-off-by: Niels de Vos <ndevos@redhat.com>
Niels de Vos, 2020-04-08 15:15:50 +02:00 (committed by mergify[bot])
commit c3cf6be6a7 (parent 2cc59ca411)
3 changed files with 10 additions and 10 deletions
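
For context on the change below: the pool switches from rados.NewConn() to rados.NewConnWithUser(), so the connection is opened for the requested Ceph user instead of the library default. A minimal sketch of that flow with the go-ceph bindings, assuming the ParseCmdLineArgs()/Connect() steps that the existing pool code performs but which are not shown in this diff:

    package connpool // sketch only, not part of this commit

    import "github.com/ceph/go-ceph/rados"

    // connectAs opens a connection as a specific Ceph user (e.g. cr.ID),
    // pointing it at the given monitors and cephx keyfile.
    func connectAs(user, monitors, keyfile string) (*rados.Conn, error) {
        conn, err := rados.NewConnWithUser(user)
        if err != nil {
            return nil, err
        }
        if err := conn.ParseCmdLineArgs([]string{"-m", monitors, "--keyfile=" + keyfile}); err != nil {
            return nil, err
        }
        if err := conn.Connect(); err != nil {
            return nil, err
        }
        return conn, nil
    }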


@@ -165,7 +165,7 @@ func createImage(ctx context.Context, pOpts *rbdVolume, cr *util.Credentials) er
 func (rv *rbdVolume) getIoctx(cr *util.Credentials) (*rados.IOContext, error) {
     if rv.conn == nil {
-        conn, err := connPool.Get(rv.Pool, rv.Monitors, cr.KeyFile)
+        conn, err := connPool.Get(rv.Pool, rv.Monitors, cr.ID, cr.KeyFile)
         if err != nil {
             return nil, errors.Wrapf(err, "failed to get connection")
         }


@@ -95,14 +95,14 @@ func (cp *ConnPool) Destroy() {
     }
 }
 
-func (cp *ConnPool) generateUniqueKey(pool, monitors, keyfile string) (string, error) {
+func (cp *ConnPool) generateUniqueKey(pool, monitors, user, keyfile string) (string, error) {
     // the keyfile can be unique for operations, contents will be the same
     key, err := ioutil.ReadFile(keyfile) // nolint: gosec, #nosec
     if err != nil {
         return "", errors.Wrapf(err, "could not open keyfile %s", keyfile)
     }
 
-    return fmt.Sprintf("%s|%s|%s", pool, monitors, string(key)), nil
+    return fmt.Sprintf("%s|%s|%s|%s", pool, monitors, user, string(key)), nil
 }
 
 // getExisting returns the existing rados.Conn associated with the unique key.
@@ -120,8 +120,8 @@ func (cp *ConnPool) getConn(unique string) *rados.Conn {
 // Get returns a rados.Conn for the given arguments. Creates a new rados.Conn in
 // case there is none in the pool. Use the returned unique string to reduce the
 // reference count with ConnPool.Put(unique).
-func (cp *ConnPool) Get(pool, monitors, keyfile string) (*rados.Conn, error) {
-    unique, err := cp.generateUniqueKey(pool, monitors, keyfile)
+func (cp *ConnPool) Get(pool, monitors, user, keyfile string) (*rados.Conn, error) {
+    unique, err := cp.generateUniqueKey(pool, monitors, user, keyfile)
     if err != nil {
         return nil, errors.Wrapf(err, "failed to generate unique for connection")
     }
@@ -135,7 +135,7 @@ func (cp *ConnPool) Get(pool, monitors, keyfile string) (*rados.Conn, error) {
     // construct and connect a new rados.Conn
     args := []string{"-m", monitors, "--keyfile=" + keyfile}
-    conn, err = rados.NewConn()
+    conn, err = rados.NewConnWithUser(user)
     if err != nil {
         return nil, errors.Wrapf(err, "creating a new connection failed")
     }
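
Folding the user into generateUniqueKey() is what keeps connections from being shared across users: the cephx identity is fixed when the rados.Conn is created, so two Get() calls that differ only in the user must map to different pool entries. A quick illustration with hypothetical values:

    // Hypothetical values: same pool, monitors and keyfile, two different users.
    // With the user now part of the unique key, these resolve to two separate
    // pooled connections instead of sharing a single one.
    adminConn, errA := connPool.Get("rbd", "10.0.0.1:6789", "admin", "/etc/ceph/keyfile")
    nodeConn, errB := connPool.Get("rbd", "10.0.0.1:6789", "csi-rbd-node", "/etc/ceph/keyfile")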


@@ -34,8 +34,8 @@ const (
 // working Ceph cluster to connect to.
 //
 // This is mostly a copy of ConnPool.Get()
-func (cp *ConnPool) fakeGet(pool, monitors, keyfile string) (*rados.Conn, string, error) {
-    unique, err := cp.generateUniqueKey(pool, monitors, keyfile)
+func (cp *ConnPool) fakeGet(pool, monitors, user, keyfile string) (*rados.Conn, string, error) {
+    unique, err := cp.generateUniqueKey(pool, monitors, user, keyfile)
     if err != nil {
         return nil, "", err
     }
@@ -91,7 +91,7 @@ func TestConnPool(t *testing.T) {
     var unique string
     t.Run("fakeGet", func(t *testing.T) {
-        conn, unique, err = cp.fakeGet("pool", "monitors", keyfile)
+        conn, unique, err = cp.fakeGet("pool", "monitors", "user", keyfile)
         if err != nil {
             t.Errorf("failed to get connection: %v", err)
         }
@@ -115,7 +115,7 @@ func TestConnPool(t *testing.T) {
     t.Run("doubleFakeGet", func(t *testing.T) {
         // after a 2nd get, there should still be a single conn in cp.conns
-        _, _, err = cp.fakeGet("pool", "monitors", keyfile)
+        _, _, err = cp.fakeGet("pool", "monitors", "user", keyfile)
         if err != nil {
             t.Errorf("failed to get connection: %v", err)
         }
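
A possible follow-up check, sketched against the test helpers above and not part of this commit: assert that a different user with the same keyfile gets its own unique key (and therefore its own pooled connection).

    t.Run("fakeGetOtherUser", func(t *testing.T) {
        // hypothetical: a different user must yield a different unique key
        _, otherUnique, err := cp.fakeGet("pool", "monitors", "other-user", keyfile)
        if err != nil {
            t.Errorf("failed to get connection: %v", err)
        }
        if otherUnique == unique {
            t.Errorf("expected different unique keys for different users")
        }
    })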