cephfs: connect snapshot to Ceph cluster in newSnapshotOptionsFromID()

Without a connection, follow-up operations on the volumeOptions object
will cause a panic. This should fix a regression in CephFS testing.

Signed-off-by: Niels de Vos <ndevos@redhat.com>
Authored by Niels de Vos on 2020-10-19 09:08:57 +02:00; committed by mergify[bot]
parent a128aa430b
commit 0f9087d05e
2 changed files with 14 additions and 0 deletions
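For context, the fix relies on a common Go cleanup idiom: connect first, then defer a closure that calls Destroy() only when a later step has set the error, so the error path never leaks the connection. The following is a minimal, self-contained sketch of that pattern; volumeOptions, Connect, Destroy, and getFsName are pared-down stand-ins with simplified signatures (no ctx, cr, or extra return values), and newSnapshotOptions and its failure mode are invented for illustration, not the real ceph-csi code.

package main

import (
	"errors"
	"fmt"
)

// volumeOptions is a simplified stand-in for the cephfs volumeOptions
// struct; only the connection state matters for this sketch.
type volumeOptions struct {
	connected bool
}

// Connect mimics volumeOptions.Connect(cr): it establishes the cluster
// connection that later operations depend on.
func (vo *volumeOptions) Connect() error {
	vo.connected = true
	return nil
}

// Destroy mimics volumeOptions.Destroy(): it releases the connection.
func (vo *volumeOptions) Destroy() {
	vo.connected = false
}

// getFsName stands in for a follow-up operation that needs the
// connection; in the real code, reaching such a call without a prior
// Connect() is what caused the panic this commit fixes.
func (vo *volumeOptions) getFsName() (string, error) {
	if !vo.connected {
		return "", errors.New("not connected to cluster")
	}
	return "myfs", nil
}

// newSnapshotOptions sketches the shape of newSnapshotOptionsFromID()
// after the fix: connect, then defer a conditional Destroy() keyed on
// err, so that any later failure releases the allocated resources.
func newSnapshotOptions() (vo *volumeOptions, err error) {
	vo = &volumeOptions{}
	if err = vo.Connect(); err != nil {
		return nil, err
	}
	// The closure reads err when the function returns, so a failure in
	// any later step triggers Destroy() on the way out.
	defer func() {
		if err != nil {
			vo.Destroy()
		}
	}()

	if _, err = vo.getFsName(); err != nil {
		return vo, err
	}
	return vo, nil
}

func main() {
	vo, err := newSnapshotOptions()
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	// On success the caller owns the connection and releases it, just
	// as DeleteSnapshot() now does with "defer volOpt.Destroy()".
	defer vo.Destroy()
	fmt.Println("connected:", vo.connected)
}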


@@ -691,6 +691,7 @@ func (cs *ControllerServer) DeleteSnapshot(ctx context.Context, req *csi.DeleteS
 		}
 		return nil, status.Error(codes.Internal, err.Error())
 	}
+	defer volOpt.Destroy()
 
 	// safeguard against parallel create or delete requests against the same
 	// name


@@ -478,6 +478,19 @@ func newSnapshotOptionsFromID(ctx context.Context, snapID string, cr *util.Crede
 		return &volOptions, nil, &sid, fmt.Errorf("failed to fetch subvolumegroup list using clusterID (%s): %w", vi.ClusterID, err)
 	}
+	err = volOptions.Connect(cr)
+	if err != nil {
+		return &volOptions, nil, &sid, err
+	}
+	// in case of an error, volOptions is returned, but callers may not
+	// expect to need to call Destroy() on it. So, make sure to release any
+	// resources that may have been allocated
+	defer func() {
+		if err != nil {
+			volOptions.Destroy()
+		}
+	}()
+
	volOptions.FsName, err = volOptions.getFsName(ctx)
	if err != nil {
		return &volOptions, nil, &sid, err
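
A note on why the second hunk works: the deferred closure captures the local err variable and runs when newSnapshotOptionsFromID() returns, so a failure in getFsName() (or any later assignment to err) triggers volOptions.Destroy() before the caller sees the error. On the success path, ownership of the connection passes to the caller, which is why the first hunk pairs the lookup in DeleteSnapshot() with "defer volOpt.Destroy()".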