Fix returning success from DeleteSnapshot for stale requests

Also reduces code duplication when fetching the pool list from Ceph.

DeleteSnapshot, like DeleteVolume, should return success when it
detects that the snapshot keys are missing from the RADOS OMaps that
store the snapshot-UUID-to-request-name mapping.

This check was missing in the code, and is now added.

Signed-off-by: ShyamsundarR <srangana@redhat.com>
ShyamsundarR 2019-06-27 21:10:32 -04:00 committed by mergify[bot]
parent 1e8fa38879
commit bc39c523b7
2 changed files with 23 additions and 24 deletions
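For context, here is a minimal standalone sketch of the idempotent-delete pattern the commit message describes: when the OMap lookup reports that the snapshot-UUID-to-request-name mapping is already gone, the RPC returns success instead of an error, and a missing snapshot image still proceeds to OMap cleanup. The error type names mirror `util.ErrKeyNotFound` and `ErrSnapNotFound` from the diff, but the types and `deleteSnapshot` helper below are local stand-ins for illustration, not the repository code.

```go
package main

import (
	"errors"
	"fmt"
)

// Stand-ins for the error types referenced in the diff (hypothetical here).
type ErrKeyNotFound struct{ key string }

func (e ErrKeyNotFound) Error() string { return "key not found: " + e.key }

type ErrSnapNotFound struct{ id string }

func (e ErrSnapNotFound) Error() string { return "snapshot not found: " + e.id }

// deleteSnapshot sketches the intended control flow: a missing OMap key means a
// previous delete already cleaned up, so the request succeeds; a missing snap
// image falls through to OMap garbage collection; anything else is a hard error.
func deleteSnapshot(lookupErr error) (string, error) {
	if lookupErr != nil {
		if _, ok := lookupErr.(ErrKeyNotFound); ok {
			return "success (already deleted)", nil
		}
		if _, ok := lookupErr.(ErrSnapNotFound); !ok {
			return "", fmt.Errorf("internal error: %w", lookupErr)
		}
		// ErrSnapNotFound: fall through and remove the stale OMap entries.
	}
	return "success", nil
}

func main() {
	cases := []error{nil, ErrKeyNotFound{"csi.snap.uuid"}, ErrSnapNotFound{"snap-1"}, errors.New("mon down")}
	for _, err := range cases {
		out, e := deleteSnapshot(err)
		fmt.Println(out, e)
	}
}
```

Treating already-cleaned-up state as success keeps DeleteSnapshot idempotent under the retries that CSI sidecars issue for failed requests.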

@@ -511,6 +511,13 @@ func (cs *ControllerServer) DeleteSnapshot(ctx context.Context, req *csi.DeleteS
     rbdSnap := &rbdSnapshot{}
     if err = genSnapFromSnapID(rbdSnap, snapshotID, cr); err != nil {
+        // if error is ErrKeyNotFound, then a previous attempt at deletion was complete
+        // or partially complete (snap and snapOMap are garbage collected already), hence return
+        // success as deletion is complete
+        if _, ok := err.(util.ErrKeyNotFound); ok {
+            return &csi.DeleteSnapshotResponse{}, nil
+        }
+
         // Consider missing snap as already deleted, and proceed to remove the omap values
         if _, ok := err.(ErrSnapNotFound); !ok {
             return nil, status.Error(codes.Internal, err.Error())

@@ -53,9 +53,8 @@ type cephStoragePoolSummary struct {
     Number int64 `json:"poolnum"`
 }
 
-// GetPoolID searches a list of pools in a cluster and returns the ID of the pool that matches
-// the passed in poolName parameter
-func GetPoolID(monitors string, cr *Credentials, poolName string) (int64, error) {
+// getPools fetches a list of pools from a cluster
+func getPools(monitors string, cr *Credentials) ([]cephStoragePoolSummary, error) {
     // ceph <options> -f json osd lspools
     // JSON out: [{"poolnum":<int64>,"poolname":<string>}]
@@ -69,14 +68,25 @@ func GetPoolID(monitors string, cr *Credentials, poolName string) (int64, error)
         "osd", "lspools")
     if err != nil {
         klog.Errorf("failed getting pool list from cluster (%s)", err)
-        return 0, err
+        return nil, err
     }
 
     var pools []cephStoragePoolSummary
     err = json.Unmarshal(stdout, &pools)
     if err != nil {
         klog.Errorf("failed to parse JSON output of pool list from cluster (%s)", err)
-        return 0, fmt.Errorf("unmarshal failed: %+v. raw buffer response: %s", err, string(stdout))
+        return nil, fmt.Errorf("unmarshal of pool list failed: %+v. raw buffer response: %s", err, string(stdout))
     }
+
+    return pools, nil
+}
+
+// GetPoolID searches a list of pools in a cluster and returns the ID of the pool that matches
+// the passed in poolName parameter
+func GetPoolID(monitors string, cr *Credentials, poolName string) (int64, error) {
+    pools, err := getPools(monitors, cr)
+    if err != nil {
+        return 0, err
+    }
 
     for _, p := range pools {
@@ -91,29 +101,11 @@ func GetPoolID(monitors string, cr *Credentials, poolName string) (int64, error)
 // GetPoolName lists all pools in a ceph cluster, and matches the pool whose pool ID is equal to
 // the requested poolID parameter
 func GetPoolName(monitors string, cr *Credentials, poolID int64) (string, error) {
-    // ceph <options> -f json osd lspools
-    // [{"poolnum":1,"poolname":"replicapool"}]
-    stdout, _, err := ExecCommand(
-        "ceph",
-        "-m", monitors,
-        "--id", cr.ID,
-        "--key="+cr.Key,
-        "-c", CephConfigPath,
-        "-f", "json",
-        "osd", "lspools")
+    pools, err := getPools(monitors, cr)
     if err != nil {
-        klog.Errorf("failed getting pool list from cluster (%s)", err)
         return "", err
     }
-
-    var pools []cephStoragePoolSummary
-    err = json.Unmarshal(stdout, &pools)
-    if err != nil {
-        klog.Errorf("failed to parse JSON output of pool list from cluster (%s)", err)
-        return "", fmt.Errorf("unmarshal failed: %+v. raw buffer response: %s", err, string(stdout))
-    }
+
     for _, p := range pools {
         if poolID == p.Number {
             return p.Name, nil
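
As a usage illustration of the refactor above, the sketch below decodes the same `[{"poolnum":<int64>,"poolname":<string>}]` JSON that `ceph -f json osd lspools` emits and performs the two lookups that GetPoolID and GetPoolName now share through getPools. It is a standalone example with canned JSON, not the repository code; `poolSummary` and `parsePools` are local stand-ins mirroring `cephStoragePoolSummary` and `getPools` from the diff.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// poolSummary mirrors cephStoragePoolSummary from the diff.
type poolSummary struct {
	Name   string `json:"poolname"`
	Number int64  `json:"poolnum"`
}

// parsePools stands in for getPools: it decodes the JSON that
// `ceph -f json osd lspools` prints, here supplied as a canned byte slice.
func parsePools(lspoolsJSON []byte) ([]poolSummary, error) {
	var pools []poolSummary
	if err := json.Unmarshal(lspoolsJSON, &pools); err != nil {
		return nil, fmt.Errorf("unmarshal of pool list failed: %w, raw: %s", err, lspoolsJSON)
	}
	return pools, nil
}

func main() {
	raw := []byte(`[{"poolnum":1,"poolname":"replicapool"},{"poolnum":2,"poolname":"rbd"}]`)

	pools, err := parsePools(raw)
	if err != nil {
		panic(err)
	}

	// GetPoolID-style lookup: pool name -> pool ID.
	for _, p := range pools {
		if p.Name == "replicapool" {
			fmt.Println("pool ID:", p.Number)
		}
	}

	// GetPoolName-style lookup: pool ID -> pool name.
	for _, p := range pools {
		if p.Number == 2 {
			fmt.Println("pool name:", p.Name)
		}
	}
}
```

Keeping the command execution and JSON parsing in one helper means both lookups stay a simple loop over the decoded slice, which is the duplication the commit message refers to removing.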