/*
Copyright 2018 The Ceph-CSI Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rbd

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
	"time"

	"github.com/ceph/ceph-csi/pkg/util"
	"github.com/golang/protobuf/ptypes"
	"github.com/golang/protobuf/ptypes/timestamp"
	"github.com/pborman/uuid"
	"github.com/pkg/errors"
	"golang.org/x/net/context"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/klog"
)

const (
	imageWatcherStr = "watcher="
	rbdImageFormat2 = "2"
	// The following three values are used for 30 seconds timeout
	// while waiting for RBD Watcher to expire.
	rbdImageWatcherInitDelay = 1 * time.Second
	rbdImageWatcherFactor    = 1.4
	rbdImageWatcherSteps     = 10
	rbdDefaultMounter        = "rbd"

	// Output strings returned during invocation of "ceph rbd task add remove <imagespec>" when
	// the command is not supported by the ceph manager. Used to check errors and recover when
	// the command is unsupported.
	rbdTaskRemoveCmdInvalidString1 = "no valid command found"
	rbdTaskRemoveCmdInvalidString2 = "Error EINVAL: invalid command"
)
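
// The three rbdImageWatcher values above drive an exponential backoff while
// waiting for an RBD image watcher to expire, within roughly a 30 second
// budget. A minimal sketch of such a wait loop, assuming the rbdStatus helper
// defined later in this file (the real retry logic lives in the attach path
// and may differ):
//
//	delay := rbdImageWatcherInitDelay
//	for i := 0; i < rbdImageWatcherSteps; i++ {
//		watched, _, err := rbdStatus(ctx, pOpts, cr)
//		if err != nil || !watched {
//			break // image is free, or status could not be determined
//		}
//		time.Sleep(delay)
//		delay = time.Duration(float64(delay) * rbdImageWatcherFactor)
//	}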

// rbdVolume represents a CSI volume and its RBD image specifics
type rbdVolume struct {
	// RbdImageName is the name of the RBD image backing this rbdVolume. This does not have a
	// JSON tag as it is not stashed in JSON encoded config maps in v1.0.0
	// VolID is the volume ID that is exchanged with CSI drivers, identifying this rbdVol
	// RequestName is the CSI generated volume name for the rbdVolume. This does not have a
	// JSON tag as it is not stashed in JSON encoded config maps in v1.0.0
	// VolName and MonValueFromSecret are retained from older plugin versions (<= 1.0.0)
	// for backward compatibility reasons
	RbdImageName       string
	VolID              string `json:"volID"`
	Monitors           string `json:"monitors"`
	Pool               string `json:"pool"`
	ImageFormat        string `json:"imageFormat"`
	ImageFeatures      string `json:"imageFeatures"`
	VolSize            int64  `json:"volSize"`
	AdminID            string `json:"adminId"`
	UserID             string `json:"userId"`
	Mounter            string `json:"mounter"`
	DisableInUseChecks bool   `json:"disableInUseChecks"`
	ClusterID          string `json:"clusterId"`
	RequestName        string
	VolName            string `json:"volName"`
	MonValueFromSecret string `json:"monValueFromSecret"`
}

// rbdSnapshot represents a CSI snapshot and its RBD snapshot specifics
type rbdSnapshot struct {
	// SourceVolumeID is the volume ID of RbdImageName, that is exchanged with CSI drivers
	// RbdImageName is the name of the RBD image, that is this rbdSnapshot's source image
	// RbdSnapName is the name of the RBD snapshot backing this rbdSnapshot
	// SnapID is the snapshot ID that is exchanged with CSI drivers, identifying this rbdSnapshot
	// RequestName is the CSI generated snapshot name for the rbdSnapshot
	SourceVolumeID string
	RbdImageName   string
	RbdSnapName    string
	SnapID         string
	Monitors       string
	Pool           string
	CreatedAt      *timestamp.Timestamp
	SizeBytes      int64
	ClusterID      string
	RequestName    string
}

var (
	// serializes operations based on "volume name" as key
	volumeNameLocker = util.NewIDLocker()
	// serializes operations based on "snapshot name" as key
	snapshotNameLocker = util.NewIDLocker()
	// serializes delete operations on legacy volumes
	legacyVolumeIDLocker = util.NewIDLocker()
	// serializes operations based on "mount staging path" as key
	nodeVolumeIDLocker = util.NewIDLocker()
	// serializes operations based on "mount target path" as key
	targetPathLocker = util.NewIDLocker()

	supportedFeatures = sets.NewString("layering")
)
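
// A usage sketch for the per-identifier lockers above (illustrative, assuming
// the util.IDLocker Lock/Unlock API): callers serialize on a string key, so
// requests for different volume names never contend on the same lock, unlike
// the earlier CPU-count based hash buckets.
//
//	idLk := volumeNameLocker.Lock(req.GetName())
//	defer volumeNameLocker.Unlock(idLk, req.GetName())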

// createImage creates a new ceph image with provision and volume options.
func createImage(ctx context.Context, pOpts *rbdVolume, volSz int64, cr *util.Credentials) error {
	var output []byte

	image := pOpts.RbdImageName
	volSzMiB := fmt.Sprintf("%dM", volSz)

	if pOpts.ImageFormat == rbdImageFormat2 {
		klog.V(4).Infof(util.Log(ctx, "rbd: create %s size %s format %s (features: %s) using mon %s, pool %s"),
			image, volSzMiB, pOpts.ImageFormat, pOpts.ImageFeatures, pOpts.Monitors, pOpts.Pool)
	} else {
		klog.V(4).Infof(util.Log(ctx, "rbd: create %s size %s format %s using mon %s, pool %s"), image, volSzMiB, pOpts.ImageFormat, pOpts.Monitors, pOpts.Pool)
	}
	args := []string{"create", image, "--size", volSzMiB, "--pool", pOpts.Pool, "--id", cr.ID, "-m", pOpts.Monitors, "--keyfile=" + cr.KeyFile, "--image-format", pOpts.ImageFormat}
	if pOpts.ImageFormat == rbdImageFormat2 {
		args = append(args, "--image-feature", pOpts.ImageFeatures)
	}
	output, err := execCommand("rbd", args)

	if err != nil {
		return errors.Wrapf(err, "failed to create rbd image, command output: %s", string(output))
	}

	return nil
}
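
// For reference, the CLI invocation assembled by createImage above is of the
// form (values illustrative):
//
//	rbd create <image> --size <n>M --pool <pool> --id <user> -m <mons> \
//		--keyfile=<path> --image-format 2 --image-feature layering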

// rbdStatus checks if there is a watcher on the image.
// It returns true if there is a watcher on the image, otherwise returns false.
func rbdStatus(ctx context.Context, pOpts *rbdVolume, cr *util.Credentials) (bool, string, error) {
	var output string
	var cmd []byte

	image := pOpts.RbdImageName

	klog.V(4).Infof(util.Log(ctx, "rbd: status %s using mon %s, pool %s"), image, pOpts.Monitors, pOpts.Pool)
	args := []string{"status", image, "--pool", pOpts.Pool, "-m", pOpts.Monitors, "--id", cr.ID, "--keyfile=" + cr.KeyFile}
	cmd, err := execCommand("rbd", args)
	output = string(cmd)

	if err, ok := err.(*exec.Error); ok {
		if err.Err == exec.ErrNotFound {
			klog.Errorf(util.Log(ctx, "rbd cmd not found"))
			// fail fast if command not found
			return false, output, err
		}
	}

	// If the command never succeeded, return its last error.
	if err != nil {
		return false, output, err
	}

	if strings.Contains(output, imageWatcherStr) {
		klog.V(4).Infof(util.Log(ctx, "rbd: watchers on %s: %s"), image, output)
		return true, output, nil
	}
	klog.Warningf(util.Log(ctx, "rbd: no watchers on %s"), image)
	return false, output, nil
}

// rbdManagerTaskDeleteImage adds a ceph manager task to delete an rbd image, thus deleting
// it asynchronously. If the command is not found, it returns a bool set to false.
func rbdManagerTaskDeleteImage(ctx context.Context, pOpts *rbdVolume, cr *util.Credentials) (bool, error) {
	var output []byte

	args := []string{"rbd", "task", "add", "remove",
		pOpts.Pool + "/" + pOpts.RbdImageName,
		"--id", cr.ID,
		"--keyfile=" + cr.KeyFile,
		"-m", pOpts.Monitors,
	}

	output, err := execCommand("ceph", args)
	if err != nil {
		if strings.Contains(string(output), rbdTaskRemoveCmdInvalidString1) &&
			strings.Contains(string(output), rbdTaskRemoveCmdInvalidString2) {
			klog.Infof(util.Log(ctx, "cluster with cluster ID (%s) does not support Ceph manager based rbd image"+
				" deletion (minimum ceph version required is v14.2.3)"), pOpts.ClusterID)
			return false, err
		}
	}

	return true, err
}
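
// For reference, the manager based deletion above corresponds to the
// following CLI call, available on clusters at or above v14.2.3 (values
// illustrative):
//
//	ceph rbd task add remove <pool>/<image> --id <user> --keyfile=<path> -m <mons>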

// deleteImage deletes a ceph image with provision and volume options.
func deleteImage(ctx context.Context, pOpts *rbdVolume, cr *util.Credentials) error {
	var output []byte

	image := pOpts.RbdImageName
	found, _, err := rbdStatus(ctx, pOpts, cr)
	if err != nil {
		return err
	}
	if found {
		klog.Info(util.Log(ctx, "rbd is still being used "), image)
		return fmt.Errorf("rbd %s is still being used", image)
	}

	klog.V(4).Infof(util.Log(ctx, "rbd: rm %s using mon %s, pool %s"), image, pOpts.Monitors, pOpts.Pool)

	// attempt to use Ceph manager based deletion support if available
	rbdCephMgrSupported, err := rbdManagerTaskDeleteImage(ctx, pOpts, cr)
	if !rbdCephMgrSupported {
		// attempt older style deletion
		args := []string{"rm", image, "--pool", pOpts.Pool, "--id", cr.ID, "-m", pOpts.Monitors,
			"--keyfile=" + cr.KeyFile}
		output, err = execCommand("rbd", args)
	}

	if err != nil {
		klog.Errorf(util.Log(ctx, "failed to delete rbd image: %v, command output: %s"), err, string(output))
	}

	return err
}

// updateSnapWithImageInfo updates provided rbdSnapshot with information from on-disk data
// regarding the same
func updateSnapWithImageInfo(ctx context.Context, rbdSnap *rbdSnapshot, cr *util.Credentials) error {
	snapInfo, err := getSnapInfo(ctx, rbdSnap.Monitors, cr, rbdSnap.Pool,
		rbdSnap.RbdImageName, rbdSnap.RbdSnapName)
	if err != nil {
		return err
	}

	rbdSnap.SizeBytes = snapInfo.Size

	tm, err := time.Parse(time.ANSIC, snapInfo.Timestamp)
	if err != nil {
		return err
	}

	rbdSnap.CreatedAt, err = ptypes.TimestampProto(tm)

	return err
}

// updateVolWithImageInfo updates provided rbdVolume with information from on-disk data
// regarding the same
func updateVolWithImageInfo(ctx context.Context, rbdVol *rbdVolume, cr *util.Credentials) error {
	imageInfo, err := getImageInfo(ctx, rbdVol.Monitors, cr, rbdVol.Pool, rbdVol.RbdImageName)
	if err != nil {
		return err
	}

	if imageInfo.Format != 2 {
		return fmt.Errorf("unknown or unsupported image format (%d) returned for image (%s)",
			imageInfo.Format, rbdVol.RbdImageName)
	}
	rbdVol.ImageFormat = rbdImageFormat2

	rbdVol.VolSize = imageInfo.Size
	rbdVol.ImageFeatures = strings.Join(imageInfo.Features, ",")

	return nil
}

// genSnapFromSnapID generates a rbdSnapshot structure from the provided identifier, updating
// the structure with elements from on-disk snapshot metadata as well
func genSnapFromSnapID(ctx context.Context, rbdSnap *rbdSnapshot, snapshotID string, cr *util.Credentials) error {
	var (
		options map[string]string
		vi      util.CSIIdentifier
	)
	options = make(map[string]string)

	rbdSnap.SnapID = snapshotID

	err := vi.DecomposeCSIID(rbdSnap.SnapID)
	if err != nil {
		klog.Errorf(util.Log(ctx, "error decoding snapshot ID (%s) (%s)"), err, rbdSnap.SnapID)
		return err
	}

	rbdSnap.ClusterID = vi.ClusterID
	options["clusterID"] = rbdSnap.ClusterID
	rbdSnap.RbdSnapName = snapJournal.NamingPrefix() + vi.ObjectUUID

	rbdSnap.Monitors, _, err = getMonsAndClusterID(ctx, options)
	if err != nil {
		return err
	}

	rbdSnap.Pool, err = util.GetPoolName(rbdSnap.Monitors, cr, vi.LocationID)
	if err != nil {
		return err
	}

	rbdSnap.RequestName, rbdSnap.RbdImageName, err = snapJournal.GetObjectUUIDData(rbdSnap.Monitors,
		cr, rbdSnap.Pool, vi.ObjectUUID, true)
	if err != nil {
		return err
	}

	err = updateSnapWithImageInfo(ctx, rbdSnap, cr)

	return err
}

// genVolFromVolID generates a rbdVolume structure from the provided identifier, updating
// the structure with elements from on-disk image metadata as well
func genVolFromVolID(ctx context.Context, rbdVol *rbdVolume, volumeID string, cr *util.Credentials) error {
	var (
		options map[string]string
		vi      util.CSIIdentifier
	)
	options = make(map[string]string)

	// rbdVolume fields that are not filled up in this function are:
	//	Mounter, MultiNodeWritable
	rbdVol.VolID = volumeID

	err := vi.DecomposeCSIID(rbdVol.VolID)
	if err != nil {
		err = fmt.Errorf("error decoding volume ID (%s) (%s)", err, rbdVol.VolID)
		return ErrInvalidVolID{err}
	}

	rbdVol.ClusterID = vi.ClusterID
	options["clusterID"] = rbdVol.ClusterID
	rbdVol.RbdImageName = volJournal.NamingPrefix() + vi.ObjectUUID

	rbdVol.Monitors, _, err = getMonsAndClusterID(ctx, options)
	if err != nil {
		return err
	}

	rbdVol.Pool, err = util.GetPoolName(rbdVol.Monitors, cr, vi.LocationID)
	if err != nil {
		return err
	}

	rbdVol.RequestName, _, err = volJournal.GetObjectUUIDData(rbdVol.Monitors, cr,
		rbdVol.Pool, vi.ObjectUUID, false)
	if err != nil {
		return err
	}

	err = updateVolWithImageInfo(ctx, rbdVol, cr)

	return err
}
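
// For illustration, the CSI volume ID decomposed by util.CSIIdentifier above
// is a dash separated string of roughly the following shape (field layout is
// an assumption based on the DecomposeCSIID usage here; see
// util.CSIIdentifier for the authoritative definition):
//
//	<id-version>-<cluster-id-length>-<cluster-id>-<pool-id-hex>-<object-uuid>
//
// where <object-uuid> is appended to volJournal.NamingPrefix() to recover the
// backing RBD image name.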

func execCommand(command string, args []string) ([]byte, error) {
	// #nosec
	cmd := exec.Command(command, args...)
	return cmd.CombinedOutput()
}

func getMonsAndClusterID(ctx context.Context, options map[string]string) (monitors, clusterID string, err error) {
	var ok bool

	if clusterID, ok = options["clusterID"]; !ok {
		err = errors.New("clusterID must be set")
		return
	}

	if monitors, err = util.Mons(csiConfigFile, clusterID); err != nil {
		klog.Errorf(util.Log(ctx, "failed getting mons (%s)"), err)
		err = errors.Wrapf(err, "failed to fetch monitor list using clusterID (%s)", clusterID)
		return
	}

	return
}

// isLegacyVolumeID checks if passed in volume ID string conforms to volume ID naming scheme used
// by the version 1.0.0 (legacy) of the plugin, and returns true if found to be conforming
func isLegacyVolumeID(volumeID string) bool {
	// Version 1.0.0 volumeID format: "csi-rbd-vol-" + UUID string
	//    length: 12 ("csi-rbd-vol-") + 36 (UUID string)

	// length check
	if len(volumeID) != 48 {
		return false
	}

	// Header check
	if !strings.HasPrefix(volumeID, "csi-rbd-vol-") {
		return false
	}

	// Trailer UUID format check
	if uuid.Parse(volumeID[12:]) == nil {
		return false
	}

	return true
}
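
// Example of a conforming legacy (v1.0.0) volume ID, per the checks above
// (UUID value illustrative):
//
//	csi-rbd-vol-b82f27de-d12f-11e9-ab8f-1a9eb09bae39
//
// 12 bytes of header plus a 36 byte UUID, 48 bytes in total.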

// updateMons function is used to update the rbdVolume.Monitors for volumes that were provisioned
// using the 1.0.0 version (legacy) of the plugin.
func updateMons(rbdVol *rbdVolume, options, credentials map[string]string) error {
	var ok bool

	// read monitors and MonValueFromSecret from options, else check passed in rbdVolume for
	// MonValueFromSecret key in credentials
	monInSecret := ""
	if options != nil {
		if rbdVol.Monitors, ok = options["monitors"]; !ok {
			rbdVol.Monitors = ""
		}
		if monInSecret, ok = options["monValueFromSecret"]; !ok {
			monInSecret = ""
		}
	} else {
		monInSecret = rbdVol.MonValueFromSecret
	}

	// if monitors are present in secrets and we have the credentials, use monitors from the
	// credentials, overriding monitors from other sources
	if monInSecret != "" && credentials != nil {
		monsFromSecret, ok := credentials[monInSecret]
		if ok {
			rbdVol.Monitors = monsFromSecret
		}
	}

	if rbdVol.Monitors == "" {
		return errors.New("either monitors or monValueFromSecret must be set")
	}

	return nil
}

func genVolFromVolumeOptions(ctx context.Context, volOptions, credentials map[string]string, disableInUseChecks, isLegacyVolume bool) (*rbdVolume, error) {
	var (
		ok  bool
		err error
	)

	rbdVol := &rbdVolume{}
	rbdVol.Pool, ok = volOptions["pool"]
	if !ok {
		return nil, errors.New("missing required parameter pool")
	}

	if isLegacyVolume {
		err = updateMons(rbdVol, volOptions, credentials)
		if err != nil {
			return nil, err
		}
	} else {
		rbdVol.Monitors, rbdVol.ClusterID, err = getMonsAndClusterID(ctx, volOptions)
		if err != nil {
			return nil, err
		}
	}

	rbdVol.ImageFormat, ok = volOptions["imageFormat"]
	if !ok {
		rbdVol.ImageFormat = rbdImageFormat2
	}

	if rbdVol.ImageFormat == rbdImageFormat2 {
		// if no image features are provided, it results in an empty string,
		// which disables all RBD image format 2 features as expected
		imageFeatures, found := volOptions["imageFeatures"]
		if found {
			arr := strings.Split(imageFeatures, ",")
			for _, f := range arr {
				if !supportedFeatures.Has(f) {
					return nil, fmt.Errorf("invalid feature %q for volume csi-rbdplugin, supported"+
						" features are: %v", f, supportedFeatures)
				}
			}
			rbdVol.ImageFeatures = imageFeatures
		}
	}

	klog.V(3).Infof(util.Log(ctx, "setting disableInUseChecks on rbd volume to: %v"), disableInUseChecks)
	rbdVol.DisableInUseChecks = disableInUseChecks

	rbdVol.Mounter, ok = volOptions["mounter"]
	if !ok {
		rbdVol.Mounter = rbdDefaultMounter
	}

	return rbdVol, nil
}
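
// A sketch of typical StorageClass parameters consumed above (keys taken from
// this function, values illustrative):
//
//	volOptions := map[string]string{
//		"clusterID":     "ceph-cluster-1",
//		"pool":          "rbd",
//		"imageFormat":   "2",
//		"imageFeatures": "layering",
//		"mounter":       "rbd",
//	}
//	rbdVol, err := genVolFromVolumeOptions(ctx, volOptions, nil, false, false)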

func genSnapFromOptions(ctx context.Context, rbdVol *rbdVolume, snapOptions map[string]string) *rbdSnapshot {
	var err error

	rbdSnap := &rbdSnapshot{}
	rbdSnap.Pool = rbdVol.Pool

	rbdSnap.Monitors, rbdSnap.ClusterID, err = getMonsAndClusterID(ctx, snapOptions)
	if err != nil {
		rbdSnap.Monitors = rbdVol.Monitors
		rbdSnap.ClusterID = rbdVol.ClusterID
	}

	return rbdSnap
}

func hasSnapshotFeature(imageFeatures string) bool {
	arr := strings.Split(imageFeatures, ",")
	for _, f := range arr {
		if f == "layering" {
			return true
		}
	}
	return false
}

func protectSnapshot(ctx context.Context, pOpts *rbdSnapshot, cr *util.Credentials) error {
	var output []byte

	image := pOpts.RbdImageName
	snapName := pOpts.RbdSnapName

	klog.V(4).Infof(util.Log(ctx, "rbd: snap protect %s using mon %s, pool %s"), image, pOpts.Monitors, pOpts.Pool)
	args := []string{"snap", "protect", "--pool", pOpts.Pool, "--snap", snapName, image, "--id",
		cr.ID, "-m", pOpts.Monitors, "--keyfile=" + cr.KeyFile}

	output, err := execCommand("rbd", args)

	if err != nil {
		return errors.Wrapf(err, "failed to protect snapshot, command output: %s", string(output))
	}

	return nil
}

func createSnapshot(ctx context.Context, pOpts *rbdSnapshot, cr *util.Credentials) error {
	var output []byte

	image := pOpts.RbdImageName
	snapName := pOpts.RbdSnapName

	klog.V(4).Infof(util.Log(ctx, "rbd: snap create %s using mon %s, pool %s"), image, pOpts.Monitors, pOpts.Pool)
	args := []string{"snap", "create", "--pool", pOpts.Pool, "--snap", snapName, image,
		"--id", cr.ID, "-m", pOpts.Monitors, "--keyfile=" + cr.KeyFile}

	output, err := execCommand("rbd", args)

	if err != nil {
		return errors.Wrapf(err, "failed to create snapshot, command output: %s", string(output))
	}

	return nil
}

func unprotectSnapshot(ctx context.Context, pOpts *rbdSnapshot, cr *util.Credentials) error {
	var output []byte

	image := pOpts.RbdImageName
	snapName := pOpts.RbdSnapName

	klog.V(4).Infof(util.Log(ctx, "rbd: snap unprotect %s using mon %s, pool %s"), image, pOpts.Monitors, pOpts.Pool)
	args := []string{"snap", "unprotect", "--pool", pOpts.Pool, "--snap", snapName, image, "--id",
		cr.ID, "-m", pOpts.Monitors, "--keyfile=" + cr.KeyFile}

	output, err := execCommand("rbd", args)

	if err != nil {
		return errors.Wrapf(err, "failed to unprotect snapshot, command output: %s", string(output))
	}

	return nil
}

func deleteSnapshot(ctx context.Context, pOpts *rbdSnapshot, cr *util.Credentials) error {
	var output []byte

	image := pOpts.RbdImageName
	snapName := pOpts.RbdSnapName

	klog.V(4).Infof(util.Log(ctx, "rbd: snap rm %s using mon %s, pool %s"), image, pOpts.Monitors, pOpts.Pool)
	args := []string{"snap", "rm", "--pool", pOpts.Pool, "--snap", snapName, image, "--id",
		cr.ID, "-m", pOpts.Monitors, "--keyfile=" + cr.KeyFile}

	output, err := execCommand("rbd", args)

	if err != nil {
		return errors.Wrapf(err, "failed to delete snapshot, command output: %s", string(output))
	}

	if err := undoSnapReservation(pOpts, cr); err != nil {
		klog.Errorf(util.Log(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s) on image (%s) (%s)"),
			pOpts.RequestName, pOpts.RbdSnapName, pOpts.RbdImageName, err)
	}

	return nil
}

func restoreSnapshot(ctx context.Context, pVolOpts *rbdVolume, pSnapOpts *rbdSnapshot, cr *util.Credentials) error {
	var output []byte

	image := pVolOpts.RbdImageName
	snapName := pSnapOpts.RbdSnapName

	klog.V(4).Infof(util.Log(ctx, "rbd: clone %s using mon %s, pool %s"), image, pVolOpts.Monitors, pVolOpts.Pool)
	args := []string{"clone", pSnapOpts.Pool + "/" + pSnapOpts.RbdImageName + "@" + snapName,
		pVolOpts.Pool + "/" + image, "--id", cr.ID, "-m", pVolOpts.Monitors, "--keyfile=" + cr.KeyFile}

	output, err := execCommand("rbd", args)

	if err != nil {
		return errors.Wrapf(err, "failed to restore snapshot, command output: %s", string(output))
	}

	return nil
}
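
// The snapshot helpers above compose into the usual RBD clone lifecycle. A
// minimal sketch of the create-from-snapshot flow (illustrative; the actual
// orchestration, with error handling and cleanup, lives in the controller
// server):
//
//	if err := createSnapshot(ctx, rbdSnap, cr); err != nil { /* handle */ }
//	if err := protectSnapshot(ctx, rbdSnap, cr); err != nil { /* handle */ }
//	if err := restoreSnapshot(ctx, rbdVol, rbdSnap, cr); err != nil { /* handle */ }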

// getSnapshotMetadata fetches on-disk metadata about the snapshot and populates the passed in
// rbdSnapshot structure
func getSnapshotMetadata(ctx context.Context, pSnapOpts *rbdSnapshot, cr *util.Credentials) error {
	imageName := pSnapOpts.RbdImageName
	snapName := pSnapOpts.RbdSnapName

	snapInfo, err := getSnapInfo(ctx, pSnapOpts.Monitors, cr, pSnapOpts.Pool, imageName, snapName)
	if err != nil {
		return err
	}

	pSnapOpts.SizeBytes = snapInfo.Size

	tm, err := time.Parse(time.ANSIC, snapInfo.Timestamp)
	if err != nil {
		return err
	}

	pSnapOpts.CreatedAt, err = ptypes.TimestampProto(tm)
	if err != nil {
		return err
	}

	return nil
}

// imageInfo strongly typed JSON spec for image info
type imageInfo struct {
	ObjectUUID string   `json:"name"`
	Size       int64    `json:"size"`
	Format     int64    `json:"format"`
	Features   []string `json:"features"`
	CreatedAt  string   `json:"create_timestamp"`
}

// getImageInfo queries rbd about the given image and returns its metadata, and returns
// ErrImageNotFound if provided image is not found
func getImageInfo(ctx context.Context, monitors string, cr *util.Credentials, poolName, imageName string) (imageInfo, error) {
	// rbd --format=json info [image-spec | snap-spec]

	var imgInfo imageInfo

	stdout, stderr, err := util.ExecCommand(
		"rbd",
		"-m", monitors,
		"--id", cr.ID,
		"--keyfile="+cr.KeyFile,
		"-c", util.CephConfigPath,
		"--format="+"json",
		"info", poolName+"/"+imageName)
	if err != nil {
		klog.Errorf(util.Log(ctx, "failed getting information for image (%s): (%s)"), poolName+"/"+imageName, err)
		if strings.Contains(string(stderr), "rbd: error opening image "+imageName+
			": (2) No such file or directory") {
			return imgInfo, ErrImageNotFound{imageName, err}
		}
		return imgInfo, err
	}

	err = json.Unmarshal(stdout, &imgInfo)
	if err != nil {
		klog.Errorf(util.Log(ctx, "failed to parse JSON output of image info (%s): (%s)"),
			poolName+"/"+imageName, err)
		return imgInfo, fmt.Errorf("unmarshal failed: %+v. raw buffer response: %s",
			err, string(stdout))
	}

	return imgInfo, nil
}
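
// For reference, the JSON unmarshalled into imageInfo above looks roughly
// like the following (fields abridged to those in the struct, values
// illustrative; the timestamp is in ANSIC form, as parsed by the callers):
//
//	{"name":"csi-vol-...","size":1073741824,"format":2,
//	 "features":["layering"],"create_timestamp":"Mon Aug 12 11:04:15 2019"}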

// snapInfo strongly typed JSON spec for snap ls rbd output
type snapInfo struct {
	ID        int64  `json:"id"`
	Name      string `json:"name"`
	Size      int64  `json:"size"`
	Timestamp string `json:"timestamp"`
}

/*
getSnapInfo queries rbd about the snapshots of the given image and returns its metadata, and
returns ErrImageNotFound if provided image is not found, and ErrSnapNotFound if provided snap
is not found in the image's snapshot list
*/
func getSnapInfo(ctx context.Context, monitors string, cr *util.Credentials, poolName, imageName, snapName string) (snapInfo, error) {
	// rbd --format=json snap ls [image-spec]

	var (
		snpInfo snapInfo
		snaps   []snapInfo
	)

	stdout, stderr, err := util.ExecCommand(
		"rbd",
		"-m", monitors,
		"--id", cr.ID,
		"--keyfile="+cr.KeyFile,
		"-c", util.CephConfigPath,
		"--format="+"json",
		"snap", "ls", poolName+"/"+imageName)
	if err != nil {
		klog.Errorf(util.Log(ctx, "failed getting snap (%s) information from image (%s): (%s)"),
			snapName, poolName+"/"+imageName, err)
		if strings.Contains(string(stderr), "rbd: error opening image "+imageName+
			": (2) No such file or directory") {
			return snpInfo, ErrImageNotFound{imageName, err}
		}
		return snpInfo, err
	}

	err = json.Unmarshal(stdout, &snaps)
	if err != nil {
		klog.Errorf(util.Log(ctx, "failed to parse JSON output of image snap list (%s): (%s)"),
			poolName+"/"+imageName, err)
		return snpInfo, fmt.Errorf("unmarshal failed: %+v. raw buffer response: %s",
			err, string(stdout))
	}

	for _, snap := range snaps {
		if snap.Name == snapName {
			return snap, nil
		}
	}

	return snpInfo, ErrSnapNotFound{snapName, fmt.Errorf("snap (%s) for image (%s) not found",
		snapName, poolName+"/"+imageName)}
}

// rbdImageMetadataStash strongly typed JSON spec for stashed RBD image metadata
type rbdImageMetadataStash struct {
	Version   int    `json:"Version"`
	Pool      string `json:"pool"`
	ImageName string `json:"image"`
	NbdAccess bool   `json:"accessType"`
}

// file name in which image metadata is stashed
const stashFileName = "image-meta.json"

// stashRBDImageMetadata stashes required fields into the stashFileName at the passed in path, in
// JSON format
func stashRBDImageMetadata(volOptions *rbdVolume, path string) error {
	var imgMeta = rbdImageMetadataStash{
		Version:   1, // Stash a v1 for now, in case of changes later, there are no checks for this at present
		Pool:      volOptions.Pool,
		ImageName: volOptions.RbdImageName,
	}

	imgMeta.NbdAccess = false
	if volOptions.Mounter == rbdTonbd && hasNBD {
		imgMeta.NbdAccess = true
	}

	encodedBytes, err := json.Marshal(imgMeta)
	if err != nil {
		return fmt.Errorf("failed to marshall JSON image metadata for image (%s) in pool (%s): (%v)",
			volOptions.RbdImageName, volOptions.Pool, err)
	}

	fPath := filepath.Join(path, stashFileName)
	err = ioutil.WriteFile(fPath, encodedBytes, 0600)
	if err != nil {
		return fmt.Errorf("failed to stash JSON image metadata for image (%s) in pool (%s) at path (%s): (%v)",
			volOptions.RbdImageName, volOptions.Pool, fPath, err)
	}

	return nil
}
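
// For illustration, the stashed image-meta.json written above contains
// (values illustrative):
//
//	{"Version":1,"pool":"rbd","image":"csi-vol-...","accessType":false}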

// lookupRBDImageMetadataStash reads and returns stashed image metadata at passed in path
func lookupRBDImageMetadataStash(path string) (rbdImageMetadataStash, error) {
	var imgMeta rbdImageMetadataStash

	fPath := filepath.Join(path, stashFileName)
	encodedBytes, err := ioutil.ReadFile(fPath)
	if err != nil {
		if !os.IsNotExist(err) {
			return imgMeta, fmt.Errorf("failed to read stashed JSON image metadata from path (%s): (%v)", fPath, err)
		}

		return imgMeta, ErrMissingStash{err}
	}

	err = json.Unmarshal(encodedBytes, &imgMeta)
	if err != nil {
		return imgMeta, fmt.Errorf("failed to unmarshall stashed JSON image metadata from path (%s): (%v)", fPath, err)
	}

	return imgMeta, nil
}

// cleanupRBDImageMetadataStash cleans up any stashed metadata at passed in path
func cleanupRBDImageMetadataStash(path string) error {
	fPath := filepath.Join(path, stashFileName)
	if err := os.Remove(fPath); err != nil {
		return fmt.Errorf("failed to cleanup stashed JSON data (%s): (%v)", fPath, err)
	}

	return nil
}