vendor updates

Serguei Bezverkhi
2018-03-06 17:33:18 -05:00
parent 4b3ebc171b
commit e9033989a0
5854 changed files with 248382 additions and 119809 deletions


@ -23,14 +23,16 @@ go_library(
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//pkg/volume/util/volumepathhandler:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)
@ -38,8 +40,7 @@ go_library(
go_test(
name = "go_default_test",
srcs = ["rbd_test.go"],
importpath = "k8s.io/kubernetes/pkg/volume/rbd",
library = ":go_default_library",
embed = [":go_default_library"],
deps = [
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",


@ -27,7 +27,6 @@ import (
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
// NewAttacher implements AttachableVolumePlugin.NewAttacher.
@ -39,7 +38,7 @@ func (plugin *rbdPlugin) newAttacherInternal(manager diskManager) (volume.Attach
return &rbdAttacher{
plugin: plugin,
manager: manager,
mounter: volumehelper.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host),
mounter: volutil.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host),
}, nil
}
@ -52,7 +51,7 @@ func (plugin *rbdPlugin) newDetacherInternal(manager diskManager) (volume.Detach
return &rbdDetacher{
plugin: plugin,
manager: manager,
mounter: volumehelper.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host),
mounter: volutil.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host),
}, nil
}
@ -154,7 +153,7 @@ func (attacher *rbdAttacher) MountDevice(spec *volume.Spec, devicePath string, d
if ro {
options = append(options, "ro")
}
mountOptions := volume.MountOptionFromSpec(spec, options...)
mountOptions := volutil.MountOptionFromSpec(spec, options...)
err = attacher.mounter.FormatAndMount(devicePath, deviceMountPath, fstype, mountOptions)
if err != nil {
os.Remove(deviceMountPath)


@ -31,18 +31,23 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
)
// Abstract interface to disk operations.
type diskManager interface {
// MakeGlobalPDName creates global persistent disk path.
MakeGlobalPDName(disk rbd) string
// MakeGlobalVDPDName creates global block disk path.
MakeGlobalVDPDName(disk rbd) string
// Attaches the disk to the kubelet's host machine.
// If it successfully attaches, the path to the device
// is returned. Otherwise, an error will be returned.
AttachDisk(disk rbdMounter) (string, error)
// Detaches the disk from the kubelet's host machine.
DetachDisk(plugin *rbdPlugin, deviceMountPath string, device string) error
// Detaches the block disk from the kubelet's host machine.
DetachBlockDisk(disk rbdDiskUnmapper, mntPath string) error
// Creates a rbd image.
CreateImage(provisioner *rbdVolumeProvisioner) (r *v1.RBDPersistentVolumeSource, volumeSizeGB int, err error)
// Deletes a rbd image.
@ -81,7 +86,7 @@ func diskSetUp(manager diskManager, b rbdMounter, volPath string, mounter mount.
if (&b).GetAttributes().ReadOnly {
options = append(options, "ro")
}
mountOptions := volume.JoinMountOptions(b.mountOptions, options)
mountOptions := util.JoinMountOptions(b.mountOptions, options)
err = mounter.Mount(globalPDPath, volPath, "", mountOptions)
if err != nil {
glog.Errorf("failed to bind mount:%s", globalPDPath)
@ -115,9 +120,9 @@ func diskTearDown(manager diskManager, c rbdUnmounter, volPath string, mounter m
}
notMnt, mntErr := mounter.IsLikelyNotMountPoint(volPath)
if err != nil && !os.IsNotExist(err) {
if mntErr != nil && !os.IsNotExist(mntErr) {
glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return err
return mntErr
}
if notMnt {
if err := os.Remove(volPath); err != nil {


@ -18,6 +18,9 @@ package rbd
import (
"fmt"
"os"
"path/filepath"
"regexp"
dstrings "strings"
"github.com/golang/glog"
@ -32,7 +35,7 @@ import (
"k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
volutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
)
var (
@ -55,6 +58,7 @@ var _ volume.DeletableVolumePlugin = &rbdPlugin{}
var _ volume.ProvisionableVolumePlugin = &rbdPlugin{}
var _ volume.AttachableVolumePlugin = &rbdPlugin{}
var _ volume.ExpandableVolumePlugin = &rbdPlugin{}
var _ volume.BlockVolumePlugin = &rbdPlugin{}
const (
rbdPluginName = "kubernetes.io/rbd"
@ -81,7 +85,7 @@ func (plugin *rbdPlugin) GetPluginName() string {
}
func (plugin *rbdPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
mon, err := getVolumeSourceMonitors(spec)
pool, err := getVolumeSourcePool(spec)
if err != nil {
return "", err
}
@ -92,16 +96,12 @@ func (plugin *rbdPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
return fmt.Sprintf(
"%v:%v",
mon,
pool,
img), nil
}
func (plugin *rbdPlugin) CanSupport(spec *volume.Spec) bool {
if (spec.Volume != nil && spec.Volume.RBD == nil) || (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.RBD == nil) {
return false
}
return true
return (spec.Volume != nil && spec.Volume.RBD != nil) || (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.RBD != nil)
}
func (plugin *rbdPlugin) RequiresRemount() bool {
@ -244,7 +244,7 @@ func (plugin *rbdPlugin) createMounterFromVolumeSpecAndPod(spec *volume.Spec, po
if kubeClient == nil {
return nil, fmt.Errorf("Cannot get kube client")
}
secrets, err := kubeClient.Core().Secrets(secretNs).Get(secretName, metav1.GetOptions{})
secrets, err := kubeClient.CoreV1().Secrets(secretNs).Get(secretName, metav1.GetOptions{})
if err != nil {
err = fmt.Errorf("Couldn't get secret %v/%v err: %v", secretNs, secretName, err)
return nil, err
@ -327,7 +327,7 @@ func (plugin *rbdPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID,
Keyring: keyring,
Secret: secret,
fsType: fstype,
mountOptions: volume.MountOptionFromSpec(spec),
mountOptions: volutil.MountOptionFromSpec(spec),
}, nil
}
@ -346,17 +346,184 @@ func (plugin *rbdPlugin) newUnmounterInternal(volName string, podUID types.UID,
}
func (plugin *rbdPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
mounter := plugin.host.GetMounter(plugin.GetPluginName())
pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName())
sourceName, err := mounter.GetDeviceNameFromMount(mountPath, pluginDir)
if err != nil {
return nil, err
}
s := dstrings.Split(sourceName, "-image-")
if len(s) != 2 {
// The mountPath parameter is the volume mount path for a specific pod; its format
// is /var/lib/kubelet/pods/{podUID}/volumes/{volumePluginName}/{volumeName}.
// mounter.GetDeviceNameFromMount first finds the device path (such as /dev/rbd0) from
// mountPath, and then tries to find the global device mount path in the mounted
// path list of this device. sourceName is extracted from this global device mount path.
// mounter.GetDeviceNameFromMount expects the global device mount path to conform to the canonical
// format: /var/lib/kubelet/plugins/kubernetes.io/rbd/mounts/{pool}-image-{image}.
// If this assertion fails, it means that the global device mount path was created with
// the deprecated format: /var/lib/kubelet/plugins/kubernetes.io/rbd/rbd/{pool}-image-{image}.
// So we check whether this old-style global device mount path exists.
// If it does, extract the sourceName from it; otherwise return an error.
glog.V(3).Infof("SourceName %s wrong, fallback to old format", sourceName)
sourceName, err = plugin.getDeviceNameFromOldMountPath(mounter, mountPath)
if err != nil {
return nil, err
}
s = dstrings.Split(sourceName, "-image-")
if len(s) != 2 {
return nil, fmt.Errorf("sourceName %s wrong, should be pool+\"-image-\"+imageName", sourceName)
}
}
rbdVolume := &v1.Volume{
Name: volumeName,
VolumeSource: v1.VolumeSource{
RBD: &v1.RBDVolumeSource{
CephMonitors: []string{},
RBDPool: s[0],
RBDImage: s[1],
},
},
}
return volume.NewSpecFromVolume(rbdVolume), nil
}
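For illustration only, here is a minimal standalone sketch (the sample paths and pool/image names are assumed, not taken from this commit) of the pool/image extraction described in the comment above: the global device mount directory is named {pool}-image-{image}, so splitting its basename on "-image-" recovers both parts whether the canonical mounts/ or the deprecated rbd/ parent directory is in use.

package main

import (
	"fmt"
	"path"
	"strings"
)

// splitPoolImage mirrors the "-image-" split used by ConstructVolumeSpec above;
// the sample paths in main are assumed examples, not taken from a real kubelet.
func splitPoolImage(globalMountPath string) (pool, image string, err error) {
	name := path.Base(globalMountPath)
	parts := strings.Split(name, "-image-")
	if len(parts) != 2 {
		return "", "", fmt.Errorf("unexpected mount dir %q, want {pool}-image-{image}", name)
	}
	return parts[0], parts[1], nil
}

func main() {
	for _, dir := range []string{
		"/var/lib/kubelet/plugins/kubernetes.io/rbd/mounts/kube-image-foo", // canonical format
		"/var/lib/kubelet/plugins/kubernetes.io/rbd/rbd/kube-image-foo",    // deprecated format
	} {
		pool, image, err := splitPoolImage(dir)
		fmt.Println(pool, image, err) // kube foo <nil>
	}
}

The actual plugin additionally falls back to the mount-refs lookup shown in getDeviceNameFromOldMountPath below when the first lookup does not match the canonical format.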
func (plugin *rbdPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, mapPath string) (*volume.Spec, error) {
pluginDir := plugin.host.GetVolumeDevicePluginDir(rbdPluginName)
blkutil := volumepathhandler.NewBlockVolumePathHandler()
globalMapPathUUID, err := blkutil.FindGlobalMapPathUUIDFromPod(pluginDir, mapPath, podUID)
if err != nil {
return nil, err
}
glog.V(5).Infof("globalMapPathUUID: %v, err: %v", globalMapPathUUID, err)
globalMapPath := filepath.Dir(globalMapPathUUID)
if len(globalMapPath) == 1 {
return nil, fmt.Errorf("failed to retrieve volume plugin information from globalMapPathUUID: %v", globalMapPathUUID)
}
return getVolumeSpecFromGlobalMapPath(globalMapPath)
}
func getVolumeSpecFromGlobalMapPath(globalMapPath string) (*volume.Spec, error) {
// Retrieve volume spec information from globalMapPath
// globalMapPath example:
// plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}
pool, image, err := getPoolAndImageFromMapPath(globalMapPath)
if err != nil {
return nil, err
}
block := v1.PersistentVolumeBlock
rbdVolume := &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
RBD: &v1.RBDPersistentVolumeSource{
RBDImage: image,
RBDPool: pool,
},
},
VolumeMode: &block,
},
}
return volume.NewSpecFromPersistentVolume(rbdVolume, true), nil
}
func (plugin *rbdPlugin) NewBlockVolumeMapper(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.BlockVolumeMapper, error) {
var uid types.UID
if pod != nil {
uid = pod.UID
}
secret := ""
// var err error
if pod != nil {
secretName, secretNs, err := getSecretNameAndNamespace(spec, pod.Namespace)
if err != nil {
return nil, err
}
if len(secretName) > 0 && len(secretNs) > 0 {
// if secret is provided, retrieve it
kubeClient := plugin.host.GetKubeClient()
if kubeClient == nil {
return nil, fmt.Errorf("Cannot get kube client")
}
secrets, err := kubeClient.Core().Secrets(secretNs).Get(secretName, metav1.GetOptions{})
if err != nil {
err = fmt.Errorf("Couldn't get secret %v/%v err: %v", secretNs, secretName, err)
return nil, err
}
for _, data := range secrets.Data {
secret = string(data)
}
}
}
return plugin.newBlockVolumeMapperInternal(spec, uid, &RBDUtil{}, secret, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName()))
}
func (plugin *rbdPlugin) newBlockVolumeMapperInternal(spec *volume.Spec, podUID types.UID, manager diskManager, secret string, mounter mount.Interface, exec mount.Exec) (volume.BlockVolumeMapper, error) {
mon, err := getVolumeSourceMonitors(spec)
if err != nil {
return nil, err
}
img, err := getVolumeSourceImage(spec)
if err != nil {
return nil, err
}
pool, err := getVolumeSourcePool(spec)
if err != nil {
return nil, err
}
id, err := getVolumeSourceUser(spec)
if err != nil {
return nil, err
}
keyring, err := getVolumeSourceKeyRing(spec)
if err != nil {
return nil, err
}
ro, err := getVolumeSourceReadOnly(spec)
if err != nil {
return nil, err
}
return &rbdDiskMapper{
rbd: newRBD(podUID, spec.Name(), img, pool, ro, plugin, manager),
mon: mon,
id: id,
keyring: keyring,
secret: secret,
}, nil
}
func (plugin *rbdPlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (volume.BlockVolumeUnmapper, error) {
return plugin.newUnmapperInternal(volName, podUID, &RBDUtil{})
}
func (plugin *rbdPlugin) newUnmapperInternal(volName string, podUID types.UID, manager diskManager) (volume.BlockVolumeUnmapper, error) {
return &rbdDiskUnmapper{
rbdDiskMapper: &rbdDiskMapper{
rbd: newRBD(podUID, volName, "", "", false, plugin, manager),
mon: make([]string, 0),
},
}, nil
}
func (plugin *rbdPlugin) getDeviceNameFromOldMountPath(mounter mount.Interface, mountPath string) (string, error) {
refs, err := mount.GetMountRefsByDev(mounter, mountPath)
if err != nil {
return "", err
}
// baseMountPath is the prefix of deprecated device global mounted path,
// such as: /var/lib/kubelet/plugins/kubernetes.io/rbd/rbd
baseMountPath := filepath.Join(plugin.host.GetPluginDir(rbdPluginName), "rbd")
for _, ref := range refs {
if dstrings.HasPrefix(ref, baseMountPath) {
return filepath.Rel(baseMountPath, ref)
}
}
return "", fmt.Errorf("can't find source name from mounted path: %s", mountPath)
}
func (plugin *rbdPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.RBD == nil {
return nil, fmt.Errorf("spec.PersistentVolumeSource.Spec.RBD is nil")
@ -402,7 +569,7 @@ type rbdVolumeProvisioner struct {
var _ volume.Provisioner = &rbdVolumeProvisioner{}
func (r *rbdVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
if !volume.AccessModesContainedInAll(r.plugin.GetAccessModes(), r.options.PVC.Spec.AccessModes) {
if !volutil.AccessModesContainedInAll(r.plugin.GetAccessModes(), r.options.PVC.Spec.AccessModes) {
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", r.options.PVC.Spec.AccessModes, r.plugin.GetAccessModes())
}
@ -415,6 +582,7 @@ func (r *rbdVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
secret := ""
secretName := ""
secretNamespace := ""
keyring := ""
imageFormat := rbdImageFormat2
fstype := ""
@ -439,6 +607,8 @@ func (r *rbdVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
secretName = v
case "usersecretnamespace":
secretNamespace = v
case "keyring":
keyring = v
case "imageformat":
imageFormat = v
case "imagefeatures":
@ -472,8 +642,8 @@ func (r *rbdVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
if len(r.Mon) < 1 {
return nil, fmt.Errorf("missing Ceph monitors")
}
if secretName == "" {
return nil, fmt.Errorf("missing user secret name")
if secretName == "" && keyring == "" {
return nil, fmt.Errorf("must specify either keyring or user secret name")
}
if r.adminId == "" {
r.adminId = rbdDefaultAdminId
@ -495,10 +665,20 @@ func (r *rbdVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
}
glog.Infof("successfully created rbd image %q", image)
pv := new(v1.PersistentVolume)
metav1.SetMetaDataAnnotation(&pv.ObjectMeta, volumehelper.VolumeDynamicallyCreatedByKey, "rbd-dynamic-provisioner")
rbd.SecretRef = new(v1.SecretReference)
rbd.SecretRef.Name = secretName
rbd.SecretRef.Namespace = secretNamespace
metav1.SetMetaDataAnnotation(&pv.ObjectMeta, volutil.VolumeDynamicallyCreatedByKey, "rbd-dynamic-provisioner")
if secretName != "" {
rbd.SecretRef = new(v1.SecretReference)
rbd.SecretRef.Name = secretName
rbd.SecretRef.Namespace = secretNamespace
} else {
var filePathRegex = regexp.MustCompile(`^(?:/[^/!;` + "`" + ` ]+)+$`)
if keyring != "" && !filePathRegex.MatchString(keyring) {
return nil, fmt.Errorf("keyring field must contain a path to a file")
}
rbd.Keyring = keyring
}
rbd.RadosUser = r.Id
rbd.FSType = fstype
pv.Spec.PersistentVolumeSource.RBD = rbd
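As a rough illustration of what the keyring validation in the provisioner above accepts (the sample strings are assumptions, not from this commit), the regexp requires an absolute path whose components contain no '/', '!', ';', backtick, or space characters:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as the keyring validation above: an absolute path whose
	// components contain no '/', '!', ';', backtick, or space.
	filePathRegex := regexp.MustCompile(`^(?:/[^/!;` + "`" + ` ]+)+$`)
	for _, s := range []string{
		"/etc/ceph/ceph.client.admin.keyring", // accepted: absolute file path
		"etc/ceph/keyring",                    // rejected: not absolute
		"/etc/ceph/",                          // rejected: trailing slash
		"/etc/ceph/key ring",                  // rejected: contains a space
	} {
		fmt.Printf("%-40q %v\n", s, filePathRegex.MatchString(s))
	}
}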
@ -561,7 +741,7 @@ func newRBD(podUID types.UID, volName string, image string, pool string, readOnl
Pool: pool,
ReadOnly: readOnly,
plugin: plugin,
mounter: volumehelper.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host),
mounter: volutil.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host),
exec: plugin.host.GetExec(plugin.GetPluginName()),
manager: manager,
MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, volName, plugin.host)),
@ -650,6 +830,140 @@ func (c *rbdUnmounter) TearDownAt(dir string) error {
return nil
}
var _ volume.BlockVolumeMapper = &rbdDiskMapper{}
type rbdDiskMapper struct {
*rbd
mon []string
id string
keyring string
secret string
adminSecret string
adminId string
imageFormat string
imageFeatures []string
}
var _ volume.BlockVolumeUnmapper = &rbdDiskUnmapper{}
// GetGlobalMapPath returns global map path and error
// path: plugins/kubernetes.io/{PluginName}/volumeDevices/{rbd pool}-image-{rbd image-name}/{podUid}
func (rbd *rbd) GetGlobalMapPath(spec *volume.Spec) (string, error) {
return rbd.rbdGlobalMapPath(spec)
}
// GetPodDeviceMapPath returns pod device map path and volume name
// path: pods/{podUid}/volumeDevices/kubernetes.io~rbd
// volumeName: pv0001
func (rbd *rbd) GetPodDeviceMapPath() (string, string) {
return rbd.rbdPodDeviceMapPath()
}
func (rbd *rbdDiskMapper) SetUpDevice() (string, error) {
return "", nil
}
func (rbd *rbd) rbdGlobalMapPath(spec *volume.Spec) (string, error) {
var err error
mon, err := getVolumeSourceMonitors(spec)
if err != nil {
return "", err
}
img, err := getVolumeSourceImage(spec)
if err != nil {
return "", err
}
pool, err := getVolumeSourcePool(spec)
if err != nil {
return "", err
}
ro, err := getVolumeSourceReadOnly(spec)
if err != nil {
return "", err
}
mounter := &rbdMounter{
rbd: newRBD("", spec.Name(), img, pool, ro, rbd.plugin, &RBDUtil{}),
Mon: mon,
}
return rbd.manager.MakeGlobalVDPDName(*mounter.rbd), nil
}
func (rbd *rbd) rbdPodDeviceMapPath() (string, string) {
name := rbdPluginName
return rbd.plugin.host.GetPodVolumeDeviceDir(rbd.podUID, strings.EscapeQualifiedNameForDisk(name)), rbd.volName
}
type rbdDiskUnmapper struct {
*rbdDiskMapper
}
func getPoolAndImageFromMapPath(mapPath string) (string, string, error) {
pathParts := dstrings.Split(mapPath, "/")
if len(pathParts) < 2 {
return "", "", fmt.Errorf("corrupted mapPath")
}
rbdParts := dstrings.Split(pathParts[len(pathParts)-1], "-image-")
if len(rbdParts) < 2 {
return "", "", fmt.Errorf("corrupted mapPath")
}
return string(rbdParts[0]), string(rbdParts[1]), nil
}
func getBlockVolumeDevice(mapPath string) (string, error) {
pool, image, err := getPoolAndImageFromMapPath(mapPath)
if err != nil {
return "", err
}
// Getting full device path
device, found := getDevFromImageAndPool(pool, image)
if !found {
return "", err
}
return device, nil
}
func (rbd *rbdDiskUnmapper) TearDownDevice(mapPath, _ string) error {
device, err := getBlockVolumeDevice(mapPath)
if err != nil {
return fmt.Errorf("rbd: failed to get loopback for device: %v, err: %v", device, err)
}
// Get the loopback device which takes an fd lock for the device before detaching a volume from the node.
// TODO: This is a workaround for issue #54108
// Currently local attach plugins such as FC, iSCSI, RBD can't obtain devicePath during
// GenerateUnmapDeviceFunc() in operation_generator. As a result, these plugins fail to get
// and remove the loopback device, which then remains on the kubelet node. To avoid the problem,
// local attach plugins need to remove the loopback device during TearDownDevice().
blkUtil := volumepathhandler.NewBlockVolumePathHandler()
loop, err := volumepathhandler.BlockVolumePathHandler.GetLoopDevice(blkUtil, device)
if err != nil {
return fmt.Errorf("rbd: failed to get loopback for device: %v, err: %v", device, err)
}
// Remove loop device before detaching volume since volume detach operation gets busy if volume is opened by loopback.
err = volumepathhandler.BlockVolumePathHandler.RemoveLoopDevice(blkUtil, loop)
if err != nil {
return fmt.Errorf("rbd: failed to remove loopback :%v, err: %v", loop, err)
}
glog.V(4).Infof("rbd: successfully removed loop device: %s", loop)
err = rbd.manager.DetachBlockDisk(*rbd, mapPath)
if err != nil {
return fmt.Errorf("rbd: failed to detach disk: %s\nError: %v", mapPath, err)
}
glog.V(4).Infof("rbd: %q is unmapped, deleting the directory", mapPath)
err = os.RemoveAll(mapPath)
if err != nil {
return fmt.Errorf("rbd: failed to delete the directory: %s\nError: %v", mapPath, err)
}
glog.V(4).Infof("rbd: successfully detached disk: %s", mapPath)
return nil
}
func getVolumeSourceMonitors(spec *volume.Spec) ([]string, error) {
if spec.Volume != nil && spec.Volume.RBD != nil {
return spec.Volume.RBD.CephMonitors, nil


@ -55,9 +55,24 @@ func TestCanSupport(t *testing.T) {
if plug.GetPluginName() != "kubernetes.io/rbd" {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if plug.CanSupport(&volume.Spec{}) {
t.Errorf("Expected false")
}
if plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
t.Errorf("Expected false")
}
if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{RBD: &v1.RBDVolumeSource{}}}}) {
t.Errorf("Expected true")
}
if plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{}}}) {
t.Errorf("Expected false")
}
if plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{}}}}) {
t.Errorf("Expected false")
}
if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{RBD: &v1.RBDPersistentVolumeSource{}}}}}) {
t.Errorf("Expected true")
}
}
type fakeDiskManager struct {
@ -81,10 +96,14 @@ func (fake *fakeDiskManager) MakeGlobalPDName(rbd rbd) string {
return makePDNameInternal(rbd.plugin.host, rbd.Pool, rbd.Image)
}
func (fake *fakeDiskManager) MakeGlobalVDPDName(rbd rbd) string {
return makePDNameInternal(rbd.plugin.host, rbd.Pool, rbd.Image)
}
func (fake *fakeDiskManager) AttachDisk(b rbdMounter) (string, error) {
fake.mutex.Lock()
defer fake.mutex.Unlock()
fake.rbdMapIndex += 1
fake.rbdMapIndex++
devicePath := fmt.Sprintf("/dev/rbd%d", fake.rbdMapIndex)
fake.rbdDevices[devicePath] = true
return devicePath, nil
@ -101,6 +120,17 @@ func (fake *fakeDiskManager) DetachDisk(r *rbdPlugin, deviceMountPath string, de
return nil
}
func (fake *fakeDiskManager) DetachBlockDisk(r rbdDiskUnmapper, device string) error {
fake.mutex.Lock()
defer fake.mutex.Unlock()
ok := fake.rbdDevices[device]
if !ok {
return fmt.Errorf("rbd: failed to detach device %s, it does not exist", device)
}
delete(fake.rbdDevices, device)
return nil
}
func (fake *fakeDiskManager) CreateImage(provisioner *rbdVolumeProvisioner) (r *v1.RBDPersistentVolumeSource, volumeSizeGB int, err error) {
return nil, 0, fmt.Errorf("not implemented")
}
@ -310,7 +340,7 @@ func TestPlugin(t *testing.T) {
},
},
expectedDevicePath: "/dev/rbd1",
expectedDeviceMountPath: fmt.Sprintf("%s/plugins/kubernetes.io/rbd/rbd/pool1-image-image1", tmpDir),
expectedDeviceMountPath: fmt.Sprintf("%s/plugins/kubernetes.io/rbd/mounts/pool1-image-image1", tmpDir),
expectedPodMountPath: fmt.Sprintf("%s/pods/%s/volumes/kubernetes.io~rbd/vol1", tmpDir, podUID),
})
cases = append(cases, &testcase{
@ -338,7 +368,7 @@ func TestPlugin(t *testing.T) {
},
},
expectedDevicePath: "/dev/rbd1",
expectedDeviceMountPath: fmt.Sprintf("%s/plugins/kubernetes.io/rbd/rbd/pool2-image-image2", tmpDir),
expectedDeviceMountPath: fmt.Sprintf("%s/plugins/kubernetes.io/rbd/mounts/pool2-image-image2", tmpDir),
expectedPodMountPath: fmt.Sprintf("%s/pods/%s/volumes/kubernetes.io~rbd/vol2", tmpDir, podUID),
})
@ -435,3 +465,127 @@ func TestGetSecretNameAndNamespace(t *testing.T) {
t.Errorf("getSecretNameAndNamespace returned incorrect values, expected %s and %s but got %s and %s", secretName, secretNamespace, foundSecretName, foundSecretNamespace)
}
}
// https://github.com/kubernetes/kubernetes/issues/57744
func TestGetDeviceMountPath(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("rbd_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
fakeVolumeHost := volumetest.NewFakeVolumeHost(tmpDir, nil, nil)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, fakeVolumeHost)
plug, err := plugMgr.FindPluginByName("kubernetes.io/rbd")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
fdm := NewFakeDiskManager()
// attacher
attacher, err := plug.(*rbdPlugin).newAttacherInternal(fdm)
if err != nil {
t.Errorf("Failed to make a new Attacher: %v", err)
}
pool, image := "pool", "image"
spec := volume.NewSpecFromVolume(&v1.Volume{
Name: "vol",
VolumeSource: v1.VolumeSource{
RBD: &v1.RBDVolumeSource{
CephMonitors: []string{"a", "b"},
RBDPool: pool,
RBDImage: image,
FSType: "ext4",
},
},
})
deprecatedDir := fmt.Sprintf("%s/plugins/kubernetes.io/rbd/rbd/%s-image-%s", tmpDir, pool, image)
canonicalDir := fmt.Sprintf("%s/plugins/kubernetes.io/rbd/mounts/%s-image-%s", tmpDir, pool, image)
type testCase struct {
deprecated bool
targetPath string
}
for _, c := range []testCase{
{false, canonicalDir},
{true, deprecatedDir},
} {
if c.deprecated {
// This is a deprecated device mount path; we create it
// and expect attacher.GetDeviceMountPath to return c.targetPath.
if err := os.MkdirAll(c.targetPath, 0700); err != nil {
t.Fatalf("Create deprecated mount path failed: %v", err)
}
}
mountPath, err := attacher.GetDeviceMountPath(spec)
if err != nil {
t.Fatalf("GetDeviceMountPath failed: %v", err)
}
if mountPath != c.targetPath {
t.Errorf("Mismatch device mount path: wanted %s, got %s", c.targetPath, mountPath)
}
}
}
// https://github.com/kubernetes/kubernetes/issues/57744
func TestConstructVolumeSpec(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("rbd_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
fakeVolumeHost := volumetest.NewFakeVolumeHost(tmpDir, nil, nil)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, fakeVolumeHost)
plug, err := plugMgr.FindPluginByName("kubernetes.io/rbd")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
fakeMounter := fakeVolumeHost.GetMounter(plug.GetPluginName()).(*mount.FakeMounter)
pool, image, volumeName := "pool", "image", "vol"
podMountPath := fmt.Sprintf("%s/pods/pod123/volumes/kubernetes.io~rbd/%s", tmpDir, volumeName)
deprecatedDir := fmt.Sprintf("%s/plugins/kubernetes.io/rbd/rbd/%s-image-%s", tmpDir, pool, image)
canonicalDir := fmt.Sprintf("%s/plugins/kubernetes.io/rbd/mounts/%s-image-%s", tmpDir, pool, image)
type testCase struct {
volumeName string
targetPath string
}
for _, c := range []testCase{
{"vol", canonicalDir},
{"vol", deprecatedDir},
} {
if err := os.MkdirAll(c.targetPath, 0700); err != nil {
t.Fatalf("Create mount path %s failed: %v", c.targetPath, err)
}
if err = fakeMounter.Mount("/dev/rbd0", c.targetPath, "fake", nil); err != nil {
t.Fatalf("Mount %s to %s failed: %v", c.targetPath, podMountPath, err)
}
if err = fakeMounter.Mount(c.targetPath, podMountPath, "fake", []string{"bind"}); err != nil {
t.Fatalf("Mount %s to %s failed: %v", c.targetPath, podMountPath, err)
}
spec, err := plug.ConstructVolumeSpec(c.volumeName, podMountPath)
if err != nil {
t.Errorf("ConstructVolumeSpec failed: %v", err)
} else {
if spec.Volume.RBD.RBDPool != pool {
t.Errorf("Mismatch rbd pool: wanted %s, got %s", pool, spec.Volume.RBD.RBDPool)
}
if spec.Volume.RBD.RBDImage != image {
t.Fatalf("Mismatch rbd image: wanted %s, got %s", image, spec.Volume.RBD.RBDImage)
}
}
if err = fakeMounter.Unmount(podMountPath); err != nil {
t.Fatalf("Unmount pod path %s failed: %v", podMountPath, err)
}
if err = fakeMounter.Unmount(c.targetPath); err != nil {
t.Fatalf("Unmount device path %s failed: %v", c.targetPath, err)
}
}
}


@ -25,11 +25,9 @@ import (
"encoding/json"
"fmt"
"io/ioutil"
"math/rand"
"os"
"os/exec"
"path"
"regexp"
"strconv"
"strings"
"time"
@ -37,7 +35,10 @@ import (
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/wait"
fileutil "k8s.io/kubernetes/pkg/util/file"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/util/node"
"k8s.io/kubernetes/pkg/volume"
volutil "k8s.io/kubernetes/pkg/volume/util"
@ -48,44 +49,57 @@ const (
imageSizeStr = "size "
sizeDivStr = " MB in"
kubeLockMagic = "kubelet_lock_magic_"
// The following three values are used for 30 seconds timeout
// while waiting for RBD Watcher to expire.
rbdImageWatcherInitDelay = 1 * time.Second
rbdImageWatcherFactor = 1.4
rbdImageWatcherSteps = 10
)
var (
clientKubeLockMagicRe = regexp.MustCompile("client.* " + kubeLockMagic + ".*")
)
// search /sys/bus for rbd device that matches given pool and image
func getDevFromImageAndPool(pool, image string) (string, bool) {
device, found := getRbdDevFromImageAndPool(pool, image)
if found {
return device, true
}
device, found = getNbdDevFromImageAndPool(pool, image)
if found {
return device, true
}
return "", false
}
// Search /sys/bus for rbd device that matches given pool and image.
func getRbdDevFromImageAndPool(pool string, image string) (string, bool) {
// /sys/bus/rbd/devices/X/name and /sys/bus/rbd/devices/X/pool
sys_path := "/sys/bus/rbd/devices"
if dirs, err := ioutil.ReadDir(sys_path); err == nil {
for _, f := range dirs {
// pool and name format:
// Pool and name format:
// see rbd_pool_show() and rbd_name_show() at
// https://github.com/torvalds/linux/blob/master/drivers/block/rbd.c
name := f.Name()
// first match pool, then match name
// First match pool, then match name.
poolFile := path.Join(sys_path, name, "pool")
poolBytes, err := ioutil.ReadFile(poolFile)
if err != nil {
glog.V(4).Infof("Error reading %s: %v", poolFile, err)
glog.V(4).Infof("error reading %s: %v", poolFile, err)
continue
}
if strings.TrimSpace(string(poolBytes)) != pool {
glog.V(4).Infof("Device %s is not %q: %q", name, pool, string(poolBytes))
glog.V(4).Infof("device %s is not %q: %q", name, pool, string(poolBytes))
continue
}
imgFile := path.Join(sys_path, name, "name")
imgBytes, err := ioutil.ReadFile(imgFile)
if err != nil {
glog.V(4).Infof("Error reading %s: %v", imgFile, err)
glog.V(4).Infof("error reading %s: %v", imgFile, err)
continue
}
if strings.TrimSpace(string(imgBytes)) != image {
glog.V(4).Infof("Device %s is not %q: %q", name, image, string(imgBytes))
glog.V(4).Infof("device %s is not %q: %q", name, image, string(imgBytes))
continue
}
// found a match, check if device exists
// Found a match, check if device exists.
devicePath := "/dev/rbd" + name
if _, err := os.Lstat(devicePath); err == nil {
return devicePath, true
@ -95,24 +109,158 @@ func getDevFromImageAndPool(pool, image string) (string, bool) {
return "", false
}
// stat a path, if not exists, retry maxRetries times
func waitForPath(pool, image string, maxRetries int) (string, bool) {
for i := 0; i < maxRetries; i++ {
devicePath, found := getDevFromImageAndPool(pool, image)
if found {
return devicePath, true
func getMaxNbds() (int, error) {
// the max number of nbd devices may be found in maxNbdsPath
// we will check sysfs for possible nbd devices even if this is not available
maxNbdsPath := "/sys/module/nbd/parameters/nbds_max"
_, err := os.Lstat(maxNbdsPath)
if err != nil {
return 0, fmt.Errorf("rbd-nbd: failed to retrieve max_nbds from %s err: %q", maxNbdsPath, err)
}
glog.V(4).Infof("found nbds max parameters file at %s", maxNbdsPath)
maxNbdBytes, err := ioutil.ReadFile(maxNbdsPath)
if err != nil {
return 0, fmt.Errorf("rbd-nbd: failed to read max_nbds from %s err: %q", maxNbdsPath, err)
}
maxNbds, err := strconv.Atoi(strings.TrimSpace(string(maxNbdBytes)))
if err != nil {
return 0, fmt.Errorf("rbd-nbd: failed to read max_nbds err: %q", err)
}
glog.V(4).Infof("rbd-nbd: max_nbds: %d", maxNbds)
return maxNbds, nil
}
// Locate any existing rbd-nbd process mapping given a <pool, image>.
// Recent versions of the rbd-nbd tool can correctly provide this info using list-mapped,
// but older versions cannot.
// The implementation below peeks at the command line of nbd-bound processes
// to figure out any mapped images.
func getNbdDevFromImageAndPool(pool string, image string) (string, bool) {
// nbd module exports the pid of serving process in sysfs
basePath := "/sys/block/nbd"
// Do not change imgPath format - some tools like rbd-nbd are strict about it.
imgPath := fmt.Sprintf("%s/%s", pool, image)
maxNbds, maxNbdsErr := getMaxNbds()
if maxNbdsErr != nil {
glog.V(4).Infof("error reading nbds_max %v", maxNbdsErr)
return "", false
}
for i := 0; i < maxNbds; i++ {
nbdPath := basePath + strconv.Itoa(i)
_, err := os.Lstat(nbdPath)
if err != nil {
glog.V(4).Infof("error reading nbd info directory %s: %v", nbdPath, err)
continue
}
if i == maxRetries-1 {
break
pidBytes, err := ioutil.ReadFile(path.Join(nbdPath, "pid"))
if err != nil {
glog.V(5).Infof("did not find valid pid file in dir %s: %v", nbdPath, err)
continue
}
time.Sleep(time.Second)
cmdlineFileName := path.Join("/proc", strings.TrimSpace(string(pidBytes)), "cmdline")
rawCmdline, err := ioutil.ReadFile(cmdlineFileName)
if err != nil {
glog.V(4).Infof("failed to read cmdline file %s: %v", cmdlineFileName, err)
continue
}
cmdlineArgs := strings.FieldsFunc(string(rawCmdline), func(r rune) bool {
return r == '\u0000'
})
// Check if this process is mapping a rbd device.
// Only accepted pattern of cmdline is from execRbdMap:
// rbd-nbd map pool/image ...
if len(cmdlineArgs) < 3 || cmdlineArgs[0] != "rbd-nbd" || cmdlineArgs[1] != "map" {
glog.V(4).Infof("nbd device %s is not used by rbd", nbdPath)
continue
}
if cmdlineArgs[2] != imgPath {
glog.V(4).Infof("rbd-nbd device %s did not match expected image path: %s with path found: %s",
nbdPath, imgPath, cmdlineArgs[2])
continue
}
devicePath := path.Join("/dev", "nbd"+strconv.Itoa(i))
if _, err := os.Lstat(devicePath); err != nil {
glog.Warningf("Stat device %s for imgpath %s failed %v", devicePath, imgPath, err)
continue
}
return devicePath, true
}
return "", false
}
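The lookup above identifies nbd devices by reading /sys/block/nbdX/pid and then inspecting the owning process's NUL-separated /proc/<pid>/cmdline. A minimal sketch of that cmdline split (the sample bytes and pool/image are assumed examples):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// /proc/<pid>/cmdline is NUL-separated. A mapping process started via
	// "rbd-nbd map pool/image ..." would look roughly like this (sample bytes, assumed):
	rawCmdline := "rbd-nbd\x00map\x00kube/foo\x00--id\x00admin\x00-m\x001.2.3.4:6789\x00"
	args := strings.FieldsFunc(rawCmdline, func(r rune) bool { return r == '\u0000' })
	isRbdNbdMap := len(args) >= 3 && args[0] == "rbd-nbd" && args[1] == "map"
	fmt.Println(isRbdNbdMap, args[2]) // true kube/foo
}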
// make a directory like /var/lib/kubelet/plugins/kubernetes.io/pod/rbd/pool-image-image
// Stat a path, if it doesn't exist, retry maxRetries times.
func waitForPath(pool, image string, maxRetries int, useNbdDriver bool) (string, bool) {
for i := 0; i < maxRetries; i++ {
if i != 0 {
time.Sleep(time.Second)
}
if useNbdDriver {
if devicePath, found := getNbdDevFromImageAndPool(pool, image); found {
return devicePath, true
}
} else {
if devicePath, found := getRbdDevFromImageAndPool(pool, image); found {
return devicePath, true
}
}
}
return "", false
}
// Execute command to map a rbd device for mounter.
// rbdCmd is driver dependent and either "rbd" or "rbd-nbd".
func execRbdMap(b rbdMounter, rbdCmd string, mon string) ([]byte, error) {
// Commandline: rbdCmd map imgPath ...
// do not change this format - some tools like rbd-nbd are strict about it.
imgPath := fmt.Sprintf("%s/%s", b.Pool, b.Image)
if b.Secret != "" {
return b.exec.Run(rbdCmd,
"map", imgPath, "--id", b.Id, "-m", mon, "--key="+b.Secret)
} else {
return b.exec.Run(rbdCmd,
"map", imgPath, "--id", b.Id, "-m", mon, "-k", b.Keyring)
}
}
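For reference, a sketch of the argv that execRbdMap builds for the two drivers (all concrete values are assumed examples): only the leading "<cmd> map pool/image" portion is format-sensitive; the remaining flags select the user id and either an inline cephx secret or a keyring file.

package main

import "fmt"

// buildMapArgs mirrors the branch in execRbdMap above: a cephx secret is passed
// inline via --key=, otherwise the keyring file path is passed via -k.
// All concrete values below are assumed examples.
func buildMapArgs(rbdCmd, pool, image, id, mon, secret, keyring string) []string {
	imgPath := fmt.Sprintf("%s/%s", pool, image)
	if secret != "" {
		return []string{rbdCmd, "map", imgPath, "--id", id, "-m", mon, "--key=" + secret}
	}
	return []string{rbdCmd, "map", imgPath, "--id", id, "-m", mon, "-k", keyring}
}

func main() {
	fmt.Println(buildMapArgs("rbd", "kube", "foo", "admin", "1.2.3.4:6789", "<secret>", ""))
	fmt.Println(buildMapArgs("rbd-nbd", "kube", "foo", "admin", "1.2.3.4:6789", "", "/etc/ceph/keyring"))
}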
// Check if rbd-nbd tools are installed.
func checkRbdNbdTools(e mount.Exec) bool {
_, err := e.Run("modprobe", "nbd")
if err != nil {
glog.V(5).Infof("rbd-nbd: nbd modprobe failed with error %v", err)
return false
}
if _, err := e.Run("rbd-nbd", "--version"); err != nil {
glog.V(5).Infof("rbd-nbd: getting rbd-nbd version failed with error %v", err)
return false
}
glog.V(3).Infof("rbd-nbd tools were found.")
return true
}
// Make a directory like /var/lib/kubelet/plugins/kubernetes.io/rbd/mounts/pool-image-image.
func makePDNameInternal(host volume.VolumeHost, pool string, image string) string {
return path.Join(host.GetPluginDir(rbdPluginName), "rbd", pool+"-image-"+image)
// Backward compatibility for the deprecated format: /var/lib/kubelet/plugins/kubernetes.io/rbd/rbd/pool-image-image.
deprecatedDir := path.Join(host.GetPluginDir(rbdPluginName), "rbd", pool+"-image-"+image)
info, err := os.Stat(deprecatedDir)
if err == nil && info.IsDir() {
// The device mount path has already been created with the deprecated format, return it.
glog.V(5).Infof("Deprecated format path %s found", deprecatedDir)
return deprecatedDir
}
// Return the canonical format path.
return path.Join(host.GetPluginDir(rbdPluginName), mount.MountsInGlobalPDPath, pool+"-image-"+image)
}
// Make a directory like /var/lib/kubelet/plugins/kubernetes.io/rbd/volumeDevices/pool-image-image.
func makeVDPDNameInternal(host volume.VolumeHost, pool string, image string) string {
return path.Join(host.GetVolumeDevicePluginDir(rbdPluginName), pool+"-image-"+image)
}
// RBDUtil implements diskManager interface.
@ -124,6 +272,10 @@ func (util *RBDUtil) MakeGlobalPDName(rbd rbd) string {
return makePDNameInternal(rbd.plugin.host, rbd.Pool, rbd.Image)
}
func (util *RBDUtil) MakeGlobalVDPDName(rbd rbd) string {
return makeVDPDNameInternal(rbd.plugin.host, rbd.Pool, rbd.Image)
}
func rbdErrors(runErr, resultErr error) error {
if err, ok := runErr.(*exec.Error); ok {
if err.Err == exec.ErrNotFound {
@ -133,9 +285,21 @@ func rbdErrors(runErr, resultErr error) error {
return resultErr
}
// rbdLock acquires a lock on image if lock is true, otherwise releases if a
// lock is found on image.
func (util *RBDUtil) rbdLock(b rbdMounter, lock bool) error {
// The 'rbd' utility builds a comma-separated list of monitor addresses from the '-m' /
// '--mon_host' parameter (comma, semi-colon, or white-space delimited monitor
// addresses) and sends it to the kernel rbd/libceph modules, which have accepted a
// comma-separated list of monitor addresses (e.g. ip1[:port1][,ip2[:port2]...])
// since their first version in linux (see
// https://github.com/torvalds/linux/blob/602adf400201636e95c3fed9f31fba54a3d7e844/net/ceph/ceph_common.c#L239).
// Also, the libceph module chooses a monitor randomly, so we can simply pass all
// addresses without randomization (see
// https://github.com/torvalds/linux/blob/602adf400201636e95c3fed9f31fba54a3d7e844/net/ceph/mon_client.c#L132).
func (util *RBDUtil) kernelRBDMonitorsOpt(mons []string) string {
return strings.Join(mons, ",")
}
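A quick usage sketch of kernelRBDMonitorsOpt (the monitor addresses are assumed examples): the whole monitor list is joined into one comma-separated string and passed to the kernel as a single -m value, instead of trying monitors one at a time.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Assumed monitor addresses for illustration; the whole list is passed
	// to the kernel as a single comma-separated -m value.
	mons := []string{"10.16.154.78:6789", "10.16.154.82:6789", "10.16.154.83:6789"}
	fmt.Println(strings.Join(mons, ",")) // 10.16.154.78:6789,10.16.154.82:6789,10.16.154.83:6789
}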
// rbdUnlock releases a lock on image if found.
func (util *RBDUtil) rbdUnlock(b rbdMounter) error {
var err error
var output, locker string
var cmd []byte
@ -153,98 +317,43 @@ func (util *RBDUtil) rbdLock(b rbdMounter, lock bool) error {
b.adminSecret = b.Secret
}
// construct lock id using host name and a magic prefix
// Construct lock id using host name and a magic prefix.
lock_id := kubeLockMagic + node.GetHostname("")
l := len(b.Mon)
// avoid mount storm, pick a host randomly
start := rand.Int() % l
// iterate all hosts until mount succeeds.
for i := start; i < start+l; i++ {
mon := b.Mon[i%l]
// cmd "rbd lock list" serves two purposes:
// for fencing, check if lock already held for this host
// this edge case happens if host crashes in the middle of acquiring lock and mounting rbd
// for defencing, get the locker name, something like "client.1234"
args := []string{"lock", "list", b.Image, "--pool", b.Pool, "--id", b.Id, "-m", mon}
args = append(args, secret_opt...)
cmd, err = b.exec.Run("rbd", args...)
output = string(cmd)
glog.Infof("lock list output %q", output)
if err != nil {
continue
}
mon := util.kernelRBDMonitorsOpt(b.Mon)
if lock {
// check if lock is already held for this host by matching lock_id and rbd lock id
if strings.Contains(output, lock_id) {
// this host already holds the lock, exit
glog.V(1).Infof("rbd: lock already held for %s", lock_id)
return nil
}
// clean up orphaned lock if no watcher on the image
used, rbdOutput, statusErr := util.rbdStatus(&b)
if statusErr != nil {
return fmt.Errorf("rbdStatus failed error %v, rbd output: %v", statusErr, rbdOutput)
}
if used {
// this image is already used by a node other than this node
return fmt.Errorf("rbd image: %s/%s is already used by a node other than this node, rbd output: %v", b.Image, b.Pool, output)
}
// best effort clean up orphaned locked if not used
locks := clientKubeLockMagicRe.FindAllStringSubmatch(output, -1)
for _, v := range locks {
if len(v) > 0 {
lockInfo := strings.Split(v[0], " ")
if len(lockInfo) > 2 {
args := []string{"lock", "remove", b.Image, lockInfo[1], lockInfo[0], "--pool", b.Pool, "--id", b.Id, "-m", mon}
args = append(args, secret_opt...)
cmd, err = b.exec.Run("rbd", args...)
glog.Infof("remove orphaned locker %s from client %s: err %v, rbd output: %s", lockInfo[1], lockInfo[0], err, string(cmd))
}
}
}
// hold a lock: rbd lock add
args := []string{"lock", "add", b.Image, lock_id, "--pool", b.Pool, "--id", b.Id, "-m", mon}
args = append(args, secret_opt...)
cmd, err = b.exec.Run("rbd", args...)
if err == nil {
glog.V(4).Infof("rbd: successfully add lock (locker_id: %s) on image: %s/%s with id %s mon %s", lock_id, b.Pool, b.Image, b.Id, mon)
}
} else {
// defencing, find locker name
ind := strings.LastIndex(output, lock_id) - 1
for i := ind; i >= 0; i-- {
if output[i] == '\n' {
locker = output[(i + 1):ind]
break
}
}
// remove a lock if found: rbd lock remove
if len(locker) > 0 {
args := []string{"lock", "remove", b.Image, lock_id, locker, "--pool", b.Pool, "--id", b.Id, "-m", mon}
args = append(args, secret_opt...)
cmd, err = b.exec.Run("rbd", args...)
if err == nil {
glog.V(4).Infof("rbd: successfully remove lock (locker_id: %s) on image: %s/%s with id %s mon %s", lock_id, b.Pool, b.Image, b.Id, mon)
}
}
}
if err == nil {
// break if operation succeeds
// Get the locker name, something like "client.1234".
args := []string{"lock", "list", b.Image, "--pool", b.Pool, "--id", b.Id, "-m", mon}
args = append(args, secret_opt...)
cmd, err = b.exec.Run("rbd", args...)
output = string(cmd)
glog.V(4).Infof("lock list output %q", output)
if err != nil {
return err
}
ind := strings.LastIndex(output, lock_id) - 1
for i := ind; i >= 0; i-- {
if output[i] == '\n' {
locker = output[(i + 1):ind]
break
}
}
// Remove a lock if found: rbd lock remove.
if len(locker) > 0 {
args := []string{"lock", "remove", b.Image, lock_id, locker, "--pool", b.Pool, "--id", b.Id, "-m", mon}
args = append(args, secret_opt...)
cmd, err = b.exec.Run("rbd", args...)
if err == nil {
glog.V(4).Infof("rbd: successfully remove lock (locker_id: %s) on image: %s/%s with id %s mon %s", lock_id, b.Pool, b.Image, b.Id, mon)
}
}
return err
}
// AttachDisk attaches the disk on the node.
// If Volume is not read-only, acquire a lock on image first.
func (util *RBDUtil) AttachDisk(b rbdMounter) (string, error) {
var err error
var output []byte
globalPDPath := util.MakeGlobalPDName(*b.rbd)
@ -256,49 +365,72 @@ func (util *RBDUtil) AttachDisk(b rbdMounter) (string, error) {
}
}
devicePath, found := waitForPath(b.Pool, b.Image, 1)
if !found {
// Evaluate whether this device was mapped with rbd.
devicePath, mapped := waitForPath(b.Pool, b.Image, 1 /*maxRetries*/, false /*useNbdDriver*/)
// If rbd-nbd tools are found, we will fall back to it should the default krbd driver fail.
nbdToolsFound := false
if !mapped {
nbdToolsFound = checkRbdNbdTools(b.exec)
if nbdToolsFound {
devicePath, mapped = waitForPath(b.Pool, b.Image, 1 /*maxRetries*/, true /*useNbdDriver*/)
}
}
if !mapped {
// Currently, we don't acquire advisory lock on image, but for backward
// compatibility, we need to check if the image is being used by nodes running old kubelet.
// osd_client_watch_timeout defaults to 30 seconds, if the watcher stays active longer than 30 seconds,
// rbd image does not get mounted and failure message gets generated.
backoff := wait.Backoff{
Duration: rbdImageWatcherInitDelay,
Factor: rbdImageWatcherFactor,
Steps: rbdImageWatcherSteps,
}
err := wait.ExponentialBackoff(backoff, func() (bool, error) {
used, rbdOutput, err := util.rbdStatus(&b)
if err != nil {
return false, fmt.Errorf("fail to check rbd image status with: (%v), rbd output: (%s)", err, rbdOutput)
}
return !used, nil
})
// Return error if rbd image has not become available for the specified timeout.
if err == wait.ErrWaitTimeout {
return "", fmt.Errorf("rbd image %s/%s is still being used", b.Pool, b.Image)
}
// Return error if any other errors were encountered while waiting for the image to become available.
if err != nil {
return "", err
}
mon := util.kernelRBDMonitorsOpt(b.Mon)
glog.V(1).Infof("rbd: map mon %s", mon)
_, err = b.exec.Run("modprobe", "rbd")
if err != nil {
glog.Warningf("rbd: failed to load rbd kernel module:%v", err)
}
// Currently, we don't acquire advisory lock on image, but for backward
// compatibility, we need to check if the image is being used by nodes running old kubelet.
found, rbdOutput, err := util.rbdStatus(&b)
output, err = execRbdMap(b, "rbd", mon)
if err != nil {
return "", fmt.Errorf("error: %v, rbd output: %v", err, rbdOutput)
}
if found {
glog.Infof("rbd image %s/%s is still being used ", b.Pool, b.Image)
return "", fmt.Errorf("rbd image %s/%s is still being used. rbd output: %s", b.Pool, b.Image, rbdOutput)
}
// rbd map
l := len(b.Mon)
// avoid mount storm, pick a host randomly
start := rand.Int() % l
// iterate all hosts until mount succeeds.
for i := start; i < start+l; i++ {
mon := b.Mon[i%l]
glog.V(1).Infof("rbd: map mon %s", mon)
if b.Secret != "" {
output, err = b.exec.Run("rbd",
"map", b.Image, "--pool", b.Pool, "--id", b.Id, "-m", mon, "--key="+b.Secret)
} else {
output, err = b.exec.Run("rbd",
"map", b.Image, "--pool", b.Pool, "--id", b.Id, "-m", mon, "-k", b.Keyring)
if !nbdToolsFound {
glog.V(1).Infof("rbd: map error %v, rbd output: %s", err, string(output))
return "", fmt.Errorf("rbd: map failed %v, rbd output: %s", err, string(output))
}
if err == nil {
break
glog.V(3).Infof("rbd: map failed with %v, %s. Retrying with rbd-nbd", err, string(output))
errList := []error{err}
outputList := output
output, err = execRbdMap(b, "rbd-nbd", mon)
if err != nil {
errList = append(errList, err)
outputList = append(outputList, output...)
return "", fmt.Errorf("rbd: map failed %v, rbd output: %s", errors.NewAggregate(errList), string(outputList))
}
glog.V(1).Infof("rbd: map error %v, rbd output: %s", err, string(output))
devicePath, mapped = waitForPath(b.Pool, b.Image, 10 /*maxRetries*/, true /*useNbdDriver*/)
} else {
devicePath, mapped = waitForPath(b.Pool, b.Image, 10 /*maxRetries*/, false /*useNbdDriver*/)
}
if err != nil {
return "", fmt.Errorf("rbd: map failed %v, rbd output: %s", err, string(output))
}
devicePath, found = waitForPath(b.Pool, b.Image, 10)
if !found {
if !mapped {
return "", fmt.Errorf("Could not map image %s/%s, Timeout after 10s", b.Pool, b.Image)
}
}
@ -312,9 +444,21 @@ func (util *RBDUtil) DetachDisk(plugin *rbdPlugin, deviceMountPath string, devic
if len(device) == 0 {
return fmt.Errorf("DetachDisk failed , device is empty")
}
// rbd unmap
exec := plugin.host.GetExec(plugin.GetPluginName())
output, err := exec.Run("rbd", "unmap", device)
var rbdCmd string
// Unlike map, we cannot fall through for unmap;
// the tool to unmap is chosen based on the device type.
if strings.HasPrefix(device, "/dev/nbd") {
rbdCmd = "rbd-nbd"
} else {
rbdCmd = "rbd"
}
// rbd unmap
output, err := exec.Run(rbdCmd, "unmap", device)
if err != nil {
return rbdErrors(err, fmt.Errorf("rbd: failed to unmap device %s, error %v, rbd output: %v", device, err, output))
}
@ -339,11 +483,54 @@ func (util *RBDUtil) DetachDisk(plugin *rbdPlugin, deviceMountPath string, devic
return nil
}
// DetachBlockDisk detaches the disk from the node.
func (util *RBDUtil) DetachBlockDisk(disk rbdDiskUnmapper, mapPath string) error {
if pathExists, pathErr := volutil.PathExists(mapPath); pathErr != nil {
return fmt.Errorf("Error checking if path exists: %v", pathErr)
} else if !pathExists {
glog.Warningf("Warning: Unmap skipped because path does not exist: %v", mapPath)
return nil
}
// If we arrive here, device is no longer used, see if we need to logout of the target
device, err := getBlockVolumeDevice(mapPath)
if err != nil {
return err
}
if len(device) == 0 {
return fmt.Errorf("DetachDisk failed , device is empty")
}
exec := disk.plugin.host.GetExec(disk.plugin.GetPluginName())
var rbdCmd string
// Unlike map, we cannot fall through here.
// Any nbd device must be unmapped by rbd-nbd.
if strings.HasPrefix(device, "/dev/nbd") {
rbdCmd = "rbd-nbd"
glog.V(4).Infof("rbd: using rbd-nbd for unmap function")
} else {
rbdCmd = "rbd"
glog.V(4).Infof("rbd: using rbd for unmap function")
}
// rbd unmap
output, err := exec.Run(rbdCmd, "unmap", device)
if err != nil {
return rbdErrors(err, fmt.Errorf("rbd: failed to unmap device %s, error %v, rbd output: %s", device, err, string(output)))
}
glog.V(3).Infof("rbd: successfully unmap device %s", device)
return nil
}
// cleanOldRBDFile reads rbd info from the rbd.json file and removes the lock if found.
// Finally, it removes the rbd.json file.
func (util *RBDUtil) cleanOldRBDFile(plugin *rbdPlugin, rbdFile string) error {
mounter := &rbdMounter{
// util.rbdLock needs it to run command.
// util.rbdUnlock needs it to run command.
rbd: newRBD("", "", "", "", false, plugin, util),
}
fp, err := os.Open(rbdFile)
@ -361,10 +548,10 @@ func (util *RBDUtil) cleanOldRBDFile(plugin *rbdPlugin, rbdFile string) error {
glog.Errorf("failed to load rbd info from %s: %v", rbdFile, err)
return err
}
// remove rbd lock if found
// the disk is not attached to this node anymore, so the lock on image
// for this node can be removed safely
err = util.rbdLock(*mounter, false)
// Remove rbd lock if found.
// The disk is not attached to this node anymore, so the lock on image
// for this node can be removed safely.
err = util.rbdUnlock(*mounter)
if err == nil {
os.Remove(rbdFile)
}
@ -375,37 +562,26 @@ func (util *RBDUtil) CreateImage(p *rbdVolumeProvisioner) (r *v1.RBDPersistentVo
var output []byte
capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
volSizeBytes := capacity.Value()
// convert to MB that rbd defaults on
sz := int(volume.RoundUpSize(volSizeBytes, 1024*1024))
// Convert to MB, which rbd defaults to.
sz := int(volutil.RoundUpSize(volSizeBytes, 1024*1024))
volSz := fmt.Sprintf("%d", sz)
// rbd create
l := len(p.rbdMounter.Mon)
// pick a mon randomly
start := rand.Int() % l
// iterate all monitors until create succeeds.
for i := start; i < start+l; i++ {
mon := p.Mon[i%l]
if p.rbdMounter.imageFormat == rbdImageFormat2 {
glog.V(4).Infof("rbd: create %s size %s format %s (features: %s) using mon %s, pool %s id %s key %s", p.rbdMounter.Image, volSz, p.rbdMounter.imageFormat, p.rbdMounter.imageFeatures, mon, p.rbdMounter.Pool, p.rbdMounter.adminId, p.rbdMounter.adminSecret)
} else {
glog.V(4).Infof("rbd: create %s size %s format %s using mon %s, pool %s id %s key %s", p.rbdMounter.Image, volSz, p.rbdMounter.imageFormat, mon, p.rbdMounter.Pool, p.rbdMounter.adminId, p.rbdMounter.adminSecret)
}
args := []string{"create", p.rbdMounter.Image, "--size", volSz, "--pool", p.rbdMounter.Pool, "--id", p.rbdMounter.adminId, "-m", mon, "--key=" + p.rbdMounter.adminSecret, "--image-format", p.rbdMounter.imageFormat}
if p.rbdMounter.imageFormat == rbdImageFormat2 {
// if no image features is provided, it results in empty string
// which disable all RBD image format 2 features as we expected
features := strings.Join(p.rbdMounter.imageFeatures, ",")
args = append(args, "--image-feature", features)
}
output, err = p.exec.Run("rbd", args...)
if err == nil {
break
} else {
glog.Warningf("failed to create rbd image, output %v", string(output))
}
mon := util.kernelRBDMonitorsOpt(p.Mon)
if p.rbdMounter.imageFormat == rbdImageFormat2 {
glog.V(4).Infof("rbd: create %s size %s format %s (features: %s) using mon %s, pool %s id %s key %s", p.rbdMounter.Image, volSz, p.rbdMounter.imageFormat, p.rbdMounter.imageFeatures, mon, p.rbdMounter.Pool, p.rbdMounter.adminId, p.rbdMounter.adminSecret)
} else {
glog.V(4).Infof("rbd: create %s size %s format %s using mon %s, pool %s id %s key %s", p.rbdMounter.Image, volSz, p.rbdMounter.imageFormat, mon, p.rbdMounter.Pool, p.rbdMounter.adminId, p.rbdMounter.adminSecret)
}
args := []string{"create", p.rbdMounter.Image, "--size", volSz, "--pool", p.rbdMounter.Pool, "--id", p.rbdMounter.adminId, "-m", mon, "--key=" + p.rbdMounter.adminSecret, "--image-format", p.rbdMounter.imageFormat}
if p.rbdMounter.imageFormat == rbdImageFormat2 {
// If no image features are provided, this results in an empty string,
// which disables all RBD image format 2 features, as expected.
features := strings.Join(p.rbdMounter.imageFeatures, ",")
args = append(args, "--image-feature", features)
}
output, err = p.exec.Run("rbd", args...)
if err != nil {
glog.Warningf("failed to create rbd image, output %v", string(output))
return nil, 0, fmt.Errorf("failed to create rbd image: %v, command output: %s", err, string(output))
}
@ -426,36 +602,30 @@ func (util *RBDUtil) DeleteImage(p *rbdVolumeDeleter) error {
glog.Info("rbd is still being used ", p.rbdMounter.Image)
return fmt.Errorf("rbd image %s/%s is still being used, rbd output: %v", p.rbdMounter.Pool, p.rbdMounter.Image, rbdOutput)
}
// rbd rm
l := len(p.rbdMounter.Mon)
// pick a mon randomly
start := rand.Int() % l
// iterate all monitors until rm succeeds.
for i := start; i < start+l; i++ {
mon := p.rbdMounter.Mon[i%l]
glog.V(4).Infof("rbd: rm %s using mon %s, pool %s id %s key %s", p.rbdMounter.Image, mon, p.rbdMounter.Pool, p.rbdMounter.adminId, p.rbdMounter.adminSecret)
output, err = p.exec.Run("rbd",
"rm", p.rbdMounter.Image, "--pool", p.rbdMounter.Pool, "--id", p.rbdMounter.adminId, "-m", mon, "--key="+p.rbdMounter.adminSecret)
if err == nil {
return nil
} else {
glog.Errorf("failed to delete rbd image: %v, command output: %s", err, string(output))
}
// rbd rm.
mon := util.kernelRBDMonitorsOpt(p.rbdMounter.Mon)
glog.V(4).Infof("rbd: rm %s using mon %s, pool %s id %s key %s", p.rbdMounter.Image, mon, p.rbdMounter.Pool, p.rbdMounter.adminId, p.rbdMounter.adminSecret)
output, err = p.exec.Run("rbd",
"rm", p.rbdMounter.Image, "--pool", p.rbdMounter.Pool, "--id", p.rbdMounter.adminId, "-m", mon, "--key="+p.rbdMounter.adminSecret)
if err == nil {
return nil
}
glog.Errorf("failed to delete rbd image: %v, command output: %s", err, string(output))
return fmt.Errorf("error %v, rbd output: %v", err, string(output))
}
// ExpandImage runs rbd resize command to resize the specified image
// ExpandImage runs rbd resize command to resize the specified image.
func (util *RBDUtil) ExpandImage(rbdExpander *rbdVolumeExpander, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error) {
var output []byte
var err error
volSizeBytes := newSize.Value()
// convert to MB that rbd defaults on
sz := int(volume.RoundUpSize(volSizeBytes, 1024*1024))
// Convert to MB, which rbd defaults to.
sz := int(volutil.RoundUpSize(volSizeBytes, 1024*1024))
newVolSz := fmt.Sprintf("%d", sz)
newSizeQuant := resource.MustParse(fmt.Sprintf("%dMi", sz))
// check the current size of rbd image, if equals to or greater that the new request size, do nothing
// Check the current size of the rbd image; if it is equal to or greater than the new requested size, do nothing.
curSize, infoErr := util.rbdInfo(rbdExpander.rbdMounter)
if infoErr != nil {
return oldSize, fmt.Errorf("rbd info failed, error: %v", infoErr)
@ -464,26 +634,20 @@ func (util *RBDUtil) ExpandImage(rbdExpander *rbdVolumeExpander, oldSize resourc
return newSizeQuant, nil
}
// rbd resize
l := len(rbdExpander.rbdMounter.Mon)
// pick a mon randomly
start := rand.Int() % l
// iterate all monitors until resize succeeds.
for i := start; i < start+l; i++ {
mon := rbdExpander.rbdMounter.Mon[i%l]
glog.V(4).Infof("rbd: resize %s using mon %s, pool %s id %s key %s", rbdExpander.rbdMounter.Image, mon, rbdExpander.rbdMounter.Pool, rbdExpander.rbdMounter.adminId, rbdExpander.rbdMounter.adminSecret)
output, err = rbdExpander.exec.Run("rbd",
"resize", rbdExpander.rbdMounter.Image, "--size", newVolSz, "--pool", rbdExpander.rbdMounter.Pool, "--id", rbdExpander.rbdMounter.adminId, "-m", mon, "--key="+rbdExpander.rbdMounter.adminSecret)
if err == nil {
return newSizeQuant, nil
} else {
glog.Errorf("failed to resize rbd image: %v, command output: %s", err, string(output))
}
// rbd resize.
mon := util.kernelRBDMonitorsOpt(rbdExpander.rbdMounter.Mon)
glog.V(4).Infof("rbd: resize %s using mon %s, pool %s id %s key %s", rbdExpander.rbdMounter.Image, mon, rbdExpander.rbdMounter.Pool, rbdExpander.rbdMounter.adminId, rbdExpander.rbdMounter.adminSecret)
output, err = rbdExpander.exec.Run("rbd",
"resize", rbdExpander.rbdMounter.Image, "--size", newVolSz, "--pool", rbdExpander.rbdMounter.Pool, "--id", rbdExpander.rbdMounter.adminId, "-m", mon, "--key="+rbdExpander.rbdMounter.adminSecret)
if err == nil {
return newSizeQuant, nil
}
glog.Errorf("failed to resize rbd image: %v, command output: %s", err, string(output))
return oldSize, err
}
// rbdInfo runs `rbd info` command to get the current image size in MB
// rbdInfo runs `rbd info` command to get the current image size in MB.
func (util *RBDUtil) rbdInfo(b *rbdMounter) (int, error) {
var err error
var output string
@ -497,44 +661,34 @@ func (util *RBDUtil) rbdInfo(b *rbdMounter) (int, error) {
secret = b.Secret
}
l := len(b.Mon)
start := rand.Int() % l
// iterate all hosts until rbd command succeeds.
for i := start; i < start+l; i++ {
mon := b.Mon[i%l]
// cmd "rbd info" get the image info with the following output:
//
// # image exists (exit=0)
// rbd info volume-4a5bcc8b-2b55-46da-ba04-0d3dc5227f08
// size 1024 MB in 256 objects
// order 22 (4096 kB objects)
// block_name_prefix: rbd_data.1253ac238e1f29
// format: 2
// ...
//
// rbd info volume-4a5bcc8b-2b55-46da-ba04-0d3dc5227f08 --format json
// {"name":"volume-4a5bcc8b-2b55-46da-ba04-0d3dc5227f08","size":1073741824,"objects":256,"order":22,"object_size":4194304,"block_name_prefix":"rbd_data.1253ac238e1f29","format":2,"features":["layering","exclusive-lock","object-map","fast-diff","deep-flatten"],"flags":[]}
//
//
// # image does not exist (exit=2)
// rbd: error opening image 1234: (2) No such file or directory
//
glog.V(4).Infof("rbd: info %s using mon %s, pool %s id %s key %s", b.Image, mon, b.Pool, id, secret)
cmd, err = b.exec.Run("rbd",
"info", b.Image, "--pool", b.Pool, "-m", mon, "--id", id, "--key="+secret)
output = string(cmd)
mon := util.kernelRBDMonitorsOpt(b.Mon)
// cmd "rbd info" get the image info with the following output:
//
// # image exists (exit=0)
// rbd info volume-4a5bcc8b-2b55-46da-ba04-0d3dc5227f08
// size 1024 MB in 256 objects
// order 22 (4096 kB objects)
// block_name_prefix: rbd_data.1253ac238e1f29
// format: 2
// ...
//
// rbd info volume-4a5bcc8b-2b55-46da-ba04-0d3dc5227f08 --format json
// {"name":"volume-4a5bcc8b-2b55-46da-ba04-0d3dc5227f08","size":1073741824,"objects":256,"order":22,"object_size":4194304,"block_name_prefix":"rbd_data.1253ac238e1f29","format":2,"features":["layering","exclusive-lock","object-map","fast-diff","deep-flatten"],"flags":[]}
//
//
// # image does not exist (exit=2)
// rbd: error opening image 1234: (2) No such file or directory
//
glog.V(4).Infof("rbd: info %s using mon %s, pool %s id %s key %s", b.Image, mon, b.Pool, id, secret)
cmd, err = b.exec.Run("rbd",
"info", b.Image, "--pool", b.Pool, "-m", mon, "--id", id, "--key="+secret)
output = string(cmd)
// break if command succeeds
if err == nil {
break
}
if err, ok := err.(*exec.Error); ok {
if err.Err == exec.ErrNotFound {
glog.Errorf("rbd cmd not found")
// fail fast if command not found
return 0, err
}
if err, ok := err.(*exec.Error); ok {
if err.Err == exec.ErrNotFound {
glog.Errorf("rbd cmd not found")
// fail fast if rbd command is not found.
return 0, err
}
}
@ -547,7 +701,7 @@ func (util *RBDUtil) rbdInfo(b *rbdMounter) (int, error) {
return 0, fmt.Errorf("can not get image size info %s: %s", b.Image, output)
}
// get the size value string, just between `size ` and ` MB in`, such as `size 1024 MB in 256 objects`
// Get the size value string, just between `size ` and ` MB in`, such as `size 1024 MB in 256 objects`.
sizeIndex := strings.Index(output, imageSizeStr)
divIndex := strings.Index(output, sizeDivStr)
if sizeIndex == -1 || divIndex == -1 || divIndex <= sizeIndex+5 {
@ -576,41 +730,31 @@ func (util *RBDUtil) rbdStatus(b *rbdMounter) (bool, string, error) {
secret = b.Secret
}
l := len(b.Mon)
start := rand.Int() % l
// iterate all hosts until rbd command succeeds.
for i := start; i < start+l; i++ {
mon := b.Mon[i%l]
// cmd "rbd status" list the rbd client watch with the following output:
//
// # there is a watcher (exit=0)
// Watchers:
// watcher=10.16.153.105:0/710245699 client.14163 cookie=1
//
// # there is no watcher (exit=0)
// Watchers: none
//
// Otherwise, exit is non-zero, for example:
//
// # image does not exist (exit=2)
// rbd: error opening image kubernetes-dynamic-pvc-<UUID>: (2) No such file or directory
//
glog.V(4).Infof("rbd: status %s using mon %s, pool %s id %s key %s", b.Image, mon, b.Pool, id, secret)
cmd, err = b.exec.Run("rbd",
"status", b.Image, "--pool", b.Pool, "-m", mon, "--id", id, "--key="+secret)
output = string(cmd)
mon := util.kernelRBDMonitorsOpt(b.Mon)
// cmd "rbd status" list the rbd client watch with the following output:
//
// # there is a watcher (exit=0)
// Watchers:
// watcher=10.16.153.105:0/710245699 client.14163 cookie=1
//
// # there is no watcher (exit=0)
// Watchers: none
//
// Otherwise, exit is non-zero, for example:
//
// # image does not exist (exit=2)
// rbd: error opening image kubernetes-dynamic-pvc-<UUID>: (2) No such file or directory
//
glog.V(4).Infof("rbd: status %s using mon %s, pool %s id %s key %s", b.Image, mon, b.Pool, id, secret)
cmd, err = b.exec.Run("rbd",
"status", b.Image, "--pool", b.Pool, "-m", mon, "--id", id, "--key="+secret)
output = string(cmd)
// break if command succeeds
if err == nil {
break
}
if err, ok := err.(*exec.Error); ok {
if err.Err == exec.ErrNotFound {
glog.Errorf("rbd cmd not found")
// fail fast if command not found
return false, output, err
}
if err, ok := err.(*exec.Error); ok {
if err.Err == exec.ErrNotFound {
glog.Errorf("rbd cmd not found")
// fail fast if command not found
return false, output, err
}
}