vendor files

commit 7b24313bd6
parent 558bc6c02a
Author: Serguei Bezverkhi
Date: 2018-01-09 13:57:14 -05:00

16547 changed files with 4527373 additions and 0 deletions

vendor/k8s.io/kubernetes/pkg/volume/rbd/BUILD (generated, vendored; 68 lines)

@@ -0,0 +1,68 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"attacher.go",
"disk_manager.go",
"doc.go",
"rbd.go",
"rbd_util.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/rbd",
deps = [
"//pkg/util/file:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/node:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["rbd_test.go"],
importpath = "k8s.io/kubernetes/pkg/volume/rbd",
library = ":go_default_library",
deps = [
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

vendor/k8s.io/kubernetes/pkg/volume/rbd/OWNERS (generated, vendored; 9 lines)

@@ -0,0 +1,9 @@
approvers:
- rootfs
reviewers:
- sjenning
- saad-ali
- jsafrane
- rootfs
- jingxu97
- msau42

vendor/k8s.io/kubernetes/pkg/volume/rbd/attacher.go (generated, vendored; 225 lines)

@@ -0,0 +1,225 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rbd
import (
"fmt"
"os"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
// NewAttacher implements AttachableVolumePlugin.NewAttacher.
func (plugin *rbdPlugin) NewAttacher() (volume.Attacher, error) {
return plugin.newAttacherInternal(&RBDUtil{})
}
func (plugin *rbdPlugin) newAttacherInternal(manager diskManager) (volume.Attacher, error) {
return &rbdAttacher{
plugin: plugin,
manager: manager,
mounter: volumehelper.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host),
}, nil
}
// NewDetacher implements AttachableVolumePlugin.NewDetacher.
func (plugin *rbdPlugin) NewDetacher() (volume.Detacher, error) {
return plugin.newDetacherInternal(&RBDUtil{})
}
func (plugin *rbdPlugin) newDetacherInternal(manager diskManager) (volume.Detacher, error) {
return &rbdDetacher{
plugin: plugin,
manager: manager,
mounter: volumehelper.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host),
}, nil
}
// GetDeviceMountRefs implements AttachableVolumePlugin.GetDeviceMountRefs.
func (plugin *rbdPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
mounter := plugin.host.GetMounter(plugin.GetPluginName())
return mount.GetMountRefs(mounter, deviceMountPath)
}
// rbdAttacher implements volume.Attacher interface.
type rbdAttacher struct {
plugin *rbdPlugin
mounter *mount.SafeFormatAndMount
manager diskManager
}
var _ volume.Attacher = &rbdAttacher{}
// Attach implements Attacher.Attach.
// We do not lock the image here, because that would require kube-controller-manager
// to invoke the external `rbd` utility. It is also unnecessary: the AttachDetach
// controller will not try to attach RWO volumes that are already attached to
// other nodes.
func (attacher *rbdAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
return "", nil
}
// VolumesAreAttached implements Attacher.VolumesAreAttached.
// There is no way to confirm whether the volume is attached or not from
// outside of the kubelet node. This method needs to return true always, like
// iSCSI, FC plugin.
func (attacher *rbdAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
volumesAttachedCheck := make(map[*volume.Spec]bool)
for _, spec := range specs {
volumesAttachedCheck[spec] = true
}
return volumesAttachedCheck, nil
}
// WaitForAttach implements Attacher.WaitForAttach. It's called by the kubelet
// to attach the volume to the node.
// This method is idempotent, callers are responsible for retrying on failure.
func (attacher *rbdAttacher) WaitForAttach(spec *volume.Spec, devicePath string, pod *v1.Pod, timeout time.Duration) (string, error) {
glog.V(4).Infof("rbd: waiting for attach volume (name: %s) for pod (name: %s, uid: %s)", spec.Name(), pod.Name, pod.UID)
mounter, err := attacher.plugin.createMounterFromVolumeSpecAndPod(spec, pod)
if err != nil {
glog.Warningf("failed to create mounter: %v", spec)
return "", err
}
realDevicePath, err := attacher.manager.AttachDisk(*mounter)
if err != nil {
return "", err
}
glog.V(3).Infof("rbd: successfully wait for attach volume (spec: %s, pool: %s, image: %s) at %s", spec.Name(), mounter.Pool, mounter.Image, realDevicePath)
return realDevicePath, nil
}
// GetDeviceMountPath implements Attacher.GetDeviceMountPath.
func (attacher *rbdAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) {
img, err := getVolumeSourceImage(spec)
if err != nil {
return "", err
}
pool, err := getVolumeSourcePool(spec)
if err != nil {
return "", err
}
return makePDNameInternal(attacher.plugin.host, pool, img), nil
}
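// For illustration: with the default kubelet root, a volume in pool "pool1"
// with image "image1" resolves to a device mount path like
// /var/lib/kubelet/plugins/kubernetes.io/rbd/rbd/pool1-image-image1
// (this matches the expectations encoded in rbd_test.go).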
// MountDevice implements Attacher.MountDevice. It is called by the kubelet to
// mount device at the given mount path.
// This method is idempotent, callers are responsible for retrying on failure.
func (attacher *rbdAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
glog.V(4).Infof("rbd: mouting device %s to %s", devicePath, deviceMountPath)
notMnt, err := attacher.mounter.IsLikelyNotMountPoint(deviceMountPath)
if err != nil {
if os.IsNotExist(err) {
if err := os.MkdirAll(deviceMountPath, 0750); err != nil {
return err
}
notMnt = true
} else {
return err
}
}
if !notMnt {
return nil
}
fstype, err := getVolumeSourceFSType(spec)
if err != nil {
return err
}
ro, err := getVolumeSourceReadOnly(spec)
if err != nil {
return err
}
options := []string{}
if ro {
options = append(options, "ro")
}
mountOptions := volume.MountOptionFromSpec(spec, options...)
err = attacher.mounter.FormatAndMount(devicePath, deviceMountPath, fstype, mountOptions)
if err != nil {
os.Remove(deviceMountPath)
return fmt.Errorf("rbd: failed to mount device %s at %s (fstype: %s), error %v", devicePath, deviceMountPath, fstype, err)
}
glog.V(3).Infof("rbd: successfully mount device %s at %s (fstype: %s)", devicePath, deviceMountPath, fstype)
return nil
}
// rbdDetacher implements volume.Detacher interface.
type rbdDetacher struct {
plugin *rbdPlugin
manager diskManager
mounter *mount.SafeFormatAndMount
}
var _ volume.Detacher = &rbdDetacher{}
// UnmountDevice implements Detacher.UnmountDevice. It unmounts the global
// mount of the RBD image. This is called once all bind mounts have been
// unmounted.
// Internally, it does four things:
// - Unmount device from deviceMountPath.
// - Detach device from the node.
// - Remove lock if found. (There is no need to check whether the volume is
//   read-only: the device is no longer on the node, so it is safe to remove
//   the lock.)
// - Finally, remove the deviceMountPath.
// This method is idempotent, callers are responsible for retrying on failure.
func (detacher *rbdDetacher) UnmountDevice(deviceMountPath string) error {
if pathExists, pathErr := volutil.PathExists(deviceMountPath); pathErr != nil {
return fmt.Errorf("Error checking if path exists: %v", pathErr)
} else if !pathExists {
glog.Warningf("Warning: Unmount skipped because path does not exist: %v", deviceMountPath)
return nil
}
devicePath, cnt, err := mount.GetDeviceNameFromMount(detacher.mounter, deviceMountPath)
if err != nil {
return err
}
if cnt > 1 {
return fmt.Errorf("rbd: more than 1 reference counts at %s", deviceMountPath)
}
if cnt == 1 {
// Unmount the device from the device mount point.
glog.V(4).Infof("rbd: unmouting device mountpoint %s", deviceMountPath)
if err = detacher.mounter.Unmount(deviceMountPath); err != nil {
return err
}
glog.V(3).Infof("rbd: successfully umount device mountpath %s", deviceMountPath)
}
glog.V(4).Infof("rbd: detaching device %s", devicePath)
err = detacher.manager.DetachDisk(detacher.plugin, deviceMountPath, devicePath)
if err != nil {
return err
}
glog.V(3).Infof("rbd: successfully detach device %s", devicePath)
err = os.Remove(deviceMountPath)
if err != nil {
return err
}
glog.V(3).Infof("rbd: successfully remove device mount point %s", deviceMountPath)
return nil
}
// Detach implements Detacher.Detach.
func (detacher *rbdDetacher) Detach(volumeName string, nodeName types.NodeName) error {
return nil
}

vendor/k8s.io/kubernetes/pkg/volume/rbd/disk_manager.go (generated, vendored; 129 lines)

@@ -0,0 +1,129 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//
// The diskManager interface and the diskSetUp/diskTearDown functions abstract
// commonly used procedures for setting up a block volume. The rbd volume plugin
// implements diskManager, calls diskSetUp when creating a volume, and calls
// diskTearDown inside the volume unmounter.
// TODO: consolidate, refactor, and share diskManager among iSCSI, GCE PD, and RBD
//
package rbd
import (
"fmt"
"os"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
)
// Abstract interface to disk operations.
type diskManager interface {
// MakeGlobalPDName creates global persistent disk path.
MakeGlobalPDName(disk rbd) string
// Attaches the disk to the kubelet's host machine.
// If it successfully attaches, the path to the device
// is returned. Otherwise, an error will be returned.
AttachDisk(disk rbdMounter) (string, error)
// Detaches the disk from the kubelet's host machine.
DetachDisk(plugin *rbdPlugin, deviceMountPath string, device string) error
	// Creates an rbd image. (The returned size is in MB; see how Provision
	// builds the PV capacity with a "%dMi" quantity.)
	CreateImage(provisioner *rbdVolumeProvisioner) (r *v1.RBDPersistentVolumeSource, volumeSizeMB int, err error)
	// Deletes an rbd image.
	DeleteImage(deleter *rbdVolumeDeleter) error
	// Expands an rbd image.
	ExpandImage(expander *rbdVolumeExpander, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error)
}
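// A minimal sketch of how this interface is driven over a volume's life cycle
// (names are from this package; error handling is elided and the variables are
// assumed to be in scope):
//
//	var manager diskManager = &RBDUtil{}
//	devicePath, _ := manager.AttachDisk(mounter)                // map the image on this node
//	_ = diskSetUp(manager, mounter, volPath, mnt, fsGroup)      // bind mount into the pod dir
//	_ = diskTearDown(manager, unmounter, volPath, mnt)          // remove the bind mount
//	_ = manager.DetachDisk(plugin, deviceMountPath, devicePath) // unmap the image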
// utility to mount a disk-based filesystem
func diskSetUp(manager diskManager, b rbdMounter, volPath string, mounter mount.Interface, fsGroup *int64) error {
globalPDPath := manager.MakeGlobalPDName(*b.rbd)
notMnt, err := mounter.IsLikelyNotMountPoint(globalPDPath)
if err != nil && !os.IsNotExist(err) {
glog.Errorf("cannot validate mountpoint: %s", globalPDPath)
return err
}
if notMnt {
return fmt.Errorf("no device is mounted at %s", globalPDPath)
}
notMnt, err = mounter.IsLikelyNotMountPoint(volPath)
if err != nil && !os.IsNotExist(err) {
glog.Errorf("cannot validate mountpoint: %s", volPath)
return err
}
if !notMnt {
return nil
}
	if err := os.MkdirAll(volPath, 0750); err != nil {
		glog.Errorf("failed to mkdir: %s", volPath)
		return err
	}
// Perform a bind mount to the full path to allow duplicate mounts of the same disk.
options := []string{"bind"}
if (&b).GetAttributes().ReadOnly {
options = append(options, "ro")
}
mountOptions := volume.JoinMountOptions(b.mountOptions, options)
err = mounter.Mount(globalPDPath, volPath, "", mountOptions)
if err != nil {
glog.Errorf("failed to bind mount:%s", globalPDPath)
return err
}
glog.V(3).Infof("rbd: successfully bind mount %s to %s with options %v", globalPDPath, volPath, mountOptions)
if !b.ReadOnly {
volume.SetVolumeOwnership(&b, fsGroup)
}
return nil
}
// utility to tear down a disk-based filesystem
func diskTearDown(manager diskManager, c rbdUnmounter, volPath string, mounter mount.Interface) error {
notMnt, err := mounter.IsLikelyNotMountPoint(volPath)
if err != nil && !os.IsNotExist(err) {
glog.Errorf("cannot validate mountpoint: %s", volPath)
return err
}
if notMnt {
glog.V(3).Infof("volume path %s is not a mountpoint, deleting", volPath)
return os.Remove(volPath)
}
// Unmount the bind-mount inside this pod.
if err := mounter.Unmount(volPath); err != nil {
glog.Errorf("failed to umount %s", volPath)
return err
}
	notMnt, mntErr := mounter.IsLikelyNotMountPoint(volPath)
	if mntErr != nil && !os.IsNotExist(mntErr) {
		glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
		return mntErr
	}
if notMnt {
if err := os.Remove(volPath); err != nil {
glog.V(2).Info("Error removing mountpoint ", volPath, ": ", err)
return err
}
}
return nil
}

vendor/k8s.io/kubernetes/pkg/volume/rbd/doc.go (generated, vendored; 19 lines)

@@ -0,0 +1,19 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package rbd contains the internal representation of Rados Block Store (Ceph)
// volumes.
package rbd // import "k8s.io/kubernetes/pkg/volume/rbd"

vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd.go (generated, vendored; 787 lines)

@@ -0,0 +1,787 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rbd
import (
"fmt"
dstrings "strings"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
volutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
var (
supportedFeatures = sets.NewString("layering")
)
// This is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&rbdPlugin{}}
}
// rbdPlugin implements volume.VolumePlugin.
type rbdPlugin struct {
host volume.VolumeHost
}
var _ volume.VolumePlugin = &rbdPlugin{}
var _ volume.PersistentVolumePlugin = &rbdPlugin{}
var _ volume.DeletableVolumePlugin = &rbdPlugin{}
var _ volume.ProvisionableVolumePlugin = &rbdPlugin{}
var _ volume.AttachableVolumePlugin = &rbdPlugin{}
var _ volume.ExpandableVolumePlugin = &rbdPlugin{}
const (
rbdPluginName = "kubernetes.io/rbd"
secretKeyName = "key" // key name used in secret
rbdImageFormat1 = "1"
rbdImageFormat2 = "2"
rbdDefaultAdminId = "admin"
rbdDefaultAdminSecretNamespace = "default"
rbdDefaultPool = "rbd"
rbdDefaultUserId = rbdDefaultAdminId
)
func getPath(uid types.UID, volName string, host volume.VolumeHost) string {
return host.GetPodVolumeDir(uid, strings.EscapeQualifiedNameForDisk(rbdPluginName), volName)
}
func (plugin *rbdPlugin) Init(host volume.VolumeHost) error {
plugin.host = host
return nil
}
func (plugin *rbdPlugin) GetPluginName() string {
return rbdPluginName
}
func (plugin *rbdPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
mon, err := getVolumeSourceMonitors(spec)
if err != nil {
return "", err
}
img, err := getVolumeSourceImage(spec)
if err != nil {
return "", err
}
return fmt.Sprintf(
"%v:%v",
mon,
img), nil
}
func (plugin *rbdPlugin) CanSupport(spec *volume.Spec) bool {
	return (spec.Volume != nil && spec.Volume.RBD != nil) ||
		(spec.PersistentVolume != nil && spec.PersistentVolume.Spec.RBD != nil)
}
func (plugin *rbdPlugin) RequiresRemount() bool {
return false
}
func (plugin *rbdPlugin) SupportsMountOption() bool {
return true
}
func (plugin *rbdPlugin) SupportsBulkVolumeVerification() bool {
return false
}
func (plugin *rbdPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
return []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
v1.ReadOnlyMany,
}
}
type rbdVolumeExpander struct {
*rbdMounter
}
func (plugin *rbdPlugin) getAdminAndSecret(spec *volume.Spec) (string, string, error) {
class, err := volutil.GetClassForVolume(plugin.host.GetKubeClient(), spec.PersistentVolume)
if err != nil {
return "", "", err
}
adminSecretName := ""
adminSecretNamespace := rbdDefaultAdminSecretNamespace
admin := ""
for k, v := range class.Parameters {
switch dstrings.ToLower(k) {
case "adminid":
admin = v
case "adminsecretname":
adminSecretName = v
case "adminsecretnamespace":
adminSecretNamespace = v
}
}
if admin == "" {
admin = rbdDefaultAdminId
}
secret, err := parsePVSecret(adminSecretNamespace, adminSecretName, plugin.host.GetKubeClient())
if err != nil {
return admin, "", fmt.Errorf("failed to get admin secret from [%q/%q]: %v", adminSecretNamespace, adminSecretName, err)
}
return admin, secret, nil
}
func (plugin *rbdPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize resource.Quantity, oldSize resource.Quantity) (resource.Quantity, error) {
if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.RBD == nil {
return oldSize, fmt.Errorf("spec.PersistentVolumeSource.Spec.RBD is nil")
}
// get admin and secret
admin, secret, err := plugin.getAdminAndSecret(spec)
if err != nil {
return oldSize, err
}
expander := &rbdVolumeExpander{
rbdMounter: &rbdMounter{
rbd: &rbd{
volName: spec.Name(),
Image: spec.PersistentVolume.Spec.RBD.RBDImage,
Pool: spec.PersistentVolume.Spec.RBD.RBDPool,
plugin: plugin,
manager: &RBDUtil{},
mounter: &mount.SafeFormatAndMount{Interface: plugin.host.GetMounter(plugin.GetPluginName())},
exec: plugin.host.GetExec(plugin.GetPluginName()),
},
Mon: spec.PersistentVolume.Spec.RBD.CephMonitors,
adminId: admin,
adminSecret: secret,
},
}
	expandedSize, err := expander.ResizeImage(oldSize, newSize)
	if err != nil {
		return oldSize, err
	}
	return expandedSize, nil
}
func (expander *rbdVolumeExpander) ResizeImage(oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error) {
return expander.manager.ExpandImage(expander, oldSize, newSize)
}
func (plugin *rbdPlugin) RequiresFSResize() bool {
return true
}
func (plugin *rbdPlugin) createMounterFromVolumeSpecAndPod(spec *volume.Spec, pod *v1.Pod) (*rbdMounter, error) {
var err error
mon, err := getVolumeSourceMonitors(spec)
if err != nil {
return nil, err
}
img, err := getVolumeSourceImage(spec)
if err != nil {
return nil, err
}
fstype, err := getVolumeSourceFSType(spec)
if err != nil {
return nil, err
}
pool, err := getVolumeSourcePool(spec)
if err != nil {
return nil, err
}
id, err := getVolumeSourceUser(spec)
if err != nil {
return nil, err
}
keyring, err := getVolumeSourceKeyRing(spec)
if err != nil {
return nil, err
}
ro, err := getVolumeSourceReadOnly(spec)
if err != nil {
return nil, err
}
secretName, secretNs, err := getSecretNameAndNamespace(spec, pod.Namespace)
if err != nil {
return nil, err
}
secret := ""
if len(secretName) > 0 && len(secretNs) > 0 {
		// if a secret is provided, retrieve it
kubeClient := plugin.host.GetKubeClient()
if kubeClient == nil {
return nil, fmt.Errorf("Cannot get kube client")
}
		secrets, err := kubeClient.CoreV1().Secrets(secretNs).Get(secretName, metav1.GetOptions{})
if err != nil {
err = fmt.Errorf("Couldn't get secret %v/%v err: %v", secretNs, secretName, err)
return nil, err
}
for _, data := range secrets.Data {
secret = string(data)
}
}
return &rbdMounter{
rbd: newRBD("", spec.Name(), img, pool, ro, plugin, &RBDUtil{}),
Mon: mon,
Id: id,
Keyring: keyring,
Secret: secret,
fsType: fstype,
}, nil
}
func (plugin *rbdPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
secretName, secretNs, err := getSecretNameAndNamespace(spec, pod.Namespace)
if err != nil {
return nil, err
}
secret := ""
if len(secretName) > 0 && len(secretNs) > 0 {
		// if a secret is provided, retrieve it
kubeClient := plugin.host.GetKubeClient()
if kubeClient == nil {
return nil, fmt.Errorf("Cannot get kube client")
}
secrets, err := kubeClient.CoreV1().Secrets(secretNs).Get(secretName, metav1.GetOptions{})
if err != nil {
err = fmt.Errorf("Couldn't get secret %v/%v err: %v", secretNs, secretName, err)
return nil, err
}
for _, data := range secrets.Data {
secret = string(data)
}
}
// Inject real implementations here, test through the internal function.
return plugin.newMounterInternal(spec, pod.UID, &RBDUtil{}, secret)
}
func (plugin *rbdPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager diskManager, secret string) (volume.Mounter, error) {
mon, err := getVolumeSourceMonitors(spec)
if err != nil {
return nil, err
}
img, err := getVolumeSourceImage(spec)
if err != nil {
return nil, err
}
fstype, err := getVolumeSourceFSType(spec)
if err != nil {
return nil, err
}
pool, err := getVolumeSourcePool(spec)
if err != nil {
return nil, err
}
id, err := getVolumeSourceUser(spec)
if err != nil {
return nil, err
}
keyring, err := getVolumeSourceKeyRing(spec)
if err != nil {
return nil, err
}
ro, err := getVolumeSourceReadOnly(spec)
if err != nil {
return nil, err
}
return &rbdMounter{
rbd: newRBD(podUID, spec.Name(), img, pool, ro, plugin, manager),
Mon: mon,
Id: id,
Keyring: keyring,
Secret: secret,
fsType: fstype,
mountOptions: volume.MountOptionFromSpec(spec),
}, nil
}
func (plugin *rbdPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
// Inject real implementations here, test through the internal function.
return plugin.newUnmounterInternal(volName, podUID, &RBDUtil{})
}
func (plugin *rbdPlugin) newUnmounterInternal(volName string, podUID types.UID, manager diskManager) (volume.Unmounter, error) {
return &rbdUnmounter{
rbdMounter: &rbdMounter{
rbd: newRBD(podUID, volName, "", "", false, plugin, manager),
Mon: make([]string, 0),
},
}, nil
}
func (plugin *rbdPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
rbdVolume := &v1.Volume{
Name: volumeName,
VolumeSource: v1.VolumeSource{
RBD: &v1.RBDVolumeSource{
CephMonitors: []string{},
},
},
}
return volume.NewSpecFromVolume(rbdVolume), nil
}
func (plugin *rbdPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.RBD == nil {
return nil, fmt.Errorf("spec.PersistentVolumeSource.Spec.RBD is nil")
}
admin, secret, err := plugin.getAdminAndSecret(spec)
if err != nil {
return nil, err
}
return plugin.newDeleterInternal(spec, admin, secret, &RBDUtil{})
}
func (plugin *rbdPlugin) newDeleterInternal(spec *volume.Spec, admin, secret string, manager diskManager) (volume.Deleter, error) {
return &rbdVolumeDeleter{
rbdMounter: &rbdMounter{
rbd: newRBD("", spec.Name(), spec.PersistentVolume.Spec.RBD.RBDImage, spec.PersistentVolume.Spec.RBD.RBDPool, false, plugin, manager),
Mon: spec.PersistentVolume.Spec.RBD.CephMonitors,
adminId: admin,
adminSecret: secret,
}}, nil
}
func (plugin *rbdPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
return plugin.newProvisionerInternal(options, &RBDUtil{})
}
func (plugin *rbdPlugin) newProvisionerInternal(options volume.VolumeOptions, manager diskManager) (volume.Provisioner, error) {
return &rbdVolumeProvisioner{
rbdMounter: &rbdMounter{
rbd: newRBD("", "", "", "", false, plugin, manager),
},
options: options,
}, nil
}
// rbdVolumeProvisioner implements volume.Provisioner interface.
type rbdVolumeProvisioner struct {
*rbdMounter
options volume.VolumeOptions
}
var _ volume.Provisioner = &rbdVolumeProvisioner{}
func (r *rbdVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
if !volume.AccessModesContainedInAll(r.plugin.GetAccessModes(), r.options.PVC.Spec.AccessModes) {
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", r.options.PVC.Spec.AccessModes, r.plugin.GetAccessModes())
}
if r.options.PVC.Spec.Selector != nil {
return nil, fmt.Errorf("claim Selector is not supported")
}
var err error
adminSecretName := ""
adminSecretNamespace := rbdDefaultAdminSecretNamespace
secret := ""
secretName := ""
secretNamespace := ""
imageFormat := rbdImageFormat2
fstype := ""
for k, v := range r.options.Parameters {
switch dstrings.ToLower(k) {
case "monitors":
arr := dstrings.Split(v, ",")
for _, m := range arr {
r.Mon = append(r.Mon, m)
}
case "adminid":
r.adminId = v
case "adminsecretname":
adminSecretName = v
case "adminsecretnamespace":
adminSecretNamespace = v
case "userid":
r.Id = v
case "pool":
r.Pool = v
case "usersecretname":
secretName = v
case "usersecretnamespace":
secretNamespace = v
case "imageformat":
imageFormat = v
case "imagefeatures":
arr := dstrings.Split(v, ",")
for _, f := range arr {
if !supportedFeatures.Has(f) {
return nil, fmt.Errorf("invalid feature %q for volume plugin %s, supported features are: %v", f, r.plugin.GetPluginName(), supportedFeatures)
} else {
r.imageFeatures = append(r.imageFeatures, f)
}
}
case volume.VolumeParameterFSType:
fstype = v
default:
return nil, fmt.Errorf("invalid option %q for volume plugin %s", k, r.plugin.GetPluginName())
}
}
// sanity check
if imageFormat != rbdImageFormat1 && imageFormat != rbdImageFormat2 {
return nil, fmt.Errorf("invalid ceph imageformat %s, expecting %s or %s",
imageFormat, rbdImageFormat1, rbdImageFormat2)
}
r.imageFormat = imageFormat
if adminSecretName == "" {
return nil, fmt.Errorf("missing Ceph admin secret name")
}
if secret, err = parsePVSecret(adminSecretNamespace, adminSecretName, r.plugin.host.GetKubeClient()); err != nil {
return nil, fmt.Errorf("failed to get admin secret from [%q/%q]: %v", adminSecretNamespace, adminSecretName, err)
}
r.adminSecret = secret
if len(r.Mon) < 1 {
return nil, fmt.Errorf("missing Ceph monitors")
}
if secretName == "" {
return nil, fmt.Errorf("missing user secret name")
}
if r.adminId == "" {
r.adminId = rbdDefaultAdminId
}
if r.Pool == "" {
r.Pool = rbdDefaultPool
}
if r.Id == "" {
r.Id = r.adminId
}
// create random image name
image := fmt.Sprintf("kubernetes-dynamic-pvc-%s", uuid.NewUUID())
r.rbdMounter.Image = image
rbd, sizeMB, err := r.manager.CreateImage(r)
if err != nil {
glog.Errorf("rbd: create volume failed, err: %v", err)
return nil, err
}
glog.Infof("successfully created rbd image %q", image)
pv := new(v1.PersistentVolume)
metav1.SetMetaDataAnnotation(&pv.ObjectMeta, volumehelper.VolumeDynamicallyCreatedByKey, "rbd-dynamic-provisioner")
rbd.SecretRef = new(v1.SecretReference)
rbd.SecretRef.Name = secretName
rbd.SecretRef.Namespace = secretNamespace
rbd.RadosUser = r.Id
rbd.FSType = fstype
pv.Spec.PersistentVolumeSource.RBD = rbd
pv.Spec.PersistentVolumeReclaimPolicy = r.options.PersistentVolumeReclaimPolicy
pv.Spec.AccessModes = r.options.PVC.Spec.AccessModes
if len(pv.Spec.AccessModes) == 0 {
pv.Spec.AccessModes = r.plugin.GetAccessModes()
}
pv.Spec.Capacity = v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dMi", sizeMB)),
}
pv.Spec.MountOptions = r.options.MountOptions
return pv, nil
}
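// For reference, Provision consumes StorageClass parameters like the following
// (keys match the switch above and are compared case-insensitively; the monitor
// address and secret names here are hypothetical):
//
//	parameters := map[string]string{
//		"monitors":             "10.16.153.105:6789",
//		"adminId":              "admin",
//		"adminSecretName":      "ceph-admin-secret",
//		"adminSecretNamespace": "kube-system",
//		"pool":                 "rbd",
//		"userId":               "admin",
//		"userSecretName":       "ceph-user-secret",
//		"imageFormat":          "2",
//		"imageFeatures":        "layering",
//	}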
// rbdVolumeDeleter implements volume.Deleter interface.
type rbdVolumeDeleter struct {
*rbdMounter
}
var _ volume.Deleter = &rbdVolumeDeleter{}
func (r *rbdVolumeDeleter) GetPath() string {
return getPath(r.podUID, r.volName, r.plugin.host)
}
func (r *rbdVolumeDeleter) Delete() error {
return r.manager.DeleteImage(r)
}
// rbd implements the volume.Volume interface.
// It's embedded in Mounter/Unmounter/Deleter.
type rbd struct {
volName string
podUID types.UID
Pool string
Image string
ReadOnly bool
plugin *rbdPlugin
mounter *mount.SafeFormatAndMount
exec mount.Exec
// Utility interface that provides API calls to the provider to attach/detach disks.
manager diskManager
volume.MetricsProvider `json:"-"`
}
var _ volume.Volume = &rbd{}
func (rbd *rbd) GetPath() string {
// safe to use PodVolumeDir now: volume teardown occurs before pod is cleaned up
return getPath(rbd.podUID, rbd.volName, rbd.plugin.host)
}
// newRBD creates a new rbd.
func newRBD(podUID types.UID, volName string, image string, pool string, readOnly bool, plugin *rbdPlugin, manager diskManager) *rbd {
return &rbd{
podUID: podUID,
volName: volName,
Image: image,
Pool: pool,
ReadOnly: readOnly,
plugin: plugin,
mounter: volumehelper.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host),
exec: plugin.host.GetExec(plugin.GetPluginName()),
manager: manager,
MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, volName, plugin.host)),
}
}
// rbdMounter implements volume.Mounter interface.
// It contains information that needs to be persisted for the whole life cycle
// of the PV on the node. It is persisted at the very beginning in the pod
// mount point directory.
// Note: the capitalized field names of this struct determine the information
// persisted on disk; DO NOT change them. (TODO: refactor to use a dedicated struct?)
type rbdMounter struct {
*rbd
// capitalized so they can be exported in persistRBD()
Mon []string
Id string
Keyring string
Secret string
fsType string
adminSecret string
adminId string
mountOptions []string
imageFormat string
imageFeatures []string
}
var _ volume.Mounter = &rbdMounter{}
func (b *rbd) GetAttributes() volume.Attributes {
return volume.Attributes{
ReadOnly: b.ReadOnly,
Managed: !b.ReadOnly,
SupportsSELinux: true,
}
}
// Checks prior to mount operations to verify that the required components (binaries, etc.)
// to mount the volume are available on the underlying node.
// If not, it returns an error
func (b *rbdMounter) CanMount() error {
return nil
}
func (b *rbdMounter) SetUp(fsGroup *int64) error {
return b.SetUpAt(b.GetPath(), fsGroup)
}
func (b *rbdMounter) SetUpAt(dir string, fsGroup *int64) error {
	// diskSetUp checks mountpoints and prevents repeated calls
	glog.V(4).Infof("rbd: attempting to set up at %s", dir)
	err := diskSetUp(b.manager, *b, dir, b.mounter, fsGroup)
	if err != nil {
		glog.Errorf("rbd: failed to set up at %s: %v", dir, err)
		return err
	}
	glog.V(3).Infof("rbd: successfully set up at %s", dir)
	return nil
}
// rbdUnmounter implements volume.Unmounter interface.
type rbdUnmounter struct {
*rbdMounter
}
var _ volume.Unmounter = &rbdUnmounter{}
// Unmounts the bind mount, and detaches the disk only if the disk
// resource was the last reference to that disk on the kubelet.
func (c *rbdUnmounter) TearDown() error {
return c.TearDownAt(c.GetPath())
}
func (c *rbdUnmounter) TearDownAt(dir string) error {
glog.V(4).Infof("rbd: attempting to teardown at %s", dir)
if pathExists, pathErr := volutil.PathExists(dir); pathErr != nil {
return fmt.Errorf("Error checking if path exists: %v", pathErr)
} else if !pathExists {
glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir)
return nil
}
err := diskTearDown(c.manager, *c, dir, c.mounter)
if err != nil {
return err
}
glog.V(3).Infof("rbd: successfully teardown at %s", dir)
return nil
}
func getVolumeSourceMonitors(spec *volume.Spec) ([]string, error) {
if spec.Volume != nil && spec.Volume.RBD != nil {
return spec.Volume.RBD.CephMonitors, nil
} else if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.RBD != nil {
return spec.PersistentVolume.Spec.RBD.CephMonitors, nil
}
return nil, fmt.Errorf("Spec does not reference a RBD volume type")
}
func getVolumeSourceImage(spec *volume.Spec) (string, error) {
if spec.Volume != nil && spec.Volume.RBD != nil {
return spec.Volume.RBD.RBDImage, nil
} else if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.RBD != nil {
return spec.PersistentVolume.Spec.RBD.RBDImage, nil
}
return "", fmt.Errorf("Spec does not reference a RBD volume type")
}
func getVolumeSourceFSType(spec *volume.Spec) (string, error) {
if spec.Volume != nil && spec.Volume.RBD != nil {
return spec.Volume.RBD.FSType, nil
} else if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.RBD != nil {
return spec.PersistentVolume.Spec.RBD.FSType, nil
}
return "", fmt.Errorf("Spec does not reference a RBD volume type")
}
func getVolumeSourcePool(spec *volume.Spec) (string, error) {
if spec.Volume != nil && spec.Volume.RBD != nil {
return spec.Volume.RBD.RBDPool, nil
} else if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.RBD != nil {
return spec.PersistentVolume.Spec.RBD.RBDPool, nil
}
return "", fmt.Errorf("Spec does not reference a RBD volume type")
}
func getVolumeSourceUser(spec *volume.Spec) (string, error) {
if spec.Volume != nil && spec.Volume.RBD != nil {
return spec.Volume.RBD.RadosUser, nil
} else if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.RBD != nil {
return spec.PersistentVolume.Spec.RBD.RadosUser, nil
}
return "", fmt.Errorf("Spec does not reference a RBD volume type")
}
func getVolumeSourceKeyRing(spec *volume.Spec) (string, error) {
if spec.Volume != nil && spec.Volume.RBD != nil {
return spec.Volume.RBD.Keyring, nil
} else if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.RBD != nil {
return spec.PersistentVolume.Spec.RBD.Keyring, nil
}
return "", fmt.Errorf("Spec does not reference a RBD volume type")
}
func getVolumeSourceReadOnly(spec *volume.Spec) (bool, error) {
if spec.Volume != nil && spec.Volume.RBD != nil {
return spec.Volume.RBD.ReadOnly, nil
} else if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.RBD != nil {
		// rbd volumes used as a PersistentVolume get the ReadOnly flag indirectly
		// through the persistent-claim volume used to mount the PV
return spec.ReadOnly, nil
}
return false, fmt.Errorf("Spec does not reference a RBD volume type")
}
func parsePodSecret(pod *v1.Pod, secretName string, kubeClient clientset.Interface) (string, error) {
secret, err := volutil.GetSecretForPod(pod, secretName, kubeClient)
if err != nil {
glog.Errorf("failed to get secret from [%q/%q]", pod.Namespace, secretName)
return "", fmt.Errorf("failed to get secret from [%q/%q]", pod.Namespace, secretName)
}
return parseSecretMap(secret)
}
func parsePVSecret(namespace, secretName string, kubeClient clientset.Interface) (string, error) {
secret, err := volutil.GetSecretForPV(namespace, secretName, rbdPluginName, kubeClient)
if err != nil {
glog.Errorf("failed to get secret from [%q/%q]", namespace, secretName)
return "", fmt.Errorf("failed to get secret from [%q/%q]", namespace, secretName)
}
return parseSecretMap(secret)
}
// parseSecretMap locates the secret by key name.
func parseSecretMap(secretMap map[string]string) (string, error) {
if len(secretMap) == 0 {
return "", fmt.Errorf("empty secret map")
}
secret := ""
for k, v := range secretMap {
if k == secretKeyName {
return v, nil
}
secret = v
}
// If not found, the last secret in the map wins as done before
return secret, nil
}
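// Illustrative behavior (hypothetical values):
//
//	parseSecretMap(map[string]string{"key": "AQB4...", "foo": "x"}) // "AQB4...": the well-known key name wins
//	parseSecretMap(map[string]string{"foo": "x"})                   // "x": with no "key" entry, the last value wins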
func getSecretNameAndNamespace(spec *volume.Spec, defaultNamespace string) (string, string, error) {
if spec.Volume != nil && spec.Volume.RBD != nil {
localSecretRef := spec.Volume.RBD.SecretRef
if localSecretRef != nil {
return localSecretRef.Name, defaultNamespace, nil
}
return "", "", nil
} else if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.RBD != nil {
secretRef := spec.PersistentVolume.Spec.RBD.SecretRef
secretNs := defaultNamespace
if secretRef != nil {
if len(secretRef.Namespace) != 0 {
secretNs = secretRef.Namespace
}
return secretRef.Name, secretNs, nil
}
return "", "", nil
}
return "", "", fmt.Errorf("Spec does not reference an RBD volume type")
}

vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd_test.go (generated, vendored; 437 lines)

@@ -0,0 +1,437 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rbd
import (
"fmt"
"os"
"path/filepath"
"reflect"
"strings"
"sync"
"testing"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/client-go/kubernetes/fake"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
func TestCanSupport(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("rbd_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/rbd")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if plug.GetPluginName() != "kubernetes.io/rbd" {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
t.Errorf("Expected false")
}
}
type fakeDiskManager struct {
// Make sure we can run tests in parallel.
mutex sync.RWMutex
// Key format: "<pool>/<image>"
rbdImageLocks map[string]bool
rbdMapIndex int
rbdDevices map[string]bool
}
func NewFakeDiskManager() *fakeDiskManager {
return &fakeDiskManager{
rbdImageLocks: make(map[string]bool),
rbdMapIndex: 0,
rbdDevices: make(map[string]bool),
}
}
func (fake *fakeDiskManager) MakeGlobalPDName(rbd rbd) string {
return makePDNameInternal(rbd.plugin.host, rbd.Pool, rbd.Image)
}
func (fake *fakeDiskManager) AttachDisk(b rbdMounter) (string, error) {
fake.mutex.Lock()
defer fake.mutex.Unlock()
	fake.rbdMapIndex++
devicePath := fmt.Sprintf("/dev/rbd%d", fake.rbdMapIndex)
fake.rbdDevices[devicePath] = true
return devicePath, nil
}
func (fake *fakeDiskManager) DetachDisk(r *rbdPlugin, deviceMountPath string, device string) error {
fake.mutex.Lock()
defer fake.mutex.Unlock()
ok := fake.rbdDevices[device]
if !ok {
return fmt.Errorf("rbd: failed to detach device %s, it does not exist", device)
}
delete(fake.rbdDevices, device)
return nil
}
func (fake *fakeDiskManager) CreateImage(provisioner *rbdVolumeProvisioner) (r *v1.RBDPersistentVolumeSource, volumeSizeMB int, err error) {
return nil, 0, fmt.Errorf("not implemented")
}
func (fake *fakeDiskManager) DeleteImage(deleter *rbdVolumeDeleter) error {
return fmt.Errorf("not implemented")
}
func (fake *fakeDiskManager) Fencing(r rbdMounter, nodeName string) error {
fake.mutex.Lock()
defer fake.mutex.Unlock()
key := fmt.Sprintf("%s/%s", r.Pool, r.Image)
isLocked, ok := fake.rbdImageLocks[key]
if ok && isLocked {
// not expected in testing
return fmt.Errorf("%s is already locked", key)
}
fake.rbdImageLocks[key] = true
return nil
}
func (fake *fakeDiskManager) Defencing(r rbdMounter, nodeName string) error {
fake.mutex.Lock()
defer fake.mutex.Unlock()
key := fmt.Sprintf("%s/%s", r.Pool, r.Image)
isLocked, ok := fake.rbdImageLocks[key]
if !ok || !isLocked {
// not expected in testing
return fmt.Errorf("%s is not locked", key)
}
delete(fake.rbdImageLocks, key)
return nil
}
func (fake *fakeDiskManager) IsLocked(r rbdMounter, nodeName string) (bool, error) {
fake.mutex.RLock()
defer fake.mutex.RUnlock()
key := fmt.Sprintf("%s/%s", r.Pool, r.Image)
isLocked, ok := fake.rbdImageLocks[key]
return ok && isLocked, nil
}
func (fake *fakeDiskManager) ExpandImage(rbdExpander *rbdVolumeExpander, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error) {
return resource.Quantity{}, fmt.Errorf("not implemented")
}
// checkMounterLog checks that fakeMounter has the expected number of log
// entries and that the last action equals expectedAction.
func checkMounterLog(t *testing.T, fakeMounter *mount.FakeMounter, expected int, expectedAction mount.FakeAction) {
if len(fakeMounter.Log) != expected {
t.Fatalf("fakeMounter should have %d logs, actual: %d", expected, len(fakeMounter.Log))
}
lastIndex := len(fakeMounter.Log) - 1
lastAction := fakeMounter.Log[lastIndex]
if !reflect.DeepEqual(expectedAction, lastAction) {
t.Fatalf("fakeMounter.Log[%d] should be %#v, not: %#v", lastIndex, expectedAction, lastAction)
}
}
func doTestPlugin(t *testing.T, c *testcase) {
fakeVolumeHost := volumetest.NewFakeVolumeHost(c.root, nil, nil)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, fakeVolumeHost)
plug, err := plugMgr.FindPluginByName("kubernetes.io/rbd")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
fakeMounter := fakeVolumeHost.GetMounter(plug.GetPluginName()).(*mount.FakeMounter)
fakeNodeName := types.NodeName("localhost")
fdm := NewFakeDiskManager()
// attacher
attacher, err := plug.(*rbdPlugin).newAttacherInternal(fdm)
if err != nil {
t.Errorf("Failed to make a new Attacher: %v", err)
}
deviceAttachPath, err := attacher.Attach(c.spec, fakeNodeName)
if err != nil {
t.Fatal(err)
}
devicePath, err := attacher.WaitForAttach(c.spec, deviceAttachPath, c.pod, time.Second*10)
if err != nil {
t.Fatal(err)
}
if devicePath != c.expectedDevicePath {
t.Errorf("Unexpected path, expected %q, not: %q", c.expectedDevicePath, devicePath)
}
deviceMountPath, err := attacher.GetDeviceMountPath(c.spec)
if err != nil {
t.Fatal(err)
}
if deviceMountPath != c.expectedDeviceMountPath {
t.Errorf("Unexpected mount path, expected %q, not: %q", c.expectedDeviceMountPath, deviceMountPath)
}
err = attacher.MountDevice(c.spec, devicePath, deviceMountPath)
if err != nil {
t.Fatal(err)
}
if _, err := os.Stat(deviceMountPath); err != nil {
if os.IsNotExist(err) {
t.Errorf("Attacher.MountDevice() failed, device mount path not created: %s", deviceMountPath)
} else {
t.Errorf("Attacher.MountDevice() failed: %v", err)
}
}
checkMounterLog(t, fakeMounter, 1, mount.FakeAction{Action: "mount", Target: c.expectedDeviceMountPath, Source: devicePath, FSType: "ext4"})
// mounter
mounter, err := plug.(*rbdPlugin).newMounterInternal(c.spec, c.pod.UID, fdm, "secrets")
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Error("Got a nil Mounter")
}
path := mounter.GetPath()
if path != c.expectedPodMountPath {
t.Errorf("Unexpected path, expected %q, got: %q", c.expectedPodMountPath, path)
}
if err := mounter.SetUp(nil); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", path)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
checkMounterLog(t, fakeMounter, 2, mount.FakeAction{Action: "mount", Target: c.expectedPodMountPath, Source: devicePath, FSType: ""})
// unmounter
unmounter, err := plug.(*rbdPlugin).newUnmounterInternal(c.spec.Name(), c.pod.UID, fdm)
if err != nil {
t.Errorf("Failed to make a new Unmounter: %v", err)
}
if unmounter == nil {
t.Error("Got a nil Unmounter")
}
if err := unmounter.TearDown(); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(path); err == nil {
t.Errorf("TearDown() failed, volume path still exists: %s", path)
} else if !os.IsNotExist(err) {
t.Errorf("TearDown() failed: %v", err)
}
checkMounterLog(t, fakeMounter, 3, mount.FakeAction{Action: "unmount", Target: c.expectedPodMountPath, Source: "", FSType: ""})
// detacher
detacher, err := plug.(*rbdPlugin).newDetacherInternal(fdm)
if err != nil {
t.Errorf("Failed to make a new Attacher: %v", err)
}
err = detacher.UnmountDevice(deviceMountPath)
if err != nil {
t.Fatalf("Detacher.UnmountDevice failed to unmount %s", deviceMountPath)
}
checkMounterLog(t, fakeMounter, 4, mount.FakeAction{Action: "unmount", Target: c.expectedDeviceMountPath, Source: "", FSType: ""})
err = detacher.Detach(deviceMountPath, fakeNodeName)
if err != nil {
t.Fatalf("Detacher.Detach failed to detach %s from %s", deviceMountPath, fakeNodeName)
}
}
type testcase struct {
spec *volume.Spec
root string
pod *v1.Pod
expectedDevicePath string
expectedDeviceMountPath string
expectedPodMountPath string
}
func TestPlugin(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("rbd_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
tmpDir, err = filepath.EvalSymlinks(tmpDir)
if err != nil {
t.Fatal(err)
}
podUID := uuid.NewUUID()
var cases []*testcase
cases = append(cases, &testcase{
spec: volume.NewSpecFromVolume(&v1.Volume{
Name: "vol1",
VolumeSource: v1.VolumeSource{
RBD: &v1.RBDVolumeSource{
CephMonitors: []string{"a", "b"},
RBDPool: "pool1",
RBDImage: "image1",
FSType: "ext4",
},
},
}),
root: tmpDir,
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "testpod",
Namespace: "testns",
UID: podUID,
},
},
expectedDevicePath: "/dev/rbd1",
expectedDeviceMountPath: fmt.Sprintf("%s/plugins/kubernetes.io/rbd/rbd/pool1-image-image1", tmpDir),
expectedPodMountPath: fmt.Sprintf("%s/pods/%s/volumes/kubernetes.io~rbd/vol1", tmpDir, podUID),
})
cases = append(cases, &testcase{
spec: volume.NewSpecFromPersistentVolume(&v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "vol2",
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
RBD: &v1.RBDPersistentVolumeSource{
CephMonitors: []string{"a", "b"},
RBDPool: "pool2",
RBDImage: "image2",
FSType: "ext4",
},
},
},
}, false),
root: tmpDir,
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "testpod",
Namespace: "testns",
UID: podUID,
},
},
expectedDevicePath: "/dev/rbd1",
expectedDeviceMountPath: fmt.Sprintf("%s/plugins/kubernetes.io/rbd/rbd/pool2-image-image2", tmpDir),
expectedPodMountPath: fmt.Sprintf("%s/pods/%s/volumes/kubernetes.io~rbd/vol2", tmpDir, podUID),
})
for i := 0; i < len(cases); i++ {
doTestPlugin(t, cases[i])
}
}
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("rbd_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "pvA",
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
RBD: &v1.RBDPersistentVolumeSource{
CephMonitors: []string{"a", "b"},
RBDImage: "bar",
FSType: "ext4",
},
},
ClaimRef: &v1.ObjectReference{
Name: "claimA",
},
},
}
claim := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "claimA",
Namespace: "nsA",
},
Spec: v1.PersistentVolumeClaimSpec{
VolumeName: "pvA",
},
Status: v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimBound,
},
}
client := fake.NewSimpleClientset(pv, claim)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, client, nil))
plug, _ := plugMgr.FindPluginByName(rbdPluginName)
// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
spec := volume.NewSpecFromPersistentVolume(pv, true)
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
if mounter == nil {
t.Fatalf("Got a nil Mounter")
}
if !mounter.GetAttributes().ReadOnly {
t.Errorf("Expected true for mounter.IsReadOnly")
}
}
func TestGetSecretNameAndNamespace(t *testing.T) {
secretName := "test-secret-name"
secretNamespace := "test-secret-namespace"
volSpec := &volume.Spec{
PersistentVolume: &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
RBD: &v1.RBDPersistentVolumeSource{
CephMonitors: []string{"a", "b"},
RBDImage: "bar",
FSType: "ext4",
},
},
},
},
}
secretRef := new(v1.SecretReference)
secretRef.Name = secretName
secretRef.Namespace = secretNamespace
volSpec.PersistentVolume.Spec.PersistentVolumeSource.RBD.SecretRef = secretRef
foundSecretName, foundSecretNamespace, err := getSecretNameAndNamespace(volSpec, "default")
if err != nil {
t.Errorf("getSecretNameAndNamespace failed to get Secret's name and namespace: %v", err)
}
if strings.Compare(secretName, foundSecretName) != 0 || strings.Compare(secretNamespace, foundSecretNamespace) != 0 {
t.Errorf("getSecretNameAndNamespace returned incorrect values, expected %s and %s but got %s and %s", secretName, secretNamespace, foundSecretName, foundSecretNamespace)
}
}

vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd_util.go (generated, vendored; 629 lines)

@@ -0,0 +1,629 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//
// utility functions to set up an rbd volume;
// mainly implements the diskManager interface
//
package rbd
import (
"encoding/json"
"fmt"
"io/ioutil"
"math/rand"
"os"
"os/exec"
"path"
"regexp"
"strconv"
"strings"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
fileutil "k8s.io/kubernetes/pkg/util/file"
"k8s.io/kubernetes/pkg/util/node"
"k8s.io/kubernetes/pkg/volume"
volutil "k8s.io/kubernetes/pkg/volume/util"
)
const (
imageWatcherStr = "watcher="
imageSizeStr = "size "
sizeDivStr = " MB in"
kubeLockMagic = "kubelet_lock_magic_"
)
var (
clientKubeLockMagicRe = regexp.MustCompile("client.* " + kubeLockMagic + ".*")
)
// search /sys/bus for an rbd device that matches the given pool and image
func getDevFromImageAndPool(pool, image string) (string, bool) {
// /sys/bus/rbd/devices/X/name and /sys/bus/rbd/devices/X/pool
sys_path := "/sys/bus/rbd/devices"
if dirs, err := ioutil.ReadDir(sys_path); err == nil {
for _, f := range dirs {
// pool and name format:
// see rbd_pool_show() and rbd_name_show() at
// https://github.com/torvalds/linux/blob/master/drivers/block/rbd.c
name := f.Name()
// first match pool, then match name
poolFile := path.Join(sys_path, name, "pool")
poolBytes, err := ioutil.ReadFile(poolFile)
if err != nil {
glog.V(4).Infof("Error reading %s: %v", poolFile, err)
continue
}
if strings.TrimSpace(string(poolBytes)) != pool {
glog.V(4).Infof("Device %s is not %q: %q", name, pool, string(poolBytes))
continue
}
imgFile := path.Join(sys_path, name, "name")
imgBytes, err := ioutil.ReadFile(imgFile)
if err != nil {
glog.V(4).Infof("Error reading %s: %v", imgFile, err)
continue
}
if strings.TrimSpace(string(imgBytes)) != image {
glog.V(4).Infof("Device %s is not %q: %q", name, image, string(imgBytes))
continue
}
// found a match, check if device exists
devicePath := "/dev/rbd" + name
if _, err := os.Lstat(devicePath); err == nil {
return devicePath, true
}
}
}
return "", false
}
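// For example, if /sys/bus/rbd/devices/0/pool contains "rbd" and
// /sys/bus/rbd/devices/0/name contains "myimage", this returns
// ("/dev/rbd0", true), provided /dev/rbd0 actually exists.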
// stat a path; if it does not exist, retry up to maxRetries times, once per second
func waitForPath(pool, image string, maxRetries int) (string, bool) {
for i := 0; i < maxRetries; i++ {
devicePath, found := getDevFromImageAndPool(pool, image)
if found {
return devicePath, true
}
if i == maxRetries-1 {
break
}
time.Sleep(time.Second)
}
return "", false
}
// make a directory like /var/lib/kubelet/plugins/kubernetes.io/rbd/rbd/pool-image-image
func makePDNameInternal(host volume.VolumeHost, pool string, image string) string {
return path.Join(host.GetPluginDir(rbdPluginName), "rbd", pool+"-image-"+image)
}
// RBDUtil implements diskManager interface.
type RBDUtil struct{}
var _ diskManager = &RBDUtil{}
func (util *RBDUtil) MakeGlobalPDName(rbd rbd) string {
return makePDNameInternal(rbd.plugin.host, rbd.Pool, rbd.Image)
}
func rbdErrors(runErr, resultErr error) error {
if err, ok := runErr.(*exec.Error); ok {
if err.Err == exec.ErrNotFound {
return fmt.Errorf("rbd: rbd cmd not found")
}
}
return resultErr
}
// rbdLock acquires a lock on image if lock is true, otherwise releases if a
// lock is found on image.
func (util *RBDUtil) rbdLock(b rbdMounter, lock bool) error {
var err error
var output, locker string
var cmd []byte
var secret_opt []string
if b.Secret != "" {
secret_opt = []string{"--key=" + b.Secret}
} else {
secret_opt = []string{"-k", b.Keyring}
}
if len(b.adminId) == 0 {
b.adminId = b.Id
}
if len(b.adminSecret) == 0 {
b.adminSecret = b.Secret
}
// construct lock id using host name and a magic prefix
lock_id := kubeLockMagic + node.GetHostname("")
l := len(b.Mon)
// avoid mount storm, pick a host randomly
start := rand.Int() % l
// iterate all hosts until mount succeeds.
for i := start; i < start+l; i++ {
mon := b.Mon[i%l]
// cmd "rbd lock list" serves two purposes:
// for fencing, check if lock already held for this host
// this edge case happens if host crashes in the middle of acquiring lock and mounting rbd
// for defencing, get the locker name, something like "client.1234"
args := []string{"lock", "list", b.Image, "--pool", b.Pool, "--id", b.Id, "-m", mon}
args = append(args, secret_opt...)
cmd, err = b.exec.Run("rbd", args...)
output = string(cmd)
glog.Infof("lock list output %q", output)
if err != nil {
continue
}
if lock {
// check if lock is already held for this host by matching lock_id and rbd lock id
if strings.Contains(output, lock_id) {
// this host already holds the lock, exit
glog.V(1).Infof("rbd: lock already held for %s", lock_id)
return nil
}
// clean up orphaned lock if no watcher on the image
used, rbdOutput, statusErr := util.rbdStatus(&b)
if statusErr != nil {
return fmt.Errorf("rbdStatus failed error %v, rbd output: %v", statusErr, rbdOutput)
}
if used {
// this image is already used by a node other than this node
return fmt.Errorf("rbd image: %s/%s is already used by a node other than this node, rbd output: %v", b.Image, b.Pool, output)
}
			// best-effort cleanup of orphaned locks if the image is not in use
locks := clientKubeLockMagicRe.FindAllStringSubmatch(output, -1)
for _, v := range locks {
if len(v) > 0 {
lockInfo := strings.Split(v[0], " ")
if len(lockInfo) > 2 {
args := []string{"lock", "remove", b.Image, lockInfo[1], lockInfo[0], "--pool", b.Pool, "--id", b.Id, "-m", mon}
args = append(args, secret_opt...)
cmd, err = b.exec.Run("rbd", args...)
glog.Infof("remove orphaned locker %s from client %s: err %v, rbd output: %s", lockInfo[1], lockInfo[0], err, string(cmd))
}
}
}
// hold a lock: rbd lock add
args := []string{"lock", "add", b.Image, lock_id, "--pool", b.Pool, "--id", b.Id, "-m", mon}
args = append(args, secret_opt...)
cmd, err = b.exec.Run("rbd", args...)
if err == nil {
glog.V(4).Infof("rbd: successfully add lock (locker_id: %s) on image: %s/%s with id %s mon %s", lock_id, b.Pool, b.Image, b.Id, mon)
}
} else {
// defencing, find locker name
ind := strings.LastIndex(output, lock_id) - 1
for i := ind; i >= 0; i-- {
if output[i] == '\n' {
locker = output[(i + 1):ind]
break
}
}
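			// e.g. for the line "client.4235 kubelet_lock_magic_node-1 10.0.0.1:0/42"
			// (illustrative), locker becomes "client.4235".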
// remove a lock if found: rbd lock remove
if len(locker) > 0 {
args := []string{"lock", "remove", b.Image, lock_id, locker, "--pool", b.Pool, "--id", b.Id, "-m", mon}
args = append(args, secret_opt...)
cmd, err = b.exec.Run("rbd", args...)
if err == nil {
glog.V(4).Infof("rbd: successfully remove lock (locker_id: %s) on image: %s/%s with id %s mon %s", lock_id, b.Pool, b.Image, b.Id, mon)
}
}
}
if err == nil {
// break if operation succeeds
break
}
}
return err
}

// AttachDisk attaches the disk on the node.
// If Volume is not read-only, acquire a lock on image first.
func (util *RBDUtil) AttachDisk(b rbdMounter) (string, error) {
var err error
var output []byte
globalPDPath := util.MakeGlobalPDName(*b.rbd)
if pathExists, pathErr := volutil.PathExists(globalPDPath); pathErr != nil {
return "", fmt.Errorf("Error checking if path exists: %v", pathErr)
} else if !pathExists {
if err := os.MkdirAll(globalPDPath, 0750); err != nil {
return "", err
}
}
devicePath, found := waitForPath(b.Pool, b.Image, 1)
if !found {
_, err = b.exec.Run("modprobe", "rbd")
if err != nil {
glog.Warningf("rbd: failed to load rbd kernel module:%v", err)
}
// Currently, we don't acquire advisory lock on image, but for backward
// compatibility, we need to check if the image is being used by nodes running old kubelet.
found, rbdOutput, err := util.rbdStatus(&b)
if err != nil {
return "", fmt.Errorf("error: %v, rbd output: %v", err, rbdOutput)
}
if found {
glog.Infof("rbd image %s/%s is still being used ", b.Pool, b.Image)
return "", fmt.Errorf("rbd image %s/%s is still being used. rbd output: %s", b.Pool, b.Image, rbdOutput)
}
// rbd map
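		// The assembled command is roughly equivalent to (values illustrative):
		//   rbd map <image> --pool <pool> --id <id> -m <mon> --key=<secret>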
l := len(b.Mon)
		// Pick a monitor randomly to avoid a mount storm.
		start := rand.Int() % l
		// Iterate over all monitors until rbd map succeeds.
for i := start; i < start+l; i++ {
mon := b.Mon[i%l]
glog.V(1).Infof("rbd: map mon %s", mon)
if b.Secret != "" {
output, err = b.exec.Run("rbd",
"map", b.Image, "--pool", b.Pool, "--id", b.Id, "-m", mon, "--key="+b.Secret)
} else {
output, err = b.exec.Run("rbd",
"map", b.Image, "--pool", b.Pool, "--id", b.Id, "-m", mon, "-k", b.Keyring)
}
if err == nil {
break
}
glog.V(1).Infof("rbd: map error %v, rbd output: %s", err, string(output))
}
if err != nil {
return "", fmt.Errorf("rbd: map failed %v, rbd output: %s", err, string(output))
}
devicePath, found = waitForPath(b.Pool, b.Image, 10)
if !found {
return "", fmt.Errorf("Could not map image %s/%s, Timeout after 10s", b.Pool, b.Image)
}
}
return devicePath, nil
}

// DetachDisk detaches the disk from the node.
// It detaches device from the node if device is provided, and removes the lock
// if there is persisted RBD info under deviceMountPath.
func (util *RBDUtil) DetachDisk(plugin *rbdPlugin, deviceMountPath string, device string) error {
if len(device) == 0 {
return fmt.Errorf("DetachDisk failed , device is empty")
}
// rbd unmap
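	// The assembled command is roughly equivalent to (device illustrative):
	//   rbd unmap /dev/rbd0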
exec := plugin.host.GetExec(plugin.GetPluginName())
output, err := exec.Run("rbd", "unmap", device)
if err != nil {
return rbdErrors(err, fmt.Errorf("rbd: failed to unmap device %s, error %v, rbd output: %v", device, err, output))
}
glog.V(3).Infof("rbd: successfully unmap device %s", device)
	// Currently, we don't persist rbd info on the disk, but for backward
	// compatibility, we need to clean it up if found.
rbdFile := path.Join(deviceMountPath, "rbd.json")
exists, err := fileutil.FileExists(rbdFile)
if err != nil {
return err
}
if exists {
glog.V(3).Infof("rbd: old rbd.json is found under %s, cleaning it", deviceMountPath)
err = util.cleanOldRBDFile(plugin, rbdFile)
if err != nil {
glog.Errorf("rbd: failed to clean %s", rbdFile)
return err
}
glog.V(3).Infof("rbd: successfully remove %s", rbdFile)
}
return nil
}

// cleanOldRBDFile reads rbd info from the rbd.json file and removes the lock
// if one is found. Finally, it removes the rbd.json file itself.
func (util *RBDUtil) cleanOldRBDFile(plugin *rbdPlugin, rbdFile string) error {
mounter := &rbdMounter{
// util.rbdLock needs it to run command.
rbd: newRBD("", "", "", "", false, plugin, util),
}
fp, err := os.Open(rbdFile)
if err != nil {
return fmt.Errorf("rbd: open err %s/%s", rbdFile, err)
}
defer fp.Close()
decoder := json.NewDecoder(fp)
	if err = decoder.Decode(mounter); err != nil {
		glog.Errorf("failed to load rbd info from %s: %v", rbdFile, err)
		return fmt.Errorf("rbd: decode err: %v", err)
	}
// remove rbd lock if found
// the disk is not attached to this node anymore, so the lock on image
// for this node can be removed safely
err = util.rbdLock(*mounter, false)
if err == nil {
os.Remove(rbdFile)
}
return err
}

// CreateImage creates a new rbd image with the size requested by the PVC and
// returns the resulting persistent volume source and the size in MiB.
func (util *RBDUtil) CreateImage(p *rbdVolumeProvisioner) (r *v1.RBDPersistentVolumeSource, size int, err error) {
var output []byte
capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
volSizeBytes := capacity.Value()
	// Convert to MiB, the default unit of rbd's --size flag.
sz := int(volume.RoundUpSize(volSizeBytes, 1024*1024))
volSz := fmt.Sprintf("%d", sz)
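	// e.g. a 1Gi PVC request gives volSizeBytes 1073741824, sz 1024 and volSz "1024".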
// rbd create
l := len(p.rbdMounter.Mon)
// pick a mon randomly
start := rand.Int() % l
// iterate all monitors until create succeeds.
for i := start; i < start+l; i++ {
mon := p.Mon[i%l]
if p.rbdMounter.imageFormat == rbdImageFormat2 {
glog.V(4).Infof("rbd: create %s size %s format %s (features: %s) using mon %s, pool %s id %s key %s", p.rbdMounter.Image, volSz, p.rbdMounter.imageFormat, p.rbdMounter.imageFeatures, mon, p.rbdMounter.Pool, p.rbdMounter.adminId, p.rbdMounter.adminSecret)
} else {
glog.V(4).Infof("rbd: create %s size %s format %s using mon %s, pool %s id %s key %s", p.rbdMounter.Image, volSz, p.rbdMounter.imageFormat, mon, p.rbdMounter.Pool, p.rbdMounter.adminId, p.rbdMounter.adminSecret)
}
args := []string{"create", p.rbdMounter.Image, "--size", volSz, "--pool", p.rbdMounter.Pool, "--id", p.rbdMounter.adminId, "-m", mon, "--key=" + p.rbdMounter.adminSecret, "--image-format", p.rbdMounter.imageFormat}
if p.rbdMounter.imageFormat == rbdImageFormat2 {
			// If no image features are provided this is an empty string, which
			// disables all rbd image format 2 features, as intended.
features := strings.Join(p.rbdMounter.imageFeatures, ",")
args = append(args, "--image-feature", features)
}
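		// The assembled command is roughly equivalent to (values illustrative):
		//   rbd create foo --size 1024 --pool kube --id admin -m <mon> --key=<secret> --image-format 2 --image-feature layering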
output, err = p.exec.Run("rbd", args...)
if err == nil {
break
} else {
glog.Warningf("failed to create rbd image, output %v", string(output))
}
}
if err != nil {
return nil, 0, fmt.Errorf("failed to create rbd image: %v, command output: %s", err, string(output))
}
return &v1.RBDPersistentVolumeSource{
CephMonitors: p.rbdMounter.Mon,
RBDImage: p.rbdMounter.Image,
RBDPool: p.rbdMounter.Pool,
}, sz, nil
}

// DeleteImage deletes the rbd image, refusing to do so while the image still
// has watchers.
func (util *RBDUtil) DeleteImage(p *rbdVolumeDeleter) error {
var output []byte
found, rbdOutput, err := util.rbdStatus(p.rbdMounter)
if err != nil {
return fmt.Errorf("error %v, rbd output: %v", err, rbdOutput)
}
if found {
glog.Info("rbd is still being used ", p.rbdMounter.Image)
return fmt.Errorf("rbd image %s/%s is still being used, rbd output: %v", p.rbdMounter.Pool, p.rbdMounter.Image, rbdOutput)
}
// rbd rm
l := len(p.rbdMounter.Mon)
// pick a mon randomly
start := rand.Int() % l
// iterate all monitors until rm succeeds.
for i := start; i < start+l; i++ {
mon := p.rbdMounter.Mon[i%l]
glog.V(4).Infof("rbd: rm %s using mon %s, pool %s id %s key %s", p.rbdMounter.Image, mon, p.rbdMounter.Pool, p.rbdMounter.adminId, p.rbdMounter.adminSecret)
output, err = p.exec.Run("rbd",
"rm", p.rbdMounter.Image, "--pool", p.rbdMounter.Pool, "--id", p.rbdMounter.adminId, "-m", mon, "--key="+p.rbdMounter.adminSecret)
if err == nil {
return nil
} else {
glog.Errorf("failed to delete rbd image: %v, command output: %s", err, string(output))
}
}
return fmt.Errorf("error %v, rbd output: %v", err, string(output))
}

// ExpandImage runs the rbd resize command to resize the specified image.
func (util *RBDUtil) ExpandImage(rbdExpander *rbdVolumeExpander, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error) {
var output []byte
var err error
volSizeBytes := newSize.Value()
	// Convert to MiB, the default unit of rbd's --size flag.
sz := int(volume.RoundUpSize(volSizeBytes, 1024*1024))
newVolSz := fmt.Sprintf("%d", sz)
newSizeQuant := resource.MustParse(fmt.Sprintf("%dMi", sz))
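	// e.g. a newSize of 2Gi gives sz 2048, newVolSz "2048" and newSizeQuant "2048Mi".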
	// Check the current size of the rbd image; if it is equal to or greater
	// than the requested size, do nothing.
curSize, infoErr := util.rbdInfo(rbdExpander.rbdMounter)
if infoErr != nil {
return oldSize, fmt.Errorf("rbd info failed, error: %v", infoErr)
}
if curSize >= sz {
return newSizeQuant, nil
}
// rbd resize
l := len(rbdExpander.rbdMounter.Mon)
// pick a mon randomly
start := rand.Int() % l
// iterate all monitors until resize succeeds.
for i := start; i < start+l; i++ {
mon := rbdExpander.rbdMounter.Mon[i%l]
glog.V(4).Infof("rbd: resize %s using mon %s, pool %s id %s key %s", rbdExpander.rbdMounter.Image, mon, rbdExpander.rbdMounter.Pool, rbdExpander.rbdMounter.adminId, rbdExpander.rbdMounter.adminSecret)
output, err = rbdExpander.exec.Run("rbd",
"resize", rbdExpander.rbdMounter.Image, "--size", newVolSz, "--pool", rbdExpander.rbdMounter.Pool, "--id", rbdExpander.rbdMounter.adminId, "-m", mon, "--key="+rbdExpander.rbdMounter.adminSecret)
if err == nil {
return newSizeQuant, nil
} else {
glog.Errorf("failed to resize rbd image: %v, command output: %s", err, string(output))
}
}
return oldSize, err
}

// rbdInfo runs the `rbd info` command to get the current image size in MB.
func (util *RBDUtil) rbdInfo(b *rbdMounter) (int, error) {
var err error
var output string
var cmd []byte
// If we don't have admin id/secret (e.g. attaching), fallback to user id/secret.
id := b.adminId
secret := b.adminSecret
if id == "" {
id = b.Id
secret = b.Secret
}
l := len(b.Mon)
start := rand.Int() % l
// iterate all hosts until rbd command succeeds.
for i := start; i < start+l; i++ {
mon := b.Mon[i%l]
// cmd "rbd info" get the image info with the following output:
//
// # image exists (exit=0)
// rbd info volume-4a5bcc8b-2b55-46da-ba04-0d3dc5227f08
// size 1024 MB in 256 objects
// order 22 (4096 kB objects)
// block_name_prefix: rbd_data.1253ac238e1f29
// format: 2
// ...
//
// rbd info volume-4a5bcc8b-2b55-46da-ba04-0d3dc5227f08 --format json
// {"name":"volume-4a5bcc8b-2b55-46da-ba04-0d3dc5227f08","size":1073741824,"objects":256,"order":22,"object_size":4194304,"block_name_prefix":"rbd_data.1253ac238e1f29","format":2,"features":["layering","exclusive-lock","object-map","fast-diff","deep-flatten"],"flags":[]}
//
//
// # image does not exist (exit=2)
// rbd: error opening image 1234: (2) No such file or directory
//
glog.V(4).Infof("rbd: info %s using mon %s, pool %s id %s key %s", b.Image, mon, b.Pool, id, secret)
cmd, err = b.exec.Run("rbd",
"info", b.Image, "--pool", b.Pool, "-m", mon, "--id", id, "--key="+secret)
output = string(cmd)
// break if command succeeds
if err == nil {
break
}
if err, ok := err.(*exec.Error); ok {
if err.Err == exec.ErrNotFound {
glog.Errorf("rbd cmd not found")
// fail fast if command not found
return 0, err
}
}
}
	// If the command never succeeded, return its last error.
if err != nil {
return 0, err
}
if len(output) == 0 {
return 0, fmt.Errorf("can not get image size info %s: %s", b.Image, output)
}
// get the size value string, just between `size ` and ` MB in`, such as `size 1024 MB in 256 objects`
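	// e.g. for "size 1024 MB in 256 objects", rbdSizeStr is "1024" and the
	// returned size is 1024 (the +5 below is len("size ")).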
sizeIndex := strings.Index(output, imageSizeStr)
divIndex := strings.Index(output, sizeDivStr)
if sizeIndex == -1 || divIndex == -1 || divIndex <= sizeIndex+5 {
return 0, fmt.Errorf("can not get image size info %s: %s", b.Image, output)
}
rbdSizeStr := output[sizeIndex+5 : divIndex]
rbdSize, err := strconv.Atoi(rbdSizeStr)
if err != nil {
return 0, fmt.Errorf("can not convert size str: %s to int", rbdSizeStr)
}
return rbdSize, nil
}

// rbdStatus runs the `rbd status` command to check whether there is a watcher
// on the image.
func (util *RBDUtil) rbdStatus(b *rbdMounter) (bool, string, error) {
var err error
var output string
var cmd []byte
// If we don't have admin id/secret (e.g. attaching), fallback to user id/secret.
id := b.adminId
secret := b.adminSecret
if id == "" {
id = b.Id
secret = b.Secret
}
l := len(b.Mon)
start := rand.Int() % l
// iterate all hosts until rbd command succeeds.
for i := start; i < start+l; i++ {
mon := b.Mon[i%l]
// cmd "rbd status" list the rbd client watch with the following output:
//
// # there is a watcher (exit=0)
// Watchers:
// watcher=10.16.153.105:0/710245699 client.14163 cookie=1
//
// # there is no watcher (exit=0)
// Watchers: none
//
// Otherwise, exit is non-zero, for example:
//
// # image does not exist (exit=2)
// rbd: error opening image kubernetes-dynamic-pvc-<UUID>: (2) No such file or directory
//
glog.V(4).Infof("rbd: status %s using mon %s, pool %s id %s key %s", b.Image, mon, b.Pool, id, secret)
cmd, err = b.exec.Run("rbd",
"status", b.Image, "--pool", b.Pool, "-m", mon, "--id", id, "--key="+secret)
output = string(cmd)
// break if command succeeds
if err == nil {
break
}
if err, ok := err.(*exec.Error); ok {
if err.Err == exec.ErrNotFound {
glog.Errorf("rbd cmd not found")
// fail fast if command not found
return false, output, err
}
}
}
	// If the command never succeeded, return its last error.
if err != nil {
return false, output, err
}
if strings.Contains(output, imageWatcherStr) {
glog.V(4).Infof("rbd: watchers on %s: %s", b.Image, output)
return true, output, nil
} else {
glog.Warningf("rbd: no watchers on %s", b.Image)
return false, output, nil
}
}