Fresh dep ensure

Mike Cronce
2018-11-26 13:23:56 -05:00
parent 93cb8a04d7
commit 407478ab9a
9016 changed files with 551394 additions and 279685 deletions

View File

@ -11,27 +11,31 @@ go_library(
srcs = [
"attacher.go",
"cinder.go",
"cinder_block.go",
"cinder_util.go",
"doc.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/cinder",
deps = [
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/openstack:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/util/keymutex:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//pkg/volume/util/volumepathhandler:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/cloud-provider:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],
)
@ -40,19 +44,22 @@ go_test(
name = "go_default_test",
srcs = [
"attacher_test.go",
"cinder_block_test.go",
"cinder_test.go",
],
embed = [":go_default_library"],
deps = [
"//pkg/cloudprovider:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
"//staging/src/k8s.io/cloud-provider:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

View File

@ -24,10 +24,10 @@ import (
"strings"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
@ -40,8 +40,12 @@ type cinderDiskAttacher struct {
var _ volume.Attacher = &cinderDiskAttacher{}
var _ volume.DeviceMounter = &cinderDiskAttacher{}
var _ volume.AttachableVolumePlugin = &cinderPlugin{}
var _ volume.DeviceMountableVolumePlugin = &cinderPlugin{}
const (
probeVolumeInitDelay = 1 * time.Second
probeVolumeFactor = 2.0
@ -67,9 +71,13 @@ func (plugin *cinderPlugin) NewAttacher() (volume.Attacher, error) {
}, nil
}
func (plugin *cinderPlugin) NewDeviceMounter() (volume.DeviceMounter, error) {
return plugin.NewAttacher()
}
func (plugin *cinderPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
mounter := plugin.host.GetMounter(plugin.GetPluginName())
return mount.GetMountRefs(mounter, deviceMountPath)
return mounter.GetMountRefs(deviceMountPath)
}
func (attacher *cinderDiskAttacher) waitOperationFinished(volumeID string) error {
@ -137,31 +145,31 @@ func (attacher *cinderDiskAttacher) Attach(spec *volume.Spec, nodeName types.Nod
attached, err := attacher.cinderProvider.DiskIsAttached(instanceID, volumeID)
if err != nil {
// Log error and continue with attach
glog.Warningf(
klog.Warningf(
"Error checking if volume (%q) is already attached to current instance (%q). Will continue and try attach anyway. err=%v",
volumeID, instanceID, err)
}
if err == nil && attached {
// Volume is already attached to instance.
glog.Infof("Attach operation is successful. volume %q is already attached to instance %q.", volumeID, instanceID)
klog.Infof("Attach operation is successful. volume %q is already attached to instance %q.", volumeID, instanceID)
} else {
_, err = attacher.cinderProvider.AttachDisk(instanceID, volumeID)
if err == nil {
if err = attacher.waitDiskAttached(instanceID, volumeID); err != nil {
glog.Errorf("Error waiting for volume %q to be attached from node %q: %v", volumeID, nodeName, err)
klog.Errorf("Error waiting for volume %q to be attached from node %q: %v", volumeID, nodeName, err)
return "", err
}
glog.Infof("Attach operation successful: volume %q attached to instance %q.", volumeID, instanceID)
klog.Infof("Attach operation successful: volume %q attached to instance %q.", volumeID, instanceID)
} else {
glog.Infof("Attach volume %q to instance %q failed with: %v", volumeID, instanceID, err)
klog.Infof("Attach volume %q to instance %q failed with: %v", volumeID, instanceID, err)
return "", err
}
}
devicePath, err := attacher.cinderProvider.GetAttachmentDiskPath(instanceID, volumeID)
if err != nil {
glog.Infof("Can not get device path of volume %q which be attached to instance %q, failed with: %v", volumeID, instanceID, err)
klog.Infof("Can not get device path of volume %q which be attached to instance %q, failed with: %v", volumeID, instanceID, err)
return "", err
}
@ -175,7 +183,7 @@ func (attacher *cinderDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nod
for _, spec := range specs {
volumeID, _, _, err := getVolumeInfo(spec)
if err != nil {
glog.Errorf("Error getting volume (%q) source : %v", spec.Name(), err)
klog.Errorf("Error getting volume (%q) source : %v", spec.Name(), err)
continue
}
@ -187,7 +195,7 @@ func (attacher *cinderDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nod
attachedResult, err := attacher.cinderProvider.DisksAreAttachedByName(nodeName, volumeIDList)
if err != nil {
// Log error and continue with attach
glog.Errorf(
klog.Errorf(
"Error checking if Volumes (%v) are already attached to current node (%q). Will continue and try attach anyway. err=%v",
volumeIDList, nodeName, err)
return volumesAttachedCheck, err
@ -197,7 +205,7 @@ func (attacher *cinderDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nod
if !attached {
spec := volumeSpecMap[volumeID]
volumesAttachedCheck[spec] = false
glog.V(2).Infof("VolumesAreAttached: check volume %q (specName: %q) is no longer attached", volumeID, spec.Name())
klog.V(2).Infof("VolumesAreAttached: check volume %q (specName: %q) is no longer attached", volumeID, spec.Name())
}
}
return volumesAttachedCheck, nil
@ -223,7 +231,7 @@ func (attacher *cinderDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath
for {
select {
case <-ticker.C:
glog.V(5).Infof("Checking Cinder disk %q is attached.", volumeID)
klog.V(5).Infof("Checking Cinder disk %q is attached.", volumeID)
probeAttachedVolume()
if !attacher.cinderProvider.ShouldTrustDevicePath() {
// Using the Cinder volume ID, find the real device path (See Issue #33128)
@ -231,11 +239,11 @@ func (attacher *cinderDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath
}
exists, err := volumeutil.PathExists(devicePath)
if exists && err == nil {
glog.Infof("Successfully found attached Cinder disk %q at %v.", volumeID, devicePath)
klog.Infof("Successfully found attached Cinder disk %q at %v.", volumeID, devicePath)
return devicePath, nil
}
// Log an error, and continue checking periodically
glog.Errorf("Error: could not find attached Cinder disk %q (path: %q): %v", volumeID, devicePath, err)
klog.Errorf("Error: could not find attached Cinder disk %q (path: %q): %v", volumeID, devicePath, err)
// Using exponential backoff instead of linear
ticker.Stop()
duration = time.Duration(float64(duration) * probeVolumeFactor)
@ -299,6 +307,8 @@ type cinderDiskDetacher struct {
var _ volume.Detacher = &cinderDiskDetacher{}
var _ volume.DeviceUnmounter = &cinderDiskDetacher{}
func (plugin *cinderPlugin) NewDetacher() (volume.Detacher, error) {
cinder, err := plugin.getCloudProvider()
if err != nil {
@ -310,6 +320,10 @@ func (plugin *cinderPlugin) NewDetacher() (volume.Detacher, error) {
}, nil
}
func (plugin *cinderPlugin) NewDeviceUnmounter() (volume.DeviceUnmounter, error) {
return plugin.NewDetacher()
}
func (detacher *cinderDiskDetacher) waitOperationFinished(volumeID string) error {
backoff := wait.Backoff{
Duration: operationFinishInitDelay,
@ -365,26 +379,26 @@ func (detacher *cinderDiskDetacher) Detach(volumeName string, nodeName types.Nod
attached, instanceID, err := detacher.cinderProvider.DiskIsAttachedByName(nodeName, volumeID)
if err != nil {
// Log error and continue with detach
glog.Errorf(
klog.Errorf(
"Error checking if volume (%q) is already attached to current node (%q). Will continue and try detach anyway. err=%v",
volumeID, nodeName, err)
}
if err == nil && !attached {
// Volume is already detached from node.
glog.Infof("detach operation was successful. volume %q is already detached from node %q.", volumeID, nodeName)
klog.Infof("detach operation was successful. volume %q is already detached from node %q.", volumeID, nodeName)
return nil
}
if err = detacher.cinderProvider.DetachDisk(instanceID, volumeID); err != nil {
glog.Errorf("Error detaching volume %q from node %q: %v", volumeID, nodeName, err)
klog.Errorf("Error detaching volume %q from node %q: %v", volumeID, nodeName, err)
return err
}
if err = detacher.waitDiskDetached(instanceID, volumeID); err != nil {
glog.Errorf("Error waiting for volume %q to detach from node %q: %v", volumeID, nodeName, err)
klog.Errorf("Error waiting for volume %q to detach from node %q: %v", volumeID, nodeName, err)
return err
}
glog.Infof("detached volume %q from node %q", volumeID, nodeName)
klog.Infof("detached volume %q from node %q", volumeID, nodeName)
return nil
}
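```go
// Minimal sketch of the glog -> klog migration applied throughout this
// commit: only the import path and package name change; Infof, Errorf,
// Warningf and V(level).Infof keep their signatures. The IDs below are
// hypothetical placeholders.
package main

import "k8s.io/klog"

func main() {
	volumeID, instanceID := "vol-1234", "instance-5678"

	// glog.Infof(...) becomes:
	klog.Infof("Attach operation successful: volume %q attached to instance %q.", volumeID, instanceID)
	// glog.V(5).Infof(...) becomes:
	klog.V(5).Infof("Checking Cinder disk %q is attached.", volumeID)
	// glog.Warningf(...) becomes:
	klog.Warningf("Error checking if volume (%q) is already attached to current instance (%q).", volumeID, instanceID)

	klog.Flush()
}
```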

View File

@ -24,15 +24,15 @@ import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/kubernetes/pkg/cloudprovider"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
"fmt"
"sort"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog"
)
const (
@ -468,7 +468,7 @@ func (testcase *testcase) AttachDisk(instanceID, volumeID string) (string, error
return "", errors.New("unexpected AttachDisk call: wrong instanceID")
}
glog.V(4).Infof("AttachDisk call: %s, %s, returning %q, %v", volumeID, instanceID, expected.retDeviceName, expected.ret)
klog.V(4).Infof("AttachDisk call: %s, %s, returning %q, %v", volumeID, instanceID, expected.retDeviceName, expected.ret)
testcase.attachOrDetach = &attachStatus
return expected.retDeviceName, expected.ret
@ -494,7 +494,7 @@ func (testcase *testcase) DetachDisk(instanceID, volumeID string) error {
return errors.New("unexpected DetachDisk call: wrong instanceID")
}
glog.V(4).Infof("DetachDisk call: %s, %s, returning %v", volumeID, instanceID, expected.ret)
klog.V(4).Infof("DetachDisk call: %s, %s, returning %v", volumeID, instanceID, expected.ret)
testcase.attachOrDetach = &detachStatus
return expected.ret
@ -504,11 +504,11 @@ func (testcase *testcase) OperationPending(diskName string) (bool, string, error
expected := &testcase.operationPending
if expected.volumeStatus == VolumeStatusPending {
glog.V(4).Infof("OperationPending call: %s, returning %v, %v, %v", diskName, expected.pending, expected.volumeStatus, expected.ret)
klog.V(4).Infof("OperationPending call: %s, returning %v, %v, %v", diskName, expected.pending, expected.volumeStatus, expected.ret)
return true, expected.volumeStatus, expected.ret
}
glog.V(4).Infof("OperationPending call: %s, returning %v, %v, %v", diskName, expected.pending, expected.volumeStatus, expected.ret)
klog.V(4).Infof("OperationPending call: %s, returning %v, %v, %v", diskName, expected.pending, expected.volumeStatus, expected.ret)
return false, expected.volumeStatus, expected.ret
}
@ -542,7 +542,7 @@ func (testcase *testcase) DiskIsAttached(instanceID, volumeID string) (bool, err
return false, errors.New("unexpected DiskIsAttached call: wrong instanceID")
}
glog.V(4).Infof("DiskIsAttached call: %s, %s, returning %v, %v", volumeID, instanceID, expected.isAttached, expected.ret)
klog.V(4).Infof("DiskIsAttached call: %s, %s, returning %v, %v", volumeID, instanceID, expected.isAttached, expected.ret)
return expected.isAttached, expected.ret
}
@ -566,7 +566,7 @@ func (testcase *testcase) GetAttachmentDiskPath(instanceID, volumeID string) (st
return "", errors.New("unexpected GetAttachmentDiskPath call: wrong instanceID")
}
glog.V(4).Infof("GetAttachmentDiskPath call: %s, %s, returning %v, %v", volumeID, instanceID, expected.retPath, expected.ret)
klog.V(4).Infof("GetAttachmentDiskPath call: %s, %s, returning %v, %v", volumeID, instanceID, expected.retPath, expected.ret)
return expected.retPath, expected.ret
}
@ -610,13 +610,13 @@ func (testcase *testcase) DiskIsAttachedByName(nodeName types.NodeName, volumeID
return false, instanceID, errors.New("unexpected DiskIsAttachedByName call: wrong instanceID")
}
glog.V(4).Infof("DiskIsAttachedByName call: %s, %s, returning %v, %v", volumeID, nodeName, expected.isAttached, expected.instanceID, expected.ret)
klog.V(4).Infof("DiskIsAttachedByName call: %s, %s, returning %v, %v, %v", volumeID, nodeName, expected.isAttached, expected.instanceID, expected.ret)
return expected.isAttached, expected.instanceID, expected.ret
}
func (testcase *testcase) CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, bool, error) {
return "", "", false, errors.New("Not implemented")
func (testcase *testcase) CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, string, bool, error) {
return "", "", "", false, errors.New("Not implemented")
}
func (testcase *testcase) GetDevicePath(volumeID string) string {
@ -664,7 +664,7 @@ func (testcase *testcase) DisksAreAttached(instanceID string, volumeIDs []string
return areAttached, errors.New("Unexpected DisksAreAttached call: wrong instanceID")
}
glog.V(4).Infof("DisksAreAttached call: %v, %s, returning %v, %v", volumeIDs, instanceID, expected.areAttached, expected.ret)
klog.V(4).Infof("DisksAreAttached call: %v, %s, returning %v, %v", volumeIDs, instanceID, expected.areAttached, expected.ret)
return expected.areAttached, expected.ret
}
@ -694,7 +694,7 @@ func (testcase *testcase) DisksAreAttachedByName(nodeName types.NodeName, volume
return areAttached, errors.New("Unexpected DisksAreAttachedByName call: wrong instanceID")
}
glog.V(4).Infof("DisksAreAttachedByName call: %v, %s, returning %v, %v", volumeIDs, nodeName, expected.areAttached, expected.ret)
klog.V(4).Infof("DisksAreAttachedByName call: %v, %s, returning %v, %v", volumeIDs, nodeName, expected.areAttached, expected.ret)
return expected.areAttached, expected.ret
}

View File

@ -22,13 +22,15 @@ import (
"os"
"path"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/cloudprovider"
utilfeature "k8s.io/apiserver/pkg/util/feature"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/cloudprovider/providers/openstack"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/util/keymutex"
"k8s.io/kubernetes/pkg/util/mount"
kstrings "k8s.io/kubernetes/pkg/util/strings"
@ -51,7 +53,7 @@ type BlockStorageProvider interface {
AttachDisk(instanceID, volumeID string) (string, error)
DetachDisk(instanceID, volumeID string) error
DeleteVolume(volumeID string) error
CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, bool, error)
CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, string, bool, error)
GetDevicePath(volumeID string) string
InstanceID() (string, error)
GetAttachmentDiskPath(instanceID, volumeID string) (string, error)
@ -85,7 +87,7 @@ func getPath(uid types.UID, volName string, host volume.VolumeHost) string {
func (plugin *cinderPlugin) Init(host volume.VolumeHost) error {
plugin.host = host
plugin.volumeLocks = keymutex.NewKeyMutex()
plugin.volumeLocks = keymutex.NewHashed(0)
return nil
}
@ -146,7 +148,9 @@ func (plugin *cinderPlugin) newMounterInternal(spec *volume.Spec, podUID types.U
},
fsType: fsType,
readOnly: readOnly,
blockDeviceMounter: util.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil
blockDeviceMounter: util.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host),
mountOptions: util.MountOptionFromSpec(spec),
}, nil
}
func (plugin *cinderPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
@ -230,7 +234,7 @@ func (plugin *cinderPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*
if err != nil {
return nil, err
}
glog.V(4).Infof("Found volume %s mounted to %s", sourceName, mountPath)
klog.V(4).Infof("Found volume %s mounted to %s", sourceName, mountPath)
cinderVolume := &v1.Volume{
Name: volumeName,
VolumeSource: v1.VolumeSource{
@ -259,10 +263,17 @@ func (plugin *cinderPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize resour
return oldSize, err
}
glog.V(2).Infof("volume %s expanded to new size %d successfully", volumeID, int(newSize.Value()))
klog.V(2).Infof("volume %s expanded to new size %d successfully", volumeID, int(newSize.Value()))
return expandedSize, nil
}
func (plugin *cinderPlugin) ExpandFS(spec *volume.Spec, devicePath, deviceMountPath string, _, _ resource.Quantity) error {
_, err := util.GenericResizeFS(plugin.host, plugin.GetPluginName(), devicePath, deviceMountPath)
return err
}
var _ volume.FSResizableVolumePlugin = &cinderPlugin{}
func (plugin *cinderPlugin) RequiresFSResize() bool {
return true
}
@ -274,7 +285,7 @@ type cdManager interface {
// Detaches the disk from the kubelet's host machine.
DetachDisk(unmounter *cinderVolumeUnmounter) error
// Creates a volume
CreateVolume(provisioner *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, labels map[string]string, fstype string, err error)
CreateVolume(provisioner *cinderVolumeProvisioner, node *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (volumeID string, volumeSizeGB int, labels map[string]string, fstype string, err error)
// Deletes a volume
DeleteVolume(deleter *cinderVolumeDeleter) error
}
@ -286,6 +297,7 @@ type cinderVolumeMounter struct {
fsType string
readOnly bool
blockDeviceMounter *mount.SafeFormatAndMount
mountOptions []string
}
// cinderPersistentDisk volumes are disk resources provided by C3
@ -330,18 +342,18 @@ func (b *cinderVolumeMounter) SetUp(fsGroup *int64) error {
// SetUp bind mounts to the volume path.
func (b *cinderVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
glog.V(5).Infof("Cinder SetUp %s to %s", b.pdName, dir)
klog.V(5).Infof("Cinder SetUp %s to %s", b.pdName, dir)
b.plugin.volumeLocks.LockKey(b.pdName)
defer b.plugin.volumeLocks.UnlockKey(b.pdName)
notmnt, err := b.mounter.IsLikelyNotMountPoint(dir)
if err != nil && !os.IsNotExist(err) {
glog.Errorf("Cannot validate mount point: %s %v", dir, err)
klog.Errorf("Cannot validate mount point: %s %v", dir, err)
return err
}
if !notmnt {
glog.V(4).Infof("Something is already mounted to target %s", dir)
klog.V(4).Infof("Something is already mounted to target %s", dir)
return nil
}
globalPDPath := makeGlobalPDName(b.plugin.host, b.pdName)
@ -352,45 +364,46 @@ func (b *cinderVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
}
if err := os.MkdirAll(dir, 0750); err != nil {
glog.V(4).Infof("Could not create directory %s: %v", dir, err)
klog.V(4).Infof("Could not create directory %s: %v", dir, err)
return err
}
mountOptions := util.JoinMountOptions(options, b.mountOptions)
// Perform a bind mount to the full path to allow duplicate mounts of the same PD.
glog.V(4).Infof("Attempting to mount cinder volume %s to %s with options %v", b.pdName, dir, options)
klog.V(4).Infof("Attempting to mount cinder volume %s to %s with options %v", b.pdName, dir, mountOptions)
err = b.mounter.Mount(globalPDPath, dir, "", options)
if err != nil {
glog.V(4).Infof("Mount failed: %v", err)
klog.V(4).Infof("Mount failed: %v", err)
notmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return err
}
if !notmnt {
if mntErr = b.mounter.Unmount(dir); mntErr != nil {
glog.Errorf("Failed to unmount: %v", mntErr)
klog.Errorf("Failed to unmount: %v", mntErr)
return err
}
notmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return err
}
if !notmnt {
// This is very odd, we don't expect it. We'll try again next sync loop.
glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", b.GetPath())
klog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", b.GetPath())
return err
}
}
os.Remove(dir)
glog.Errorf("Failed to mount %s: %v", dir, err)
klog.Errorf("Failed to mount %s: %v", dir, err)
return err
}
if !b.readOnly {
volume.SetVolumeOwnership(b, fsGroup)
}
glog.V(3).Infof("Cinder volume %s mounted to %s", b.pdName, dir)
klog.V(3).Infof("Cinder volume %s mounted to %s", b.pdName, dir)
return nil
}
@ -419,60 +432,60 @@ func (c *cinderVolumeUnmounter) TearDownAt(dir string) error {
if pathExists, pathErr := util.PathExists(dir); pathErr != nil {
return fmt.Errorf("Error checking if path exists: %v", pathErr)
} else if !pathExists {
glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir)
klog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir)
return nil
}
glog.V(5).Infof("Cinder TearDown of %s", dir)
klog.V(5).Infof("Cinder TearDown of %s", dir)
notmnt, err := c.mounter.IsLikelyNotMountPoint(dir)
if err != nil {
glog.V(4).Infof("IsLikelyNotMountPoint check failed: %v", err)
klog.V(4).Infof("IsLikelyNotMountPoint check failed: %v", err)
return err
}
if notmnt {
glog.V(4).Infof("Nothing is mounted to %s, ignoring", dir)
klog.V(4).Infof("Nothing is mounted to %s, ignoring", dir)
return os.Remove(dir)
}
// Find Cinder volumeID to lock the right volume
// TODO: refactor VolumePlugin.NewUnmounter to get full volume.Spec just like
// NewMounter. We could then find volumeID there without probing MountRefs.
refs, err := mount.GetMountRefs(c.mounter, dir)
refs, err := c.mounter.GetMountRefs(dir)
if err != nil {
glog.V(4).Infof("GetMountRefs failed: %v", err)
klog.V(4).Infof("GetMountRefs failed: %v", err)
return err
}
if len(refs) == 0 {
glog.V(4).Infof("Directory %s is not mounted", dir)
klog.V(4).Infof("Directory %s is not mounted", dir)
return fmt.Errorf("directory %s is not mounted", dir)
}
c.pdName = path.Base(refs[0])
glog.V(4).Infof("Found volume %s mounted to %s", c.pdName, dir)
klog.V(4).Infof("Found volume %s mounted to %s", c.pdName, dir)
// lock the volume (and thus wait for any concurrent SetUpAt to finish)
c.plugin.volumeLocks.LockKey(c.pdName)
defer c.plugin.volumeLocks.UnlockKey(c.pdName)
// Reload list of references, there might be SetUpAt finished in the meantime
refs, err = mount.GetMountRefs(c.mounter, dir)
refs, err = c.mounter.GetMountRefs(dir)
if err != nil {
glog.V(4).Infof("GetMountRefs failed: %v", err)
klog.V(4).Infof("GetMountRefs failed: %v", err)
return err
}
if err := c.mounter.Unmount(dir); err != nil {
glog.V(4).Infof("Unmount failed: %v", err)
klog.V(4).Infof("Unmount failed: %v", err)
return err
}
glog.V(3).Infof("Successfully unmounted: %s\n", dir)
klog.V(3).Infof("Successfully unmounted: %s\n", dir)
notmnt, mntErr := c.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return err
}
if notmnt {
if err := os.Remove(dir); err != nil {
glog.V(4).Infof("Failed to remove directory after unmount: %v", err)
klog.V(4).Infof("Failed to remove directory after unmount: %v", err)
return err
}
}
@ -505,15 +518,20 @@ func (c *cinderVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopolo
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
}
if util.CheckPersistentVolumeClaimModeBlock(c.options.PVC) {
return nil, fmt.Errorf("%s does not support block volume provisioning", c.plugin.GetPluginName())
}
volumeID, sizeGB, labels, fstype, err := c.manager.CreateVolume(c)
volumeID, sizeGB, labels, fstype, err := c.manager.CreateVolume(c, selectedNode, allowedTopologies)
if err != nil {
return nil, err
}
var volumeMode *v1.PersistentVolumeMode
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
volumeMode = c.options.PVC.Spec.VolumeMode
if volumeMode != nil && *volumeMode == v1.PersistentVolumeBlock {
// Block volumes should not have any FSType
fstype = ""
}
}
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: c.options.PVName,
@ -528,6 +546,7 @@ func (c *cinderVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopolo
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
},
VolumeMode: volumeMode,
PersistentVolumeSource: v1.PersistentVolumeSource{
Cinder: &v1.CinderPersistentVolumeSource{
VolumeID: volumeID,
@ -542,6 +561,21 @@ func (c *cinderVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopolo
pv.Spec.AccessModes = c.plugin.GetAccessModes()
}
if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) {
requirements := make([]v1.NodeSelectorRequirement, 0)
for k, v := range labels {
if v != "" {
requirements = append(requirements, v1.NodeSelectorRequirement{Key: k, Operator: v1.NodeSelectorOpIn, Values: []string{v}})
}
}
if len(requirements) > 0 {
pv.Spec.NodeAffinity = new(v1.VolumeNodeAffinity)
pv.Spec.NodeAffinity.Required = new(v1.NodeSelector)
pv.Spec.NodeAffinity.Required.NodeSelectorTerms = make([]v1.NodeSelectorTerm, 1)
pv.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions = requirements
}
}
return pv, nil
}
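```go
// Minimal sketch of the keymutex change in Init() above: NewKeyMutex() is
// replaced by NewHashed(n), which maps keys onto a fixed pool of n mutexes
// (n == 0 picks a default sized from NumCPU) instead of keeping one mutex
// per key. The LockKey/UnlockKey call sites are unchanged. The volume ID is
// a hypothetical placeholder.
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/keymutex"
)

func main() {
	volumeLocks := keymutex.NewHashed(0) // was: keymutex.NewKeyMutex()

	pdName := "vol-1234"
	volumeLocks.LockKey(pdName)
	defer volumeLocks.UnlockKey(pdName)

	fmt.Printf("holding per-volume lock for %s\n", pdName)
}
```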

View File

@ -0,0 +1,167 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cinder
import (
"fmt"
"path/filepath"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/util/mount"
kstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
)
var _ volume.VolumePlugin = &cinderPlugin{}
var _ volume.PersistentVolumePlugin = &cinderPlugin{}
var _ volume.BlockVolumePlugin = &cinderPlugin{}
var _ volume.DeletableVolumePlugin = &cinderPlugin{}
var _ volume.ProvisionableVolumePlugin = &cinderPlugin{}
var _ volume.ExpandableVolumePlugin = &cinderPlugin{}
func (plugin *cinderPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, mapPath string) (*volume.Spec, error) {
pluginDir := plugin.host.GetVolumeDevicePluginDir(cinderVolumePluginName)
blkutil := volumepathhandler.NewBlockVolumePathHandler()
globalMapPathUUID, err := blkutil.FindGlobalMapPathUUIDFromPod(pluginDir, mapPath, podUID)
if err != nil {
return nil, err
}
klog.V(5).Infof("globalMapPathUUID: %v, err: %v", globalMapPathUUID, err)
globalMapPath := filepath.Dir(globalMapPathUUID)
if len(globalMapPath) <= 1 {
return nil, fmt.Errorf("failed to get volume plugin information from globalMapPathUUID: %v", globalMapPathUUID)
}
return getVolumeSpecFromGlobalMapPath(globalMapPath)
}
func getVolumeSpecFromGlobalMapPath(globalMapPath string) (*volume.Spec, error) {
// Get volume spec information from globalMapPath
// globalMapPath example:
// plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumeID}
// plugins/kubernetes.io/cinder/volumeDevices/vol-XXXXXX
vID := filepath.Base(globalMapPath)
if len(vID) <= 1 {
return nil, fmt.Errorf("failed to get volumeID from global path=%s", globalMapPath)
}
block := v1.PersistentVolumeBlock
cinderVolume := &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
Cinder: &v1.CinderPersistentVolumeSource{
VolumeID: vID,
},
},
VolumeMode: &block,
},
}
return volume.NewSpecFromPersistentVolume(cinderVolume, true), nil
}
// NewBlockVolumeMapper creates a new volume.BlockVolumeMapper from an API specification.
func (plugin *cinderPlugin) NewBlockVolumeMapper(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.BlockVolumeMapper, error) {
// If this is called via GenerateUnmapDeviceFunc(), pod is nil.
// Pass empty string as dummy uid since uid isn't used in the case.
var uid types.UID
if pod != nil {
uid = pod.UID
}
return plugin.newBlockVolumeMapperInternal(spec, uid, &DiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *cinderPlugin) newBlockVolumeMapperInternal(spec *volume.Spec, podUID types.UID, manager cdManager, mounter mount.Interface) (volume.BlockVolumeMapper, error) {
pdName, fsType, readOnly, err := getVolumeInfo(spec)
if err != nil {
return nil, err
}
return &cinderVolumeMapper{
cinderVolume: &cinderVolume{
podUID: podUID,
volName: spec.Name(),
pdName: pdName,
fsType: fsType,
manager: manager,
mounter: mounter,
plugin: plugin,
},
readOnly: readOnly}, nil
}
func (plugin *cinderPlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (volume.BlockVolumeUnmapper, error) {
return plugin.newUnmapperInternal(volName, podUID, &DiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *cinderPlugin) newUnmapperInternal(volName string, podUID types.UID, manager cdManager, mounter mount.Interface) (volume.BlockVolumeUnmapper, error) {
return &cinderPluginUnmapper{
cinderVolume: &cinderVolume{
podUID: podUID,
volName: volName,
manager: manager,
mounter: mounter,
plugin: plugin,
}}, nil
}
func (c *cinderPluginUnmapper) TearDownDevice(mapPath, devicePath string) error {
return nil
}
type cinderPluginUnmapper struct {
*cinderVolume
}
var _ volume.BlockVolumeUnmapper = &cinderPluginUnmapper{}
type cinderVolumeMapper struct {
*cinderVolume
readOnly bool
}
var _ volume.BlockVolumeMapper = &cinderVolumeMapper{}
func (b *cinderVolumeMapper) SetUpDevice() (string, error) {
return "", nil
}
func (b *cinderVolumeMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, volumeMapName string, podUID types.UID) error {
return util.MapBlockVolume(devicePath, globalMapPath, volumeMapPath, volumeMapName, podUID)
}
// GetGlobalMapPath returns global map path and error
// path: plugins/kubernetes.io/{PluginName}/volumeDevices/volumeID
// plugins/kubernetes.io/cinder/volumeDevices/vol-XXXXXX
func (cd *cinderVolume) GetGlobalMapPath(spec *volume.Spec) (string, error) {
pdName, _, _, err := getVolumeInfo(spec)
if err != nil {
return "", err
}
return filepath.Join(cd.plugin.host.GetVolumeDevicePluginDir(cinderVolumePluginName), pdName), nil
}
// GetPodDeviceMapPath returns pod device map path and volume name
// path: pods/{podUid}/volumeDevices/kubernetes.io~cinder
func (cd *cinderVolume) GetPodDeviceMapPath() (string, string) {
name := cinderVolumePluginName
return cd.plugin.host.GetPodVolumeDeviceDir(cd.podUID, kstrings.EscapeQualifiedNameForDisk(name)), cd.volName
}
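```go
// Illustrative sketch of the path convention used by the block code above:
// getVolumeSpecFromGlobalMapPath takes the last path element as the Cinder
// volume ID, and GetPodDeviceMapPath places per-pod device maps under
// pods/{podUID}/volumeDevices/kubernetes.io~cinder. All paths and IDs here
// are hypothetical.
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	globalMapPath := "/var/lib/kubelet/plugins/kubernetes.io/cinder/volumeDevices/vol-1234"
	volumeID := filepath.Base(globalMapPath) // -> "vol-1234"

	podDeviceMapPath := filepath.Join(
		"/var/lib/kubelet/pods/poduid/volumeDevices", "kubernetes.io~cinder")

	fmt.Println(volumeID, podDeviceMapPath)
}
```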

View File

@ -0,0 +1,145 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cinder
import (
"os"
"path"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
const (
testVolName = "vol-1234"
testPVName = "pv1"
testGlobalPath = "plugins/kubernetes.io/cinder/volumeDevices/vol-1234"
testPodPath = "pods/poduid/volumeDevices/kubernetes.io~cinder"
)
func TestGetVolumeSpecFromGlobalMapPath(t *testing.T) {
// make our test path for fake GlobalMapPath
// /tmp symbolizes our pluginDir
// /tmp/testGlobalPathXXXXX/plugins/kubernetes.io/cinder/volumeDevices/pdVol1
tmpVDir, err := utiltesting.MkTmpdir("cinderBlockTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
//deferred clean up
defer os.RemoveAll(tmpVDir)
expectedGlobalPath := path.Join(tmpVDir, testGlobalPath)
//Bad Path
badspec, err := getVolumeSpecFromGlobalMapPath("")
if badspec != nil || err == nil {
t.Errorf("Expected not to get spec from GlobalMapPath but did")
}
// Good Path
spec, err := getVolumeSpecFromGlobalMapPath(expectedGlobalPath)
if spec == nil || err != nil {
t.Fatalf("Failed to get spec from GlobalMapPath: %v", err)
}
if spec.PersistentVolume.Spec.Cinder.VolumeID != testVolName {
t.Errorf("Invalid volumeID from GlobalMapPath spec: %s", spec.PersistentVolume.Spec.Cinder.VolumeID)
}
block := v1.PersistentVolumeBlock
specMode := spec.PersistentVolume.Spec.VolumeMode
if specMode == nil {
t.Fatalf("Invalid volumeMode from GlobalMapPath spec: %v expected: %v", specMode, block)
}
if *specMode != block {
t.Errorf("Invalid volumeMode from GlobalMapPath spec: %v expected: %v", *specMode, block)
}
}
func getTestVolume(readOnly bool, isBlock bool) *volume.Spec {
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: testPVName,
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
Cinder: &v1.CinderPersistentVolumeSource{
VolumeID: testVolName,
},
},
},
}
if isBlock {
blockMode := v1.PersistentVolumeBlock
pv.Spec.VolumeMode = &blockMode
}
return volume.NewSpecFromPersistentVolume(pv, readOnly)
}
func TestGetPodAndPluginMapPaths(t *testing.T) {
tmpVDir, err := utiltesting.MkTmpdir("cinderBlockTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
//deferred clean up
defer os.RemoveAll(tmpVDir)
expectedGlobalPath := path.Join(tmpVDir, testGlobalPath)
expectedPodPath := path.Join(tmpVDir, testPodPath)
spec := getTestVolume(false, true /*isBlock*/)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpVDir, nil, nil))
plug, err := plugMgr.FindMapperPluginByName(cinderVolumePluginName)
if err != nil {
os.RemoveAll(tmpVDir)
t.Fatalf("Can't find the plugin by name: %q", cinderVolumePluginName)
}
if plug.GetPluginName() != cinderVolumePluginName {
t.Fatalf("Wrong name: %s", plug.GetPluginName())
}
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
mapper, err := plug.NewBlockVolumeMapper(spec, pod, volume.VolumeOptions{})
if err != nil {
t.Fatalf("Failed to make a new Mounter: %v", err)
}
if mapper == nil {
t.Fatalf("Got a nil Mounter")
}
//GetGlobalMapPath
gMapPath, err := mapper.GetGlobalMapPath(spec)
if err != nil || len(gMapPath) == 0 {
t.Fatalf("Invalid GlobalMapPath from spec: %s", spec.PersistentVolume.Spec.Cinder.VolumeID)
}
if gMapPath != expectedGlobalPath {
t.Errorf("Failed to get GlobalMapPath: %s %s", gMapPath, expectedGlobalPath)
}
//GetPodDeviceMapPath
gDevicePath, gVolName := mapper.GetPodDeviceMapPath()
if gDevicePath != expectedPodPath {
t.Errorf("Got unexpected pod path: %s, expected %s", gDevicePath, expectedPodPath)
}
if gVolName != testPVName {
t.Errorf("Got unexpected volNamne: %s, expected %s", gVolName, testPVName)
}
}

View File

@ -26,6 +26,7 @@ import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
utiltesting "k8s.io/client-go/util/testing"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
@ -116,8 +117,10 @@ func (fake *fakePDManager) DetachDisk(c *cinderVolumeUnmounter) error {
return nil
}
func (fake *fakePDManager) CreateVolume(c *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, labels map[string]string, fstype string, err error) {
return "test-volume-name", 1, nil, "", nil
func (fake *fakePDManager) CreateVolume(c *cinderVolumeProvisioner, node *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (volumeID string, volumeSizeGB int, labels map[string]string, fstype string, err error) {
labels = make(map[string]string)
labels[kubeletapis.LabelZoneFailureDomain] = "nova"
return "test-volume-name", 1, labels, "", nil
}
func (fake *fakePDManager) DeleteVolume(cd *cinderVolumeDeleter) error {
@ -192,7 +195,7 @@ func TestPlugin(t *testing.T) {
// Test Provisioner
options := volume.VolumeOptions{
PVC: volumetest.CreateTestPVC("100Mi", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}),
PVC: volumetest.CreateTestPVC("100Mi", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}),
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
}
provisioner, err := plug.(*cinderPlugin).newProvisionerInternal(options, &fakePDManager{0})
@ -210,6 +213,39 @@ func TestPlugin(t *testing.T) {
t.Errorf("Provision() returned unexpected volume size: %v", size)
}
// check nodeaffinity members
if persistentSpec.Spec.NodeAffinity == nil {
t.Errorf("Provision() returned unexpected nil NodeAffinity")
}
if persistentSpec.Spec.NodeAffinity.Required == nil {
t.Errorf("Provision() returned unexpected nil NodeAffinity.Required")
}
n := len(persistentSpec.Spec.NodeAffinity.Required.NodeSelectorTerms)
if n != 1 {
t.Errorf("Provision() returned unexpected number of NodeSelectorTerms %d. Expected %d", n, 1)
}
n = len(persistentSpec.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions)
if n != 1 {
t.Errorf("Provision() returned unexpected number of MatchExpressions %d. Expected %d", n, 1)
}
req := persistentSpec.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions[0]
if req.Key != kubeletapis.LabelZoneFailureDomain {
t.Errorf("Provision() returned unexpected requirement key in NodeAffinity %v", req.Key)
}
if req.Operator != v1.NodeSelectorOpIn {
t.Errorf("Provision() returned unexpected requirement operator in NodeAffinity %v", req.Operator)
}
if len(req.Values) != 1 || req.Values[0] != "nova" {
t.Errorf("Provision() returned unexpected requirement value in NodeAffinity %v", req.Values)
}
// Test Deleter
volSpec := &volume.Spec{
PersistentVolume: persistentSpec,

View File

@ -24,8 +24,8 @@ import (
"strings"
"time"
"github.com/golang/glog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
@ -95,7 +95,7 @@ func (util *DiskUtil) AttachDisk(b *cinderVolumeMounter, globalPDPath string) er
os.Remove(globalPDPath)
return err
}
glog.V(2).Infof("Safe mount successful: %q\n", devicePath)
klog.V(2).Infof("Safe mount successful: %q\n", devicePath)
}
return nil
}
@ -109,7 +109,7 @@ func (util *DiskUtil) DetachDisk(cd *cinderVolumeUnmounter) error {
if err := os.Remove(globalPDPath); err != nil {
return err
}
glog.V(2).Infof("Successfully unmounted main device: %s\n", globalPDPath)
klog.V(2).Infof("Successfully unmounted main device: %s\n", globalPDPath)
cloud, err := cd.plugin.getCloudProvider()
if err != nil {
@ -122,7 +122,7 @@ func (util *DiskUtil) DetachDisk(cd *cinderVolumeUnmounter) error {
if err = cloud.DetachDisk(instanceid, cd.pdName); err != nil {
return err
}
glog.V(2).Infof("Successfully detached cinder volume %s", cd.pdName)
klog.V(2).Infof("Successfully detached cinder volume %s", cd.pdName)
return nil
}
@ -136,10 +136,10 @@ func (util *DiskUtil) DeleteVolume(cd *cinderVolumeDeleter) error {
if err = cloud.DeleteVolume(cd.pdName); err != nil {
// OpenStack cloud provider returns volume.tryAgainError when necessary,
// no handling needed here.
glog.V(2).Infof("Error deleting cinder volume %s: %v", cd.pdName, err)
klog.V(2).Infof("Error deleting cinder volume %s: %v", cd.pdName, err)
return err
}
glog.V(2).Infof("Successfully deleted cinder volume %s", cd.pdName)
klog.V(2).Infof("Successfully deleted cinder volume %s", cd.pdName)
return nil
}
@ -149,7 +149,7 @@ func getZonesFromNodes(kubeClient clientset.Interface) (sets.String, error) {
zones := make(sets.String)
nodes, err := kubeClient.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
glog.V(2).Infof("Error listing nodes")
klog.V(2).Infof("Error listing nodes")
return zones, err
}
for _, node := range nodes.Items {
@ -157,21 +157,24 @@ func getZonesFromNodes(kubeClient clientset.Interface) (sets.String, error) {
zones.Insert(zone)
}
}
glog.V(4).Infof("zones found: %v", zones)
klog.V(4).Infof("zones found: %v", zones)
return zones, nil
}
// CreateVolume uses the cloud provider entrypoint for creating a volume
func (util *DiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, volumeLabels map[string]string, fstype string, err error) {
func (util *DiskUtil) CreateVolume(c *cinderVolumeProvisioner, node *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (volumeID string, volumeSizeGB int, volumeLabels map[string]string, fstype string, err error) {
cloud, err := c.plugin.getCloudProvider()
if err != nil {
return "", 0, nil, "", err
}
capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
volSizeBytes := capacity.Value()
// Cinder works with gigabytes, convert to GiB with rounding up
volSizeGB := int(volutil.RoundUpSize(volSizeBytes, 1024*1024*1024))
volSizeGiB, err := volutil.RoundUpToGiBInt(capacity)
if err != nil {
return "", 0, nil, "", err
}
name := volutil.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 255) // Cinder volume name can have up to 255 characters
vtype := ""
availability := ""
@ -198,29 +201,38 @@ func (util *DiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID string,
// No zone specified, choose one randomly in the same region
zones, err := getZonesFromNodes(c.plugin.host.GetKubeClient())
if err != nil {
glog.V(2).Infof("error getting zone information: %v", err)
klog.V(2).Infof("error getting zone information: %v", err)
return "", 0, nil, "", err
}
// if we did not get any zones, let's leave it blank and gophercloud will
// use zone "nova" as default
if len(zones) > 0 {
availability = volutil.ChooseZoneForVolume(zones, c.options.PVC.Name)
availability, err = volutil.SelectZoneForVolume(false, false, "", nil, zones, node, allowedTopologies, c.options.PVC.Name)
if err != nil {
klog.V(2).Infof("error selecting zone for volume: %v", err)
return "", 0, nil, "", err
}
}
}
volumeID, volumeAZ, IgnoreVolumeAZ, errr := cloud.CreateVolume(name, volSizeGB, vtype, availability, c.options.CloudTags)
if errr != nil {
glog.V(2).Infof("Error creating cinder volume: %v", errr)
return "", 0, nil, "", errr
volumeID, volumeAZ, volumeRegion, IgnoreVolumeAZ, err := cloud.CreateVolume(name, volSizeGiB, vtype, availability, c.options.CloudTags)
if err != nil {
klog.V(2).Infof("Error creating cinder volume: %v", err)
return "", 0, nil, "", err
}
glog.V(2).Infof("Successfully created cinder volume %s", volumeID)
klog.V(2).Infof("Successfully created cinder volume %s", volumeID)
// these labels are needed so that the pod gets scheduled to the same AZ as the volume
volumeLabels = make(map[string]string)
if IgnoreVolumeAZ == false {
volumeLabels[kubeletapis.LabelZoneFailureDomain] = volumeAZ
if volumeAZ != "" {
volumeLabels[kubeletapis.LabelZoneFailureDomain] = volumeAZ
}
if volumeRegion != "" {
volumeLabels[kubeletapis.LabelZoneRegion] = volumeRegion
}
}
return volumeID, volSizeGB, volumeLabels, fstype, nil
return volumeID, volSizeGiB, volumeLabels, fstype, nil
}
func probeAttachedVolume() error {
@ -236,17 +248,17 @@ func probeAttachedVolume() error {
cmdSettle := executor.Command("udevadm", argsSettle...)
_, errSettle := cmdSettle.CombinedOutput()
if errSettle != nil {
glog.Errorf("error running udevadm settle %v\n", errSettle)
klog.Errorf("error running udevadm settle %v\n", errSettle)
}
args := []string{"trigger"}
cmd := executor.Command("udevadm", args...)
_, err := cmd.CombinedOutput()
if err != nil {
glog.Errorf("error running udevadm trigger %v\n", err)
klog.Errorf("error running udevadm trigger %v\n", err)
return err
}
glog.V(4).Infof("Successfully probed all attachments")
klog.V(4).Infof("Successfully probed all attachments")
return nil
}
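```go
// Worked example of the size rounding in CreateVolume above: the PVC request
// is rounded up to whole GiB (previously RoundUpSize(bytes, 1024*1024*1024),
// now RoundUpToGiBInt, which additionally guards against int overflow). The
// helper below reproduces only the arithmetic; it is not the volutil code.
package main

import "fmt"

func roundUpToGiB(sizeBytes int64) int64 {
	const giB = 1024 * 1024 * 1024
	return (sizeBytes + giB - 1) / giB
}

func main() {
	// A "100Mi" PVC request, as in the provisioning test above.
	requestBytes := int64(100 * 1024 * 1024)
	fmt.Println(roundUpToGiB(requestBytes)) // 1 -> a 1 GiB Cinder volume
}
```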