vendor files

Serguei Bezverkhi
2018-01-09 13:57:14 -05:00
parent 558bc6c02a
commit 7b24313bd6
16547 changed files with 4527373 additions and 0 deletions

vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/BUILD
@@ -0,0 +1,66 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"attacher.go",
"vsphere_volume.go",
"vsphere_volume_util.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/vsphere_volume",
deps = [
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/vsphere:go_default_library",
"//pkg/cloudprovider/providers/vsphere/vclib:go_default_library",
"//pkg/util/keymutex:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"attacher_test.go",
"vsphere_volume_test.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/vsphere_volume",
library = ":go_default_library",
deps = [
"//pkg/cloudprovider/providers/vsphere/vclib:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/OWNERS generated vendored Executable file
@@ -0,0 +1,16 @@
approvers:
- pmorie
- saad-ali
- thockin
- matchstick
- kerneltime
reviewers:
- abithap
- abrarshivani
- saad-ali
- justinsb
- jsafrane
- rootfs
- jingxu97
- msau42
- kerneltime

vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/attacher.go
@@ -0,0 +1,296 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere_volume
import (
"fmt"
"os"
"path"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/pkg/util/keymutex"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
type vsphereVMDKAttacher struct {
host volume.VolumeHost
vsphereVolumes vsphere.Volumes
}
var _ volume.Attacher = &vsphereVMDKAttacher{}
var _ volume.AttachableVolumePlugin = &vsphereVolumePlugin{}
// Singleton key mutex for keeping attach operations for the same host atomic
var attachdetachMutex = keymutex.NewKeyMutex()
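// attachdetachMutex above serializes operations that share a key while letting
// different keys proceed in parallel: Attach and Detach both lock on the node
// name, so concurrent attach/detach calls for the same node run one at a time
// while calls for different nodes are not blocked.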
func (plugin *vsphereVolumePlugin) NewAttacher() (volume.Attacher, error) {
vsphereCloud, err := getCloudProvider(plugin.host.GetCloudProvider())
if err != nil {
return nil, err
}
return &vsphereVMDKAttacher{
host: plugin.host,
vsphereVolumes: vsphereCloud,
}, nil
}
// Attaches the volume specified by the given spec to the given host.
// On success, returns the device path where the device was attached on the
// node.
// Callers are responsible for retrying on failure.
// Callers are responsible for thread safety between concurrent attach and
// detach operations.
func (attacher *vsphereVMDKAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return "", err
}
glog.V(4).Infof("vSphere: Attach disk called for node %s", nodeName)
// Keeps concurrent attach operations to the same host atomic
attachdetachMutex.LockKey(string(nodeName))
defer attachdetachMutex.UnlockKey(string(nodeName))
// vsphereCloud.AttachDisk checks if disk is already attached to host and
// succeeds in that case, so no need to do that separately.
diskUUID, err := attacher.vsphereVolumes.AttachDisk(volumeSource.VolumePath, volumeSource.StoragePolicyName, nodeName)
if err != nil {
glog.Errorf("Error attaching volume %q to node %q: %+v", volumeSource.VolumePath, nodeName, err)
return "", err
}
return path.Join(diskByIDPath, diskSCSIPrefix+diskUUID), nil
}
func (attacher *vsphereVMDKAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
glog.Warningf("Attacher.VolumesAreAttached called for node %q - Please use BulkVerifyVolumes for vSphere", nodeName)
volumeNodeMap := map[types.NodeName][]*volume.Spec{
nodeName: specs,
}
nodeVolumesResult := make(map[*volume.Spec]bool)
nodesVerificationMap, err := attacher.BulkVerifyVolumes(volumeNodeMap)
if err != nil {
glog.Errorf("Attacher.VolumesAreAttached - error checking volumes for node %q with %v", nodeName, err)
return nodeVolumesResult, err
}
if result, ok := nodesVerificationMap[nodeName]; ok {
return result, nil
}
return nodeVolumesResult, nil
}
func (attacher *vsphereVMDKAttacher) BulkVerifyVolumes(volumesByNode map[types.NodeName][]*volume.Spec) (map[types.NodeName]map[*volume.Spec]bool, error) {
volumesAttachedCheck := make(map[types.NodeName]map[*volume.Spec]bool)
volumePathsByNode := make(map[types.NodeName][]string)
volumeSpecMap := make(map[string]*volume.Spec)
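// The loop below starts by assuming every requested volume is attached
// (each spec is recorded as true in volumesAttachedCheck) and remembers the
// VolumePath for each spec; after DisksAreAttached reports back, only the
// paths it says are not attached get flipped to false via setNodeVolume.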
for nodeName, volumeSpecs := range volumesByNode {
for _, volumeSpec := range volumeSpecs {
volumeSource, _, err := getVolumeSource(volumeSpec)
if err != nil {
glog.Errorf("Error getting volume (%q) source : %v", volumeSpec.Name(), err)
continue
}
volPath := volumeSource.VolumePath
volumePathsByNode[nodeName] = append(volumePathsByNode[nodeName], volPath)
nodeVolume, nodeVolumeExists := volumesAttachedCheck[nodeName]
if !nodeVolumeExists {
nodeVolume = make(map[*volume.Spec]bool)
}
nodeVolume[volumeSpec] = true
volumeSpecMap[volPath] = volumeSpec
volumesAttachedCheck[nodeName] = nodeVolume
}
}
attachedResult, err := attacher.vsphereVolumes.DisksAreAttached(volumePathsByNode)
if err != nil {
glog.Errorf("Error checking if volumes are attached to nodes: %+v. err: %v", volumePathsByNode, err)
return volumesAttachedCheck, err
}
for nodeName, nodeVolumes := range attachedResult {
for volumePath, attached := range nodeVolumes {
if !attached {
spec := volumeSpecMap[volumePath]
setNodeVolume(volumesAttachedCheck, spec, nodeName, false)
}
}
}
return volumesAttachedCheck, nil
}
func (attacher *vsphereVMDKAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) {
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return "", err
}
if devicePath == "" {
return "", fmt.Errorf("WaitForAttach failed for VMDK %q: devicePath is empty.", volumeSource.VolumePath)
}
ticker := time.NewTicker(checkSleepDuration)
defer ticker.Stop()
timer := time.NewTimer(timeout)
defer timer.Stop()
for {
select {
case <-ticker.C:
glog.V(5).Infof("Checking VMDK %q is attached", volumeSource.VolumePath)
path, err := verifyDevicePath(devicePath)
if err != nil {
// Log error, if any, and continue checking periodically. See issue #11321
glog.Warningf("Error verifying VMDK (%q) is attached: %v", volumeSource.VolumePath, err)
} else if path != "" {
// A device path has successfully been created for the VMDK
glog.Infof("Successfully found attached VMDK %q.", volumeSource.VolumePath)
return path, nil
}
case <-timer.C:
return "", fmt.Errorf("Could not find attached VMDK %q. Timeout waiting for mount paths to be created.", volumeSource.VolumePath)
}
}
}
// GetDeviceMountPath returns the global mount path for the device; individual
// volumes are later bind-mounted from this path.
func (attacher *vsphereVMDKAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) {
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return "", err
}
return makeGlobalPDPath(attacher.host, volumeSource.VolumePath), nil
}
// GetDeviceMountRefs finds all other references to the device referenced
// by deviceMountPath; returns a list of paths.
func (plugin *vsphereVolumePlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
mounter := plugin.host.GetMounter(plugin.GetPluginName())
return mount.GetMountRefs(mounter, deviceMountPath)
}
// MountDevice mounts device to global mount point.
func (attacher *vsphereVMDKAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
mounter := attacher.host.GetMounter(vsphereVolumePluginName)
notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
if err != nil {
if os.IsNotExist(err) {
if err := os.MkdirAll(deviceMountPath, 0750); err != nil {
glog.Errorf("Failed to create directory at %#v. err: %s", deviceMountPath, err)
return err
}
notMnt = true
} else {
return err
}
}
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return err
}
options := []string{}
if notMnt {
diskMounter := volumehelper.NewSafeFormatAndMountFromHost(vsphereVolumePluginName, attacher.host)
mountOptions := volume.MountOptionFromSpec(spec, options...)
err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions)
if err != nil {
os.Remove(deviceMountPath)
return err
}
glog.V(4).Infof("formatting spec %v devicePath %v deviceMountPath %v fs %v with options %+v", spec.Name(), devicePath, deviceMountPath, volumeSource.FSType, options)
}
return nil
}
type vsphereVMDKDetacher struct {
mounter mount.Interface
vsphereVolumes vsphere.Volumes
}
var _ volume.Detacher = &vsphereVMDKDetacher{}
func (plugin *vsphereVolumePlugin) NewDetacher() (volume.Detacher, error) {
vsphereCloud, err := getCloudProvider(plugin.host.GetCloudProvider())
if err != nil {
return nil, err
}
return &vsphereVMDKDetacher{
mounter: plugin.host.GetMounter(plugin.GetPluginName()),
vsphereVolumes: vsphereCloud,
}, nil
}
// Detach the given device from the given node.
func (detacher *vsphereVMDKDetacher) Detach(volumeName string, nodeName types.NodeName) error {
volPath := getVolPathfromVolumeName(volumeName)
attached, err := detacher.vsphereVolumes.DiskIsAttached(volPath, nodeName)
if err != nil {
// Log error and continue with detach
glog.Errorf(
"Error checking if volume (%q) is already attached to current node (%q). Will continue and try detach anyway. err=%v",
volPath, nodeName, err)
}
if err == nil && !attached {
// Volume is already detached from node.
glog.Infof("detach operation was successful. volume %q is already detached from node %q.", volPath, nodeName)
return nil
}
attachdetachMutex.LockKey(string(nodeName))
defer attachdetachMutex.UnlockKey(string(nodeName))
if err := detacher.vsphereVolumes.DetachDisk(volPath, nodeName); err != nil {
glog.Errorf("Error detaching volume %q: %v", volPath, err)
return err
}
return nil
}
func (detacher *vsphereVMDKDetacher) UnmountDevice(deviceMountPath string) error {
return volumeutil.UnmountPath(deviceMountPath, detacher.mounter)
}
func setNodeVolume(
nodeVolumeMap map[types.NodeName]map[*volume.Spec]bool,
volumeSpec *volume.Spec,
nodeName types.NodeName,
check bool) {
volumeMap := nodeVolumeMap[nodeName]
if volumeMap == nil {
volumeMap = make(map[*volume.Spec]bool)
nodeVolumeMap[nodeName] = volumeMap
}
volumeMap[volumeSpec] = check
}
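The device path that Attach returns, and that WaitForAttach later polls for via verifyDevicePath, is just the by-id path built from the disk's SCSI UUID. The standalone sketch below only illustrates that construction; the two constants mirror the ones declared in vsphere_volume_util.go, and the UUID is a made-up value.

package main

import (
	"fmt"
	"path"
)

const (
	diskByIDPath   = "/dev/disk/by-id/"
	diskSCSIPrefix = "wwn-0x"
)

func main() {
	// AttachDisk returns the virtual disk's SCSI UUID; Attach joins it onto
	// the by-id prefix, and that is the path the kubelet waits for.
	diskUUID := "6000c2903d1a2b3c4d5e6f708192a3b4" // hypothetical UUID
	fmt.Println(path.Join(diskByIDPath, diskSCSIPrefix+diskUUID))
	// /dev/disk/by-id/wwn-0x6000c2903d1a2b3c4d5e6f708192a3b4
}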

vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/attacher_test.go
@@ -0,0 +1,322 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere_volume
import (
"errors"
"testing"
"k8s.io/api/core/v1"
k8stypes "k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/types"
)
func TestGetDeviceName_Volume(t *testing.T) {
plugin := newPlugin()
volPath := "[local] volumes/test"
spec := createVolSpec(volPath)
deviceName, err := plugin.GetVolumeName(spec)
if err != nil {
t.Errorf("GetDeviceName error: %v", err)
}
if deviceName != volPath {
t.Errorf("GetDeviceName error: expected %s, got %s", volPath, deviceName)
}
}
func TestGetDeviceName_PersistentVolume(t *testing.T) {
plugin := newPlugin()
volPath := "[local] volumes/test"
spec := createPVSpec(volPath)
deviceName, err := plugin.GetVolumeName(spec)
if err != nil {
t.Errorf("GetDeviceName error: %v", err)
}
if deviceName != volPath {
t.Errorf("GetDeviceName error: expected %s, got %s", volPath, deviceName)
}
}
// One testcase for TestAttachDetach table test below
type testcase struct {
name string
// For fake vSphere:
attach attachCall
detach detachCall
diskIsAttached diskIsAttachedCall
t *testing.T
// Actual test to run
test func(test *testcase) (string, error)
// Expected return of the test
expectedDevice string
expectedError error
}
func TestAttachDetach(t *testing.T) {
uuid := "00000000000000"
diskName := "[local] volumes/test"
nodeName := types.NodeName("host")
spec := createVolSpec(diskName)
attachError := errors.New("Fake attach error")
detachError := errors.New("Fake detach error")
diskCheckError := errors.New("Fake DiskIsAttached error")
tests := []testcase{
// Successful Attach call
{
name: "Attach_Positive",
attach: attachCall{diskName, nodeName, uuid, nil},
test: func(testcase *testcase) (string, error) {
attacher := newAttacher(testcase)
return attacher.Attach(spec, nodeName)
},
expectedDevice: "/dev/disk/by-id/wwn-0x" + uuid,
},
// Attach call fails
{
name: "Attach_Negative",
attach: attachCall{diskName, nodeName, "", attachError},
test: func(testcase *testcase) (string, error) {
attacher := newAttacher(testcase)
return attacher.Attach(spec, nodeName)
},
expectedError: attachError,
},
// Detach succeeds
{
name: "Detach_Positive",
diskIsAttached: diskIsAttachedCall{diskName, nodeName, true, nil},
detach: detachCall{diskName, nodeName, nil},
test: func(testcase *testcase) (string, error) {
detacher := newDetacher(testcase)
return "", detacher.Detach(diskName, nodeName)
},
},
// Disk is already detached
{
name: "Detach_Positive_AlreadyDetached",
diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, nil},
test: func(testcase *testcase) (string, error) {
detacher := newDetacher(testcase)
return "", detacher.Detach(diskName, nodeName)
},
},
// Detach succeeds when DiskIsAttached fails
{
name: "Detach_Positive_CheckFails",
diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, diskCheckError},
detach: detachCall{diskName, nodeName, nil},
test: func(testcase *testcase) (string, error) {
detacher := newDetacher(testcase)
return "", detacher.Detach(diskName, nodeName)
},
},
// Detach fails
{
name: "Detach_Negative",
diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, diskCheckError},
detach: detachCall{diskName, nodeName, detachError},
test: func(testcase *testcase) (string, error) {
detacher := newDetacher(testcase)
return "", detacher.Detach(diskName, nodeName)
},
expectedError: detachError,
},
}
for _, testcase := range tests {
testcase.t = t
device, err := testcase.test(&testcase)
if err != testcase.expectedError {
t.Errorf("%s failed: expected err=%q, got %q", testcase.name, testcase.expectedError.Error(), err.Error())
}
if device != testcase.expectedDevice {
t.Errorf("%s failed: expected device=%q, got %q", testcase.name, testcase.expectedDevice, device)
}
t.Logf("Test %q succeeded", testcase.name)
}
}
// newPlugin creates a new vsphereVolumePlugin with a fake cloud provider;
// NewAttacher and NewDetacher won't work.
func newPlugin() *vsphereVolumePlugin {
host := volumetest.NewFakeVolumeHost("/tmp", nil, nil)
plugins := ProbeVolumePlugins()
plugin := plugins[0]
plugin.Init(host)
return plugin.(*vsphereVolumePlugin)
}
func newAttacher(testcase *testcase) *vsphereVMDKAttacher {
return &vsphereVMDKAttacher{
host: nil,
vsphereVolumes: testcase,
}
}
func newDetacher(testcase *testcase) *vsphereVMDKDetacher {
return &vsphereVMDKDetacher{
vsphereVolumes: testcase,
}
}
func createVolSpec(name string) *volume.Spec {
return &volume.Spec{
Volume: &v1.Volume{
VolumeSource: v1.VolumeSource{
VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
VolumePath: name,
},
},
},
}
}
func createPVSpec(name string) *volume.Spec {
return &volume.Spec{
PersistentVolume: &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
VolumePath: name,
},
},
},
},
}
}
// Fake vSphere implementation
type attachCall struct {
diskName string
nodeName types.NodeName
retDeviceUUID string
ret error
}
type detachCall struct {
diskName string
nodeName types.NodeName
ret error
}
type diskIsAttachedCall struct {
diskName string
nodeName types.NodeName
isAttached bool
ret error
}
func (testcase *testcase) AttachDisk(diskName string, storagePolicyName string, nodeName types.NodeName) (string, error) {
expected := &testcase.attach
if expected.diskName == "" && expected.nodeName == "" {
// testcase.attach looks uninitialized, test did not expect to call
// AttachDisk
testcase.t.Errorf("Unexpected AttachDisk call!")
return "", errors.New("Unexpected AttachDisk call!")
}
if expected.diskName != diskName {
testcase.t.Errorf("Unexpected AttachDisk call: expected diskName %s, got %s", expected.diskName, diskName)
return "", errors.New("Unexpected AttachDisk call: wrong diskName")
}
if expected.nodeName != nodeName {
testcase.t.Errorf("Unexpected AttachDisk call: expected nodeName %s, got %s", expected.nodeName, nodeName)
return "", errors.New("Unexpected AttachDisk call: wrong nodeName")
}
glog.V(4).Infof("AttachDisk call: %s, %s, returning %q, %v", diskName, nodeName, expected.retDeviceUUID, expected.ret)
return expected.retDeviceUUID, expected.ret
}
func (testcase *testcase) DetachDisk(diskName string, nodeName types.NodeName) error {
expected := &testcase.detach
if expected.diskName == "" && expected.nodeName == "" {
// testcase.detach looks uninitialized, test did not expect to call
// DetachDisk
testcase.t.Errorf("Unexpected DetachDisk call!")
return errors.New("Unexpected DetachDisk call!")
}
if expected.diskName != diskName {
testcase.t.Errorf("Unexpected DetachDisk call: expected diskName %s, got %s", expected.diskName, diskName)
return errors.New("Unexpected DetachDisk call: wrong diskName")
}
if expected.nodeName != nodeName {
testcase.t.Errorf("Unexpected DetachDisk call: expected nodeName %s, got %s", expected.nodeName, nodeName)
return errors.New("Unexpected DetachDisk call: wrong nodeName")
}
glog.V(4).Infof("DetachDisk call: %s, %s, returning %v", diskName, nodeName, expected.ret)
return expected.ret
}
func (testcase *testcase) DiskIsAttached(diskName string, nodeName types.NodeName) (bool, error) {
expected := &testcase.diskIsAttached
if expected.diskName == "" && expected.nodeName == "" {
// testcase.diskIsAttached looks uninitialized, test did not expect to
// call DiskIsAttached
testcase.t.Errorf("Unexpected DiskIsAttached call!")
return false, errors.New("Unexpected DiskIsAttached call!")
}
if expected.diskName != diskName {
testcase.t.Errorf("Unexpected DiskIsAttached call: expected diskName %s, got %s", expected.diskName, diskName)
return false, errors.New("Unexpected DiskIsAttached call: wrong diskName")
}
if expected.nodeName != nodeName {
testcase.t.Errorf("Unexpected DiskIsAttached call: expected nodeName %s, got %s", expected.nodeName, nodeName)
return false, errors.New("Unexpected DiskIsAttached call: wrong nodeName")
}
glog.V(4).Infof("DiskIsAttached call: %s, %s, returning %v, %v", diskName, nodeName, expected.isAttached, expected.ret)
return expected.isAttached, expected.ret
}
func (testcase *testcase) DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string) (map[k8stypes.NodeName]map[string]bool, error) {
return nil, errors.New("Not implemented")
}
func (testcase *testcase) CreateVolume(volumeOptions *vclib.VolumeOptions) (volumePath string, err error) {
return "", errors.New("Not implemented")
}
func (testcase *testcase) DeleteVolume(vmDiskPath string) error {
return errors.New("Not implemented")
}

vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume.go
@@ -0,0 +1,402 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere_volume
import (
"fmt"
"os"
"path"
"strings"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
utilstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
// This is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&vsphereVolumePlugin{}}
}
type vsphereVolumePlugin struct {
host volume.VolumeHost
}
var _ volume.VolumePlugin = &vsphereVolumePlugin{}
var _ volume.PersistentVolumePlugin = &vsphereVolumePlugin{}
var _ volume.DeletableVolumePlugin = &vsphereVolumePlugin{}
var _ volume.ProvisionableVolumePlugin = &vsphereVolumePlugin{}
const (
vsphereVolumePluginName = "kubernetes.io/vsphere-volume"
)
// vSphere Volume Plugin
func (plugin *vsphereVolumePlugin) Init(host volume.VolumeHost) error {
plugin.host = host
return nil
}
func (plugin *vsphereVolumePlugin) GetPluginName() string {
return vsphereVolumePluginName
}
func (plugin *vsphereVolumePlugin) GetVolumeName(spec *volume.Spec) (string, error) {
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return "", err
}
return volumeSource.VolumePath, nil
}
func (plugin *vsphereVolumePlugin) CanSupport(spec *volume.Spec) bool {
return (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.VsphereVolume != nil) ||
(spec.Volume != nil && spec.Volume.VsphereVolume != nil)
}
func (plugin *vsphereVolumePlugin) RequiresRemount() bool {
return false
}
func (plugin *vsphereVolumePlugin) SupportsMountOption() bool {
return true
}
func (plugin *vsphereVolumePlugin) SupportsBulkVolumeVerification() bool {
return true
}
func (plugin *vsphereVolumePlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
return plugin.newMounterInternal(spec, pod.UID, &VsphereDiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *vsphereVolumePlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
return plugin.newUnmounterInternal(volName, podUID, &VsphereDiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *vsphereVolumePlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager vdManager, mounter mount.Interface) (volume.Mounter, error) {
vvol, _, err := getVolumeSource(spec)
if err != nil {
return nil, err
}
volPath := vvol.VolumePath
fsType := vvol.FSType
return &vsphereVolumeMounter{
vsphereVolume: &vsphereVolume{
podUID: podUID,
volName: spec.Name(),
volPath: volPath,
manager: manager,
mounter: mounter,
plugin: plugin,
},
fsType: fsType,
diskMounter: volumehelper.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil
}
func (plugin *vsphereVolumePlugin) newUnmounterInternal(volName string, podUID types.UID, manager vdManager, mounter mount.Interface) (volume.Unmounter, error) {
return &vsphereVolumeUnmounter{
&vsphereVolume{
podUID: podUID,
volName: volName,
manager: manager,
mounter: mounter,
plugin: plugin,
}}, nil
}
func (plugin *vsphereVolumePlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
mounter := plugin.host.GetMounter(plugin.GetPluginName())
pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName())
volumePath, err := mounter.GetDeviceNameFromMount(mountPath, pluginDir)
if err != nil {
return nil, err
}
volumePath = strings.Replace(volumePath, "\\040", " ", -1)
glog.V(5).Infof("vSphere volume path is %q", volumePath)
vsphereVolume := &v1.Volume{
Name: volumeName,
VolumeSource: v1.VolumeSource{
VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
VolumePath: volumePath,
},
},
}
return volume.NewSpecFromVolume(vsphereVolume), nil
}
// Abstract interface to disk operations.
type vdManager interface {
// Creates a volume
CreateVolume(provisioner *vsphereVolumeProvisioner) (volSpec *VolumeSpec, err error)
// Deletes a volume
DeleteVolume(deleter *vsphereVolumeDeleter) error
}
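// vdManager exists so the deleter and provisioner code paths can be exercised
// without a live vCenter: VsphereDiskUtil in vsphere_volume_util.go is the real
// implementation, while the unit tests substitute fakePDManager through the
// newMounterInternal, newUnmounterInternal, newDeleterInternal and
// newProvisionerInternal constructors.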
// vspherePersistentDisk volumes are disk resources that are attached to the kubelet's host machine and exposed to the pod.
type vsphereVolume struct {
volName string
podUID types.UID
// Unique identifier of the volume, used to find the disk resource in the provider.
volPath string
// Filesystem type, optional.
fsType string
// diskID for detach disk
diskID string
// Utility interface that provides API calls to the provider to attach/detach disks.
manager vdManager
// Mounter interface that provides system calls to mount the global path to the pod local path.
mounter mount.Interface
// diskMounter provides the interface that is used to mount the actual block device.
diskMounter mount.Interface
plugin *vsphereVolumePlugin
volume.MetricsNil
}
var _ volume.Mounter = &vsphereVolumeMounter{}
type vsphereVolumeMounter struct {
*vsphereVolume
fsType string
diskMounter *mount.SafeFormatAndMount
}
func (b *vsphereVolumeMounter) GetAttributes() volume.Attributes {
return volume.Attributes{
SupportsSELinux: true,
Managed: true,
}
}
// SetUp attaches the disk and bind mounts to the volume path.
func (b *vsphereVolumeMounter) SetUp(fsGroup *int64) error {
return b.SetUpAt(b.GetPath(), fsGroup)
}
// CanMount checks, prior to mount operations, that the required components
// (binaries, etc.) needed to mount the volume are available on the underlying
// node. If not, it returns an error.
func (b *vsphereVolumeMounter) CanMount() error {
return nil
}
// SetUpAt attaches the disk and bind mounts to the volume path.
func (b *vsphereVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
glog.V(5).Infof("vSphere volume setup %s to %s", b.volPath, dir)
// TODO: handle failed mounts here.
notmnt, err := b.mounter.IsLikelyNotMountPoint(dir)
if err != nil && !os.IsNotExist(err) {
glog.V(4).Infof("IsLikelyNotMountPoint failed: %v", err)
return err
}
if !notmnt {
glog.V(4).Infof("Something is already mounted to target %s", dir)
return nil
}
if err := os.MkdirAll(dir, 0750); err != nil {
glog.V(4).Infof("Could not create directory %s: %v", dir, err)
return err
}
options := []string{"bind"}
// Perform a bind mount to the full path to allow duplicate mounts of the same PD.
globalPDPath := makeGlobalPDPath(b.plugin.host, b.volPath)
err = b.mounter.Mount(globalPDPath, dir, "", options)
if err != nil {
notmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return err
}
if !notmnt {
if mntErr = b.mounter.Unmount(dir); mntErr != nil {
glog.Errorf("Failed to unmount: %v", mntErr)
return err
}
notmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return err
}
if !notmnt {
glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", b.GetPath())
return err
}
}
os.Remove(dir)
return err
}
volume.SetVolumeOwnership(b, fsGroup)
glog.V(3).Infof("vSphere volume %s mounted to %s", b.volPath, dir)
return nil
}
var _ volume.Unmounter = &vsphereVolumeUnmounter{}
type vsphereVolumeUnmounter struct {
*vsphereVolume
}
// Unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (v *vsphereVolumeUnmounter) TearDown() error {
return v.TearDownAt(v.GetPath())
}
// Unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (v *vsphereVolumeUnmounter) TearDownAt(dir string) error {
return util.UnmountPath(dir, v.mounter)
}
func makeGlobalPDPath(host volume.VolumeHost, devName string) string {
return path.Join(host.GetPluginDir(vsphereVolumePluginName), mount.MountsInGlobalPDPath, devName)
}
func (vv *vsphereVolume) GetPath() string {
name := vsphereVolumePluginName
return vv.plugin.host.GetPodVolumeDir(vv.podUID, utilstrings.EscapeQualifiedNameForDisk(name), vv.volName)
}
// vSphere Persistent Volume Plugin
func (plugin *vsphereVolumePlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
return []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
}
}
// vSphere Deletable Volume Plugin
type vsphereVolumeDeleter struct {
*vsphereVolume
}
var _ volume.Deleter = &vsphereVolumeDeleter{}
func (plugin *vsphereVolumePlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
return plugin.newDeleterInternal(spec, &VsphereDiskUtil{})
}
func (plugin *vsphereVolumePlugin) newDeleterInternal(spec *volume.Spec, manager vdManager) (volume.Deleter, error) {
if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.VsphereVolume == nil {
return nil, fmt.Errorf("spec.PersistentVolumeSource.VsphereVolume is nil")
}
return &vsphereVolumeDeleter{
&vsphereVolume{
volName: spec.Name(),
volPath: spec.PersistentVolume.Spec.VsphereVolume.VolumePath,
manager: manager,
plugin: plugin,
}}, nil
}
func (r *vsphereVolumeDeleter) Delete() error {
return r.manager.DeleteVolume(r)
}
// vSphere Provisionable Volume Plugin
type vsphereVolumeProvisioner struct {
*vsphereVolume
options volume.VolumeOptions
}
var _ volume.Provisioner = &vsphereVolumeProvisioner{}
func (plugin *vsphereVolumePlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
return plugin.newProvisionerInternal(options, &VsphereDiskUtil{})
}
func (plugin *vsphereVolumePlugin) newProvisionerInternal(options volume.VolumeOptions, manager vdManager) (volume.Provisioner, error) {
return &vsphereVolumeProvisioner{
vsphereVolume: &vsphereVolume{
manager: manager,
plugin: plugin,
},
options: options,
}, nil
}
func (v *vsphereVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
if !volume.AccessModesContainedInAll(v.plugin.GetAccessModes(), v.options.PVC.Spec.AccessModes) {
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", v.options.PVC.Spec.AccessModes, v.plugin.GetAccessModes())
}
volSpec, err := v.manager.CreateVolume(v)
if err != nil {
return nil, err
}
if volSpec.Fstype == "" {
volSpec.Fstype = "ext4"
}
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: v.options.PVName,
Labels: map[string]string{},
Annotations: map[string]string{
volumehelper.VolumeDynamicallyCreatedByKey: "vsphere-volume-dynamic-provisioner",
},
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: v.options.PersistentVolumeReclaimPolicy,
AccessModes: v.options.PVC.Spec.AccessModes,
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dKi", volSpec.Size)),
},
PersistentVolumeSource: v1.PersistentVolumeSource{
VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
VolumePath: volSpec.Path,
FSType: volSpec.Fstype,
StoragePolicyName: volSpec.StoragePolicyName,
StoragePolicyID: volSpec.StoragePolicyID,
},
},
MountOptions: v.options.MountOptions,
},
}
if len(v.options.PVC.Spec.AccessModes) == 0 {
pv.Spec.AccessModes = v.plugin.GetAccessModes()
}
return pv, nil
}
func getVolumeSource(
spec *volume.Spec) (*v1.VsphereVirtualDiskVolumeSource, bool, error) {
if spec.Volume != nil && spec.Volume.VsphereVolume != nil {
return spec.Volume.VsphereVolume, spec.ReadOnly, nil
} else if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.VsphereVolume != nil {
return spec.PersistentVolume.Spec.VsphereVolume, spec.ReadOnly, nil
}
return nil, false, fmt.Errorf("Spec does not reference a VSphere volume type")
}
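GetPath above resolves to the per-pod volume directory under the kubelet root. The sketch below is only an illustration of that layout, assuming EscapeQualifiedNameForDisk does nothing more than turn the '/' in the plugin name into '~' (which is what produces the kubernetes.io~vsphere-volume directory seen in the tests); the kubelet root shown is a hypothetical value.

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func main() {
	kubeletRoot := "/var/lib/kubelet" // hypothetical root handed to the volume host
	podUID := "poduid"
	volName := "vol1"
	// Assumed behaviour of EscapeQualifiedNameForDisk: "/" becomes "~".
	escaped := strings.Replace("kubernetes.io/vsphere-volume", "/", "~", -1)
	fmt.Println(filepath.Join(kubeletRoot, "pods", podUID, "volumes", escaped, volName))
	// /var/lib/kubelet/pods/poduid/volumes/kubernetes.io~vsphere-volume/vol1
}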

vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_test.go
@@ -0,0 +1,190 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere_volume
import (
"fmt"
"os"
"path"
"testing"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
func TestCanSupport(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("vsphereVolumeTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/vsphere-volume")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if plug.GetPluginName() != "kubernetes.io/vsphere-volume" {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{}}}}) {
t.Errorf("Expected true")
}
if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{}}}}}) {
t.Errorf("Expected true")
}
}
type fakePDManager struct {
}
func getFakeDeviceName(host volume.VolumeHost, volPath string) string {
return path.Join(host.GetPluginDir(vsphereVolumePluginName), "device", volPath)
}
func (fake *fakePDManager) CreateVolume(v *vsphereVolumeProvisioner) (volSpec *VolumeSpec, err error) {
volSpec = &VolumeSpec{
Path: "[local] test-volume-name.vmdk",
Size: 100,
Fstype: "ext4",
StoragePolicyName: "gold",
StoragePolicyID: "1234",
}
return volSpec, nil
}
func (fake *fakePDManager) DeleteVolume(vd *vsphereVolumeDeleter) error {
if vd.volPath != "[local] test-volume-name.vmdk" {
return fmt.Errorf("Deleter got unexpected volume path: %s", vd.volPath)
}
return nil
}
func TestPlugin(t *testing.T) {
// Initial setup to test volume plugin
tmpDir, err := utiltesting.MkTmpdir("vsphereVolumeTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/vsphere-volume")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
spec := &v1.Volume{
Name: "vol1",
VolumeSource: v1.VolumeSource{
VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
VolumePath: "[local] test-volume-name.vmdk",
FSType: "ext4",
},
},
}
// Test Mounter
fakeManager := &fakePDManager{}
fakeMounter := &mount.FakeMounter{}
mounter, err := plug.(*vsphereVolumePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), fakeManager, fakeMounter)
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Errorf("Got a nil Mounter")
}
mntPath := path.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~vsphere-volume/vol1")
path := mounter.GetPath()
if path != mntPath {
t.Errorf("Got unexpected path: %s", path)
}
if err := mounter.SetUp(nil); err != nil {
t.Errorf("Expected success, got: %v", err)
}
// Test Unmounter
fakeManager = &fakePDManager{}
unmounter, err := plug.(*vsphereVolumePlugin).newUnmounterInternal("vol1", types.UID("poduid"), fakeManager, fakeMounter)
if err != nil {
t.Errorf("Failed to make a new Unmounter: %v", err)
}
if unmounter == nil {
t.Errorf("Got a nil Unmounter")
}
if err := unmounter.TearDown(); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(path); err == nil {
t.Errorf("TearDown() failed, volume path still exists: %s", path)
} else if !os.IsNotExist(err) {
t.Errorf("TearDown() failed: %v", err)
}
// Test Provisioner
options := volume.VolumeOptions{
PVC: volumetest.CreateTestPVC("100Mi", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}),
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
}
provisioner, err := plug.(*vsphereVolumePlugin).newProvisionerInternal(options, &fakePDManager{})
if err != nil {
t.Errorf("newProvisionerInternal() failed: %v", err)
}
persistentSpec, err := provisioner.Provision()
if err != nil {
t.Errorf("Provision() failed: %v", err)
}
if persistentSpec.Spec.PersistentVolumeSource.VsphereVolume.VolumePath != "[local] test-volume-name.vmdk" {
t.Errorf("Provision() returned unexpected path %s", persistentSpec.Spec.PersistentVolumeSource.VsphereVolume.VolumePath)
}
if persistentSpec.Spec.PersistentVolumeSource.VsphereVolume.StoragePolicyName != "gold" {
t.Errorf("Provision() returned unexpected storagepolicy name %s", persistentSpec.Spec.PersistentVolumeSource.VsphereVolume.StoragePolicyName)
}
cap := persistentSpec.Spec.Capacity[v1.ResourceStorage]
size := cap.Value()
if size != 100*1024 {
t.Errorf("Provision() returned unexpected volume size: %v", size)
}
// Test Deleter
volSpec := &volume.Spec{
PersistentVolume: persistentSpec,
}
deleter, err := plug.(*vsphereVolumePlugin).newDeleterInternal(volSpec, &fakePDManager{})
if err != nil {
t.Errorf("newDeleterInternal() failed: %v", err)
}
err = deleter.Delete()
if err != nil {
t.Errorf("Deleter() failed: %v", err)
}
}
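The 100*1024 expectation in TestPlugin comes directly from the KiB conversion in CreateVolume (vsphere_volume_util.go below): the requested capacity in bytes is divided by 1024 and rounded up. A minimal sketch of that arithmetic, with roundUpSize standing in for volume.RoundUpSize:

package main

import "fmt"

// roundUpSize mirrors the rounding-up integer division performed by volume.RoundUpSize.
func roundUpSize(volumeSizeBytes, allocationUnitBytes int64) int64 {
	return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes
}

func main() {
	// A 100Mi claim is 104857600 bytes; CreateVolume asks vSphere for 102400 KiB.
	fmt.Println(roundUpSize(100*1024*1024, 1024)) // 102400
}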

vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_util.go
@@ -0,0 +1,261 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere_volume
import (
"errors"
"fmt"
"strconv"
"strings"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)
const (
maxRetries = 10
checkSleepDuration = time.Second
diskByIDPath = "/dev/disk/by-id/"
diskSCSIPrefix = "wwn-0x"
diskformat = "diskformat"
datastore = "datastore"
StoragePolicyName = "storagepolicyname"
HostFailuresToTolerateCapability = "hostfailurestotolerate"
ForceProvisioningCapability = "forceprovisioning"
CacheReservationCapability = "cachereservation"
DiskStripesCapability = "diskstripes"
ObjectSpaceReservationCapability = "objectspacereservation"
IopsLimitCapability = "iopslimit"
HostFailuresToTolerateCapabilityMin = 0
HostFailuresToTolerateCapabilityMax = 3
ForceProvisioningCapabilityMin = 0
ForceProvisioningCapabilityMax = 1
CacheReservationCapabilityMin = 0
CacheReservationCapabilityMax = 100
DiskStripesCapabilityMin = 1
DiskStripesCapabilityMax = 12
ObjectSpaceReservationCapabilityMin = 0
ObjectSpaceReservationCapabilityMax = 100
IopsLimitCapabilityMin = 0
)
var ErrProbeVolume = errors.New("Error scanning attached volumes")
type VsphereDiskUtil struct{}
type VolumeSpec struct {
Path string
Size int
Fstype string
StoragePolicyID string
StoragePolicyName string
}
func verifyDevicePath(path string) (string, error) {
if pathExists, err := volumeutil.PathExists(path); err != nil {
return "", fmt.Errorf("Error checking if path exists: %v", err)
} else if pathExists {
return path, nil
}
return "", nil
}
// CreateVolume creates a vSphere volume.
func (util *VsphereDiskUtil) CreateVolume(v *vsphereVolumeProvisioner) (volSpec *VolumeSpec, err error) {
var fstype string
cloud, err := getCloudProvider(v.plugin.host.GetCloudProvider())
if err != nil {
return nil, err
}
capacity := v.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
volSizeBytes := capacity.Value()
// vSphere works with KiB; convert the requested size to KiB, rounding up.
volSizeKB := int(volume.RoundUpSize(volSizeBytes, 1024))
name := volume.GenerateVolumeName(v.options.ClusterName, v.options.PVName, 255)
volumeOptions := &vclib.VolumeOptions{
CapacityKB: volSizeKB,
Tags: *v.options.CloudTags,
Name: name,
}
// Apply Parameters (case-insensitive). We leave validation of
// the values to the cloud provider.
for parameter, value := range v.options.Parameters {
switch strings.ToLower(parameter) {
case diskformat:
volumeOptions.DiskFormat = value
case datastore:
volumeOptions.Datastore = value
case volume.VolumeParameterFSType:
fstype = value
glog.V(4).Infof("Setting fstype as %q", fstype)
case StoragePolicyName:
volumeOptions.StoragePolicyName = value
glog.V(4).Infof("Setting StoragePolicyName as %q", volumeOptions.StoragePolicyName)
case HostFailuresToTolerateCapability, ForceProvisioningCapability,
CacheReservationCapability, DiskStripesCapability,
ObjectSpaceReservationCapability, IopsLimitCapability:
capabilityData, err := validateVSANCapability(strings.ToLower(parameter), value)
if err != nil {
return nil, err
}
volumeOptions.VSANStorageProfileData += capabilityData
default:
return nil, fmt.Errorf("invalid option %q for volume plugin %s", parameter, v.plugin.GetPluginName())
}
}
if volumeOptions.VSANStorageProfileData != "" {
if volumeOptions.StoragePolicyName != "" {
return nil, fmt.Errorf("Cannot specify storage policy capabilities along with storage policy name. Please specify only one")
}
volumeOptions.VSANStorageProfileData = "(" + volumeOptions.VSANStorageProfileData + ")"
}
glog.V(4).Infof("VSANStorageProfileData in vsphere volume %q", volumeOptions.VSANStorageProfileData)
// TODO: implement PVC.Selector parsing
if v.options.PVC.Spec.Selector != nil {
return nil, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on vSphere")
}
vmDiskPath, err := cloud.CreateVolume(volumeOptions)
if err != nil {
return nil, err
}
volSpec = &VolumeSpec{
Path: vmDiskPath,
Size: volSizeKB,
Fstype: fstype,
StoragePolicyName: volumeOptions.StoragePolicyName,
StoragePolicyID: volumeOptions.StoragePolicyID,
}
glog.V(2).Infof("Successfully created vsphere volume %s", name)
return volSpec, nil
}
// DeleteVolume deletes a vSphere volume.
func (util *VsphereDiskUtil) DeleteVolume(vd *vsphereVolumeDeleter) error {
cloud, err := getCloudProvider(vd.plugin.host.GetCloudProvider())
if err != nil {
return err
}
if err = cloud.DeleteVolume(vd.volPath); err != nil {
glog.V(2).Infof("Error deleting vsphere volume %s: %v", vd.volPath, err)
return err
}
glog.V(2).Infof("Successfully deleted vsphere volume %s", vd.volPath)
return nil
}
func getVolPathfromVolumeName(deviceMountPath string) string {
// Assumption: No file or folder is named starting with '[' in datastore
volPath := deviceMountPath[strings.LastIndex(deviceMountPath, "["):]
// space between datastore and vmdk name in volumePath is encoded as '\040' when returned by GetMountRefs().
// volumePath eg: "[local] xxx.vmdk" provided to attach/mount
// replacing \040 with space to match the actual volumePath
return strings.Replace(volPath, "\\040", " ", -1)
}
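// Illustration of the conversion performed by getVolPathfromVolumeName above
// (the kubelet plugin directory shown is hypothetical): a device mount path
// such as
//   /var/lib/kubelet/plugins/kubernetes.io/vsphere-volume/mounts/[local]\040test-volume-name.vmdk
// is reduced to the datastore path "[local] test-volume-name.vmdk" that
// DetachDisk and DiskIsAttached expect.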
func getCloudProvider(cloud cloudprovider.Interface) (*vsphere.VSphere, error) {
if cloud == nil {
glog.Errorf("Cloud provider not initialized properly")
return nil, errors.New("Cloud provider not initialized properly")
}
vs := cloud.(*vsphere.VSphere)
if vs == nil {
return nil, errors.New("Invalid cloud provider: expected vSphere")
}
return vs, nil
}
// validateVSANCapability validates the capability requirement for the user-specified policy attributes.
func validateVSANCapability(capabilityName string, capabilityValue string) (string, error) {
var capabilityData string
capabilityIntVal, ok := verifyCapabilityValueIsInteger(capabilityValue)
if !ok {
return "", fmt.Errorf("Invalid value for %s. The capabilityValue: %s must be a valid integer value", capabilityName, capabilityValue)
}
switch strings.ToLower(capabilityName) {
case HostFailuresToTolerateCapability:
if capabilityIntVal >= HostFailuresToTolerateCapabilityMin && capabilityIntVal <= HostFailuresToTolerateCapabilityMax {
capabilityData = " (\"hostFailuresToTolerate\" i" + capabilityValue + ")"
} else {
return "", fmt.Errorf(`Invalid value for hostFailuresToTolerate.
The default value is %d, minimum value is %d and maximum value is %d.`,
1, HostFailuresToTolerateCapabilityMin, HostFailuresToTolerateCapabilityMax)
}
case ForceProvisioningCapability:
if capabilityIntVal >= ForceProvisioningCapabilityMin && capabilityIntVal <= ForceProvisioningCapabilityMax {
capabilityData = " (\"forceProvisioning\" i" + capabilityValue + ")"
} else {
return "", fmt.Errorf(`Invalid value for forceProvisioning.
The value can be either %d or %d.`,
ForceProvisioningCapabilityMin, ForceProvisioningCapabilityMax)
}
case CacheReservationCapability:
if capabilityIntVal >= CacheReservationCapabilityMin && capabilityIntVal <= CacheReservationCapabilityMax {
capabilityData = " (\"cacheReservation\" i" + strconv.Itoa(capabilityIntVal*10000) + ")"
} else {
return "", fmt.Errorf(`Invalid value for cacheReservation.
The minimum percentage is %d and maximum percentage is %d.`,
CacheReservationCapabilityMin, CacheReservationCapabilityMax)
}
case DiskStripesCapability:
if capabilityIntVal >= DiskStripesCapabilityMin && capabilityIntVal <= DiskStripesCapabilityMax {
capabilityData = " (\"stripeWidth\" i" + capabilityValue + ")"
} else {
return "", fmt.Errorf(`Invalid value for diskStripes.
The minimum value is %d and maximum value is %d.`,
DiskStripesCapabilityMin, DiskStripesCapabilityMax)
}
case ObjectSpaceReservationCapability:
if capabilityIntVal >= ObjectSpaceReservationCapabilityMin && capabilityIntVal <= ObjectSpaceReservationCapabilityMax {
capabilityData = " (\"proportionalCapacity\" i" + capabilityValue + ")"
} else {
return "", fmt.Errorf(`Invalid value for ObjectSpaceReservation.
The minimum percentage is %d and maximum percentage is %d.`,
ObjectSpaceReservationCapabilityMin, ObjectSpaceReservationCapabilityMax)
}
case IopsLimitCapability:
if capabilityIntVal >= IopsLimitCapabilityMin {
capabilityData = " (\"iopsLimit\" i" + capabilityValue + ")"
} else {
return "", fmt.Errorf(`Invalid value for iopsLimit.
The value should be greater than %d.`, IopsLimitCapabilityMin)
}
}
return capabilityData, nil
}
// Verify that the capability value is a valid integer.
func verifyCapabilityValueIsInteger(capabilityValue string) (int, bool) {
i, err := strconv.Atoi(capabilityValue)
if err != nil {
return -1, false
}
return i, true
}
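Putting CreateVolume and validateVSANCapability together: each VSAN capability parameter contributes one clause to VSANStorageProfileData, and CreateVolume finally wraps the accumulated clauses in parentheses. The snippet below only reproduces that string building for two hypothetical StorageClass parameters (hostfailurestotolerate: "2", diskstripes: "2"); it is a sketch of the resulting profile string, not code from the plugin.

package main

import "fmt"

func main() {
	// Each validated capability yields one clause with a leading space,
	// e.g. ` ("hostFailuresToTolerate" i2)`.
	profile := ` ("hostFailuresToTolerate" i2)` + ` ("stripeWidth" i2)`
	// CreateVolume wraps the accumulated clauses before handing them to vSphere.
	profile = "(" + profile + ")"
	fmt.Println(profile)
	// ( ("hostFailuresToTolerate" i2) ("stripeWidth" i2))
}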