Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-14 02:43:36 +00:00)

Commit: vendor files
72 vendor/k8s.io/kubernetes/pkg/volume/cinder/BUILD (generated, vendored, new file)
@@ -0,0 +1,72 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = [
        "attacher.go",
        "cinder.go",
        "cinder_util.go",
        "doc.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/volume/cinder",
    deps = [
        "//pkg/cloudprovider:go_default_library",
        "//pkg/cloudprovider/providers/openstack:go_default_library",
        "//pkg/kubelet/apis:go_default_library",
        "//pkg/util/keymutex:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/util/strings:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//pkg/volume/util/volumehelper:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/utils/exec:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "attacher_test.go",
        "cinder_test.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/volume/cinder",
    library = ":go_default_library",
    deps = [
        "//pkg/cloudprovider:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/testing:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/client-go/util/testing:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
13 vendor/k8s.io/kubernetes/pkg/volume/cinder/OWNERS (generated, vendored, new file)
@@ -0,0 +1,13 @@
approvers:
- jsafrane
- anguslees
- dims
- FengyunPan
reviewers:
- anguslees
- rootfs
- saad-ali
- jsafrane
- jingxu97
- msau42
- FengyunPan
434 vendor/k8s.io/kubernetes/pkg/volume/cinder/attacher.go (generated, vendored, new file)
@@ -0,0 +1,434 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cinder

import (
	"fmt"
	"os"
	"path"
	"strings"
	"time"

	"github.com/golang/glog"
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/kubernetes/pkg/cloudprovider"
	"k8s.io/kubernetes/pkg/util/mount"
	"k8s.io/kubernetes/pkg/volume"
	volumeutil "k8s.io/kubernetes/pkg/volume/util"
	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)

type cinderDiskAttacher struct {
	host           volume.VolumeHost
	cinderProvider CinderProvider
}

var _ volume.Attacher = &cinderDiskAttacher{}

var _ volume.AttachableVolumePlugin = &cinderPlugin{}

const (
	checkSleepDuration       = 1 * time.Second
	operationFinishInitDelay = 1 * time.Second
	operationFinishFactor    = 1.1
	operationFinishSteps     = 10
	diskAttachInitDelay      = 1 * time.Second
	diskAttachFactor         = 1.2
	diskAttachSteps          = 15
	diskDetachInitDelay      = 1 * time.Second
	diskDetachFactor         = 1.2
	diskDetachSteps          = 13
)

func (plugin *cinderPlugin) NewAttacher() (volume.Attacher, error) {
	cinder, err := getCloudProvider(plugin.host.GetCloudProvider())
	if err != nil {
		return nil, err
	}
	return &cinderDiskAttacher{
		host:           plugin.host,
		cinderProvider: cinder,
	}, nil
}

func (plugin *cinderPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
	mounter := plugin.host.GetMounter(plugin.GetPluginName())
	return mount.GetMountRefs(mounter, deviceMountPath)
}

func (attacher *cinderDiskAttacher) waitOperationFinished(volumeID string) error {
	backoff := wait.Backoff{
		Duration: operationFinishInitDelay,
		Factor:   operationFinishFactor,
		Steps:    operationFinishSteps,
	}

	var volumeStatus string
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		var pending bool
		var err error
		pending, volumeStatus, err = attacher.cinderProvider.OperationPending(volumeID)
		if err != nil {
			return false, err
		}
		return !pending, nil
	})

	if err == wait.ErrWaitTimeout {
		err = fmt.Errorf("Volume %q is %s, can't finish within the allotted time", volumeID, volumeStatus)
	}

	return err
}

func (attacher *cinderDiskAttacher) waitDiskAttached(instanceID, volumeID string) error {
	backoff := wait.Backoff{
		Duration: diskAttachInitDelay,
		Factor:   diskAttachFactor,
		Steps:    diskAttachSteps,
	}

	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		attached, err := attacher.cinderProvider.DiskIsAttached(instanceID, volumeID)
		if err != nil {
			return false, err
		}
		return attached, nil
	})

	if err == wait.ErrWaitTimeout {
		err = fmt.Errorf("Volume %q failed to be attached within the allotted time", volumeID)
	}

	return err
}

func (attacher *cinderDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
	volumeSource, _, err := getVolumeSource(spec)
	if err != nil {
		return "", err
	}

	volumeID := volumeSource.VolumeID

	instanceID, err := attacher.nodeInstanceID(nodeName)
	if err != nil {
		return "", err
	}

	if err := attacher.waitOperationFinished(volumeID); err != nil {
		return "", err
	}

	attached, err := attacher.cinderProvider.DiskIsAttached(instanceID, volumeID)
	if err != nil {
		// Log error and continue with attach
		glog.Warningf(
			"Error checking if volume (%q) is already attached to current instance (%q). Will continue and try attach anyway. err=%v",
			volumeID, instanceID, err)
	}

	if err == nil && attached {
		// Volume is already attached to instance.
		glog.Infof("Attach operation is successful. volume %q is already attached to instance %q.", volumeID, instanceID)
	} else {
		_, err = attacher.cinderProvider.AttachDisk(instanceID, volumeID)
		if err == nil {
			if err = attacher.waitDiskAttached(instanceID, volumeID); err != nil {
				glog.Errorf("Error waiting for volume %q to be attached to node %q: %v", volumeID, nodeName, err)
				return "", err
			}
			glog.Infof("Attach operation successful: volume %q attached to instance %q.", volumeID, instanceID)
		} else {
			glog.Infof("Attach volume %q to instance %q failed with: %v", volumeID, instanceID, err)
			return "", err
		}
	}

	devicePath, err := attacher.cinderProvider.GetAttachmentDiskPath(instanceID, volumeID)
	if err != nil {
		glog.Infof("Cannot get device path of volume %q attached to instance %q, failed with: %v", volumeID, instanceID, err)
		return "", err
	}

	return devicePath, nil
}

func (attacher *cinderDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
	volumesAttachedCheck := make(map[*volume.Spec]bool)
	volumeSpecMap := make(map[string]*volume.Spec)
	volumeIDList := []string{}
	for _, spec := range specs {
		volumeSource, _, err := getVolumeSource(spec)
		if err != nil {
			glog.Errorf("Error getting volume (%q) source: %v", spec.Name(), err)
			continue
		}

		volumeIDList = append(volumeIDList, volumeSource.VolumeID)
		volumesAttachedCheck[spec] = true
		volumeSpecMap[volumeSource.VolumeID] = spec
	}

	instanceID, err := attacher.nodeInstanceID(nodeName)
	if err != nil {
		if err == cloudprovider.InstanceNotFound {
			// If the node doesn't exist, OpenStack Nova will assume the volumes are not attached to it.
			// Mark the volumes as detached and return false without error.
			glog.Warningf("VolumesAreAttached: node %q does not exist.", nodeName)
			for spec := range volumesAttachedCheck {
				volumesAttachedCheck[spec] = false
			}

			return volumesAttachedCheck, nil
		}

		return volumesAttachedCheck, err
	}

	attachedResult, err := attacher.cinderProvider.DisksAreAttached(instanceID, volumeIDList)
	if err != nil {
		// Log error and continue with attach
		glog.Errorf(
			"Error checking if volumes (%v) are already attached to current node (%q). Will continue and try attach anyway. err=%v",
			volumeIDList, nodeName, err)
		return volumesAttachedCheck, err
	}

	for volumeID, attached := range attachedResult {
		if !attached {
			spec := volumeSpecMap[volumeID]
			volumesAttachedCheck[spec] = false
			glog.V(2).Infof("VolumesAreAttached: check volume %q (specName: %q) is no longer attached", volumeID, spec.Name())
		}
	}
	return volumesAttachedCheck, nil
}

func (attacher *cinderDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) {
	// NOTE: devicePath is the path as reported by Cinder, which may be incorrect and should not be used. See Issue #33128
	volumeSource, _, err := getVolumeSource(spec)
	if err != nil {
		return "", err
	}

	volumeID := volumeSource.VolumeID

	if devicePath == "" {
		return "", fmt.Errorf("WaitForAttach failed for Cinder disk %q: devicePath is empty.", volumeID)
	}

	ticker := time.NewTicker(checkSleepDuration)
	defer ticker.Stop()
	timer := time.NewTimer(timeout)
	defer timer.Stop()

	for {
		select {
		case <-ticker.C:
			glog.V(5).Infof("Checking Cinder disk %q is attached.", volumeID)
			probeAttachedVolume()
			if !attacher.cinderProvider.ShouldTrustDevicePath() {
				// Using the Cinder volume ID, find the real device path (See Issue #33128)
				devicePath = attacher.cinderProvider.GetDevicePath(volumeID)
			}
			exists, err := volumeutil.PathExists(devicePath)
			if exists && err == nil {
				glog.Infof("Successfully found attached Cinder disk %q at %v.", volumeID, devicePath)
				return devicePath, nil
			} else {
				// Log an error, and continue checking periodically
				glog.Errorf("Error: could not find attached Cinder disk %q (path: %q): %v", volumeID, devicePath, err)
			}
		case <-timer.C:
			return "", fmt.Errorf("Could not find attached Cinder disk %q. Timeout waiting for mount paths to be created.", volumeID)
		}
	}
}

func (attacher *cinderDiskAttacher) GetDeviceMountPath(
	spec *volume.Spec) (string, error) {
	volumeSource, _, err := getVolumeSource(spec)
	if err != nil {
		return "", err
	}

	return makeGlobalPDName(attacher.host, volumeSource.VolumeID), nil
}

// FIXME: this method can be further pruned.
func (attacher *cinderDiskAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
	mounter := attacher.host.GetMounter(cinderVolumePluginName)
	notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
	if err != nil {
		if os.IsNotExist(err) {
			if err := os.MkdirAll(deviceMountPath, 0750); err != nil {
				return err
			}
			notMnt = true
		} else {
			return err
		}
	}

	volumeSource, readOnly, err := getVolumeSource(spec)
	if err != nil {
		return err
	}

	options := []string{}
	if readOnly {
		options = append(options, "ro")
	}
	if notMnt {
		diskMounter := volumehelper.NewSafeFormatAndMountFromHost(cinderVolumePluginName, attacher.host)
		mountOptions := volume.MountOptionFromSpec(spec, options...)
		err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions)
		if err != nil {
			os.Remove(deviceMountPath)
			return err
		}
	}
	return nil
}

type cinderDiskDetacher struct {
	mounter        mount.Interface
	cinderProvider CinderProvider
}

var _ volume.Detacher = &cinderDiskDetacher{}

func (plugin *cinderPlugin) NewDetacher() (volume.Detacher, error) {
	cinder, err := getCloudProvider(plugin.host.GetCloudProvider())
	if err != nil {
		return nil, err
	}
	return &cinderDiskDetacher{
		mounter:        plugin.host.GetMounter(plugin.GetPluginName()),
		cinderProvider: cinder,
	}, nil
}

func (detacher *cinderDiskDetacher) waitOperationFinished(volumeID string) error {
	backoff := wait.Backoff{
		Duration: operationFinishInitDelay,
		Factor:   operationFinishFactor,
		Steps:    operationFinishSteps,
	}

	var volumeStatus string
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		var pending bool
		var err error
		pending, volumeStatus, err = detacher.cinderProvider.OperationPending(volumeID)
		if err != nil {
			return false, err
		}
		return !pending, nil
	})

	if err == wait.ErrWaitTimeout {
		err = fmt.Errorf("Volume %q is %s, can't finish within the allotted time", volumeID, volumeStatus)
	}

	return err
}

func (detacher *cinderDiskDetacher) waitDiskDetached(instanceID, volumeID string) error {
	backoff := wait.Backoff{
		Duration: diskDetachInitDelay,
		Factor:   diskDetachFactor,
		Steps:    diskDetachSteps,
	}

	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		attached, err := detacher.cinderProvider.DiskIsAttached(instanceID, volumeID)
		if err != nil {
			return false, err
		}
		return !attached, nil
	})

	if err == wait.ErrWaitTimeout {
		err = fmt.Errorf("Volume %q failed to detach within the allotted time", volumeID)
	}

	return err
}

func (detacher *cinderDiskDetacher) Detach(volumeName string, nodeName types.NodeName) error {
	volumeID := path.Base(volumeName)
	instances, res := detacher.cinderProvider.Instances()
	if !res {
		return fmt.Errorf("failed to list openstack instances")
	}
	instanceID, err := instances.InstanceID(nodeName)
	if ind := strings.LastIndex(instanceID, "/"); ind >= 0 {
		instanceID = instanceID[(ind + 1):]
	}

	if err := detacher.waitOperationFinished(volumeID); err != nil {
		return err
	}

	attached, err := detacher.cinderProvider.DiskIsAttached(instanceID, volumeID)
	if err != nil {
		// Log error and continue with detach
		glog.Errorf(
			"Error checking if volume (%q) is already attached to current node (%q). Will continue and try detach anyway. err=%v",
			volumeID, nodeName, err)
	}

	if err == nil && !attached {
		// Volume is already detached from node.
		glog.Infof("detach operation was successful. volume %q is already detached from node %q.", volumeID, nodeName)
		return nil
	}

	if err = detacher.cinderProvider.DetachDisk(instanceID, volumeID); err != nil {
		glog.Errorf("Error detaching volume %q from node %q: %v", volumeID, nodeName, err)
		return err
	}
	if err = detacher.waitDiskDetached(instanceID, volumeID); err != nil {
		glog.Errorf("Error waiting for volume %q to detach from node %q: %v", volumeID, nodeName, err)
		return err
	}
	glog.Infof("detached volume %q from node %q", volumeID, nodeName)
	return nil
}

func (detacher *cinderDiskDetacher) UnmountDevice(deviceMountPath string) error {
	return volumeutil.UnmountPath(deviceMountPath, detacher.mounter)
}

func (attacher *cinderDiskAttacher) nodeInstanceID(nodeName types.NodeName) (string, error) {
	instances, res := attacher.cinderProvider.Instances()
	if !res {
		return "", fmt.Errorf("failed to list openstack instances")
	}
	instanceID, err := instances.InstanceID(nodeName)
	if err != nil {
		return "", err
	}
	if ind := strings.LastIndex(instanceID, "/"); ind >= 0 {
		instanceID = instanceID[(ind + 1):]
	}
	return instanceID, nil
}
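The attach and detach paths in attacher.go bound their polling with wait.ExponentialBackoff. The following is a minimal, self-contained sketch, not part of the vendored sources, that reproduces the arithmetic behind those constants: each retry sleeps roughly Duration * Factor^i, so the worst-case polling budget is the geometric series summed over Steps attempts (an upper bound, since the real wait.ExponentialBackoff may skip the final sleep). The helper name cumulativeBackoff is illustrative.

// Standalone sketch; mirrors the backoff constants from attacher.go above.
package main

import (
	"fmt"
	"time"
)

// cumulativeBackoff sums the per-attempt delays for the given parameters,
// approximating the worst-case time spent inside an exponential backoff loop.
func cumulativeBackoff(initial time.Duration, factor float64, steps int) time.Duration {
	total := time.Duration(0)
	delay := initial
	for i := 0; i < steps; i++ {
		total += delay
		delay = time.Duration(float64(delay) * factor)
	}
	return total
}

func main() {
	// Parameter triples mirrored from the attacher/detacher code above.
	fmt.Println("operation finish budget:", cumulativeBackoff(time.Second, 1.1, 10)) // roughly 16s
	fmt.Println("disk attach budget:     ", cumulativeBackoff(time.Second, 1.2, 15)) // roughly 72s
	fmt.Println("disk detach budget:     ", cumulativeBackoff(time.Second, 1.2, 13)) // roughly 48s
}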
672 vendor/k8s.io/kubernetes/pkg/volume/cinder/attacher_test.go (generated, vendored, new file)
@@ -0,0 +1,672 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cinder
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
volumetest "k8s.io/kubernetes/pkg/volume/testing"
|
||||
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
|
||||
const (
|
||||
VolumeStatusPending = "pending"
|
||||
VolumeStatusDone = "done"
|
||||
)
|
||||
|
||||
var attachStatus = "Attach"
|
||||
var detachStatus = "Detach"
|
||||
|
||||
func TestGetDeviceName_Volume(t *testing.T) {
|
||||
plugin := newPlugin()
|
||||
name := "my-cinder-volume"
|
||||
spec := createVolSpec(name, false)
|
||||
|
||||
deviceName, err := plugin.GetVolumeName(spec)
|
||||
if err != nil {
|
||||
t.Errorf("GetDeviceName error: %v", err)
|
||||
}
|
||||
if deviceName != name {
|
||||
t.Errorf("GetDeviceName error: expected %s, got %s", name, deviceName)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetDeviceName_PersistentVolume(t *testing.T) {
|
||||
plugin := newPlugin()
|
||||
name := "my-cinder-pv"
|
||||
spec := createPVSpec(name, true)
|
||||
|
||||
deviceName, err := plugin.GetVolumeName(spec)
|
||||
if err != nil {
|
||||
t.Errorf("GetDeviceName error: %v", err)
|
||||
}
|
||||
if deviceName != name {
|
||||
t.Errorf("GetDeviceName error: expected %s, got %s", name, deviceName)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetDeviceMountPath(t *testing.T) {
|
||||
name := "cinder-volume-id"
|
||||
spec := createVolSpec(name, false)
|
||||
rootDir := "/var/lib/kubelet/"
|
||||
host := volumetest.NewFakeVolumeHost(rootDir, nil, nil)
|
||||
|
||||
attacher := &cinderDiskAttacher{
|
||||
host: host,
|
||||
}
|
||||
|
||||
//test the path
|
||||
path, err := attacher.GetDeviceMountPath(spec)
|
||||
if err != nil {
|
||||
t.Errorf("Get device mount path error")
|
||||
}
|
||||
expectedPath := rootDir + "plugins/kubernetes.io/cinder/mounts/" + name
|
||||
if path != expectedPath {
|
||||
t.Errorf("Device mount path error: expected %s, got %s ", expectedPath, path)
|
||||
}
|
||||
}
|
||||
|
||||
// One testcase for TestAttachDetach table test below
|
||||
type testcase struct {
|
||||
name string
|
||||
// For fake GCE:
|
||||
attach attachCall
|
||||
detach detachCall
|
||||
operationPending operationPendingCall
|
||||
diskIsAttached diskIsAttachedCall
|
||||
disksAreAttached disksAreAttachedCall
|
||||
diskPath diskPathCall
|
||||
t *testing.T
|
||||
attachOrDetach *string
|
||||
|
||||
instanceID string
|
||||
// Actual test to run
|
||||
test func(test *testcase) (string, error)
|
||||
// Expected return of the test
|
||||
expectedResult string
|
||||
expectedError error
|
||||
}
|
||||
|
||||
func TestAttachDetach(t *testing.T) {
|
||||
volumeID := "disk"
|
||||
instanceID := "instance"
|
||||
pending := VolumeStatusPending
|
||||
done := VolumeStatusDone
|
||||
nodeName := types.NodeName("nodeName")
|
||||
readOnly := false
|
||||
spec := createVolSpec(volumeID, readOnly)
|
||||
attachError := errors.New("Fake attach error")
|
||||
detachError := errors.New("Fake detach error")
|
||||
diskCheckError := errors.New("Fake DiskIsAttached error")
|
||||
diskPathError := errors.New("Fake GetAttachmentDiskPath error")
|
||||
disksCheckError := errors.New("Fake DisksAreAttached error")
|
||||
operationFinishTimeout := errors.New("Fake waitOperationFinished error")
|
||||
tests := []testcase{
|
||||
// Successful Attach call
|
||||
{
|
||||
name: "Attach_Positive",
|
||||
instanceID: instanceID,
|
||||
operationPending: operationPendingCall{volumeID, false, done, nil},
|
||||
diskIsAttached: diskIsAttachedCall{instanceID, volumeID, false, nil},
|
||||
attach: attachCall{instanceID, volumeID, "", nil},
|
||||
diskPath: diskPathCall{instanceID, volumeID, "/dev/sda", nil},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
attacher := newAttacher(testcase)
|
||||
return attacher.Attach(spec, nodeName)
|
||||
},
|
||||
expectedResult: "/dev/sda",
|
||||
},
|
||||
|
||||
// Disk is already attached
|
||||
{
|
||||
name: "Attach_Positive_AlreadyAttached",
|
||||
instanceID: instanceID,
|
||||
operationPending: operationPendingCall{volumeID, false, done, nil},
|
||||
diskIsAttached: diskIsAttachedCall{instanceID, volumeID, true, nil},
|
||||
diskPath: diskPathCall{instanceID, volumeID, "/dev/sda", nil},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
attacher := newAttacher(testcase)
|
||||
return attacher.Attach(spec, nodeName)
|
||||
},
|
||||
expectedResult: "/dev/sda",
|
||||
},
|
||||
|
||||
// Disk is attaching
|
||||
{
|
||||
name: "Attach_is_attaching",
|
||||
instanceID: instanceID,
|
||||
operationPending: operationPendingCall{volumeID, true, pending, operationFinishTimeout},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
attacher := newAttacher(testcase)
|
||||
return attacher.Attach(spec, nodeName)
|
||||
},
|
||||
expectedError: operationFinishTimeout,
|
||||
},
|
||||
|
||||
// Attach call fails
|
||||
{
|
||||
name: "Attach_Negative",
|
||||
instanceID: instanceID,
|
||||
operationPending: operationPendingCall{volumeID, false, done, nil},
|
||||
diskIsAttached: diskIsAttachedCall{instanceID, volumeID, false, diskCheckError},
|
||||
attach: attachCall{instanceID, volumeID, "/dev/sda", attachError},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
attacher := newAttacher(testcase)
|
||||
return attacher.Attach(spec, nodeName)
|
||||
},
|
||||
expectedError: attachError,
|
||||
},
|
||||
|
||||
// GetAttachmentDiskPath call fails
|
||||
{
|
||||
name: "Attach_Negative_DiskPatchFails",
|
||||
instanceID: instanceID,
|
||||
operationPending: operationPendingCall{volumeID, false, done, nil},
|
||||
diskIsAttached: diskIsAttachedCall{instanceID, volumeID, false, nil},
|
||||
attach: attachCall{instanceID, volumeID, "", nil},
|
||||
diskPath: diskPathCall{instanceID, volumeID, "", diskPathError},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
attacher := newAttacher(testcase)
|
||||
return attacher.Attach(spec, nodeName)
|
||||
},
|
||||
expectedError: diskPathError,
|
||||
},
|
||||
|
||||
// Successful VolumesAreAttached call, attached
|
||||
{
|
||||
name: "VolumesAreAttached_Positive",
|
||||
instanceID: instanceID,
|
||||
disksAreAttached: disksAreAttachedCall{instanceID, []string{volumeID}, map[string]bool{volumeID: true}, nil},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
attacher := newAttacher(testcase)
|
||||
attachments, err := attacher.VolumesAreAttached([]*volume.Spec{spec}, nodeName)
|
||||
return serializeAttachments(attachments), err
|
||||
},
|
||||
expectedResult: serializeAttachments(map[*volume.Spec]bool{spec: true}),
|
||||
},
|
||||
|
||||
// Successful VolumesAreAttached call, not attached
|
||||
{
|
||||
name: "VolumesAreAttached_Negative",
|
||||
instanceID: instanceID,
|
||||
disksAreAttached: disksAreAttachedCall{instanceID, []string{volumeID}, map[string]bool{volumeID: false}, nil},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
attacher := newAttacher(testcase)
|
||||
attachments, err := attacher.VolumesAreAttached([]*volume.Spec{spec}, nodeName)
|
||||
return serializeAttachments(attachments), err
|
||||
},
|
||||
expectedResult: serializeAttachments(map[*volume.Spec]bool{spec: false}),
|
||||
},
|
||||
|
||||
// Treat as attached when DisksAreAttached call fails
|
||||
{
|
||||
name: "VolumesAreAttached_CinderFailed",
|
||||
instanceID: instanceID,
|
||||
disksAreAttached: disksAreAttachedCall{instanceID, []string{volumeID}, nil, disksCheckError},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
attacher := newAttacher(testcase)
|
||||
attachments, err := attacher.VolumesAreAttached([]*volume.Spec{spec}, nodeName)
|
||||
return serializeAttachments(attachments), err
|
||||
},
|
||||
expectedResult: serializeAttachments(map[*volume.Spec]bool{spec: true}),
|
||||
expectedError: disksCheckError,
|
||||
},
|
||||
|
||||
// Detach succeeds
|
||||
{
|
||||
name: "Detach_Positive",
|
||||
instanceID: instanceID,
|
||||
operationPending: operationPendingCall{volumeID, false, done, nil},
|
||||
diskIsAttached: diskIsAttachedCall{instanceID, volumeID, true, nil},
|
||||
detach: detachCall{instanceID, volumeID, nil},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
detacher := newDetacher(testcase)
|
||||
return "", detacher.Detach(volumeID, nodeName)
|
||||
},
|
||||
},
|
||||
|
||||
// Disk is already detached
|
||||
{
|
||||
name: "Detach_Positive_AlreadyDetached",
|
||||
instanceID: instanceID,
|
||||
operationPending: operationPendingCall{volumeID, false, done, nil},
|
||||
diskIsAttached: diskIsAttachedCall{instanceID, volumeID, false, nil},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
detacher := newDetacher(testcase)
|
||||
return "", detacher.Detach(volumeID, nodeName)
|
||||
},
|
||||
},
|
||||
|
||||
// Detach succeeds when DiskIsAttached fails
|
||||
{
|
||||
name: "Detach_Positive_CheckFails",
|
||||
instanceID: instanceID,
|
||||
operationPending: operationPendingCall{volumeID, false, done, nil},
|
||||
diskIsAttached: diskIsAttachedCall{instanceID, volumeID, false, diskCheckError},
|
||||
detach: detachCall{instanceID, volumeID, nil},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
detacher := newDetacher(testcase)
|
||||
return "", detacher.Detach(volumeID, nodeName)
|
||||
},
|
||||
},
|
||||
|
||||
// Detach fails
|
||||
{
|
||||
name: "Detach_Negative",
|
||||
instanceID: instanceID,
|
||||
operationPending: operationPendingCall{volumeID, false, done, nil},
|
||||
diskIsAttached: diskIsAttachedCall{instanceID, volumeID, false, diskCheckError},
|
||||
detach: detachCall{instanceID, volumeID, detachError},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
detacher := newDetacher(testcase)
|
||||
return "", detacher.Detach(volumeID, nodeName)
|
||||
},
|
||||
expectedError: detachError,
|
||||
},
|
||||
|
||||
// // Disk is detaching
|
||||
{
|
||||
name: "Detach_Is_Detaching",
|
||||
instanceID: instanceID,
|
||||
operationPending: operationPendingCall{volumeID, true, pending, operationFinishTimeout},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
detacher := newDetacher(testcase)
|
||||
return "", detacher.Detach(volumeID, nodeName)
|
||||
},
|
||||
expectedError: operationFinishTimeout,
|
||||
},
|
||||
}
|
||||
|
||||
for _, testcase := range tests {
|
||||
testcase.t = t
|
||||
attachOrDetach := ""
|
||||
testcase.attachOrDetach = &attachOrDetach
|
||||
result, err := testcase.test(&testcase)
|
||||
if err != testcase.expectedError {
|
||||
t.Errorf("%s failed: expected err=%q, got %q", testcase.name, testcase.expectedError, err)
|
||||
}
|
||||
if result != testcase.expectedResult {
|
||||
t.Errorf("%s failed: expected result=%q, got %q", testcase.name, testcase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type volumeAttachmentFlag struct {
|
||||
volumeID string
|
||||
attached bool
|
||||
}
|
||||
|
||||
type volumeAttachmentFlags []volumeAttachmentFlag
|
||||
|
||||
func (va volumeAttachmentFlags) Len() int {
|
||||
return len(va)
|
||||
}
|
||||
|
||||
func (va volumeAttachmentFlags) Swap(i, j int) {
|
||||
va[i], va[j] = va[j], va[i]
|
||||
}
|
||||
|
||||
func (va volumeAttachmentFlags) Less(i, j int) bool {
|
||||
if va[i].volumeID < va[j].volumeID {
|
||||
return true
|
||||
}
|
||||
if va[i].volumeID > va[j].volumeID {
|
||||
return false
|
||||
}
|
||||
return va[j].attached
|
||||
}
|
||||
|
||||
func serializeAttachments(attachments map[*volume.Spec]bool) string {
|
||||
var attachmentFlags volumeAttachmentFlags
|
||||
for spec, attached := range attachments {
|
||||
attachmentFlags = append(attachmentFlags, volumeAttachmentFlag{spec.Name(), attached})
|
||||
}
|
||||
sort.Sort(attachmentFlags)
|
||||
return fmt.Sprint(attachmentFlags)
|
||||
}
|
||||
|
||||
// newPlugin creates a new gcePersistentDiskPlugin with fake cloud, NewAttacher
|
||||
// and NewDetacher won't work.
|
||||
func newPlugin() *cinderPlugin {
|
||||
host := volumetest.NewFakeVolumeHost("/tmp", nil, nil)
|
||||
plugins := ProbeVolumePlugins()
|
||||
plugin := plugins[0]
|
||||
plugin.Init(host)
|
||||
return plugin.(*cinderPlugin)
|
||||
}
|
||||
|
||||
func newAttacher(testcase *testcase) *cinderDiskAttacher {
|
||||
return &cinderDiskAttacher{
|
||||
host: nil,
|
||||
cinderProvider: testcase,
|
||||
}
|
||||
}
|
||||
|
||||
func newDetacher(testcase *testcase) *cinderDiskDetacher {
|
||||
return &cinderDiskDetacher{
|
||||
cinderProvider: testcase,
|
||||
}
|
||||
}
|
||||
|
||||
func createVolSpec(name string, readOnly bool) *volume.Spec {
|
||||
return &volume.Spec{
|
||||
Volume: &v1.Volume{
|
||||
Name: name,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
Cinder: &v1.CinderVolumeSource{
|
||||
VolumeID: name,
|
||||
ReadOnly: readOnly,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func createPVSpec(name string, readOnly bool) *volume.Spec {
|
||||
return &volume.Spec{
|
||||
PersistentVolume: &v1.PersistentVolume{
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
Cinder: &v1.CinderVolumeSource{
|
||||
VolumeID: name,
|
||||
ReadOnly: readOnly,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Fake GCE implementation
|
||||
|
||||
type attachCall struct {
|
||||
instanceID string
|
||||
volumeID string
|
||||
retDeviceName string
|
||||
ret error
|
||||
}
|
||||
|
||||
type detachCall struct {
|
||||
instanceID string
|
||||
devicePath string
|
||||
ret error
|
||||
}
|
||||
|
||||
type operationPendingCall struct {
|
||||
diskName string
|
||||
pending bool
|
||||
volumeStatus string
|
||||
ret error
|
||||
}
|
||||
|
||||
type diskIsAttachedCall struct {
|
||||
instanceID string
|
||||
volumeID string
|
||||
isAttached bool
|
||||
ret error
|
||||
}
|
||||
|
||||
type diskPathCall struct {
|
||||
instanceID string
|
||||
volumeID string
|
||||
retPath string
|
||||
ret error
|
||||
}
|
||||
|
||||
type disksAreAttachedCall struct {
|
||||
instanceID string
|
||||
volumeIDs []string
|
||||
areAttached map[string]bool
|
||||
ret error
|
||||
}
|
||||
|
||||
func (testcase *testcase) AttachDisk(instanceID, volumeID string) (string, error) {
|
||||
expected := &testcase.attach
|
||||
|
||||
if expected.volumeID == "" && expected.instanceID == "" {
|
||||
// testcase.attach looks uninitialized, test did not expect to call
|
||||
// AttachDisk
|
||||
testcase.t.Errorf("Unexpected AttachDisk call!")
|
||||
return "", errors.New("Unexpected AttachDisk call!")
|
||||
}
|
||||
|
||||
if expected.volumeID != volumeID {
|
||||
testcase.t.Errorf("Unexpected AttachDisk call: expected volumeID %s, got %s", expected.volumeID, volumeID)
|
||||
return "", errors.New("Unexpected AttachDisk call: wrong volumeID")
|
||||
}
|
||||
|
||||
if expected.instanceID != instanceID {
|
||||
testcase.t.Errorf("Unexpected AttachDisk call: expected instanceID %s, got %s", expected.instanceID, instanceID)
|
||||
return "", errors.New("Unexpected AttachDisk call: wrong instanceID")
|
||||
}
|
||||
|
||||
glog.V(4).Infof("AttachDisk call: %s, %s, returning %q, %v", volumeID, instanceID, expected.retDeviceName, expected.ret)
|
||||
|
||||
testcase.attachOrDetach = &attachStatus
|
||||
return expected.retDeviceName, expected.ret
|
||||
}
|
||||
|
||||
func (testcase *testcase) DetachDisk(instanceID, volumeID string) error {
|
||||
expected := &testcase.detach
|
||||
|
||||
if expected.devicePath == "" && expected.instanceID == "" {
|
||||
// testcase.detach looks uninitialized, test did not expect to call
|
||||
// DetachDisk
|
||||
testcase.t.Errorf("Unexpected DetachDisk call!")
|
||||
return errors.New("Unexpected DetachDisk call!")
|
||||
}
|
||||
|
||||
if expected.devicePath != volumeID {
|
||||
testcase.t.Errorf("Unexpected DetachDisk call: expected volumeID %s, got %s", expected.devicePath, volumeID)
|
||||
return errors.New("Unexpected DetachDisk call: wrong volumeID")
|
||||
}
|
||||
|
||||
if expected.instanceID != instanceID {
|
||||
testcase.t.Errorf("Unexpected DetachDisk call: expected instanceID %s, got %s", expected.instanceID, instanceID)
|
||||
return errors.New("Unexpected DetachDisk call: wrong instanceID")
|
||||
}
|
||||
|
||||
glog.V(4).Infof("DetachDisk call: %s, %s, returning %v", volumeID, instanceID, expected.ret)
|
||||
|
||||
testcase.attachOrDetach = &detachStatus
|
||||
return expected.ret
|
||||
}
|
||||
|
||||
func (testcase *testcase) OperationPending(diskName string) (bool, string, error) {
|
||||
expected := &testcase.operationPending
|
||||
|
||||
if expected.volumeStatus == VolumeStatusPending {
|
||||
glog.V(4).Infof("OperationPending call: %s, returning %v, %v, %v", diskName, expected.pending, expected.volumeStatus, expected.ret)
|
||||
return true, expected.volumeStatus, expected.ret
|
||||
}
|
||||
|
||||
glog.V(4).Infof("OperationPending call: %s, returning %v, %v, %v", diskName, expected.pending, expected.volumeStatus, expected.ret)
|
||||
|
||||
return false, expected.volumeStatus, expected.ret
|
||||
}
|
||||
|
||||
func (testcase *testcase) DiskIsAttached(instanceID, volumeID string) (bool, error) {
|
||||
expected := &testcase.diskIsAttached
|
||||
// If testcase call DetachDisk*, return false
|
||||
if *testcase.attachOrDetach == detachStatus {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// If testcase call AttachDisk*, return true
|
||||
if *testcase.attachOrDetach == attachStatus {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
if expected.volumeID == "" && expected.instanceID == "" {
|
||||
// testcase.diskIsAttached looks uninitialized, test did not expect to
|
||||
// call DiskIsAttached
|
||||
testcase.t.Errorf("Unexpected DiskIsAttached call!")
|
||||
return false, errors.New("Unexpected DiskIsAttached call!")
|
||||
}
|
||||
|
||||
if expected.volumeID != volumeID {
|
||||
testcase.t.Errorf("Unexpected DiskIsAttached call: expected volumeID %s, got %s", expected.volumeID, volumeID)
|
||||
return false, errors.New("Unexpected DiskIsAttached call: wrong volumeID")
|
||||
}
|
||||
|
||||
if expected.instanceID != instanceID {
|
||||
testcase.t.Errorf("Unexpected DiskIsAttached call: expected instanceID %s, got %s", expected.instanceID, instanceID)
|
||||
return false, errors.New("Unexpected DiskIsAttached call: wrong instanceID")
|
||||
}
|
||||
|
||||
glog.V(4).Infof("DiskIsAttached call: %s, %s, returning %v, %v", volumeID, instanceID, expected.isAttached, expected.ret)
|
||||
|
||||
return expected.isAttached, expected.ret
|
||||
}
|
||||
|
||||
func (testcase *testcase) GetAttachmentDiskPath(instanceID, volumeID string) (string, error) {
|
||||
expected := &testcase.diskPath
|
||||
if expected.volumeID == "" && expected.instanceID == "" {
|
||||
// testcase.diskPath looks uninitialized, test did not expect to
|
||||
// call GetAttachmentDiskPath
|
||||
testcase.t.Errorf("Unexpected GetAttachmentDiskPath call!")
|
||||
return "", errors.New("Unexpected GetAttachmentDiskPath call!")
|
||||
}
|
||||
|
||||
if expected.volumeID != volumeID {
|
||||
testcase.t.Errorf("Unexpected GetAttachmentDiskPath call: expected volumeID %s, got %s", expected.volumeID, volumeID)
|
||||
return "", errors.New("Unexpected GetAttachmentDiskPath call: wrong volumeID")
|
||||
}
|
||||
|
||||
if expected.instanceID != instanceID {
|
||||
testcase.t.Errorf("Unexpected GetAttachmentDiskPath call: expected instanceID %s, got %s", expected.instanceID, instanceID)
|
||||
return "", errors.New("Unexpected GetAttachmentDiskPath call: wrong instanceID")
|
||||
}
|
||||
|
||||
glog.V(4).Infof("GetAttachmentDiskPath call: %s, %s, returning %v, %v", volumeID, instanceID, expected.retPath, expected.ret)
|
||||
|
||||
return expected.retPath, expected.ret
|
||||
}
|
||||
|
||||
func (testcase *testcase) ShouldTrustDevicePath() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (testcase *testcase) CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, bool, error) {
|
||||
return "", "", false, errors.New("Not implemented")
|
||||
}
|
||||
|
||||
func (testcase *testcase) GetDevicePath(volumeID string) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (testcase *testcase) InstanceID() (string, error) {
|
||||
return testcase.instanceID, nil
|
||||
}
|
||||
|
||||
func (testcase *testcase) ExpandVolume(volumeID string, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error) {
|
||||
return resource.Quantity{}, nil
|
||||
}
|
||||
|
||||
func (testcase *testcase) DeleteVolume(volumeID string) error {
|
||||
return errors.New("Not implemented")
|
||||
}
|
||||
|
||||
func (testcase *testcase) GetAutoLabelsForPD(name string) (map[string]string, error) {
|
||||
return map[string]string{}, errors.New("Not implemented")
|
||||
}
|
||||
|
||||
func (testcase *testcase) Instances() (cloudprovider.Instances, bool) {
|
||||
return &instances{testcase.instanceID}, true
|
||||
}
|
||||
|
||||
func (testcase *testcase) DisksAreAttached(instanceID string, volumeIDs []string) (map[string]bool, error) {
|
||||
expected := &testcase.disksAreAttached
|
||||
|
||||
areAttached := make(map[string]bool)
|
||||
|
||||
if len(expected.volumeIDs) == 0 && expected.instanceID == "" {
|
||||
// testcase.volumeIDs looks uninitialized, test did not expect to call DisksAreAttached
|
||||
testcase.t.Errorf("Unexpected DisksAreAttached call!")
|
||||
return areAttached, errors.New("Unexpected DisksAreAttached call")
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(expected.volumeIDs, volumeIDs) {
|
||||
testcase.t.Errorf("Unexpected DisksAreAttached call: expected volumeIDs %v, got %v", expected.volumeIDs, volumeIDs)
|
||||
return areAttached, errors.New("Unexpected DisksAreAttached call: wrong volumeID")
|
||||
}
|
||||
|
||||
if expected.instanceID != instanceID {
|
||||
testcase.t.Errorf("Unexpected DisksAreAttached call: expected instanceID %s, got %s", expected.instanceID, instanceID)
|
||||
return areAttached, errors.New("Unexpected DisksAreAttached call: wrong instanceID")
|
||||
}
|
||||
|
||||
glog.V(4).Infof("DisksAreAttached call: %v, %s, returning %v, %v", volumeIDs, instanceID, expected.areAttached, expected.ret)
|
||||
|
||||
return expected.areAttached, expected.ret
|
||||
}
|
||||
|
||||
// Implementation of fake cloudprovider.Instances
|
||||
type instances struct {
|
||||
instanceID string
|
||||
}
|
||||
|
||||
func (instances *instances) NodeAddresses(name types.NodeName) ([]v1.NodeAddress, error) {
|
||||
return []v1.NodeAddress{}, errors.New("Not implemented")
|
||||
}
|
||||
|
||||
func (instances *instances) NodeAddressesByProviderID(providerID string) ([]v1.NodeAddress, error) {
|
||||
return []v1.NodeAddress{}, errors.New("Not implemented")
|
||||
}
|
||||
|
||||
func (instances *instances) ExternalID(name types.NodeName) (string, error) {
|
||||
return "", errors.New("Not implemented")
|
||||
}
|
||||
|
||||
func (instances *instances) InstanceID(name types.NodeName) (string, error) {
|
||||
return instances.instanceID, nil
|
||||
}
|
||||
|
||||
func (instances *instances) InstanceType(name types.NodeName) (string, error) {
|
||||
return "", errors.New("Not implemented")
|
||||
}
|
||||
|
||||
func (instances *instances) InstanceTypeByProviderID(providerID string) (string, error) {
|
||||
return "", errors.New("Not implemented")
|
||||
}
|
||||
|
||||
func (instances *instances) InstanceExistsByProviderID(providerID string) (bool, error) {
|
||||
return false, errors.New("unimplemented")
|
||||
}
|
||||
|
||||
func (instances *instances) List(filter string) ([]types.NodeName, error) {
|
||||
return []types.NodeName{}, errors.New("Not implemented")
|
||||
}
|
||||
|
||||
func (instances *instances) AddSSHKeyToAllInstances(user string, keyData []byte) error {
|
||||
return errors.New("Not implemented")
|
||||
}
|
||||
|
||||
func (instances *instances) CurrentNodeName(hostname string) (types.NodeName, error) {
|
||||
return "", errors.New("Not implemented")
|
||||
}
|
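The table test above cannot compare VolumesAreAttached results directly, because the result map is keyed by *volume.Spec pointers; it therefore serializes each map into a sorted slice first (serializeAttachments / volumeAttachmentFlags). Below is a minimal, self-contained sketch of that same normalization pattern, not part of the vendored tests, reduced to plain string keys; the names attachmentFlag and serialize are illustrative.

// Standalone sketch of the sort-then-serialize comparison used by the tests above.
package main

import (
	"fmt"
	"sort"
)

// attachmentFlag is a comparable, ordered view of one map entry.
type attachmentFlag struct {
	volumeID string
	attached bool
}

// serialize renders a map in deterministic (sorted) order so two results
// can be compared with a plain string comparison.
func serialize(m map[string]bool) string {
	flags := make([]attachmentFlag, 0, len(m))
	for id, attached := range m {
		flags = append(flags, attachmentFlag{id, attached})
	}
	sort.Slice(flags, func(i, j int) bool { return flags[i].volumeID < flags[j].volumeID })
	return fmt.Sprint(flags)
}

func main() {
	got := map[string]bool{"vol-b": true, "vol-a": false}
	want := map[string]bool{"vol-a": false, "vol-b": true}
	fmt.Println(serialize(got) == serialize(want)) // prints true
}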
541 vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder.go (generated, vendored, new file)
@@ -0,0 +1,541 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cinder
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/openstack"
|
||||
"k8s.io/kubernetes/pkg/util/keymutex"
|
||||
"k8s.io/kubernetes/pkg/util/mount"
|
||||
kstrings "k8s.io/kubernetes/pkg/util/strings"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
"k8s.io/kubernetes/pkg/volume/util"
|
||||
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
|
||||
)
|
||||
|
||||
// This is the primary entrypoint for volume plugins.
|
||||
func ProbeVolumePlugins() []volume.VolumePlugin {
|
||||
return []volume.VolumePlugin{&cinderPlugin{}}
|
||||
}
|
||||
|
||||
type CinderProvider interface {
|
||||
AttachDisk(instanceID, volumeID string) (string, error)
|
||||
DetachDisk(instanceID, volumeID string) error
|
||||
DeleteVolume(volumeID string) error
|
||||
CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, bool, error)
|
||||
GetDevicePath(volumeID string) string
|
||||
InstanceID() (string, error)
|
||||
GetAttachmentDiskPath(instanceID, volumeID string) (string, error)
|
||||
OperationPending(diskName string) (bool, string, error)
|
||||
DiskIsAttached(instanceID, volumeID string) (bool, error)
|
||||
DisksAreAttached(instanceID string, volumeIDs []string) (map[string]bool, error)
|
||||
ShouldTrustDevicePath() bool
|
||||
Instances() (cloudprovider.Instances, bool)
|
||||
ExpandVolume(volumeID string, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error)
|
||||
}
|
||||
|
||||
type cinderPlugin struct {
|
||||
host volume.VolumeHost
|
||||
// Guarding SetUp and TearDown operations
|
||||
volumeLocks keymutex.KeyMutex
|
||||
}
|
||||
|
||||
var _ volume.VolumePlugin = &cinderPlugin{}
|
||||
var _ volume.PersistentVolumePlugin = &cinderPlugin{}
|
||||
var _ volume.DeletableVolumePlugin = &cinderPlugin{}
|
||||
var _ volume.ProvisionableVolumePlugin = &cinderPlugin{}
|
||||
|
||||
const (
|
||||
cinderVolumePluginName = "kubernetes.io/cinder"
|
||||
)
|
||||
|
||||
func (plugin *cinderPlugin) Init(host volume.VolumeHost) error {
|
||||
plugin.host = host
|
||||
plugin.volumeLocks = keymutex.NewKeyMutex()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (plugin *cinderPlugin) GetPluginName() string {
|
||||
return cinderVolumePluginName
|
||||
}
|
||||
|
||||
func (plugin *cinderPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
|
||||
volumeSource, _, err := getVolumeSource(spec)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return volumeSource.VolumeID, nil
|
||||
}
|
||||
|
||||
func (plugin *cinderPlugin) CanSupport(spec *volume.Spec) bool {
|
||||
return (spec.Volume != nil && spec.Volume.Cinder != nil) || (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.Cinder != nil)
|
||||
}
|
||||
|
||||
func (plugin *cinderPlugin) RequiresRemount() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (plugin *cinderPlugin) SupportsMountOption() bool {
|
||||
return true
|
||||
|
||||
}
|
||||
func (plugin *cinderPlugin) SupportsBulkVolumeVerification() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (plugin *cinderPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
|
||||
return []v1.PersistentVolumeAccessMode{
|
||||
v1.ReadWriteOnce,
|
||||
}
|
||||
}
|
||||
|
||||
func (plugin *cinderPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
|
||||
return plugin.newMounterInternal(spec, pod.UID, &CinderDiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
|
||||
}
|
||||
|
||||
func (plugin *cinderPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager cdManager, mounter mount.Interface) (volume.Mounter, error) {
|
||||
cinder, readOnly, err := getVolumeSource(spec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pdName := cinder.VolumeID
|
||||
fsType := cinder.FSType
|
||||
|
||||
return &cinderVolumeMounter{
|
||||
cinderVolume: &cinderVolume{
|
||||
podUID: podUID,
|
||||
volName: spec.Name(),
|
||||
pdName: pdName,
|
||||
mounter: mounter,
|
||||
manager: manager,
|
||||
plugin: plugin,
|
||||
},
|
||||
fsType: fsType,
|
||||
readOnly: readOnly,
|
||||
blockDeviceMounter: volumehelper.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil
|
||||
}
|
||||
|
||||
func (plugin *cinderPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
|
||||
return plugin.newUnmounterInternal(volName, podUID, &CinderDiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
|
||||
}
|
||||
|
||||
func (plugin *cinderPlugin) newUnmounterInternal(volName string, podUID types.UID, manager cdManager, mounter mount.Interface) (volume.Unmounter, error) {
|
||||
return &cinderVolumeUnmounter{
|
||||
&cinderVolume{
|
||||
podUID: podUID,
|
||||
volName: volName,
|
||||
manager: manager,
|
||||
mounter: mounter,
|
||||
plugin: plugin,
|
||||
}}, nil
|
||||
}
|
||||
|
||||
func (plugin *cinderPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
|
||||
return plugin.newDeleterInternal(spec, &CinderDiskUtil{})
|
||||
}
|
||||
|
||||
func (plugin *cinderPlugin) newDeleterInternal(spec *volume.Spec, manager cdManager) (volume.Deleter, error) {
|
||||
if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.Cinder == nil {
|
||||
return nil, fmt.Errorf("spec.PersistentVolumeSource.Cinder is nil")
|
||||
}
|
||||
return &cinderVolumeDeleter{
|
||||
&cinderVolume{
|
||||
volName: spec.Name(),
|
||||
pdName: spec.PersistentVolume.Spec.Cinder.VolumeID,
|
||||
manager: manager,
|
||||
plugin: plugin,
|
||||
}}, nil
|
||||
}
|
||||
|
||||
func (plugin *cinderPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
|
||||
return plugin.newProvisionerInternal(options, &CinderDiskUtil{})
|
||||
}
|
||||
|
||||
func (plugin *cinderPlugin) newProvisionerInternal(options volume.VolumeOptions, manager cdManager) (volume.Provisioner, error) {
|
||||
return &cinderVolumeProvisioner{
|
||||
cinderVolume: &cinderVolume{
|
||||
manager: manager,
|
||||
plugin: plugin,
|
||||
},
|
||||
options: options,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func getCloudProvider(cloudProvider cloudprovider.Interface) (CinderProvider, error) {
|
||||
if cloud, ok := cloudProvider.(*openstack.OpenStack); ok && cloud != nil {
|
||||
return cloud, nil
|
||||
}
|
||||
return nil, fmt.Errorf("wrong cloud type")
|
||||
}
|
||||
|
||||
func (plugin *cinderPlugin) getCloudProvider() (CinderProvider, error) {
|
||||
cloud := plugin.host.GetCloudProvider()
|
||||
if cloud == nil {
|
||||
glog.Errorf("Cloud provider not initialized properly")
|
||||
return nil, errors.New("Cloud provider not initialized properly")
|
||||
}
|
||||
|
||||
switch cloud := cloud.(type) {
|
||||
case *openstack.OpenStack:
|
||||
return cloud, nil
|
||||
default:
|
||||
return nil, errors.New("Invalid cloud provider: expected OpenStack.")
|
||||
}
|
||||
}
|
||||
|
||||
func (plugin *cinderPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
|
||||
mounter := plugin.host.GetMounter(plugin.GetPluginName())
|
||||
pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName())
|
||||
sourceName, err := mounter.GetDeviceNameFromMount(mountPath, pluginDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
glog.V(4).Infof("Found volume %s mounted to %s", sourceName, mountPath)
|
||||
cinderVolume := &v1.Volume{
|
||||
Name: volumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
Cinder: &v1.CinderVolumeSource{
|
||||
VolumeID: sourceName,
|
||||
},
|
||||
},
|
||||
}
|
||||
return volume.NewSpecFromVolume(cinderVolume), nil
|
||||
}
|
||||
|
||||
var _ volume.ExpandableVolumePlugin = &cinderPlugin{}
|
||||
|
||||
func (plugin *cinderPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize resource.Quantity, oldSize resource.Quantity) (resource.Quantity, error) {
|
||||
cinder, _, err := getVolumeSource(spec)
|
||||
if err != nil {
|
||||
return oldSize, err
|
||||
}
|
||||
cloud, err := plugin.getCloudProvider()
|
||||
if err != nil {
|
||||
return oldSize, err
|
||||
}
|
||||
|
||||
expandedSize, err := cloud.ExpandVolume(cinder.VolumeID, oldSize, newSize)
|
||||
if err != nil {
|
||||
return oldSize, err
|
||||
}
|
||||
|
||||
glog.V(2).Infof("volume %s expanded to new size %d successfully", cinder.VolumeID, int(newSize.Value()))
|
||||
return expandedSize, nil
|
||||
}
|
||||
|
||||
func (plugin *cinderPlugin) RequiresFSResize() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Abstract interface to PD operations.
|
||||
type cdManager interface {
|
||||
// Attaches the disk to the kubelet's host machine.
|
||||
AttachDisk(mounter *cinderVolumeMounter, globalPDPath string) error
|
||||
// Detaches the disk from the kubelet's host machine.
|
||||
DetachDisk(unmounter *cinderVolumeUnmounter) error
|
||||
// Creates a volume
|
||||
CreateVolume(provisioner *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, labels map[string]string, fstype string, err error)
|
||||
// Deletes a volume
|
||||
DeleteVolume(deleter *cinderVolumeDeleter) error
|
||||
}
|
||||
var _ volume.Mounter = &cinderVolumeMounter{}

type cinderVolumeMounter struct {
	*cinderVolume
	fsType             string
	readOnly           bool
	blockDeviceMounter *mount.SafeFormatAndMount
}

// cinderPersistentDisk volumes are disk resources provided by OpenStack Cinder
// that are attached to the kubelet's host machine and exposed to the pod.
type cinderVolume struct {
	volName string
	podUID  types.UID
	// Unique identifier of the volume, used to find the disk resource in the provider.
	pdName string
	// Filesystem type, optional.
	fsType string
	// Specifies whether the disk will be attached as read-only.
	readOnly bool
	// Utility interface that provides API calls to the provider to attach/detach disks.
	manager cdManager
	// Mounter interface that provides system calls to mount the global path to the pod local path.
	mounter mount.Interface
	// blockDeviceMounter provides the interface that is used to mount the actual block device.
	blockDeviceMounter mount.Interface
	plugin             *cinderPlugin
	volume.MetricsNil
}

func (b *cinderVolumeMounter) GetAttributes() volume.Attributes {
	return volume.Attributes{
		ReadOnly:        b.readOnly,
		Managed:         !b.readOnly,
		SupportsSELinux: true,
	}
}

// CanMount checks, prior to mount operations, that the required components
// (binaries, etc.) to mount the volume are available on the underlying node.
// If they are not, it returns an error.
func (b *cinderVolumeMounter) CanMount() error {
	return nil
}

func (b *cinderVolumeMounter) SetUp(fsGroup *int64) error {
	return b.SetUpAt(b.GetPath(), fsGroup)
}

// SetUpAt bind mounts the disk's global mount to the volume path.
func (b *cinderVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
	glog.V(5).Infof("Cinder SetUp %s to %s", b.pdName, dir)

	b.plugin.volumeLocks.LockKey(b.pdName)
	defer b.plugin.volumeLocks.UnlockKey(b.pdName)

	notmnt, err := b.mounter.IsLikelyNotMountPoint(dir)
	if err != nil && !os.IsNotExist(err) {
		glog.Errorf("Cannot validate mount point: %s %v", dir, err)
		return err
	}
	if !notmnt {
		glog.V(4).Infof("Something is already mounted to target %s", dir)
		return nil
	}
	globalPDPath := makeGlobalPDName(b.plugin.host, b.pdName)

	options := []string{"bind"}
	if b.readOnly {
		options = append(options, "ro")
	}

	if err := os.MkdirAll(dir, 0750); err != nil {
		glog.V(4).Infof("Could not create directory %s: %v", dir, err)
		return err
	}

	// Perform a bind mount to the full path to allow duplicate mounts of the same PD.
	glog.V(4).Infof("Attempting to mount cinder volume %s to %s with options %v", b.pdName, dir, options)
	err = b.mounter.Mount(globalPDPath, dir, "", options)
	if err != nil {
		glog.V(4).Infof("Mount failed: %v", err)
		notmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
		if mntErr != nil {
			glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
			return err
		}
		if !notmnt {
			if mntErr = b.mounter.Unmount(dir); mntErr != nil {
				glog.Errorf("Failed to unmount: %v", mntErr)
				return err
			}
			notmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
			if mntErr != nil {
				glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
				return err
			}
			if !notmnt {
				// This is very odd, we don't expect it. We'll try again next sync loop.
				glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", b.GetPath())
				return err
			}
		}
		os.Remove(dir)
		glog.Errorf("Failed to mount %s: %v", dir, err)
		return err
	}

	if !b.readOnly {
		volume.SetVolumeOwnership(b, fsGroup)
	}
	glog.V(3).Infof("Cinder volume %s mounted to %s", b.pdName, dir)

	return nil
}

func makeGlobalPDName(host volume.VolumeHost, devName string) string {
	return path.Join(host.GetPluginDir(cinderVolumePluginName), mount.MountsInGlobalPDPath, devName)
}
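// For orientation: with the default kubelet root directory, the global PD path
// built above resolves to something like
// /var/lib/kubelet/plugins/kubernetes.io/cinder/mounts/<volumeID>. The exact
// prefix comes from the VolumeHost, so treat this as an illustration only.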
func (cd *cinderVolume) GetPath() string {
	name := cinderVolumePluginName
	return cd.plugin.host.GetPodVolumeDir(cd.podUID, kstrings.EscapeQualifiedNameForDisk(name), cd.volName)
}

type cinderVolumeUnmounter struct {
	*cinderVolume
}

var _ volume.Unmounter = &cinderVolumeUnmounter{}

func (c *cinderVolumeUnmounter) TearDown() error {
	return c.TearDownAt(c.GetPath())
}

// TearDownAt unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (c *cinderVolumeUnmounter) TearDownAt(dir string) error {
	if pathExists, pathErr := util.PathExists(dir); pathErr != nil {
		return fmt.Errorf("error checking if path exists: %v", pathErr)
	} else if !pathExists {
		glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir)
		return nil
	}

	glog.V(5).Infof("Cinder TearDown of %s", dir)
	notmnt, err := c.mounter.IsLikelyNotMountPoint(dir)
	if err != nil {
		glog.V(4).Infof("IsLikelyNotMountPoint check failed: %v", err)
		return err
	}
	if notmnt {
		glog.V(4).Infof("Nothing is mounted to %s, ignoring", dir)
		return os.Remove(dir)
	}

	// Find the Cinder volumeID to lock the right volume.
	// TODO: refactor VolumePlugin.NewUnmounter to get full volume.Spec just like
	// NewMounter. We could then find volumeID there without probing MountRefs.
	refs, err := mount.GetMountRefs(c.mounter, dir)
	if err != nil {
		glog.V(4).Infof("GetMountRefs failed: %v", err)
		return err
	}
	if len(refs) == 0 {
		glog.V(4).Infof("Directory %s is not mounted", dir)
		return fmt.Errorf("directory %s is not mounted", dir)
	}
	c.pdName = path.Base(refs[0])
	glog.V(4).Infof("Found volume %s mounted to %s", c.pdName, dir)

	// Lock the volume (and thus wait for any concurrent SetUpAt to finish).
	c.plugin.volumeLocks.LockKey(c.pdName)
	defer c.plugin.volumeLocks.UnlockKey(c.pdName)

	// Reload the list of references; a SetUpAt might have finished in the meantime.
	refs, err = mount.GetMountRefs(c.mounter, dir)
	if err != nil {
		glog.V(4).Infof("GetMountRefs failed: %v", err)
		return err
	}
	if err := c.mounter.Unmount(dir); err != nil {
		glog.V(4).Infof("Unmount failed: %v", err)
		return err
	}
	glog.V(3).Infof("Successfully unmounted: %s\n", dir)

	notmnt, mntErr := c.mounter.IsLikelyNotMountPoint(dir)
	if mntErr != nil {
		glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
		return mntErr
	}
	if notmnt {
		if err := os.Remove(dir); err != nil {
			glog.V(4).Infof("Failed to remove directory after unmount: %v", err)
			return err
		}
	}
	return nil
}

type cinderVolumeDeleter struct {
	*cinderVolume
}

var _ volume.Deleter = &cinderVolumeDeleter{}

func (r *cinderVolumeDeleter) GetPath() string {
	name := cinderVolumePluginName
	return r.plugin.host.GetPodVolumeDir(r.podUID, kstrings.EscapeQualifiedNameForDisk(name), r.volName)
}

func (r *cinderVolumeDeleter) Delete() error {
	return r.manager.DeleteVolume(r)
}

type cinderVolumeProvisioner struct {
	*cinderVolume
	options volume.VolumeOptions
}

var _ volume.Provisioner = &cinderVolumeProvisioner{}

func (c *cinderVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
	if !volume.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
		return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
	}

	volumeID, sizeGB, labels, fstype, err := c.manager.CreateVolume(c)
	if err != nil {
		return nil, err
	}

	pv := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name:   c.options.PVName,
			Labels: labels,
			Annotations: map[string]string{
				volumehelper.VolumeDynamicallyCreatedByKey: "cinder-dynamic-provisioner",
			},
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy,
			AccessModes:                   c.options.PVC.Spec.AccessModes,
			Capacity: v1.ResourceList{
				v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
			},
			PersistentVolumeSource: v1.PersistentVolumeSource{
				Cinder: &v1.CinderVolumeSource{
					VolumeID: volumeID,
					FSType:   fstype,
					ReadOnly: false,
				},
			},
			MountOptions: c.options.MountOptions,
		},
	}
	if len(c.options.PVC.Spec.AccessModes) == 0 {
		pv.Spec.AccessModes = c.plugin.GetAccessModes()
	}

	return pv, nil
}
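// For orientation (illustrative only; every concrete value below is made up):
// a PersistentVolume produced by Provision above serializes to roughly the
// following manifest:
//
//	apiVersion: v1
//	kind: PersistentVolume
//	metadata:
//	  name: pvc-0a1b2c3d
//	  annotations:
//	    kubernetes.io/createdby: cinder-dynamic-provisioner
//	spec:
//	  capacity:
//	    storage: 1Gi
//	  accessModes: ["ReadWriteOnce"]
//	  persistentVolumeReclaimPolicy: Delete
//	  cinder:
//	    volumeID: <cinder-volume-uuid>
//	    fsType: ext4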
func getVolumeSource(spec *volume.Spec) (*v1.CinderVolumeSource, bool, error) {
	if spec.Volume != nil && spec.Volume.Cinder != nil {
		return spec.Volume.Cinder, spec.Volume.Cinder.ReadOnly, nil
	} else if spec.PersistentVolume != nil &&
		spec.PersistentVolume.Spec.Cinder != nil {
		return spec.PersistentVolume.Spec.Cinder, spec.ReadOnly, nil
	}

	return nil, false, fmt.Errorf("Spec does not reference a Cinder volume type")
}
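// Minimal sketch (not part of the upstream file) of how a pod-level spec is fed
// to getVolumeSource; the PersistentVolume path is analogous. The volume ID and
// names below are hypothetical placeholders.
func exampleGetVolumeSource() {
	spec := volume.NewSpecFromVolume(&v1.Volume{
		Name: "data",
		VolumeSource: v1.VolumeSource{
			Cinder: &v1.CinderVolumeSource{VolumeID: "example-volume-id", FSType: "ext4"},
		},
	})
	if src, readOnly, err := getVolumeSource(spec); err == nil {
		glog.V(4).Infof("cinder volumeID=%s readOnly=%v", src.VolumeID, readOnly)
	}
}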
229
vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_test.go
generated
vendored
Normal file
229
vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_test.go
generated
vendored
Normal file
@ -0,0 +1,229 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cinder

import (
	"fmt"
	"os"
	"path"
	"testing"
	"time"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	utiltesting "k8s.io/client-go/util/testing"
	"k8s.io/kubernetes/pkg/util/mount"
	"k8s.io/kubernetes/pkg/volume"
	volumetest "k8s.io/kubernetes/pkg/volume/testing"
)

func TestCanSupport(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("cinderTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))

	plug, err := plugMgr.FindPluginByName("kubernetes.io/cinder")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	if plug.GetPluginName() != "kubernetes.io/cinder" {
		t.Errorf("Wrong name: %s", plug.GetPluginName())
	}
	if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{Cinder: &v1.CinderVolumeSource{}}}}) {
		t.Errorf("Expected true")
	}

	if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{Cinder: &v1.CinderVolumeSource{}}}}}) {
		t.Errorf("Expected true")
	}
}

type fakePDManager struct {
	// How long AttachDisk/DetachDisk should take - we need a slower AttachDisk in one test.
	attachDetachDuration time.Duration
}

func getFakeDeviceName(host volume.VolumeHost, pdName string) string {
	return path.Join(host.GetPluginDir(cinderVolumePluginName), "device", pdName)
}

// The real Cinder AttachDisk attaches a cinder volume and, if it is not yet
// mounted, mounts it to globalPDPath.
// Here we create a dummy directory (="device") and bind-mount it to globalPDPath.
func (fake *fakePDManager) AttachDisk(b *cinderVolumeMounter, globalPDPath string) error {
	globalPath := makeGlobalPDName(b.plugin.host, b.pdName)
	fakeDeviceName := getFakeDeviceName(b.plugin.host, b.pdName)
	err := os.MkdirAll(fakeDeviceName, 0750)
	if err != nil {
		return err
	}
	// Attaching a Cinder volume can be slow...
	time.Sleep(fake.attachDetachDuration)

	// The volume is "attached"; bind-mount it if it's not mounted yet.
	notmnt, err := b.mounter.IsLikelyNotMountPoint(globalPath)
	if err != nil {
		if os.IsNotExist(err) {
			if err := os.MkdirAll(globalPath, 0750); err != nil {
				return err
			}
			notmnt = true
		} else {
			return err
		}
	}
	if notmnt {
		err = b.mounter.Mount(fakeDeviceName, globalPath, "", []string{"bind"})
		if err != nil {
			return err
		}
	}
	return nil
}

func (fake *fakePDManager) DetachDisk(c *cinderVolumeUnmounter) error {
	globalPath := makeGlobalPDName(c.plugin.host, c.pdName)
	fakeDeviceName := getFakeDeviceName(c.plugin.host, c.pdName)
	// Unmount the bind-mount - should be fast.
	err := c.mounter.Unmount(globalPath)
	if err != nil {
		return err
	}

	// "Detach" the fake "device".
	err = os.RemoveAll(fakeDeviceName)
	if err != nil {
		return err
	}
	return nil
}

func (fake *fakePDManager) CreateVolume(c *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, labels map[string]string, fstype string, err error) {
	return "test-volume-name", 1, nil, "", nil
}

func (fake *fakePDManager) DeleteVolume(cd *cinderVolumeDeleter) error {
	if cd.pdName != "test-volume-name" {
		return fmt.Errorf("Deleter got unexpected volume name: %s", cd.pdName)
	}
	return nil
}

func TestPlugin(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("cinderTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))

	plug, err := plugMgr.FindPluginByName("kubernetes.io/cinder")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	spec := &v1.Volume{
		Name: "vol1",
		VolumeSource: v1.VolumeSource{
			Cinder: &v1.CinderVolumeSource{
				VolumeID: "pd",
				FSType:   "ext4",
			},
		},
	}
	mounter, err := plug.(*cinderPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &fakePDManager{0}, &mount.FakeMounter{})
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
	if mounter == nil {
		t.Errorf("Got a nil Mounter")
	}
	volPath := path.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~cinder/vol1")
	path := mounter.GetPath()
	if path != volPath {
		t.Errorf("Got unexpected path: %s", path)
	}

	if err := mounter.SetUp(nil); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(path); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, volume path not created: %s", path)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}

	unmounter, err := plug.(*cinderPlugin).newUnmounterInternal("vol1", types.UID("poduid"), &fakePDManager{0}, &mount.FakeMounter{})
	if err != nil {
		t.Errorf("Failed to make a new Unmounter: %v", err)
	}
	if unmounter == nil {
		t.Errorf("Got a nil Unmounter")
	}

	if err := unmounter.TearDown(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(path); err == nil {
		t.Errorf("TearDown() failed, volume path still exists: %s", path)
	} else if !os.IsNotExist(err) {
		t.Errorf("TearDown() failed: %v", err)
	}

	// Test Provisioner
	options := volume.VolumeOptions{
		PVC: volumetest.CreateTestPVC("100Mi", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}),
		PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
	}
	provisioner, err := plug.(*cinderPlugin).newProvisionerInternal(options, &fakePDManager{0})
	if err != nil {
		t.Errorf("Failed to make a new Provisioner: %v", err)
	}
	persistentSpec, err := provisioner.Provision()
	if err != nil {
		t.Errorf("Provision() failed: %v", err)
	}

	if persistentSpec.Spec.PersistentVolumeSource.Cinder.VolumeID != "test-volume-name" {
		t.Errorf("Provision() returned unexpected volume ID: %s", persistentSpec.Spec.PersistentVolumeSource.Cinder.VolumeID)
	}
	cap := persistentSpec.Spec.Capacity[v1.ResourceStorage]
	size := cap.Value()
	if size != 1024*1024*1024 {
		t.Errorf("Provision() returned unexpected volume size: %v", size)
	}

	// Test Deleter
	volSpec := &volume.Spec{
		PersistentVolume: persistentSpec,
	}
	deleter, err := plug.(*cinderPlugin).newDeleterInternal(volSpec, &fakePDManager{0})
	if err != nil {
		t.Errorf("Failed to make a new Deleter: %v", err)
	}
	if err := deleter.Delete(); err != nil {
		t.Errorf("Deleter() failed: %v", err)
	}
}
247
vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_util.go
generated
vendored
Normal file
247
vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_util.go
generated
vendored
Normal file
@ -0,0 +1,247 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cinder

import (
	"errors"
	"fmt"
	"io/ioutil"
	"os"
	"strings"
	"time"

	"github.com/golang/glog"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	clientset "k8s.io/client-go/kubernetes"
	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
	"k8s.io/kubernetes/pkg/volume"
	"k8s.io/utils/exec"
)

type CinderDiskUtil struct{}

// AttachDisk attaches the disk specified by a CinderVolumeSource to the current
// kubelet and mounts it to its global path.
func (util *CinderDiskUtil) AttachDisk(b *cinderVolumeMounter, globalPDPath string) error {
	options := []string{}
	if b.readOnly {
		options = append(options, "ro")
	}
	cloud, err := b.plugin.getCloudProvider()
	if err != nil {
		return err
	}
	instanceid, err := cloud.InstanceID()
	if err != nil {
		return err
	}
	diskid, err := cloud.AttachDisk(instanceid, b.pdName)
	if err != nil {
		return err
	}

	var devicePath string
	numTries := 0
	for {
		devicePath = cloud.GetDevicePath(diskid)
		probeAttachedVolume()

		_, err := os.Stat(devicePath)
		if err == nil {
			break
		}
		if err != nil && !os.IsNotExist(err) {
			return err
		}
		numTries++
		if numTries == 10 {
			return errors.New("Could not attach disk: Timeout after 60s")
		}
		time.Sleep(time.Second * 6)
	}
	notmnt, err := b.mounter.IsLikelyNotMountPoint(globalPDPath)
	if err != nil {
		if os.IsNotExist(err) {
			if err := os.MkdirAll(globalPDPath, 0750); err != nil {
				return err
			}
			notmnt = true
		} else {
			return err
		}
	}
	if notmnt {
		err = b.blockDeviceMounter.FormatAndMount(devicePath, globalPDPath, b.fsType, options)
		if err != nil {
			os.Remove(globalPDPath)
			return err
		}
		glog.V(2).Infof("Safe mount successful: %q\n", devicePath)
	}
	return nil
}

// DetachDisk unmounts the device and detaches the disk from the kubelet's host machine.
func (util *CinderDiskUtil) DetachDisk(cd *cinderVolumeUnmounter) error {
	globalPDPath := makeGlobalPDName(cd.plugin.host, cd.pdName)
	if err := cd.mounter.Unmount(globalPDPath); err != nil {
		return err
	}
	if err := os.Remove(globalPDPath); err != nil {
		return err
	}
	glog.V(2).Infof("Successfully unmounted main device: %s\n", globalPDPath)

	cloud, err := cd.plugin.getCloudProvider()
	if err != nil {
		return err
	}
	instanceid, err := cloud.InstanceID()
	if err != nil {
		return err
	}
	if err = cloud.DetachDisk(instanceid, cd.pdName); err != nil {
		return err
	}
	glog.V(2).Infof("Successfully detached cinder volume %s", cd.pdName)
	return nil
}

// DeleteVolume deletes the backing Cinder volume.
func (util *CinderDiskUtil) DeleteVolume(cd *cinderVolumeDeleter) error {
	cloud, err := cd.plugin.getCloudProvider()
	if err != nil {
		return err
	}

	if err = cloud.DeleteVolume(cd.pdName); err != nil {
		// OpenStack cloud provider returns volume.tryAgainError when necessary,
		// no handling needed here.
		glog.V(2).Infof("Error deleting cinder volume %s: %v", cd.pdName, err)
		return err
	}
	glog.V(2).Infof("Successfully deleted cinder volume %s", cd.pdName)
	return nil
}

func getZonesFromNodes(kubeClient clientset.Interface) (sets.String, error) {
	// TODO: caching; currently this is overkill because the function is called
	// only when a dynamic PV is created.
	zones := make(sets.String)
	nodes, err := kubeClient.CoreV1().Nodes().List(metav1.ListOptions{})
	if err != nil {
		glog.V(2).Infof("Error listing nodes")
		return zones, err
	}
	for _, node := range nodes.Items {
		if zone, ok := node.Labels[kubeletapis.LabelZoneFailureDomain]; ok {
			zones.Insert(zone)
		}
	}
	glog.V(4).Infof("zones found: %v", zones)
	return zones, nil
}
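// kubeletapis.LabelZoneFailureDomain is the well-known
// "failure-domain.beta.kubernetes.io/zone" node label, so the set returned
// above mirrors the availability zones currently reported by the cluster's nodes.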
// CreateVolume dynamically provisions a new Cinder volume for the given provisioner.
func (util *CinderDiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, volumeLabels map[string]string, fstype string, err error) {
	cloud, err := c.plugin.getCloudProvider()
	if err != nil {
		return "", 0, nil, "", err
	}

	capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
	volSizeBytes := capacity.Value()
	// Cinder works in gigabytes; convert the requested size to GiB, rounding up.
	volSizeGB := int(volume.RoundUpSize(volSizeBytes, 1024*1024*1024))
	name := volume.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 255) // Cinder volume names can have up to 255 characters
	vtype := ""
	availability := ""
	// Apply ProvisionerParameters (case-insensitive). We leave validation of
	// the values to the cloud provider.
	for k, v := range c.options.Parameters {
		switch strings.ToLower(k) {
		case "type":
			vtype = v
		case "availability":
			availability = v
		case volume.VolumeParameterFSType:
			fstype = v
		default:
			return "", 0, nil, "", fmt.Errorf("invalid option %q for volume plugin %s", k, c.plugin.GetPluginName())
		}
	}
	// TODO: implement PVC.Selector parsing
	if c.options.PVC.Spec.Selector != nil {
		return "", 0, nil, "", fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on Cinder")
	}

	if availability == "" {
		// No zone specified; choose one randomly in the same region.
		zones, err := getZonesFromNodes(c.plugin.host.GetKubeClient())
		if err != nil {
			glog.V(2).Infof("error getting zone information: %v", err)
			return "", 0, nil, "", err
		}
		// If we did not get any zones, leave it blank and gophercloud will
		// use "nova" as the default zone.
		if len(zones) > 0 {
			availability = volume.ChooseZoneForVolume(zones, c.options.PVC.Name)
		}
	}

	volumeID, volumeAZ, ignoreVolumeAZ, err := cloud.CreateVolume(name, volSizeGB, vtype, availability, c.options.CloudTags)
	if err != nil {
		glog.V(2).Infof("Error creating cinder volume: %v", err)
		return "", 0, nil, "", err
	}
	glog.V(2).Infof("Successfully created cinder volume %s", volumeID)

	// Label the volume with its availability zone so that pods using it are
	// scheduled into the same AZ.
	volumeLabels = make(map[string]string)
	if !ignoreVolumeAZ {
		volumeLabels[kubeletapis.LabelZoneFailureDomain] = volumeAZ
	}
	return volumeID, volSizeGB, volumeLabels, fstype, nil
}
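// Illustrative only (the class name and parameter values are assumptions, not
// taken from this repository): a StorageClass exercising the parameters parsed
// above might look like:
//
//	kind: StorageClass
//	apiVersion: storage.k8s.io/v1
//	metadata:
//	  name: cinder-gold
//	provisioner: kubernetes.io/cinder
//	parameters:
//	  type: fast
//	  availability: nova
//	  fsType: ext4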
func probeAttachedVolume() error {
	// Rescan the SCSI bus first.
	scsiHostRescan()

	executor := exec.New()
	args := []string{"trigger"}
	cmd := executor.Command("udevadm", args...)
	_, err := cmd.CombinedOutput()
	if err != nil {
		glog.Errorf("error running udevadm trigger %v\n", err)
		return err
	}
	glog.V(4).Infof("Successfully probed all attachments")
	return nil
}

func scsiHostRescan() {
	scsiPath := "/sys/class/scsi_host/"
	if dirs, err := ioutil.ReadDir(scsiPath); err == nil {
		for _, f := range dirs {
			name := scsiPath + f.Name() + "/scan"
			data := []byte("- - -")
			ioutil.WriteFile(name, data, 0666)
		}
	}
}
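// For reference (illustrative, not part of the upstream file), the two probes
// above are roughly equivalent to running:
//
//	udevadm trigger
//	for host in /sys/class/scsi_host/host*; do echo "- - -" > "$host/scan"; done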
18
vendor/k8s.io/kubernetes/pkg/volume/cinder/doc.go
generated
vendored
Normal file
18
vendor/k8s.io/kubernetes/pkg/volume/cinder/doc.go
generated
vendored
Normal file
@ -0,0 +1,18 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package cinder contains the internal representation of cinder volumes.
package cinder // import "k8s.io/kubernetes/pkg/volume/cinder"