vendor files

Serguei Bezverkhi
2018-01-09 13:57:14 -05:00
parent 558bc6c02a
commit 7b24313bd6
16547 changed files with 4527373 additions and 0 deletions

vendor/k8s.io/kubernetes/pkg/volume/BUILD generated vendored Normal file

@@ -0,0 +1,138 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"metrics_cached.go",
"metrics_du.go",
"metrics_errors.go",
"metrics_nil.go",
"metrics_statfs.go",
"plugins.go",
"util.go",
"volume.go",
"volume_unsupported.go",
] + select({
"@io_bazel_rules_go//go/platform:linux_amd64": [
"volume_linux.go",
],
"//conditions:default": [],
}),
importpath = "k8s.io/kubernetes/pkg/volume",
deps = [
"//pkg/cloudprovider:go_default_library",
"//pkg/util/io:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"metrics_nil_test.go",
"plugins_test.go",
"util_test.go",
],
importpath = "k8s.io/kubernetes/pkg/volume",
library = ":go_default_library",
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/util/slice:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
],
)
go_test(
name = "go_default_xtest",
srcs = [
"metrics_statfs_test.go",
] + select({
"@io_bazel_rules_go//go/platform:linux_amd64": [
"metrics_du_test.go",
],
"//conditions:default": [],
}),
importpath = "k8s.io/kubernetes/pkg/volume_test",
deps = [
":go_default_library",
"//pkg/volume/testing:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
] + select({
"@io_bazel_rules_go//go/platform:linux_amd64": [
"//vendor/golang.org/x/sys/unix:go_default_library",
],
"//conditions:default": [],
}),
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/volume/aws_ebs:all-srcs",
"//pkg/volume/azure_dd:all-srcs",
"//pkg/volume/azure_file:all-srcs",
"//pkg/volume/cephfs:all-srcs",
"//pkg/volume/cinder:all-srcs",
"//pkg/volume/configmap:all-srcs",
"//pkg/volume/csi:all-srcs",
"//pkg/volume/downwardapi:all-srcs",
"//pkg/volume/empty_dir:all-srcs",
"//pkg/volume/fc:all-srcs",
"//pkg/volume/flexvolume:all-srcs",
"//pkg/volume/flocker:all-srcs",
"//pkg/volume/gce_pd:all-srcs",
"//pkg/volume/git_repo:all-srcs",
"//pkg/volume/glusterfs:all-srcs",
"//pkg/volume/host_path:all-srcs",
"//pkg/volume/iscsi:all-srcs",
"//pkg/volume/local:all-srcs",
"//pkg/volume/nfs:all-srcs",
"//pkg/volume/photon_pd:all-srcs",
"//pkg/volume/portworx:all-srcs",
"//pkg/volume/projected:all-srcs",
"//pkg/volume/quobyte:all-srcs",
"//pkg/volume/rbd:all-srcs",
"//pkg/volume/scaleio:all-srcs",
"//pkg/volume/secret:all-srcs",
"//pkg/volume/storageos:all-srcs",
"//pkg/volume/testing:all-srcs",
"//pkg/volume/util:all-srcs",
"//pkg/volume/validation:all-srcs",
"//pkg/volume/vsphere_volume:all-srcs",
],
tags = ["automanaged"],
)
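
The select() blocks above mirror Go's filename-based build constraints: volume_linux.go (and metrics_du_test.go in the xtest rule) carry a _linux suffix, so the Go toolchain compiles them only for GOOS=linux, and the generated Bazel rules must likewise add them only on the linux_amd64 platform. A minimal sketch of the pattern under assumed names (the file stat_linux.go and the FreeBytes helper are illustrative; the unix package matches the //vendor/golang.org/x/sys/unix dependency listed above):

// stat_linux.go — the _linux suffix is itself a build constraint, so this
// file is compiled only for GOOS=linux; a counterpart for other platforms
// would carry the complementary build tag.
package example

import "golang.org/x/sys/unix"

// FreeBytes reports the free space at path via the Linux statfs syscall.
func FreeBytes(path string) (uint64, error) {
	var s unix.Statfs_t
	if err := unix.Statfs(path, &s); err != nil {
		return 0, err
	}
	return s.Bavail * uint64(s.Bsize), nil
}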

vendor/k8s.io/kubernetes/pkg/volume/OWNERS generated vendored Normal file

@@ -0,0 +1,17 @@
approvers:
- saad-ali
- thockin
- jsafrane
- matchstick
- rootfs
- gnufied
- childsb
reviewers:
- saad-ali
- jsafrane
- rootfs
- jingxu97
- msau42
- gnufied
- verult
- davidz627

vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/BUILD generated vendored Normal file

@@ -0,0 +1,68 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"attacher.go",
"aws_ebs.go",
"aws_util.go",
"doc.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/aws_ebs",
deps = [
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/aws:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"attacher_test.go",
"aws_ebs_test.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/aws_ebs",
library = ":go_default_library",
deps = [
"//pkg/cloudprovider/providers/aws:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/OWNERS generated vendored Normal file

@@ -0,0 +1,13 @@
approvers:
- gnufied
- jingxu97
- jsafrane
- justinsb
reviewers:
- chrislovecnm
- gnufied
- jingxu97
- justinsb
- jsafrane
- rootfs
- saad-ali

vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/attacher.go generated vendored Normal file

@@ -0,0 +1,282 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package aws_ebs
import (
"fmt"
"os"
"path"
"strconv"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
type awsElasticBlockStoreAttacher struct {
host volume.VolumeHost
awsVolumes aws.Volumes
}
var _ volume.Attacher = &awsElasticBlockStoreAttacher{}
var _ volume.AttachableVolumePlugin = &awsElasticBlockStorePlugin{}
func (plugin *awsElasticBlockStorePlugin) NewAttacher() (volume.Attacher, error) {
awsCloud, err := getCloudProvider(plugin.host.GetCloudProvider())
if err != nil {
return nil, err
}
return &awsElasticBlockStoreAttacher{
host: plugin.host,
awsVolumes: awsCloud,
}, nil
}
func (plugin *awsElasticBlockStorePlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
mounter := plugin.host.GetMounter(plugin.GetPluginName())
return mount.GetMountRefs(mounter, deviceMountPath)
}
func (attacher *awsElasticBlockStoreAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
volumeSource, readOnly, err := getVolumeSource(spec)
if err != nil {
return "", err
}
volumeID := aws.KubernetesVolumeID(volumeSource.VolumeID)
// awsCloud.AttachDisk checks if disk is already attached to node and
// succeeds in that case, so no need to do that separately.
devicePath, err := attacher.awsVolumes.AttachDisk(volumeID, nodeName, readOnly)
if err != nil {
glog.Errorf("Error attaching volume %q to node %q: %+v", volumeID, nodeName, err)
return "", err
}
return devicePath, nil
}
func (attacher *awsElasticBlockStoreAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
glog.Warningf("Attacher.VolumesAreAttached called for node %q - Please use BulkVerifyVolumes for AWS", nodeName)
volumeNodeMap := map[types.NodeName][]*volume.Spec{
nodeName: specs,
}
nodeVolumesResult := make(map[*volume.Spec]bool)
nodesVerificationMap, err := attacher.BulkVerifyVolumes(volumeNodeMap)
if err != nil {
glog.Errorf("Attacher.VolumesAreAttached - error checking volumes for node %q with %v", nodeName, err)
return nodeVolumesResult, err
}
if result, ok := nodesVerificationMap[nodeName]; ok {
return result, nil
}
return nodeVolumesResult, nil
}
func (attacher *awsElasticBlockStoreAttacher) BulkVerifyVolumes(volumesByNode map[types.NodeName][]*volume.Spec) (map[types.NodeName]map[*volume.Spec]bool, error) {
volumesAttachedCheck := make(map[types.NodeName]map[*volume.Spec]bool)
diskNamesByNode := make(map[types.NodeName][]aws.KubernetesVolumeID)
volumeSpecMap := make(map[aws.KubernetesVolumeID]*volume.Spec)
for nodeName, volumeSpecs := range volumesByNode {
for _, volumeSpec := range volumeSpecs {
volumeSource, _, err := getVolumeSource(volumeSpec)
if err != nil {
glog.Errorf("Error getting volume (%q) source : %v", volumeSpec.Name(), err)
continue
}
name := aws.KubernetesVolumeID(volumeSource.VolumeID)
diskNamesByNode[nodeName] = append(diskNamesByNode[nodeName], name)
nodeDisk, nodeDiskExists := volumesAttachedCheck[nodeName]
if !nodeDiskExists {
nodeDisk = make(map[*volume.Spec]bool)
}
nodeDisk[volumeSpec] = true
volumeSpecMap[name] = volumeSpec
volumesAttachedCheck[nodeName] = nodeDisk
}
}
attachedResult, err := attacher.awsVolumes.DisksAreAttached(diskNamesByNode)
if err != nil {
glog.Errorf("Error checking if volumes are attached to nodes err = %v", err)
return volumesAttachedCheck, err
}
for nodeName, nodeDisks := range attachedResult {
for diskName, attached := range nodeDisks {
if !attached {
spec := volumeSpecMap[diskName]
setNodeDisk(volumesAttachedCheck, spec, nodeName, false)
}
}
}
return volumesAttachedCheck, nil
}
func (attacher *awsElasticBlockStoreAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) {
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return "", err
}
volumeID := volumeSource.VolumeID
partition := ""
if volumeSource.Partition != 0 {
partition = strconv.Itoa(int(volumeSource.Partition))
}
if devicePath == "" {
return "", fmt.Errorf("WaitForAttach failed for AWS Volume %q: devicePath is empty.", volumeID)
}
ticker := time.NewTicker(checkSleepDuration)
defer ticker.Stop()
timer := time.NewTimer(timeout)
defer timer.Stop()
for {
select {
case <-ticker.C:
glog.V(5).Infof("Checking AWS Volume %q is attached.", volumeID)
if devicePath != "" {
devicePaths := getDiskByIdPaths(aws.KubernetesVolumeID(volumeSource.VolumeID), partition, devicePath)
path, err := verifyDevicePath(devicePaths)
if err != nil {
// Log error, if any, and continue checking periodically. See issue #11321
glog.Errorf("Error verifying AWS Volume (%q) is attached: %v", volumeID, err)
} else if path != "" {
// A device path has successfully been created for the PD
glog.Infof("Successfully found attached AWS Volume %q.", volumeID)
return path, nil
}
} else {
glog.V(5).Infof("AWS Volume (%q) is not attached yet", volumeID)
}
case <-timer.C:
return "", fmt.Errorf("Could not find attached AWS Volume %q. Timeout waiting for mount paths to be created.", volumeID)
}
}
}
func (attacher *awsElasticBlockStoreAttacher) GetDeviceMountPath(
spec *volume.Spec) (string, error) {
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return "", err
}
return makeGlobalPDPath(attacher.host, aws.KubernetesVolumeID(volumeSource.VolumeID)), nil
}
// FIXME: this method can be further pruned.
func (attacher *awsElasticBlockStoreAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
mounter := attacher.host.GetMounter(awsElasticBlockStorePluginName)
notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
if err != nil {
if os.IsNotExist(err) {
if err := os.MkdirAll(deviceMountPath, 0750); err != nil {
return err
}
notMnt = true
} else {
return err
}
}
volumeSource, readOnly, err := getVolumeSource(spec)
if err != nil {
return err
}
options := []string{}
if readOnly {
options = append(options, "ro")
}
if notMnt {
diskMounter := volumehelper.NewSafeFormatAndMountFromHost(awsElasticBlockStorePluginName, attacher.host)
mountOptions := volume.MountOptionFromSpec(spec, options...)
err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions)
if err != nil {
os.Remove(deviceMountPath)
return err
}
}
return nil
}
type awsElasticBlockStoreDetacher struct {
mounter mount.Interface
awsVolumes aws.Volumes
}
var _ volume.Detacher = &awsElasticBlockStoreDetacher{}
func (plugin *awsElasticBlockStorePlugin) NewDetacher() (volume.Detacher, error) {
awsCloud, err := getCloudProvider(plugin.host.GetCloudProvider())
if err != nil {
return nil, err
}
return &awsElasticBlockStoreDetacher{
mounter: plugin.host.GetMounter(plugin.GetPluginName()),
awsVolumes: awsCloud,
}, nil
}
func (detacher *awsElasticBlockStoreDetacher) Detach(volumeName string, nodeName types.NodeName) error {
volumeID := aws.KubernetesVolumeID(path.Base(volumeName))
if _, err := detacher.awsVolumes.DetachDisk(volumeID, nodeName); err != nil {
glog.Errorf("Error detaching volumeID %q: %v", volumeID, err)
return err
}
return nil
}
func (detacher *awsElasticBlockStoreDetacher) UnmountDevice(deviceMountPath string) error {
return volumeutil.UnmountPath(deviceMountPath, detacher.mounter)
}
func setNodeDisk(
nodeDiskMap map[types.NodeName]map[*volume.Spec]bool,
volumeSpec *volume.Spec,
nodeName types.NodeName,
check bool) {
volumeMap := nodeDiskMap[nodeName]
if volumeMap == nil {
volumeMap = make(map[*volume.Spec]bool)
nodeDiskMap[nodeName] = volumeMap
}
volumeMap[volumeSpec] = check
}
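
Taken together, the methods above satisfy the volume.Attacher contract asserted at the top of the file. A minimal sketch, assuming a hypothetical caller (attachAndMount and the ten-minute timeout are invented; the real drivers are the attach/detach controller and kubelet machinery), of how the pieces chain together:

package example

import (
	"time"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/volume"
)

// attachAndMount drives one volume through the Attacher lifecycle.
func attachAndMount(plugin volume.AttachableVolumePlugin, spec *volume.Spec, pod *v1.Pod, nodeName types.NodeName) error {
	attacher, err := plugin.NewAttacher()
	if err != nil {
		return err
	}
	// Attach is idempotent: AttachDisk succeeds if the disk is already attached.
	devicePath, err := attacher.Attach(spec, nodeName)
	if err != nil {
		return err
	}
	// Poll until a matching device node shows up on this host.
	devicePath, err = attacher.WaitForAttach(spec, devicePath, pod, 10*time.Minute)
	if err != nil {
		return err
	}
	deviceMountPath, err := attacher.GetDeviceMountPath(spec)
	if err != nil {
		return err
	}
	// Format (on first use) and mount at the shared global path; pod
	// directories are later bind-mounted from here.
	return attacher.MountDevice(spec, devicePath, deviceMountPath)
}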

vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/attacher_test.go generated vendored Normal file

@@ -0,0 +1,302 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package aws_ebs
import (
"errors"
"testing"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
)
func TestGetVolumeName_Volume(t *testing.T) {
plugin := newPlugin()
name := aws.KubernetesVolumeID("my-aws-volume")
spec := createVolSpec(name, false)
volumeName, err := plugin.GetVolumeName(spec)
if err != nil {
t.Errorf("GetVolumeName error: %v", err)
}
if volumeName != string(name) {
t.Errorf("GetVolumeName error: expected %s, got %s", name, volumeName)
}
}
func TestGetVolumeName_PersistentVolume(t *testing.T) {
plugin := newPlugin()
name := aws.KubernetesVolumeID("my-aws-pv")
spec := createPVSpec(name, true)
volumeName, err := plugin.GetVolumeName(spec)
if err != nil {
t.Errorf("GetVolumeName error: %v", err)
}
if volumeName != string(name) {
t.Errorf("GetVolumeName error: expected %s, got %s", name, volumeName)
}
}
// One testcase for TestAttachDetach table test below
type testcase struct {
name aws.KubernetesVolumeID
// For fake AWS:
attach attachCall
detach detachCall
t *testing.T
// Actual test to run
test func(test *testcase) (string, error)
// Expected return of the test
expectedDevice string
expectedError error
}
func TestAttachDetach(t *testing.T) {
diskName := aws.KubernetesVolumeID("disk")
nodeName := types.NodeName("instance")
readOnly := false
spec := createVolSpec(diskName, readOnly)
attachError := errors.New("Fake attach error")
detachError := errors.New("Fake detach error")
tests := []testcase{
// Successful Attach call
{
name: "Attach_Positive",
attach: attachCall{diskName, nodeName, readOnly, "/dev/sda", nil},
test: func(testcase *testcase) (string, error) {
attacher := newAttacher(testcase)
return attacher.Attach(spec, nodeName)
},
expectedDevice: "/dev/sda",
},
// Attach call fails
{
name: "Attach_Negative",
attach: attachCall{diskName, nodeName, readOnly, "", attachError},
test: func(testcase *testcase) (string, error) {
attacher := newAttacher(testcase)
return attacher.Attach(spec, nodeName)
},
expectedError: attachError,
},
// Detach succeeds
{
name: "Detach_Positive",
detach: detachCall{diskName, nodeName, "/dev/sda", nil},
test: func(testcase *testcase) (string, error) {
detacher := newDetacher(testcase)
mountPath := "/mnt/" + string(diskName)
return "", detacher.Detach(mountPath, nodeName)
},
},
// Detach fails
{
name: "Detach_Negative",
detach: detachCall{diskName, nodeName, "", detachError},
test: func(testcase *testcase) (string, error) {
detacher := newDetacher(testcase)
mountPath := "/mnt/" + string(diskName)
return "", detacher.Detach(mountPath, nodeName)
},
expectedError: detachError,
},
}
for _, testcase := range tests {
testcase.t = t
device, err := testcase.test(&testcase)
if err != testcase.expectedError {
t.Errorf("%s failed: expected err=%v, got %v", testcase.name, testcase.expectedError, err)
}
if device != testcase.expectedDevice {
t.Errorf("%s failed: expected device=%q, got %q", testcase.name, testcase.expectedDevice, device)
}
t.Logf("Test %q succeeded", testcase.name)
}
}
// newPlugin creates a new awsElasticBlockStorePlugin with a fake cloud;
// NewAttacher and NewDetacher won't work.
func newPlugin() *awsElasticBlockStorePlugin {
host := volumetest.NewFakeVolumeHost("/tmp", nil, nil)
plugins := ProbeVolumePlugins()
plugin := plugins[0]
plugin.Init(host)
return plugin.(*awsElasticBlockStorePlugin)
}
func newAttacher(testcase *testcase) *awsElasticBlockStoreAttacher {
return &awsElasticBlockStoreAttacher{
host: nil,
awsVolumes: testcase,
}
}
func newDetacher(testcase *testcase) *awsElasticBlockStoreDetacher {
return &awsElasticBlockStoreDetacher{
awsVolumes: testcase,
}
}
func createVolSpec(name aws.KubernetesVolumeID, readOnly bool) *volume.Spec {
return &volume.Spec{
Volume: &v1.Volume{
VolumeSource: v1.VolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
VolumeID: string(name),
ReadOnly: readOnly,
},
},
},
}
}
func createPVSpec(name aws.KubernetesVolumeID, readOnly bool) *volume.Spec {
return &volume.Spec{
PersistentVolume: &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
VolumeID: string(name),
ReadOnly: readOnly,
},
},
},
},
}
}
// Fake AWS implementation
type attachCall struct {
diskName aws.KubernetesVolumeID
nodeName types.NodeName
readOnly bool
retDeviceName string
ret error
}
type detachCall struct {
diskName aws.KubernetesVolumeID
nodeName types.NodeName
retDeviceName string
ret error
}
type diskIsAttachedCall struct {
diskName aws.KubernetesVolumeID
nodeName types.NodeName
isAttached bool
ret error
}
func (testcase *testcase) AttachDisk(diskName aws.KubernetesVolumeID, nodeName types.NodeName, readOnly bool) (string, error) {
expected := &testcase.attach
if expected.diskName == "" && expected.nodeName == "" {
// testcase.attach looks uninitialized, test did not expect to call
// AttachDisk
testcase.t.Errorf("Unexpected AttachDisk call!")
return "", errors.New("Unexpected AttachDisk call!")
}
if expected.diskName != diskName {
testcase.t.Errorf("Unexpected AttachDisk call: expected diskName %s, got %s", expected.diskName, diskName)
return "", errors.New("Unexpected AttachDisk call: wrong diskName")
}
if expected.nodeName != nodeName {
testcase.t.Errorf("Unexpected AttachDisk call: expected nodeName %s, got %s", expected.nodeName, nodeName)
return "", errors.New("Unexpected AttachDisk call: wrong nodeName")
}
if expected.readOnly != readOnly {
testcase.t.Errorf("Unexpected AttachDisk call: expected readOnly %v, got %v", expected.readOnly, readOnly)
return "", errors.New("Unexpected AttachDisk call: wrong readOnly")
}
glog.V(4).Infof("AttachDisk call: %s, %s, %v, returning %q, %v", diskName, nodeName, readOnly, expected.retDeviceName, expected.ret)
return expected.retDeviceName, expected.ret
}
func (testcase *testcase) DetachDisk(diskName aws.KubernetesVolumeID, nodeName types.NodeName) (string, error) {
expected := &testcase.detach
if expected.diskName == "" && expected.nodeName == "" {
// testcase.detach looks uninitialized, test did not expect to call
// DetachDisk
testcase.t.Errorf("Unexpected DetachDisk call!")
return "", errors.New("Unexpected DetachDisk call!")
}
if expected.diskName != diskName {
testcase.t.Errorf("Unexpected DetachDisk call: expected diskName %s, got %s", expected.diskName, diskName)
return "", errors.New("Unexpected DetachDisk call: wrong diskName")
}
if expected.nodeName != nodeName {
testcase.t.Errorf("Unexpected DetachDisk call: expected nodeName %s, got %s", expected.nodeName, nodeName)
return "", errors.New("Unexpected DetachDisk call: wrong nodeName")
}
glog.V(4).Infof("DetachDisk call: %s, %s, returning %q, %v", diskName, nodeName, expected.retDeviceName, expected.ret)
return expected.retDeviceName, expected.ret
}
func (testcase *testcase) DiskIsAttached(diskName aws.KubernetesVolumeID, nodeName types.NodeName) (bool, error) {
// DetachDisk no longer relies on DiskIsAttached api call
return false, nil
}
func (testcase *testcase) DisksAreAttached(nodeDisks map[types.NodeName][]aws.KubernetesVolumeID) (map[types.NodeName]map[aws.KubernetesVolumeID]bool, error) {
return nil, errors.New("Not implemented")
}
func (testcase *testcase) CreateDisk(volumeOptions *aws.VolumeOptions) (volumeName aws.KubernetesVolumeID, err error) {
return "", errors.New("Not implemented")
}
func (testcase *testcase) DeleteDisk(volumeName aws.KubernetesVolumeID) (bool, error) {
return false, errors.New("Not implemented")
}
func (testcase *testcase) GetVolumeLabels(volumeName aws.KubernetesVolumeID) (map[string]string, error) {
return map[string]string{}, errors.New("Not implemented")
}
func (testcase *testcase) GetDiskPath(volumeName aws.KubernetesVolumeID) (string, error) {
return "", errors.New("Not implemented")
}
func (testcase *testcase) ResizeDisk(
volumeName aws.KubernetesVolumeID,
oldSize resource.Quantity,
newSize resource.Quantity) (resource.Quantity, error) {
return oldSize, errors.New("Not implemented")
}
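
The testcase struct above doubles as the fake aws.Volumes implementation: each stub compares its arguments against a single pre-recorded call and returns canned results, failing the test on any mismatch. A distilled, self-contained sketch of that expectation pattern with invented names (fakeStore, getCall), not the vendored types:

package fakes

import (
	"errors"
	"testing"
)

// getCall records the one call the test expects, plus its canned results.
type getCall struct {
	key string
	ret string
	err error
}

type fakeStore struct {
	t   *testing.T
	get getCall
}

// Get fails the test if it is called unexpectedly or with the wrong argument,
// mirroring how AttachDisk/DetachDisk behave above.
func (f *fakeStore) Get(key string) (string, error) {
	expected := &f.get
	if expected.key == "" {
		f.t.Errorf("unexpected Get call")
		return "", errors.New("unexpected Get call")
	}
	if expected.key != key {
		f.t.Errorf("unexpected Get call: expected key %s, got %s", expected.key, key)
		return "", errors.New("unexpected Get call: wrong key")
	}
	return expected.ret, expected.err
}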

vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_ebs.go generated vendored Normal file

@@ -0,0 +1,513 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package aws_ebs
import (
"fmt"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
"k8s.io/kubernetes/pkg/util/mount"
kstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
// This is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&awsElasticBlockStorePlugin{nil}}
}
type awsElasticBlockStorePlugin struct {
host volume.VolumeHost
}
var _ volume.VolumePlugin = &awsElasticBlockStorePlugin{}
var _ volume.PersistentVolumePlugin = &awsElasticBlockStorePlugin{}
var _ volume.DeletableVolumePlugin = &awsElasticBlockStorePlugin{}
var _ volume.ProvisionableVolumePlugin = &awsElasticBlockStorePlugin{}
const (
awsElasticBlockStorePluginName = "kubernetes.io/aws-ebs"
awsURLNamePrefix = "aws://"
)
func getPath(uid types.UID, volName string, host volume.VolumeHost) string {
return host.GetPodVolumeDir(uid, kstrings.EscapeQualifiedNameForDisk(awsElasticBlockStorePluginName), volName)
}
func (plugin *awsElasticBlockStorePlugin) Init(host volume.VolumeHost) error {
plugin.host = host
return nil
}
func (plugin *awsElasticBlockStorePlugin) GetPluginName() string {
return awsElasticBlockStorePluginName
}
func (plugin *awsElasticBlockStorePlugin) GetVolumeName(spec *volume.Spec) (string, error) {
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return "", err
}
return volumeSource.VolumeID, nil
}
func (plugin *awsElasticBlockStorePlugin) CanSupport(spec *volume.Spec) bool {
return (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AWSElasticBlockStore != nil) ||
(spec.Volume != nil && spec.Volume.AWSElasticBlockStore != nil)
}
func (plugin *awsElasticBlockStorePlugin) RequiresRemount() bool {
return false
}
func (plugin *awsElasticBlockStorePlugin) SupportsMountOption() bool {
return true
}
func (plugin *awsElasticBlockStorePlugin) SupportsBulkVolumeVerification() bool {
return true
}
func (plugin *awsElasticBlockStorePlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
return []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
}
}
func (plugin *awsElasticBlockStorePlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
// Inject real implementations here, test through the internal function.
return plugin.newMounterInternal(spec, pod.UID, &AWSDiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *awsElasticBlockStorePlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager ebsManager, mounter mount.Interface) (volume.Mounter, error) {
// EBSs used directly in a pod have a ReadOnly flag set by the pod author.
// EBSs used as a PersistentVolume get the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
ebs, readOnly, err := getVolumeSource(spec)
if err != nil {
return nil, err
}
volumeID := aws.KubernetesVolumeID(ebs.VolumeID)
fsType := ebs.FSType
partition := ""
if ebs.Partition != 0 {
partition = strconv.Itoa(int(ebs.Partition))
}
return &awsElasticBlockStoreMounter{
awsElasticBlockStore: &awsElasticBlockStore{
podUID: podUID,
volName: spec.Name(),
volumeID: volumeID,
partition: partition,
manager: manager,
mounter: mounter,
plugin: plugin,
MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, spec.Name(), plugin.host)),
},
fsType: fsType,
readOnly: readOnly,
diskMounter: volumehelper.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil
}
func (plugin *awsElasticBlockStorePlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
// Inject real implementations here, test through the internal function.
return plugin.newUnmounterInternal(volName, podUID, &AWSDiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *awsElasticBlockStorePlugin) newUnmounterInternal(volName string, podUID types.UID, manager ebsManager, mounter mount.Interface) (volume.Unmounter, error) {
return &awsElasticBlockStoreUnmounter{&awsElasticBlockStore{
podUID: podUID,
volName: volName,
manager: manager,
mounter: mounter,
plugin: plugin,
MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, volName, plugin.host)),
}}, nil
}
func (plugin *awsElasticBlockStorePlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
return plugin.newDeleterInternal(spec, &AWSDiskUtil{})
}
func (plugin *awsElasticBlockStorePlugin) newDeleterInternal(spec *volume.Spec, manager ebsManager) (volume.Deleter, error) {
if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AWSElasticBlockStore == nil {
glog.Errorf("spec.PersistentVolumeSource.AWSElasticBlockStore is nil")
return nil, fmt.Errorf("spec.PersistentVolumeSource.AWSElasticBlockStore is nil")
}
return &awsElasticBlockStoreDeleter{
awsElasticBlockStore: &awsElasticBlockStore{
volName: spec.Name(),
volumeID: aws.KubernetesVolumeID(spec.PersistentVolume.Spec.AWSElasticBlockStore.VolumeID),
manager: manager,
plugin: plugin,
}}, nil
}
func (plugin *awsElasticBlockStorePlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
return plugin.newProvisionerInternal(options, &AWSDiskUtil{})
}
func (plugin *awsElasticBlockStorePlugin) newProvisionerInternal(options volume.VolumeOptions, manager ebsManager) (volume.Provisioner, error) {
return &awsElasticBlockStoreProvisioner{
awsElasticBlockStore: &awsElasticBlockStore{
manager: manager,
plugin: plugin,
},
options: options,
}, nil
}
func getVolumeSource(
spec *volume.Spec) (*v1.AWSElasticBlockStoreVolumeSource, bool, error) {
if spec.Volume != nil && spec.Volume.AWSElasticBlockStore != nil {
return spec.Volume.AWSElasticBlockStore, spec.Volume.AWSElasticBlockStore.ReadOnly, nil
} else if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.AWSElasticBlockStore != nil {
return spec.PersistentVolume.Spec.AWSElasticBlockStore, spec.ReadOnly, nil
}
return nil, false, fmt.Errorf("Spec does not reference an AWS EBS volume type")
}
func (plugin *awsElasticBlockStorePlugin) ConstructVolumeSpec(volName, mountPath string) (*volume.Spec, error) {
mounter := plugin.host.GetMounter(plugin.GetPluginName())
pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName())
volumeID, err := mounter.GetDeviceNameFromMount(mountPath, pluginDir)
if err != nil {
return nil, err
}
// This is a workaround for converting an AWS volume ID recovered from a globalPDPath.
// There are three AWS volume ID formats; GetDeviceNameFromMount() returns them as:
// aws:///vol-1234 (aws/vol-1234)
// aws://us-east-1/vol-1234 (aws/us-east-1/vol-1234)
// vol-1234 (vol-1234)
// This code converts the first two cases back to AWS-style volume IDs.
sourceName := volumeID
if strings.HasPrefix(volumeID, "aws/") {
names := strings.Split(volumeID, "/")
length := len(names)
if length < 2 || length > 3 {
return nil, fmt.Errorf("Failed to get AWS volume id from mount path %q: invalid volume name format %q", mountPath, volumeID)
}
volName := names[length-1]
if !strings.HasPrefix(volName, "vol-") {
return nil, fmt.Errorf("Invalid volume name format for AWS volume (%q) retrieved from mount path %q", volName, mountPath)
}
if length == 2 {
sourceName = awsURLNamePrefix + "" + "/" + volName // empty zone label
}
if length == 3 {
sourceName = awsURLNamePrefix + names[1] + "/" + volName // names[1] is the zone label
}
glog.V(4).Infof("Convert aws volume name from %q to %q ", volumeID, sourceName)
}
awsVolume := &v1.Volume{
Name: volName,
VolumeSource: v1.VolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
VolumeID: sourceName,
},
},
}
return volume.NewSpecFromVolume(awsVolume), nil
}
func (plugin *awsElasticBlockStorePlugin) RequiresFSResize() bool {
return true
}
func (plugin *awsElasticBlockStorePlugin) ExpandVolumeDevice(
spec *volume.Spec,
newSize resource.Quantity,
oldSize resource.Quantity) (resource.Quantity, error) {
var awsVolume aws.Volumes
awsVolume, err := getCloudProvider(plugin.host.GetCloudProvider())
if err != nil {
return oldSize, err
}
// we don't expect to receive this call for non PVs
rawVolumeName := spec.PersistentVolume.Spec.AWSElasticBlockStore.VolumeID
volumeID := aws.KubernetesVolumeID(rawVolumeName)
if volumeID == "" {
return oldSize, fmt.Errorf("EBS.ExpandVolumeDevice Invalid volume id for %s", spec.Name())
}
return awsVolume.ResizeDisk(volumeID, oldSize, newSize)
}
var _ volume.ExpandableVolumePlugin = &awsElasticBlockStorePlugin{}
// Abstract interface to PD operations.
type ebsManager interface {
CreateVolume(provisioner *awsElasticBlockStoreProvisioner) (volumeID aws.KubernetesVolumeID, volumeSizeGB int, labels map[string]string, fstype string, err error)
// Deletes a volume
DeleteVolume(deleter *awsElasticBlockStoreDeleter) error
}
// awsElasticBlockStore volumes are disk resources provided by Amazon Web Services
// that are attached to the kubelet's host machine and exposed to the pod.
type awsElasticBlockStore struct {
volName string
podUID types.UID
// Unique id of the PD, used to find the disk resource in the provider.
volumeID aws.KubernetesVolumeID
// Specifies the partition to mount
partition string
// Utility interface that provides API calls to the provider to attach/detach disks.
manager ebsManager
// Mounter interface that provides system calls to mount the global path to the pod local path.
mounter mount.Interface
plugin *awsElasticBlockStorePlugin
volume.MetricsProvider
}
type awsElasticBlockStoreMounter struct {
*awsElasticBlockStore
// Filesystem type, optional.
fsType string
// Specifies whether the disk will be attached as read-only.
readOnly bool
// diskMounter provides the interface that is used to mount the actual block device.
diskMounter *mount.SafeFormatAndMount
}
var _ volume.Mounter = &awsElasticBlockStoreMounter{}
func (b *awsElasticBlockStoreMounter) GetAttributes() volume.Attributes {
return volume.Attributes{
ReadOnly: b.readOnly,
Managed: !b.readOnly,
SupportsSELinux: true,
}
}
// Checks prior to mount operations to verify that the required components (binaries, etc.)
// to mount the volume are available on the underlying node.
// If not, it returns an error
func (b *awsElasticBlockStoreMounter) CanMount() error {
return nil
}
// SetUp attaches the disk and bind mounts to the volume path.
func (b *awsElasticBlockStoreMounter) SetUp(fsGroup *int64) error {
return b.SetUpAt(b.GetPath(), fsGroup)
}
// SetUpAt attaches the disk and bind mounts to the volume path.
func (b *awsElasticBlockStoreMounter) SetUpAt(dir string, fsGroup *int64) error {
// TODO: handle failed mounts here.
notMnt, err := b.mounter.IsLikelyNotMountPoint(dir)
glog.V(4).Infof("PersistentDisk set up: %s %v %v", dir, !notMnt, err)
if err != nil && !os.IsNotExist(err) {
glog.Errorf("cannot validate mount point: %s %v", dir, err)
return err
}
if !notMnt {
return nil
}
globalPDPath := makeGlobalPDPath(b.plugin.host, b.volumeID)
if err := os.MkdirAll(dir, 0750); err != nil {
return err
}
// Perform a bind mount to the full path to allow duplicate mounts of the same PD.
options := []string{"bind"}
if b.readOnly {
options = append(options, "ro")
}
err = b.mounter.Mount(globalPDPath, dir, "", options)
if err != nil {
notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed for %s: %v", dir, mntErr)
return err
}
if !notMnt {
if mntErr = b.mounter.Unmount(dir); mntErr != nil {
glog.Errorf("failed to unmount %s: %v", dir, mntErr)
return err
}
notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed for %s: %v", dir, mntErr)
return err
}
if !notMnt {
// This is very odd, we don't expect it. We'll try again next sync loop.
glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir)
return err
}
}
os.Remove(dir)
glog.Errorf("Mount of disk %s failed: %v", dir, err)
return err
}
if !b.readOnly {
volume.SetVolumeOwnership(b, fsGroup)
}
glog.V(4).Infof("Successfully mounted %s", dir)
return nil
}
func makeGlobalPDPath(host volume.VolumeHost, volumeID aws.KubernetesVolumeID) string {
// Clean up the URI to be more fs-friendly
name := string(volumeID)
name = strings.Replace(name, "://", "/", -1)
return path.Join(host.GetPluginDir(awsElasticBlockStorePluginName), mount.MountsInGlobalPDPath, name)
}
// Reverses the mapping done in makeGlobalPDPath
func getVolumeIDFromGlobalMount(host volume.VolumeHost, globalPath string) (string, error) {
basePath := path.Join(host.GetPluginDir(awsElasticBlockStorePluginName), mount.MountsInGlobalPDPath)
rel, err := filepath.Rel(basePath, globalPath)
if err != nil {
glog.Errorf("Failed to get volume id from global mount %s - %v", globalPath, err)
return "", err
}
if strings.Contains(rel, "../") {
glog.Errorf("Unexpected mount path: %s", globalPath)
return "", fmt.Errorf("unexpected mount path: " + globalPath)
}
// Reverse the :// replacement done in makeGlobalPDPath
volumeID := rel
if strings.HasPrefix(volumeID, "aws/") {
volumeID = strings.Replace(volumeID, "aws/", "aws://", 1)
}
glog.V(2).Info("Mapping mount dir ", globalPath, " to volumeID ", volumeID)
return volumeID, nil
}
func (ebs *awsElasticBlockStore) GetPath() string {
return getPath(ebs.podUID, ebs.volName, ebs.plugin.host)
}
type awsElasticBlockStoreUnmounter struct {
*awsElasticBlockStore
}
var _ volume.Unmounter = &awsElasticBlockStoreUnmounter{}
// Unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (c *awsElasticBlockStoreUnmounter) TearDown() error {
return c.TearDownAt(c.GetPath())
}
// Unmounts the bind mount
func (c *awsElasticBlockStoreUnmounter) TearDownAt(dir string) error {
return util.UnmountPath(dir, c.mounter)
}
type awsElasticBlockStoreDeleter struct {
*awsElasticBlockStore
}
var _ volume.Deleter = &awsElasticBlockStoreDeleter{}
func (d *awsElasticBlockStoreDeleter) GetPath() string {
return getPath(d.podUID, d.volName, d.plugin.host)
}
func (d *awsElasticBlockStoreDeleter) Delete() error {
return d.manager.DeleteVolume(d)
}
type awsElasticBlockStoreProvisioner struct {
*awsElasticBlockStore
options volume.VolumeOptions
namespace string
}
var _ volume.Provisioner = &awsElasticBlockStoreProvisioner{}
func (c *awsElasticBlockStoreProvisioner) Provision() (*v1.PersistentVolume, error) {
if !volume.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
}
volumeID, sizeGB, labels, fstype, err := c.manager.CreateVolume(c)
if err != nil {
glog.Errorf("Provision failed: %v", err)
return nil, err
}
if fstype == "" {
fstype = "ext4"
}
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: c.options.PVName,
Labels: map[string]string{},
Annotations: map[string]string{
volumehelper.VolumeDynamicallyCreatedByKey: "aws-ebs-dynamic-provisioner",
},
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy,
AccessModes: c.options.PVC.Spec.AccessModes,
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
},
PersistentVolumeSource: v1.PersistentVolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
VolumeID: string(volumeID),
FSType: fstype,
Partition: 0,
ReadOnly: false,
},
},
MountOptions: c.options.MountOptions,
},
}
if len(c.options.PVC.Spec.AccessModes) == 0 {
pv.Spec.AccessModes = c.plugin.GetAccessModes()
}
if len(labels) != 0 {
if pv.Labels == nil {
pv.Labels = make(map[string]string)
}
for k, v := range labels {
pv.Labels[k] = v
}
}
return pv, nil
}
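
ConstructVolumeSpec above has to undo the path flattening performed by makeGlobalPDPath, turning a mount-path-derived name back into an AWS-style volume ID. A self-contained sketch of just that conversion (normalizeAWSVolumeID is an invented name; the logic mirrors the vendored code):

package main

import (
	"fmt"
	"strings"
)

// normalizeAWSVolumeID converts "aws/vol-1234" to "aws:///vol-1234" and
// "aws/us-east-1/vol-1234" to "aws://us-east-1/vol-1234"; bare "vol-1234"
// passes through unchanged.
func normalizeAWSVolumeID(volumeID string) (string, error) {
	if !strings.HasPrefix(volumeID, "aws/") {
		return volumeID, nil
	}
	names := strings.Split(volumeID, "/")
	if len(names) < 2 || len(names) > 3 {
		return "", fmt.Errorf("invalid volume name format %q", volumeID)
	}
	volName := names[len(names)-1]
	if !strings.HasPrefix(volName, "vol-") {
		return "", fmt.Errorf("invalid volume name %q", volName)
	}
	if len(names) == 2 {
		return "aws:///" + volName, nil // empty zone label
	}
	return "aws://" + names[1] + "/" + volName, nil // names[1] is the zone
}

func main() {
	for _, id := range []string{"aws/vol-1234", "aws/us-east-1/vol-1234", "vol-1234"} {
		normalized, _ := normalizeAWSVolumeID(id)
		fmt.Printf("%s -> %s\n", id, normalized)
	}
}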

vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_ebs_test.go generated vendored Normal file

@@ -0,0 +1,302 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package aws_ebs
import (
"fmt"
"os"
"path"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/fake"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
func TestCanSupport(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("awsebsTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/aws-ebs")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if plug.GetPluginName() != "kubernetes.io/aws-ebs" {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{}}}}) {
t.Errorf("Expected true")
}
if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{}}}}}) {
t.Errorf("Expected true")
}
}
func TestGetAccessModes(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("awsebsTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/aws-ebs")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if !volumetest.ContainsAccessMode(plug.GetAccessModes(), v1.ReadWriteOnce) {
t.Errorf("Expected to support AccessModeTypes: %s", v1.ReadWriteOnce)
}
if volumetest.ContainsAccessMode(plug.GetAccessModes(), v1.ReadOnlyMany) {
t.Errorf("Expected not to support AccessModeTypes: %s", v1.ReadOnlyMany)
}
}
type fakePDManager struct {
}
// TODO(jonesdl) To fully test this, we could create a loopback device
// and mount that instead.
func (fake *fakePDManager) CreateVolume(c *awsElasticBlockStoreProvisioner) (volumeID aws.KubernetesVolumeID, volumeSizeGB int, labels map[string]string, fstype string, err error) {
labels = make(map[string]string)
labels["fakepdmanager"] = "yes"
return "test-aws-volume-name", 100, labels, "", nil
}
func (fake *fakePDManager) DeleteVolume(cd *awsElasticBlockStoreDeleter) error {
if cd.volumeID != "test-aws-volume-name" {
return fmt.Errorf("Deleter got unexpected volume name: %s", cd.volumeID)
}
return nil
}
func TestPlugin(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("awsebsTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/aws-ebs")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
spec := &v1.Volume{
Name: "vol1",
VolumeSource: v1.VolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
VolumeID: "pd",
FSType: "ext4",
},
},
}
fakeManager := &fakePDManager{}
fakeMounter := &mount.FakeMounter{}
mounter, err := plug.(*awsElasticBlockStorePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), fakeManager, fakeMounter)
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Errorf("Got a nil Mounter")
}
volPath := path.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~aws-ebs/vol1")
path := mounter.GetPath()
if path != volPath {
t.Errorf("Got unexpected path: %s", path)
}
if err := mounter.SetUp(nil); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", path)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
fakeManager = &fakePDManager{}
unmounter, err := plug.(*awsElasticBlockStorePlugin).newUnmounterInternal("vol1", types.UID("poduid"), fakeManager, fakeMounter)
if err != nil {
t.Errorf("Failed to make a new Unmounter: %v", err)
}
if unmounter == nil {
t.Errorf("Got a nil Unmounter")
}
if err := unmounter.TearDown(); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(path); err == nil {
t.Errorf("TearDown() failed, volume path still exists: %s", path)
} else if !os.IsNotExist(err) {
t.Errorf("TearDown() failed: %v", err)
}
// Test Provisioner
options := volume.VolumeOptions{
PVC: volumetest.CreateTestPVC("100Mi", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}),
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
}
provisioner, err := plug.(*awsElasticBlockStorePlugin).newProvisionerInternal(options, &fakePDManager{})
if err != nil {
t.Errorf("Error creating new provisioner:%v", err)
}
persistentSpec, err := provisioner.Provision()
if err != nil {
t.Errorf("Provision() failed: %v", err)
}
if persistentSpec.Spec.PersistentVolumeSource.AWSElasticBlockStore.VolumeID != "test-aws-volume-name" {
t.Errorf("Provision() returned unexpected volume ID: %s", persistentSpec.Spec.PersistentVolumeSource.AWSElasticBlockStore.VolumeID)
}
cap := persistentSpec.Spec.Capacity[v1.ResourceStorage]
size := cap.Value()
if size != 100*1024*1024*1024 {
t.Errorf("Provision() returned unexpected volume size: %v", size)
}
if persistentSpec.Labels["fakepdmanager"] != "yes" {
t.Errorf("Provision() returned unexpected labels: %v", persistentSpec.Labels)
}
// Test Deleter
volSpec := &volume.Spec{
PersistentVolume: persistentSpec,
}
deleter, err := plug.(*awsElasticBlockStorePlugin).newDeleterInternal(volSpec, &fakePDManager{})
if err != nil {
t.Errorf("Error creating new deleter:%v", err)
}
err = deleter.Delete()
if err != nil {
t.Errorf("Deleter() failed: %v", err)
}
}
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "pvA",
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{},
},
ClaimRef: &v1.ObjectReference{
Name: "claimA",
},
},
}
claim := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "claimA",
Namespace: "nsA",
},
Spec: v1.PersistentVolumeClaimSpec{
VolumeName: "pvA",
},
Status: v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimBound,
},
}
clientset := fake.NewSimpleClientset(pv, claim)
tmpDir, err := utiltesting.MkTmpdir("awsebsTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, clientset, nil))
plug, _ := plugMgr.FindPluginByName(awsElasticBlockStorePluginName)
// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
spec := volume.NewSpecFromPersistentVolume(pv, true)
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
if !mounter.GetAttributes().ReadOnly {
t.Errorf("Expected true for mounter.IsReadOnly")
}
}
func TestMounterAndUnmounterTypeAssert(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("awsebsTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/aws-ebs")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
spec := &v1.Volume{
Name: "vol1",
VolumeSource: v1.VolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
VolumeID: "pd",
FSType: "ext4",
},
},
}
mounter, err := plug.(*awsElasticBlockStorePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &fakePDManager{}, &mount.FakeMounter{})
if err != nil {
t.Errorf("Error creating new mounter:%v", err)
}
if _, ok := mounter.(volume.Unmounter); ok {
t.Errorf("Volume Mounter can be type-assert to Unmounter")
}
unmounter, err := plug.(*awsElasticBlockStorePlugin).newUnmounterInternal("vol1", types.UID("poduid"), &fakePDManager{}, &mount.FakeMounter{})
if err != nil {
t.Errorf("Error creating new unmounter:%v", err)
}
if _, ok := unmounter.(volume.Mounter); ok {
t.Errorf("Volume Unmounter can be type-assert to Mounter")
}
}
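
Every test above reaches the plugin through the unexported newMounterInternal/newUnmounterInternal/newProvisionerInternal/newDeleterInternal constructors, which accept an ebsManager and mount.Interface so that fakePDManager and mount.FakeMounter can stand in for AWS and the real mount syscalls; the exported NewMounter/NewUnmounter wrappers inject the real implementations. A generic, self-contained sketch of that constructor-seam pattern (widget/manager names are illustrative):

package example

// manager abstracts the cloud dependency, like ebsManager above.
type manager interface {
	CreateVolume() (string, error)
}

type realManager struct{}

func (realManager) CreateVolume() (string, error) { return "vol-real", nil }

type widget struct{ m manager }

// NewWidget is the public constructor: it wires in the real dependency.
func NewWidget() *widget { return newWidgetInternal(realManager{}) }

// newWidgetInternal is the seam: tests call it directly with a fake manager.
func newWidgetInternal(m manager) *widget { return &widget{m: m} }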

vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_util.go generated vendored Normal file

@@ -0,0 +1,255 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package aws_ebs
import (
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)
const (
diskPartitionSuffix = ""
diskXVDPath = "/dev/xvd"
diskXVDPattern = "/dev/xvd*"
maxChecks = 60
maxRetries = 10
checkSleepDuration = time.Second
errorSleepDuration = 5 * time.Second
)
type AWSDiskUtil struct{}
func (util *AWSDiskUtil) DeleteVolume(d *awsElasticBlockStoreDeleter) error {
cloud, err := getCloudProvider(d.awsElasticBlockStore.plugin.host.GetCloudProvider())
if err != nil {
return err
}
deleted, err := cloud.DeleteDisk(d.volumeID)
if err != nil {
// AWS cloud provider returns volume.deletedVolumeInUseError when
// necessary, no handling needed here.
glog.V(2).Infof("Error deleting EBS Disk volume %s: %v", d.volumeID, err)
return err
}
if deleted {
glog.V(2).Infof("Successfully deleted EBS Disk volume %s", d.volumeID)
} else {
glog.V(2).Infof("Successfully deleted EBS Disk volume %s (actually already deleted)", d.volumeID)
}
return nil
}
// CreateVolume creates an AWS EBS volume.
// Returns: volumeID, volumeSizeGB, labels, error
func (util *AWSDiskUtil) CreateVolume(c *awsElasticBlockStoreProvisioner) (aws.KubernetesVolumeID, int, map[string]string, string, error) {
cloud, err := getCloudProvider(c.awsElasticBlockStore.plugin.host.GetCloudProvider())
if err != nil {
return "", 0, nil, "", err
}
// AWS volumes don't have a Name field, so store the name in a Name tag
var tags map[string]string
if c.options.CloudTags == nil {
tags = make(map[string]string)
} else {
tags = *c.options.CloudTags
}
tags["Name"] = volume.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 255) // AWS tags can have 255 characters
capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
requestBytes := capacity.Value()
// AWS works with gigabytes, convert to GiB with rounding up
requestGB := int(volume.RoundUpSize(requestBytes, 1024*1024*1024))
volumeOptions := &aws.VolumeOptions{
CapacityGB: requestGB,
Tags: tags,
PVCName: c.options.PVC.Name,
}
fstype := ""
// Apply Parameters (case-insensitive). We leave validation of
// the values to the cloud provider.
volumeOptions.ZonePresent = false
volumeOptions.ZonesPresent = false
for k, v := range c.options.Parameters {
switch strings.ToLower(k) {
case "type":
volumeOptions.VolumeType = v
case "zone":
volumeOptions.ZonePresent = true
volumeOptions.AvailabilityZone = v
case "zones":
volumeOptions.ZonesPresent = true
volumeOptions.AvailabilityZones = v
case "iopspergb":
volumeOptions.IOPSPerGB, err = strconv.Atoi(v)
if err != nil {
return "", 0, nil, "", fmt.Errorf("invalid iopsPerGB value %q, must be integer between 1 and 30: %v", v, err)
}
case "encrypted":
volumeOptions.Encrypted, err = strconv.ParseBool(v)
if err != nil {
return "", 0, nil, "", fmt.Errorf("invalid encrypted boolean value %q, must be true or false: %v", v, err)
}
case "kmskeyid":
volumeOptions.KmsKeyId = v
case volume.VolumeParameterFSType:
fstype = v
default:
return "", 0, nil, "", fmt.Errorf("invalid option %q for volume plugin %s", k, c.plugin.GetPluginName())
}
}
if volumeOptions.ZonePresent && volumeOptions.ZonesPresent {
return "", 0, nil, "", fmt.Errorf("both zone and zones StorageClass parameters must not be used at the same time")
}
// TODO: implement PVC.Selector parsing
if c.options.PVC.Spec.Selector != nil {
return "", 0, nil, "", fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on AWS")
}
name, err := cloud.CreateDisk(volumeOptions)
if err != nil {
glog.V(2).Infof("Error creating EBS Disk volume: %v", err)
return "", 0, nil, "", err
}
glog.V(2).Infof("Successfully created EBS Disk volume %s", name)
labels, err := cloud.GetVolumeLabels(name)
if err != nil {
// We don't really want to leak the volume here...
glog.Errorf("error building labels for new EBS volume %q: %v", name, err)
}
return name, int(requestGB), labels, fstype, nil
}
// Returns the first path that exists, or empty string if none exist.
func verifyDevicePath(devicePaths []string) (string, error) {
for _, path := range devicePaths {
if pathExists, err := volumeutil.PathExists(path); err != nil {
return "", fmt.Errorf("Error checking if path exists: %v", err)
} else if pathExists {
return path, nil
}
}
return "", nil
}
// Returns true if all of the given paths have been removed, false otherwise.
func verifyAllPathsRemoved(devicePaths []string) (bool, error) {
allPathsRemoved := true
for _, path := range devicePaths {
if exists, err := volumeutil.PathExists(path); err != nil {
return false, fmt.Errorf("Error checking if path exists: %v", err)
} else {
allPathsRemoved = allPathsRemoved && !exists
}
}
return allPathsRemoved, nil
}
// Returns the list of candidate device paths for the given EBS mount.
// This is more interesting on GCE (where volumes can be identified under /dev/disk/by-id);
// here it is mostly about applying the partition suffix.
func getDiskByIdPaths(volumeID aws.KubernetesVolumeID, partition string, devicePath string) []string {
devicePaths := []string{}
if devicePath != "" {
devicePaths = append(devicePaths, devicePath)
}
if partition != "" {
for i, path := range devicePaths {
devicePaths[i] = path + diskPartitionSuffix + partition
}
}
// We need to find NVME volumes, which are mounted on a "random" nvme path ("/dev/nvme0n1"),
// and we have to get the volume id from the nvme interface
awsVolumeID, err := volumeID.MapToAWSVolumeID()
if err != nil {
glog.Warningf("error mapping volume %q to AWS volume: %v", volumeID, err)
} else {
// This is the magic name on which AWS presents NVME devices under /dev/disk/by-id/
// For example, vol-0fab1d5e3f72a5e23 creates a symlink at /dev/disk/by-id/nvme-Amazon_Elastic_Block_Store_vol0fab1d5e3f72a5e23
nvmeName := "nvme-Amazon_Elastic_Block_Store_" + strings.Replace(string(awsVolumeID), "-", "", -1)
nvmePath, err := findNvmeVolume(nvmeName)
if err != nil {
glog.Warningf("error looking for nvme volume %q: %v", volumeID, err)
} else if nvmePath != "" {
devicePaths = append(devicePaths, nvmePath)
}
}
return devicePaths
}
// Return cloud provider
func getCloudProvider(cloudProvider cloudprovider.Interface) (*aws.Cloud, error) {
awsCloudProvider, ok := cloudProvider.(*aws.Cloud)
if !ok || awsCloudProvider == nil {
return nil, fmt.Errorf("Failed to get AWS Cloud Provider. GetCloudProvider returned %v instead", cloudProvider)
}
return awsCloudProvider, nil
}
// findNvmeVolume looks for the nvme volume with the specified name
// It follows the symlink (if it exists) and returns the absolute path to the device
func findNvmeVolume(findName string) (device string, err error) {
p := filepath.Join("/dev/disk/by-id/", findName)
stat, err := os.Lstat(p)
if err != nil {
if os.IsNotExist(err) {
glog.V(6).Infof("nvme path not found %q", p)
return "", nil
}
return "", fmt.Errorf("error getting stat of %q: %v", p, err)
}
if stat.Mode()&os.ModeSymlink != os.ModeSymlink {
glog.Warningf("nvme file %q found, but was not a symlink", p)
return "", nil
}
// Find the target, resolving to an absolute path
// For example, /dev/disk/by-id/nvme-Amazon_Elastic_Block_Store_vol0fab1d5e3f72a5e23 -> ../../nvme2n1
resolved, err := filepath.EvalSymlinks(p)
if err != nil {
return "", fmt.Errorf("error reading target of symlink %q: %v", p, err)
}
if !strings.HasPrefix(resolved, "/dev") {
return "", fmt.Errorf("resolved symlink for %q was unexpected: %q", p, resolved)
}
return resolved, nil
}
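// Illustrative sketch (not part of the upstream plugin): a caller resolving
// a by-id symlink through findNvmeVolume. The resulting device name
// (e.g. "/dev/nvme2n1") depends on attach order and is hypothetical.
func exampleResolveNvme() (string, error) {
	return findNvmeVolume("nvme-Amazon_Elastic_Block_Store_vol0fab1d5e3f72a5e23")
}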

19
vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/doc.go generated vendored Normal file

@ -0,0 +1,19 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package aws_ebs contains the internal representation of AWS Elastic
// Block Store volumes.
package aws_ebs // import "k8s.io/kubernetes/pkg/volume/aws_ebs"

78
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/BUILD generated vendored Normal file

@ -0,0 +1,78 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"attacher.go",
"azure_common.go",
"azure_common_unsupported.go",
"azure_dd.go",
"azure_mounter.go",
"azure_provision.go",
] + select({
"@io_bazel_rules_go//go/platform:linux_amd64": [
"azure_common_linux.go",
],
"@io_bazel_rules_go//go/platform:windows_amd64": [
"azure_common_windows.go",
],
"//conditions:default": [],
}),
importpath = "k8s.io/kubernetes/pkg/volume/azure_dd",
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/azure:go_default_library",
"//pkg/util/keymutex:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/arm/compute:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/arm/storage:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)
go_test(
name = "go_default_test",
srcs = [
"azure_common_test.go",
"azure_dd_test.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/azure_dd",
library = ":go_default_library",
deps = [
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
],
)

11
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/OWNERS generated vendored Executable file

@ -0,0 +1,11 @@
approvers:
- brendandburns
- rootfs
reviewers:
- rootfs
- brendandburns
- saad-ali
- jsafrane
- jingxu97
- msau42
- andyzhangx

294
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/attacher.go generated vendored Normal file

@ -0,0 +1,294 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure_dd
import (
"fmt"
"os"
"path"
"path/filepath"
"runtime"
"strconv"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/arm/compute"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
"k8s.io/kubernetes/pkg/util/keymutex"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
type azureDiskDetacher struct {
plugin *azureDataDiskPlugin
cloud *azure.Cloud
}
type azureDiskAttacher struct {
plugin *azureDataDiskPlugin
cloud *azure.Cloud
}
var _ volume.Attacher = &azureDiskAttacher{}
var _ volume.Detacher = &azureDiskDetacher{}
// lock per instance while acquiring a LUN number
var getLunMutex = keymutex.NewKeyMutex()
// Attach attaches a volume.Spec to an Azure VM referenced by NodeName, returning the disk's LUN
func (a *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
volumeSource, err := getVolumeSource(spec)
if err != nil {
glog.Warningf("failed to get azure disk spec")
return "", err
}
instanceid, err := a.cloud.InstanceID(nodeName)
if err != nil {
glog.Warningf("failed to get azure instance id")
return "", fmt.Errorf("failed to get azure instance id for node %q", nodeName)
}
if ind := strings.LastIndex(instanceid, "/"); ind >= 0 {
instanceid = instanceid[(ind + 1):]
}
diskController, err := getDiskController(a.plugin.host)
if err != nil {
return "", err
}
lun, err := diskController.GetDiskLun(volumeSource.DiskName, volumeSource.DataDiskURI, nodeName)
if err == cloudprovider.InstanceNotFound {
// Log error and continue with attach
glog.Warningf(
"Error checking if volume is already attached to current node (%q). Will continue and try attach anyway. err=%v",
instanceid, err)
}
if err == nil {
// Volume is already attached to node.
glog.V(4).Infof("Attach operation is successful. volume %q is already attached to node %q at lun %d.", volumeSource.DiskName, instanceid, lun)
} else {
glog.V(4).Infof("GetDiskLun returned: %v. Initiating attaching volume %q to node %q.", err, volumeSource.DataDiskURI, nodeName)
getLunMutex.LockKey(instanceid)
defer getLunMutex.UnlockKey(instanceid)
lun, err = diskController.GetNextDiskLun(nodeName)
if err != nil {
glog.Warningf("no LUN available for instance %q", nodeName)
return "", fmt.Errorf("all LUNs are used, cannot attach volume %q to instance %q", volumeSource.DiskName, instanceid)
}
glog.V(4).Infof("Trying to attach volume %q lun %d to node %q.", volumeSource.DataDiskURI, lun, nodeName)
isManagedDisk := (*volumeSource.Kind == v1.AzureManagedDisk)
err = diskController.AttachDisk(isManagedDisk, volumeSource.DiskName, volumeSource.DataDiskURI, nodeName, lun, compute.CachingTypes(*volumeSource.CachingMode))
if err == nil {
glog.V(4).Infof("Attach operation successful: volume %q attached to node %q.", volumeSource.DataDiskURI, nodeName)
} else {
glog.V(2).Infof("Attach volume %q to instance %q failed with %v", volumeSource.DataDiskURI, instanceid, err)
return "", fmt.Errorf("Attach volume %q to instance %q failed with %v", volumeSource.DiskName, instanceid, err)
}
}
return strconv.Itoa(int(lun)), err
}
func (a *azureDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
volumesAttachedCheck := make(map[*volume.Spec]bool)
volumeSpecMap := make(map[string]*volume.Spec)
volumeIDList := []string{}
for _, spec := range specs {
volumeSource, err := getVolumeSource(spec)
if err != nil {
glog.Errorf("azureDisk - Error getting volume (%q) source : %v", spec.Name(), err)
continue
}
volumeIDList = append(volumeIDList, volumeSource.DiskName)
volumesAttachedCheck[spec] = true
volumeSpecMap[volumeSource.DiskName] = spec
}
diskController, err := getDiskController(a.plugin.host)
if err != nil {
return nil, err
}
attachedResult, err := diskController.DisksAreAttached(volumeIDList, nodeName)
if err != nil {
// Log error and continue with attach
glog.Errorf(
"azureDisk - Error checking if volumes (%v) are attached to current node (%q). err=%v",
volumeIDList, nodeName, err)
return volumesAttachedCheck, err
}
for volumeID, attached := range attachedResult {
if !attached {
spec := volumeSpecMap[volumeID]
volumesAttachedCheck[spec] = false
glog.V(2).Infof("azureDisk - VolumesAreAttached: check volume %q (specName: %q) is no longer attached", volumeID, spec.Name())
}
}
return volumesAttachedCheck, nil
}
func (a *azureDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) {
var err error
lun, err := strconv.Atoi(devicePath)
if err != nil {
return "", fmt.Errorf("azureDisk - Wait for attach expect device path as a lun number, instead got: %s", devicePath)
}
volumeSource, err := getVolumeSource(spec)
if err != nil {
return "", err
}
exec := a.plugin.host.GetExec(a.plugin.GetPluginName())
io := &osIOHandler{}
scsiHostRescan(io, exec)
diskName := volumeSource.DiskName
nodeName := a.plugin.host.GetHostName()
newDevicePath := ""
err = wait.Poll(1*time.Second, timeout, func() (bool, error) {
if newDevicePath, err = findDiskByLun(lun, io, exec); err != nil {
return false, fmt.Errorf("azureDisk - WaitForAttach ticker failed node (%s) disk (%s) lun(%v) err(%s)", nodeName, diskName, lun, err)
}
// did we find it?
if newDevicePath != "" {
// the current sequence k8s uses for an unformatted disk (check-disk, mount, fail, mkfs.extX) hangs on
// the Azure Managed disk scsi interface. this is a hack and will be replaced once we identify and solve
// the root cause on Azure.
formatIfNotFormatted(newDevicePath, *volumeSource.FSType, exec)
return true, nil
}
return false, fmt.Errorf("azureDisk - WaitForAttach failed within timeout node (%s) diskId:(%s) lun:(%v)", nodeName, diskName, lun)
})
return newDevicePath, err
}
// to avoid name conflicts (similar *.vhd names)
// we hash the diskUri and use the hash as the device mount target.
// this is generalized for both managed and blob disks
// we also prefix the hash with m/b based on disk kind
func (a *azureDiskAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) {
volumeSource, err := getVolumeSource(spec)
if err != nil {
return "", err
}
if volumeSource.Kind == nil { // this spec was constructed from info on the node
pdPath := path.Join(a.plugin.host.GetPluginDir(azureDataDiskPluginName), mount.MountsInGlobalPDPath, volumeSource.DataDiskURI)
return pdPath, nil
}
isManagedDisk := (*volumeSource.Kind == v1.AzureManagedDisk)
return makeGlobalPDPath(a.plugin.host, volumeSource.DataDiskURI, isManagedDisk)
}
func (attacher *azureDiskAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
mounter := attacher.plugin.host.GetMounter(azureDataDiskPluginName)
notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
if err != nil {
if os.IsNotExist(err) {
dir := deviceMountPath
if runtime.GOOS == "windows" {
// on Windows, since we use mklink, we only need to MkdirAll the parent directory
dir = filepath.Dir(deviceMountPath)
}
if err := os.MkdirAll(dir, 0750); err != nil {
return fmt.Errorf("azureDisk - mountDevice:CreateDirectory failed with %s", err)
}
notMnt = true
} else {
return fmt.Errorf("azureDisk - mountDevice:IsLikelyNotMountPoint failed with %s", err)
}
}
volumeSource, err := getVolumeSource(spec)
if err != nil {
return err
}
options := []string{}
if notMnt {
diskMounter := volumehelper.NewSafeFormatAndMountFromHost(azureDataDiskPluginName, attacher.plugin.host)
mountOptions := volume.MountOptionFromSpec(spec, options...)
err = diskMounter.FormatAndMount(devicePath, deviceMountPath, *volumeSource.FSType, mountOptions)
if err != nil {
if cleanErr := os.Remove(deviceMountPath); cleanErr != nil {
return fmt.Errorf("azureDisk - mountDevice:FormatAndMount failed with %s and clean up failed with :%v", err, cleanErr)
}
return fmt.Errorf("azureDisk - mountDevice:FormatAndMount failed with %s", err)
}
}
return nil
}
// Detach detaches disk from Azure VM.
func (d *azureDiskDetacher) Detach(diskURI string, nodeName types.NodeName) error {
if diskURI == "" {
return fmt.Errorf("invalid disk to detach: %q", diskURI)
}
instanceid, err := d.cloud.InstanceID(nodeName)
if err != nil {
glog.Warningf("no instance id for node %q, skip detaching", nodeName)
return nil
}
if ind := strings.LastIndex(instanceid, "/"); ind >= 0 {
instanceid = instanceid[(ind + 1):]
}
glog.V(4).Infof("detach %v from node %q", diskURI, nodeName)
diskController, err := getDiskController(d.plugin.host)
if err != nil {
return err
}
err = diskController.DetachDiskByName("", diskURI, nodeName)
if err != nil {
glog.Errorf("failed to detach azure disk %q, err %v", diskURI, err)
}
glog.V(2).Infof("azureDisk - disk:%s was detached from node:%v", diskURI, nodeName)
return err
}
// UnmountDevice unmounts the volume on the node
func (detacher *azureDiskDetacher) UnmountDevice(deviceMountPath string) error {
err := volumeutil.UnmountPath(deviceMountPath, detacher.plugin.host.GetMounter(detacher.plugin.GetPluginName()))
if err == nil {
glog.V(4).Infof("azureDisk - Device %s was unmounted", deviceMountPath)
} else {
glog.Infof("azureDisk - Device %s failed to unmount with error: %s", deviceMountPath, err.Error())
}
return err
}

206
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common.go generated vendored Normal file

@ -0,0 +1,206 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure_dd
import (
"fmt"
"io/ioutil"
"os"
"path"
libstrings "strings"
storage "github.com/Azure/azure-sdk-for-go/arm/storage"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
)
const (
defaultFSType = "ext4"
defaultStorageAccountType = storage.StandardLRS
defaultAzureDiskKind = v1.AzureSharedBlobDisk
)
type dataDisk struct {
volume.MetricsProvider
volumeName string
diskName string
podUID types.UID
}
var (
supportedCachingModes = sets.NewString(
string(api.AzureDataDiskCachingNone),
string(api.AzureDataDiskCachingReadOnly),
string(api.AzureDataDiskCachingReadWrite))
supportedDiskKinds = sets.NewString(
string(api.AzureSharedBlobDisk),
string(api.AzureDedicatedBlobDisk),
string(api.AzureManagedDisk))
supportedStorageAccountTypes = sets.NewString("Premium_LRS", "Standard_LRS", "Standard_GRS", "Standard_RAGRS")
)
func getPath(uid types.UID, volName string, host volume.VolumeHost) string {
return host.GetPodVolumeDir(uid, strings.EscapeQualifiedNameForDisk(azureDataDiskPluginName), volName)
}
// creates a unique path for disks (even if they share the same *.vhd name)
func makeGlobalPDPath(host volume.VolumeHost, diskUri string, isManaged bool) (string, error) {
diskUri = libstrings.ToLower(diskUri) // always lower-case the URI because users may enter it in caps.
uniqueDiskNameTemplate := "%s%s"
hashedDiskUri := azure.MakeCRC32(diskUri)
prefix := "b"
if isManaged {
prefix = "m"
}
// "{m for managed b for blob}{hashed diskUri or DiskId depending on disk kind }"
diskName := fmt.Sprintf(uniqueDiskNameTemplate, prefix, hashedDiskUri)
pdPath := path.Join(host.GetPluginDir(azureDataDiskPluginName), mount.MountsInGlobalPDPath, diskName)
return pdPath, nil
}
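// Illustrative sketch (not part of the upstream plugin): the mount target
// produced above is "<pluginDir>/mounts/<m|b><crc32-of-lowercased-uri>",
// so a managed disk and a blob disk never collide even for the same URI.
// The URI below is hypothetical.
func exampleGlobalPDPath(host volume.VolumeHost) (string, error) {
	uri := "https://example.blob.core.windows.net/vhds/disk1.vhd"
	return makeGlobalPDPath(host, uri, true /* isManaged -> "m" prefix */)
}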
func makeDataDisk(volumeName string, podUID types.UID, diskName string, host volume.VolumeHost) *dataDisk {
var metricProvider volume.MetricsProvider
if podUID != "" {
metricProvider = volume.NewMetricsStatFS(getPath(podUID, volumeName, host))
}
return &dataDisk{
MetricsProvider: metricProvider,
volumeName: volumeName,
diskName: diskName,
podUID: podUID,
}
}
func getVolumeSource(spec *volume.Spec) (*v1.AzureDiskVolumeSource, error) {
if spec.Volume != nil && spec.Volume.AzureDisk != nil {
return spec.Volume.AzureDisk, nil
}
if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AzureDisk != nil {
return spec.PersistentVolume.Spec.AzureDisk, nil
}
return nil, fmt.Errorf("azureDisk - Spec does not reference an Azure disk volume type")
}
func normalizeFsType(fsType string) string {
if fsType == "" {
return defaultFSType
}
return fsType
}
func normalizeKind(kind string) (v1.AzureDataDiskKind, error) {
if kind == "" {
return defaultAzureDiskKind, nil
}
if !supportedDiskKinds.Has(kind) {
return "", fmt.Errorf("azureDisk - %s is not supported disk kind. Supported values are %s", kind, supportedDiskKinds.List())
}
return v1.AzureDataDiskKind(kind), nil
}
func normalizeStorageAccountType(storageAccountType string) (storage.SkuName, error) {
if storageAccountType == "" {
return defaultStorageAccountType, nil
}
if !supportedStorageAccountTypes.Has(storageAccountType) {
return "", fmt.Errorf("azureDisk - %s is not supported sku/storageaccounttype. Supported values are %s", storageAccountType, supportedStorageAccountTypes.List())
}
return storage.SkuName(storageAccountType), nil
}
func normalizeCachingMode(cachingMode v1.AzureDataDiskCachingMode) (v1.AzureDataDiskCachingMode, error) {
if cachingMode == "" {
return v1.AzureDataDiskCachingReadWrite, nil
}
if !supportedCachingModes.Has(string(cachingMode)) {
return "", fmt.Errorf("azureDisk - %s is not supported cachingmode. Supported values are %s", cachingMode, supportedCachingModes.List())
}
return cachingMode, nil
}
type ioHandler interface {
ReadDir(dirname string) ([]os.FileInfo, error)
WriteFile(filename string, data []byte, perm os.FileMode) error
Readlink(name string) (string, error)
ReadFile(filename string) ([]byte, error)
}
//TODO: check if priming the iscsi interface is actually needed
type osIOHandler struct{}
func (handler *osIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) {
return ioutil.ReadDir(dirname)
}
func (handler *osIOHandler) WriteFile(filename string, data []byte, perm os.FileMode) error {
return ioutil.WriteFile(filename, data, perm)
}
func (handler *osIOHandler) Readlink(name string) (string, error) {
return os.Readlink(name)
}
func (handler *osIOHandler) ReadFile(filename string) ([]byte, error) {
return ioutil.ReadFile(filename)
}
func getDiskController(host volume.VolumeHost) (DiskController, error) {
cloudProvider := host.GetCloudProvider()
az, ok := cloudProvider.(*azure.Cloud)
if !ok || az == nil {
return nil, fmt.Errorf("AzureDisk - failed to get Azure Cloud Provider. GetCloudProvider returned %v instead", cloudProvider)
}
return az, nil
}
func getCloud(host volume.VolumeHost) (*azure.Cloud, error) {
cloudProvider := host.GetCloudProvider()
az, ok := cloudProvider.(*azure.Cloud)
if !ok || az == nil {
return nil, fmt.Errorf("AzureDisk - failed to get Azure Cloud Provider. GetCloudProvider returned %v instead", cloudProvider)
}
return az, nil
}
func strFirstLetterToUpper(str string) string {
if len(str) < 2 {
return str
}
return libstrings.ToUpper(string(str[0])) + str[1:]
}
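// Illustrative sketch (not part of the upstream plugin): StorageClass
// "kind" values may arrive lower-cased ("managed"); strFirstLetterToUpper
// restores the API casing ("Managed") that normalizeKind validates.
func exampleNormalizeKind() (v1.AzureDataDiskKind, error) {
	return normalizeKind(strFirstLetterToUpper("managed")) // -> v1.AzureManagedDisk, nil
}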

189
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common_linux.go generated vendored Normal file

@ -0,0 +1,189 @@
// +build linux
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure_dd
import (
"path"
"strconv"
libstrings "strings"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/util/mount"
)
// returns the disk names under /dev/disk/azure (used by Azure as resource and OS root disks) so callers can exclude them
func listAzureDiskPath(io ioHandler) []string {
azureDiskPath := "/dev/disk/azure/"
var azureDiskList []string
if dirs, err := io.ReadDir(azureDiskPath); err == nil {
for _, f := range dirs {
name := f.Name()
diskPath := azureDiskPath + name
if link, linkErr := io.Readlink(diskPath); linkErr == nil {
sd := link[(libstrings.LastIndex(link, "/") + 1):]
azureDiskList = append(azureDiskList, sd)
}
}
}
glog.V(12).Infof("Azure sys disks paths: %v", azureDiskList)
return azureDiskList
}
func scsiHostRescan(io ioHandler, exec mount.Exec) {
scsi_path := "/sys/class/scsi_host/"
if dirs, err := io.ReadDir(scsi_path); err == nil {
for _, f := range dirs {
name := scsi_path + f.Name() + "/scan"
data := []byte("- - -")
if err = io.WriteFile(name, data, 0666); err != nil {
glog.Warningf("failed to rescan scsi host %s", name)
}
}
} else {
glog.Warningf("failed to read %s, err %v", scsi_path, err)
}
}
func findDiskByLun(lun int, io ioHandler, exec mount.Exec) (string, error) {
azureDisks := listAzureDiskPath(io)
return findDiskByLunWithConstraint(lun, io, azureDisks)
}
// finds a device mounted to "current" node
func findDiskByLunWithConstraint(lun int, io ioHandler, azureDisks []string) (string, error) {
var err error
sys_path := "/sys/bus/scsi/devices"
if dirs, err := io.ReadDir(sys_path); err == nil {
for _, f := range dirs {
name := f.Name()
// look for path like /sys/bus/scsi/devices/3:0:0:1
arr := libstrings.Split(name, ":")
if len(arr) < 4 {
continue
}
if len(azureDisks) == 0 {
glog.V(4).Infof("/dev/disk/azure is not populated, now try to parse %v directly", name)
target, err := strconv.Atoi(arr[0])
if err != nil {
glog.Errorf("failed to parse target from %v (%v), err %v", arr[0], name, err)
continue
}
// as observed, targets 0-3 are used by OS disks. Skip them
if target <= 3 {
continue
}
}
// extract LUN from the path.
// LUN is the last index of the array, i.e. 1 in /sys/bus/scsi/devices/3:0:0:1
l, err := strconv.Atoi(arr[3])
if err != nil {
// unknown path format, continue to read the next one
glog.V(4).Infof("azure disk - failed to parse lun from %v (%v), err %v", arr[3], name, err)
continue
}
if lun == l {
// find the matching LUN
// read vendor and model to ensure it is a VHD disk
vendorPath := path.Join(sys_path, name, "vendor")
vendorBytes, err := io.ReadFile(vendorPath)
if err != nil {
glog.Errorf("failed to read device vendor, err: %v", err)
continue
}
vendor := libstrings.TrimSpace(string(vendorBytes))
if libstrings.ToUpper(vendor) != "MSFT" {
glog.V(4).Infof("vendor doesn't match VHD, got %s", vendor)
continue
}
modelPath := path.Join(sys_path, name, "model")
modelBytes, err := io.ReadFile(modelPath)
if err != nil {
glog.Errorf("failed to read device model, err: %v", err)
continue
}
model := libstrings.TrimSpace(string(modelBytes))
if libstrings.ToUpper(model) != "VIRTUAL DISK" {
glog.V(4).Infof("model doesn't match VHD, got %s", model)
continue
}
// find the block device and validate that its name is not one of the Azure system disks
dir := path.Join(sys_path, name, "block")
if dev, err := io.ReadDir(dir); err == nil {
found := false
for _, diskName := range azureDisks {
glog.V(12).Infof("azure disk - validating disk %q with sys disk %q", dev[0].Name(), diskName)
if string(dev[0].Name()) == diskName {
found = true
break
}
}
if !found {
return "/dev/" + dev[0].Name(), nil
}
}
}
}
}
return "", err
}
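// Illustrative sketch (not part of the upstream plugin): extracting the LUN
// from a SCSI device directory name of the form host:channel:target:lun,
// exactly as the loop above does. Returns -1 for names that do not match.
func exampleParseLun(name string) int {
	arr := libstrings.Split(name, ":") // e.g. "3:0:0:1" -> ["3", "0", "0", "1"]
	if len(arr) < 4 {
		return -1
	}
	if lun, err := strconv.Atoi(arr[3]); err == nil {
		return lun
	}
	return -1
}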
func formatIfNotFormatted(disk string, fstype string, exec mount.Exec) {
notFormatted, err := diskLooksUnformatted(disk, exec)
if err == nil && notFormatted {
args := []string{disk}
// Disk is unformatted so format it.
// Use 'ext4' as the default
if len(fstype) == 0 {
fstype = "ext4"
}
if fstype == "ext4" || fstype == "ext3" {
args = []string{"-E", "lazy_itable_init=0,lazy_journal_init=0", "-F", disk}
}
glog.Infof("azureDisk - Disk %q appears to be unformatted, attempting to format as type: %q with options: %v", disk, fstype, args)
_, err := exec.Run("mkfs."+fstype, args...)
if err == nil {
// the disk was formatted successfully; try to mount it again.
glog.Infof("azureDisk - Disk successfully formatted with 'mkfs.%s %v'", fstype, args)
} else {
glog.Warningf("azureDisk - Error formatting volume with 'mkfs.%s %v': %v", fstype, args, err)
}
} else {
if err != nil {
glog.Warningf("azureDisk - Failed to check if the disk %s formatted with error %s, will attach anyway", disk, err)
} else {
glog.Infof("azureDisk - Disk %s already formatted, will not format", disk)
}
}
}
func diskLooksUnformatted(disk string, exec mount.Exec) (bool, error) {
args := []string{"-nd", "-o", "FSTYPE", disk}
glog.V(4).Infof("Attempting to determine if disk %q is formatted using lsblk with args: (%v)", disk, args)
dataOut, err := exec.Run("lsblk", args...)
if err != nil {
glog.Errorf("Could not determine if disk %q is formatted (%v)", disk, err)
return false, err
}
output := libstrings.TrimSpace(string(dataOut))
return output == "", nil
}
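// Illustrative sketch (not part of the upstream plugin): the inverse of the
// check above. lsblk prints an empty FSTYPE column for a raw disk and a
// value such as "ext4" for a formatted one. The device path is hypothetical.
func exampleIsFormatted(exec mount.Exec) bool {
	out, err := exec.Run("lsblk", "-nd", "-o", "FSTYPE", "/dev/sdc")
	return err == nil && libstrings.TrimSpace(string(out)) != ""
}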

136
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common_test.go generated vendored Normal file

@ -0,0 +1,136 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure_dd
import (
"fmt"
"os"
"runtime"
"strings"
"testing"
"time"
"k8s.io/kubernetes/pkg/util/mount"
)
type fakeFileInfo struct {
name string
}
func (fi *fakeFileInfo) Name() string {
return fi.name
}
func (fi *fakeFileInfo) Size() int64 {
return 0
}
func (fi *fakeFileInfo) Mode() os.FileMode {
return 777
}
func (fi *fakeFileInfo) ModTime() time.Time {
return time.Now()
}
func (fi *fakeFileInfo) IsDir() bool {
return false
}
func (fi *fakeFileInfo) Sys() interface{} {
return nil
}
var (
lun = 1
lunStr = "1"
diskPath = "4:0:0:" + lunStr
devName = "sdd"
lun1 = 2
lunStr1 = "2"
diskPath1 = "3:0:0:" + lunStr1
devName1 = "sde"
)
type fakeIOHandler struct{}
func (handler *fakeIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) {
switch dirname {
case "/sys/bus/scsi/devices":
f1 := &fakeFileInfo{
name: "3:0:0:1",
}
f2 := &fakeFileInfo{
name: "4:0:0:0",
}
f3 := &fakeFileInfo{
name: diskPath,
}
f4 := &fakeFileInfo{
name: "host1",
}
f5 := &fakeFileInfo{
name: "target2:0:0",
}
return []os.FileInfo{f1, f2, f3, f4, f5}, nil
case "/sys/bus/scsi/devices/" + diskPath + "/block":
n := &fakeFileInfo{
name: devName,
}
return []os.FileInfo{n}, nil
case "/sys/bus/scsi/devices/" + diskPath1 + "/block":
n := &fakeFileInfo{
name: devName1,
}
return []os.FileInfo{n}, nil
}
return nil, fmt.Errorf("bad dir")
}
func (handler *fakeIOHandler) WriteFile(filename string, data []byte, perm os.FileMode) error {
return nil
}
func (handler *fakeIOHandler) Readlink(name string) (string, error) {
return "/dev/azure/disk/sda", nil
}
func (handler *fakeIOHandler) ReadFile(filename string) ([]byte, error) {
if strings.HasSuffix(filename, "vendor") {
return []byte("Msft \n"), nil
}
if strings.HasSuffix(filename, "model") {
return []byte("Virtual Disk \n"), nil
}
return nil, fmt.Errorf("unknown file")
}
func TestIoHandler(t *testing.T) {
if runtime.GOOS != "windows" && runtime.GOOS != "linux" {
t.Skipf("TestIoHandler not supported on GOOS=%s", runtime.GOOS)
}
disk, err := findDiskByLun(lun, &fakeIOHandler{}, mount.NewOsExec())
if runtime.GOOS == "windows" {
if err != nil {
t.Errorf("no data disk found: disk %v err %v", disk, err)
}
} else {
// if no disk matches lun, exit
if disk != "/dev/"+devName || err != nil {
t.Errorf("no data disk found: disk %v err %v", disk, err)
}
}
}

31
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common_unsupported.go generated vendored Normal file

@ -0,0 +1,31 @@
// +build !linux,!windows
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure_dd
import "k8s.io/kubernetes/pkg/util/mount"
func scsiHostRescan(io ioHandler, exec mount.Exec) {
}
func findDiskByLun(lun int, io ioHandler, exec mount.Exec) (string, error) {
return "", nil
}
func formatIfNotFormatted(disk string, fstype string, exec mount.Exec) {
}

114
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common_windows.go generated vendored Normal file

@ -0,0 +1,114 @@
// +build windows
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure_dd
import (
"encoding/json"
"fmt"
"strconv"
"strings"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/util/mount"
)
func scsiHostRescan(io ioHandler, exec mount.Exec) {
cmd := "Update-HostStorageCache"
output, err := exec.Run("powershell", "/c", cmd)
if err != nil {
glog.Errorf("Update-HostStorageCache failed in scsiHostRescan, error: %v, output: %q", err, string(output))
}
}
// search Windows disk number by LUN
func findDiskByLun(lun int, iohandler ioHandler, exec mount.Exec) (string, error) {
cmd := `Get-Disk | select number, location | ConvertTo-Json`
output, err := exec.Run("powershell", "/c", cmd)
if err != nil {
glog.Errorf("Get-Disk failed in findDiskByLun, error: %v, output: %q", err, string(output))
return "", err
}
if len(output) < 10 {
return "", fmt.Errorf("Get-Disk output is too short, output: %q", string(output))
}
var data []map[string]interface{}
if err = json.Unmarshal(output, &data); err != nil {
glog.Errorf("Get-Disk output is not a json array, output: %q", string(output))
return "", err
}
for _, v := range data {
if jsonLocation, ok := v["location"]; ok {
if location, ok := jsonLocation.(string); ok {
if !strings.Contains(location, " LUN ") {
continue
}
arr := strings.Split(location, " ")
arrLen := len(arr)
if arrLen < 3 {
glog.Warningf("unexpected json structure from Get-Disk, location: %q", jsonLocation)
continue
}
glog.V(4).Infof("found a disk, locatin: %q, lun: %q", location, arr[arrLen-1])
//last element of location field is LUN number, e.g.
// "location": "Integrated : Adapter 3 : Port 0 : Target 0 : LUN 1"
l, err := strconv.Atoi(arr[arrLen-1])
if err != nil {
glog.Warningf("cannot parse element from data structure, location: %q, element: %q", location, arr[arrLen-1])
continue
}
if l == lun {
glog.V(4).Infof("found a disk and lun, locatin: %q, lun: %d", location, lun)
if d, ok := v["number"]; ok {
if diskNum, ok := d.(float64); ok {
glog.V(2).Infof("azureDisk Mount: got disk number(%d) by LUN(%d)", int(diskNum), lun)
return strconv.Itoa(int(diskNum)), nil
}
glog.Warningf("LUN(%d) found, but could not get disk number(%q), location: %q", lun, d, location)
}
return "", fmt.Errorf("LUN(%d) found, but could not get disk number, location: %q", lun, location)
}
}
}
}
return "", nil
}
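// Illustrative sketch (not part of the upstream plugin): the LUN is the
// last space-separated token of the Get-Disk "location" field parsed above,
// e.g. "Integrated : Adapter 3 : Port 0 : Target 0 : LUN 1" -> 1.
// The location string is hypothetical.
func exampleLunFromLocation(location string) (int, error) {
	arr := strings.Split(location, " ")
	return strconv.Atoi(arr[len(arr)-1])
}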
func formatIfNotFormatted(disk string, fstype string, exec mount.Exec) {
if err := mount.ValidateDiskNumber(disk); err != nil {
glog.Errorf("azureDisk Mount: formatIfNotFormatted failed, err: %v\n", err)
return
}
cmd := fmt.Sprintf("Get-Disk -Number %s | Where partitionstyle -eq 'raw' | Initialize-Disk -PartitionStyle MBR -PassThru", disk)
cmd += " | New-Partition -AssignDriveLetter -UseMaximumSize | Format-Volume -FileSystem NTFS -Confirm:$false"
output, err := exec.Run("powershell", "/c", cmd)
if err != nil {
glog.Errorf("azureDisk Mount: Get-Disk failed, error: %v, output: %q", err, string(output))
} else {
glog.Infof("azureDisk Mount: Disk successfully formatted, disk: %q, fstype: %q\n", disk, fstype)
}
}

215
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd.go generated vendored Normal file

@ -0,0 +1,215 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure_dd
import (
"github.com/Azure/azure-sdk-for-go/arm/compute"
storage "github.com/Azure/azure-sdk-for-go/arm/storage"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
)
// interface exposed by the cloud provider implementing Disk functionality
type DiskController interface {
CreateBlobDisk(dataDiskName string, storageAccountType storage.SkuName, sizeGB int) (string, error)
DeleteBlobDisk(diskUri string) error
CreateManagedDisk(diskName string, storageAccountType storage.SkuName, sizeGB int, tags map[string]string) (string, error)
DeleteManagedDisk(diskURI string) error
// Attaches the disk to the host machine.
AttachDisk(isManagedDisk bool, diskName, diskUri string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error
// Detaches the disk, identified by disk name or uri, from the host machine.
DetachDiskByName(diskName, diskUri string, nodeName types.NodeName) error
// Check if a list of volumes are attached to the node with the specified NodeName
DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error)
// Get the LUN number of the disk that is attached to the host
GetDiskLun(diskName, diskUri string, nodeName types.NodeName) (int32, error)
// Get the next available LUN number to attach a new VHD
GetNextDiskLun(nodeName types.NodeName) (int32, error)
// Create a VHD blob
CreateVolume(name, storageAccount, storageAccountType, location string, requestGB int) (string, string, int, error)
// Delete a VHD blob
DeleteVolume(diskURI string) error
}
type azureDataDiskPlugin struct {
host volume.VolumeHost
}
var _ volume.VolumePlugin = &azureDataDiskPlugin{}
var _ volume.PersistentVolumePlugin = &azureDataDiskPlugin{}
var _ volume.DeletableVolumePlugin = &azureDataDiskPlugin{}
var _ volume.ProvisionableVolumePlugin = &azureDataDiskPlugin{}
var _ volume.AttachableVolumePlugin = &azureDataDiskPlugin{}
const (
azureDataDiskPluginName = "kubernetes.io/azure-disk"
)
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&azureDataDiskPlugin{}}
}
func (plugin *azureDataDiskPlugin) Init(host volume.VolumeHost) error {
plugin.host = host
return nil
}
func (plugin *azureDataDiskPlugin) GetPluginName() string {
return azureDataDiskPluginName
}
func (plugin *azureDataDiskPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
volumeSource, err := getVolumeSource(spec)
if err != nil {
return "", err
}
return volumeSource.DataDiskURI, nil
}
func (plugin *azureDataDiskPlugin) CanSupport(spec *volume.Spec) bool {
return (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AzureDisk != nil) ||
(spec.Volume != nil && spec.Volume.AzureDisk != nil)
}
func (plugin *azureDataDiskPlugin) RequiresRemount() bool {
return false
}
func (plugin *azureDataDiskPlugin) SupportsMountOption() bool {
return true
}
func (plugin *azureDataDiskPlugin) SupportsBulkVolumeVerification() bool {
return false
}
func (plugin *azureDataDiskPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
return []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
}
}
// NewAttacher initializes an Attacher
func (plugin *azureDataDiskPlugin) NewAttacher() (volume.Attacher, error) {
azure, err := getCloud(plugin.host)
if err != nil {
glog.Errorf("failed to get azure cloud in NewAttacher, plugin.host : %s, err:%v", plugin.host.GetHostName(), err)
return nil, err
}
return &azureDiskAttacher{
plugin: plugin,
cloud: azure,
}, nil
}
func (plugin *azureDataDiskPlugin) NewDetacher() (volume.Detacher, error) {
azure, err := getCloud(plugin.host)
if err != nil {
glog.V(4).Infof("failed to get azure cloud in NewDetacher, plugin.host : %s", plugin.host.GetHostName())
return nil, err
}
return &azureDiskDetacher{
plugin: plugin,
cloud: azure,
}, nil
}
func (plugin *azureDataDiskPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
volumeSource, err := getVolumeSource(spec)
if err != nil {
return nil, err
}
disk := makeDataDisk(spec.Name(), "", volumeSource.DiskName, plugin.host)
return &azureDiskDeleter{
spec: spec,
plugin: plugin,
dataDisk: disk,
}, nil
}
func (plugin *azureDataDiskPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
if len(options.PVC.Spec.AccessModes) == 0 {
options.PVC.Spec.AccessModes = plugin.GetAccessModes()
}
return &azureDiskProvisioner{
plugin: plugin,
options: options,
}, nil
}
func (plugin *azureDataDiskPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, options volume.VolumeOptions) (volume.Mounter, error) {
volumeSource, err := getVolumeSource(spec)
if err != nil {
return nil, err
}
disk := makeDataDisk(spec.Name(), pod.UID, volumeSource.DiskName, plugin.host)
return &azureDiskMounter{
plugin: plugin,
spec: spec,
options: options,
dataDisk: disk,
}, nil
}
func (plugin *azureDataDiskPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
disk := makeDataDisk(volName, podUID, "", plugin.host)
return &azureDiskUnmounter{
plugin: plugin,
dataDisk: disk,
}, nil
}
func (plugin *azureDataDiskPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
mounter := plugin.host.GetMounter(plugin.GetPluginName())
pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName())
sourceName, err := mounter.GetDeviceNameFromMount(mountPath, pluginDir)
if err != nil {
return nil, err
}
azureVolume := &v1.Volume{
Name: volumeName,
VolumeSource: v1.VolumeSource{
AzureDisk: &v1.AzureDiskVolumeSource{
DataDiskURI: sourceName,
},
},
}
return volume.NewSpecFromVolume(azureVolume), nil
}
func (plugin *azureDataDiskPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
m := plugin.host.GetMounter(plugin.GetPluginName())
return mount.GetMountRefs(m, deviceMountPath)
}

55
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd_test.go generated vendored Normal file

@ -0,0 +1,55 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure_dd
import (
"os"
"testing"
"k8s.io/api/core/v1"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
func TestCanSupport(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("azure_dd")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName(azureDataDiskPluginName)
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if plug.GetPluginName() != azureDataDiskPluginName {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{AzureDisk: &v1.AzureDiskVolumeSource{}}}}) {
t.Errorf("Expected true")
}
if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{AzureDisk: &v1.AzureDiskVolumeSource{}}}}}) {
t.Errorf("Expected true")
}
}
// fakeAzureProvider type was removed because none of its functions were used
// Testing mounting would require path calculation, which depends on the cloud provider; the provider is faked in the test above.

195
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_mounter.go generated vendored Normal file

@ -0,0 +1,195 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure_dd
import (
"fmt"
"os"
"runtime"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
)
type azureDiskMounter struct {
*dataDisk
spec *volume.Spec
plugin *azureDataDiskPlugin
options volume.VolumeOptions
}
type azureDiskUnmounter struct {
*dataDisk
plugin *azureDataDiskPlugin
}
var _ volume.Unmounter = &azureDiskUnmounter{}
var _ volume.Mounter = &azureDiskMounter{}
func (m *azureDiskMounter) GetAttributes() volume.Attributes {
readOnly := false
volumeSource, err := getVolumeSource(m.spec)
if err != nil {
glog.Infof("azureDisk - mounter failed to get volume source for spec %s %v", m.spec.Name(), err)
} else if volumeSource.ReadOnly != nil {
readOnly = *volumeSource.ReadOnly
}
return volume.Attributes{
ReadOnly: readOnly,
Managed: !readOnly,
SupportsSELinux: true,
}
}
func (m *azureDiskMounter) CanMount() error {
return nil
}
func (m *azureDiskMounter) SetUp(fsGroup *int64) error {
return m.SetUpAt(m.GetPath(), fsGroup)
}
func (m *azureDiskMounter) GetPath() string {
return getPath(m.dataDisk.podUID, m.dataDisk.volumeName, m.plugin.host)
}
func (m *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
mounter := m.plugin.host.GetMounter(m.plugin.GetPluginName())
volumeSource, err := getVolumeSource(m.spec)
if err != nil {
glog.Infof("azureDisk - mounter failed to get volume source for spec %s", m.spec.Name())
return err
}
diskName := volumeSource.DiskName
mountPoint, err := mounter.IsLikelyNotMountPoint(dir)
if err != nil && !os.IsNotExist(err) {
glog.Infof("azureDisk - cannot validate mount point for disk %s on %s %v", diskName, dir, err)
return err
}
if !mountPoint {
glog.V(4).Infof("azureDisk - already mounted to target %s", dir)
return nil
}
if runtime.GOOS != "windows" {
// on Windows we use mklink to mount, so MkdirAll happens in the Mount func
if err := os.MkdirAll(dir, 0750); err != nil {
glog.Errorf("azureDisk - mkdir failed on disk %s on dir: %s (%v)", diskName, dir, err)
return err
}
}
options := []string{"bind"}
if volumeSource.ReadOnly != nil && *volumeSource.ReadOnly {
options = append(options, "ro")
}
glog.V(4).Infof("azureDisk - Attempting to mount %s on %s", diskName, dir)
isManagedDisk := (*volumeSource.Kind == v1.AzureManagedDisk)
globalPDPath, err := makeGlobalPDPath(m.plugin.host, volumeSource.DataDiskURI, isManagedDisk)
if err != nil {
return err
}
mountErr := mounter.Mount(globalPDPath, dir, *volumeSource.FSType, options)
// Everything in the following control flow is meant as an
// attempt to clean up a failed SetUpAt (bind mount)
if mountErr != nil {
glog.Infof("azureDisk - SetupAt:Mount disk:%s at dir:%s failed during mounting with error:%v, will attempt to clean up", diskName, dir, mountErr)
mountPoint, err := mounter.IsLikelyNotMountPoint(dir)
if err != nil {
return fmt.Errorf("azureDisk - SetupAt:Mount:Failure:cleanup IsLikelyNotMountPoint check failed for disk:%s on dir:%s with error %v original-mountErr:%v", diskName, dir, err, mountErr)
}
if !mountPoint {
if err = mounter.Unmount(dir); err != nil {
return fmt.Errorf("azureDisk - SetupAt:Mount:Failure:cleanup failed to unmount disk:%s on dir:%s with error:%v original-mountErr:%v", diskName, dir, err, mountErr)
}
mountPoint, err := mounter.IsLikelyNotMountPoint(dir)
if err != nil {
return fmt.Errorf("azureDisk - SetupAt:Mount:Failure:cleanup IsLikelyNotMountPoint for disk:%s on dir:%s check failed with error:%v original-mountErr:%v", diskName, dir, err, mountErr)
}
if !mountPoint {
// not cool. leave for next sync loop.
return fmt.Errorf("azureDisk - SetupAt:Mount:Failure:cleanup disk %s is still mounted on %s during cleanup original-mountErr:%v, despite call to unmount(). Will try again next sync loop.", diskName, dir, mountErr)
}
}
if err = os.Remove(dir); err != nil {
return fmt.Errorf("azureDisk - SetupAt:Mount:Failure error cleaning up (removing dir:%s) with error:%v original-mountErr:%v", dir, err, mountErr)
}
glog.V(2).Infof("azureDisk - Mount of disk:%s on dir:%s failed with mount error:%v post failure clean up was completed", diskName, dir, err, mountErr)
return mountErr
}
if volumeSource.ReadOnly == nil || !*volumeSource.ReadOnly {
volume.SetVolumeOwnership(m, fsGroup)
}
glog.V(2).Infof("azureDisk - successfully mounted disk %s on %s", diskName, dir)
return nil
}
func (u *azureDiskUnmounter) TearDown() error {
return u.TearDownAt(u.GetPath())
}
func (u *azureDiskUnmounter) TearDownAt(dir string) error {
if pathExists, pathErr := util.PathExists(dir); pathErr != nil {
return fmt.Errorf("Error checking if path exists: %v", pathErr)
} else if !pathExists {
glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir)
return nil
}
glog.V(4).Infof("azureDisk - TearDownAt: %s", dir)
mounter := u.plugin.host.GetMounter(u.plugin.GetPluginName())
mountPoint, err := mounter.IsLikelyNotMountPoint(dir)
if err != nil {
return fmt.Errorf("azureDisk - TearDownAt: %s failed to do IsLikelyNotMountPoint %s", dir, err)
}
if mountPoint {
if err := os.Remove(dir); err != nil {
return fmt.Errorf("azureDisk - TearDownAt: %s failed to do os.Remove %s", dir, err)
}
}
if err := mounter.Unmount(dir); err != nil {
return fmt.Errorf("azureDisk - TearDownAt: %s failed to do mounter.Unmount %s", dir, err)
}
mountPoint, err = mounter.IsLikelyNotMountPoint(dir)
if err != nil {
return fmt.Errorf("azureDisk - TearTownAt:IsLikelyNotMountPoint check failed: %v", err)
}
if mountPoint {
return os.Remove(dir)
}
return fmt.Errorf("azureDisk - failed to un-bind-mount volume dir")
}
func (u *azureDiskUnmounter) GetPath() string {
return getPath(u.dataDisk.podUID, u.dataDisk.volumeName, u.plugin.host)
}

191
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_provision.go generated vendored Normal file

@ -0,0 +1,191 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure_dd
import (
"fmt"
"strings"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/volume"
)
type azureDiskProvisioner struct {
plugin *azureDataDiskPlugin
options volume.VolumeOptions
}
type azureDiskDeleter struct {
*dataDisk
spec *volume.Spec
plugin *azureDataDiskPlugin
}
var _ volume.Provisioner = &azureDiskProvisioner{}
var _ volume.Deleter = &azureDiskDeleter{}
func (d *azureDiskDeleter) GetPath() string {
return getPath(d.podUID, d.dataDisk.diskName, d.plugin.host)
}
func (d *azureDiskDeleter) Delete() error {
volumeSource, err := getVolumeSource(d.spec)
if err != nil {
return err
}
diskController, err := getDiskController(d.plugin.host)
if err != nil {
return err
}
managed := (*volumeSource.Kind == v1.AzureManagedDisk)
if managed {
return diskController.DeleteManagedDisk(volumeSource.DataDiskURI)
}
return diskController.DeleteBlobDisk(volumeSource.DataDiskURI)
}
func (p *azureDiskProvisioner) Provision() (*v1.PersistentVolume, error) {
if !volume.AccessModesContainedInAll(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) {
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", p.options.PVC.Spec.AccessModes, p.plugin.GetAccessModes())
}
supportedModes := p.plugin.GetAccessModes()
// perform static validation first
if p.options.PVC.Spec.Selector != nil {
return nil, fmt.Errorf("azureDisk - claim.Spec.Selector is not supported for dynamic provisioning on Azure disk")
}
if len(p.options.PVC.Spec.AccessModes) > 1 {
return nil, fmt.Errorf("AzureDisk - multiple access modes are not supported on AzureDisk plugin")
}
if len(p.options.PVC.Spec.AccessModes) == 1 {
if p.options.PVC.Spec.AccessModes[0] != supportedModes[0] {
return nil, fmt.Errorf("AzureDisk - mode %s is not supporetd by AzureDisk plugin supported mode is %s", p.options.PVC.Spec.AccessModes[0], supportedModes)
}
}
var (
location, account string
storageAccountType, fsType string
cachingMode v1.AzureDataDiskCachingMode
strKind string
err error
)
// maxLength = 79 - (4 for ".vhd") = 75
name := volume.GenerateVolumeName(p.options.ClusterName, p.options.PVName, 75)
capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
requestBytes := capacity.Value()
requestGB := int(volume.RoundUpSize(requestBytes, 1024*1024*1024))
for k, v := range p.options.Parameters {
switch strings.ToLower(k) {
case "skuname":
storageAccountType = v
case "location":
location = v
case "storageaccount":
account = v
case "storageaccounttype":
storageAccountType = v
case "kind":
strKind = v
case "cachingmode":
cachingMode = v1.AzureDataDiskCachingMode(v)
case volume.VolumeParameterFSType:
fsType = strings.ToLower(v)
default:
return nil, fmt.Errorf("AzureDisk - invalid option %s in storage class", k)
}
}
// normalize values
fsType = normalizeFsType(fsType)
skuName, err := normalizeStorageAccountType(storageAccountType)
if err != nil {
return nil, err
}
kind, err := normalizeKind(strFirstLetterToUpper(strKind))
if err != nil {
return nil, err
}
if cachingMode, err = normalizeCachingMode(cachingMode); err != nil {
return nil, err
}
diskController, err := getDiskController(p.plugin.host)
if err != nil {
return nil, err
}
// create disk
diskURI := ""
if kind == v1.AzureManagedDisk {
diskURI, err = diskController.CreateManagedDisk(name, skuName, requestGB, *(p.options.CloudTags))
if err != nil {
return nil, err
}
} else {
if kind == v1.AzureDedicatedBlobDisk {
_, diskURI, _, err = diskController.CreateVolume(name, account, storageAccountType, location, requestGB)
if err != nil {
return nil, err
}
} else {
diskURI, err = diskController.CreateBlobDisk(name, skuName, requestGB)
if err != nil {
return nil, err
}
}
}
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: p.options.PVName,
Labels: map[string]string{},
Annotations: map[string]string{
"volumehelper.VolumeDynamicallyCreatedByKey": "azure-disk-dynamic-provisioner",
},
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: p.options.PersistentVolumeReclaimPolicy,
AccessModes: supportedModes,
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", requestGB)),
},
PersistentVolumeSource: v1.PersistentVolumeSource{
AzureDisk: &v1.AzureDiskVolumeSource{
CachingMode: &cachingMode,
DiskName: name,
DataDiskURI: diskURI,
Kind: &kind,
FSType: &fsType,
},
},
MountOptions: p.options.MountOptions,
},
}
return pv, nil
}
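// Illustrative sketch (not part of the upstream plugin): how the requested
// capacity is rounded up to whole GiB above. A hypothetical PVC request of
// "5500Mi" (5767168000 bytes) provisions a 6 GiB disk.
func exampleRoundUpGiB() int {
	capacity := resource.MustParse("5500Mi")
	return int(volume.RoundUpSize(capacity.Value(), 1024*1024*1024)) // -> 6
}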

64
vendor/k8s.io/kubernetes/pkg/volume/azure_file/BUILD generated vendored Normal file

@ -0,0 +1,64 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"azure_file.go",
"azure_provision.go",
"azure_util.go",
"doc.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/azure_file",
deps = [
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/azure:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["azure_file_test.go"],
importpath = "k8s.io/kubernetes/pkg/volume/azure_file",
library = ":go_default_library",
deps = [
"//pkg/cloudprovider/providers/azure:go_default_library",
"//pkg/cloudprovider/providers/fake:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

11
vendor/k8s.io/kubernetes/pkg/volume/azure_file/OWNERS generated vendored Normal file

@ -0,0 +1,11 @@
approvers:
- brendandburns
- rootfs
reviewers:
- rootfs
- brendandburns
- saad-ali
- jsafrane
- jingxu97
- msau42
- andyzhangx

332
vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_file.go generated vendored Normal file

@ -0,0 +1,332 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure_file
import (
"fmt"
"os"
"runtime"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
kstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
"k8s.io/kubernetes/pkg/volume/util"
)
// ProbeVolumePlugins is the primary endpoint for volume plugins
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&azureFilePlugin{nil}}
}
type azureFilePlugin struct {
host volume.VolumeHost
}
var _ volume.VolumePlugin = &azureFilePlugin{}
var _ volume.PersistentVolumePlugin = &azureFilePlugin{}
const (
azureFilePluginName = "kubernetes.io/azure-file"
)
func getPath(uid types.UID, volName string, host volume.VolumeHost) string {
return host.GetPodVolumeDir(uid, kstrings.EscapeQualifiedNameForDisk(azureFilePluginName), volName)
}
func (plugin *azureFilePlugin) Init(host volume.VolumeHost) error {
plugin.host = host
return nil
}
func (plugin *azureFilePlugin) GetPluginName() string {
return azureFilePluginName
}
func (plugin *azureFilePlugin) GetVolumeName(spec *volume.Spec) (string, error) {
share, _, err := getVolumeSource(spec)
if err != nil {
return "", err
}
return share, nil
}
func (plugin *azureFilePlugin) CanSupport(spec *volume.Spec) bool {
//TODO: check if mount.cifs is there
return (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AzureFile != nil) ||
(spec.Volume != nil && spec.Volume.AzureFile != nil)
}
func (plugin *azureFilePlugin) RequiresRemount() bool {
return false
}
func (plugin *azureFilePlugin) SupportsMountOption() bool {
return true
}
func (plugin *azureFilePlugin) SupportsBulkVolumeVerification() bool {
return false
}
func (plugin *azureFilePlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
return []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
v1.ReadOnlyMany,
v1.ReadWriteMany,
}
}
func (plugin *azureFilePlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
return plugin.newMounterInternal(spec, pod, &azureSvc{}, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *azureFilePlugin) newMounterInternal(spec *volume.Spec, pod *v1.Pod, util azureUtil, mounter mount.Interface) (volume.Mounter, error) {
share, readOnly, err := getVolumeSource(spec)
if err != nil {
return nil, err
}
secretName, secretNamespace, err := getSecretNameAndNamespace(spec, pod.Namespace)
if err != nil {
return nil, err
}
return &azureFileMounter{
azureFile: &azureFile{
volName: spec.Name(),
mounter: mounter,
pod: pod,
plugin: plugin,
MetricsProvider: volume.NewMetricsStatFS(getPath(pod.UID, spec.Name(), plugin.host)),
},
util: util,
secretNamespace: secretNamespace,
secretName: secretName,
shareName: share,
readOnly: readOnly,
mountOptions: volume.MountOptionFromSpec(spec),
}, nil
}
func (plugin *azureFilePlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
return plugin.newUnmounterInternal(volName, podUID, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *azureFilePlugin) newUnmounterInternal(volName string, podUID types.UID, mounter mount.Interface) (volume.Unmounter, error) {
return &azureFileUnmounter{&azureFile{
volName: volName,
mounter: mounter,
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: podUID}},
plugin: plugin,
MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, volName, plugin.host)),
}}, nil
}
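// Note: reconstruction below is best-effort; the original secret and share
// names cannot be recovered from the mount path, so volName stands in for both.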
func (plugin *azureFilePlugin) ConstructVolumeSpec(volName, mountPath string) (*volume.Spec, error) {
azureVolume := &v1.Volume{
Name: volName,
VolumeSource: v1.VolumeSource{
AzureFile: &v1.AzureFileVolumeSource{
SecretName: volName,
ShareName: volName,
},
},
}
return volume.NewSpecFromVolume(azureVolume), nil
}
// azureFile volumes represent a mount of an Azure File share.
type azureFile struct {
volName string
podUID types.UID
pod *v1.Pod
mounter mount.Interface
plugin *azureFilePlugin
volume.MetricsProvider
}
func (azureFileVolume *azureFile) GetPath() string {
return getPath(azureFileVolume.pod.UID, azureFileVolume.volName, azureFileVolume.plugin.host)
}
type azureFileMounter struct {
*azureFile
util azureUtil
secretName string
secretNamespace string
shareName string
readOnly bool
mountOptions []string
}
var _ volume.Mounter = &azureFileMounter{}
func (b *azureFileMounter) GetAttributes() volume.Attributes {
return volume.Attributes{
ReadOnly: b.readOnly,
Managed: !b.readOnly,
SupportsSELinux: false,
}
}
// CanMount checks, prior to mount operations, that the required components
// (binaries, etc.) to mount the volume are available on the underlying node.
// If not, it returns an error.
func (b *azureFileMounter) CanMount() error {
return nil
}
// SetUp attaches the disk and bind mounts to the volume path.
func (b *azureFileMounter) SetUp(fsGroup *int64) error {
return b.SetUpAt(b.GetPath(), fsGroup)
}
func (b *azureFileMounter) SetUpAt(dir string, fsGroup *int64) error {
notMnt, err := b.mounter.IsLikelyNotMountPoint(dir)
glog.V(4).Infof("AzureFile mount set up: %s %v %v", dir, !notMnt, err)
if err != nil && !os.IsNotExist(err) {
return err
}
if !notMnt {
return nil
}
var accountKey, accountName string
if accountName, accountKey, err = b.util.GetAzureCredentials(b.plugin.host, b.secretNamespace, b.secretName); err != nil {
return err
}
mountOptions := []string{}
source := ""
osSeparator := string(os.PathSeparator)
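// The resulting source is a UNC-style path, e.g. //<account>.file.core.windows.net/<share>,
// built with os.PathSeparator so the same format string works on Windows.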
source = fmt.Sprintf("%s%s%s.file.%s%s%s", osSeparator, osSeparator, accountName, getStorageEndpointSuffix(b.plugin.host.GetCloudProvider()), osSeparator, b.shareName)
if runtime.GOOS == "windows" {
mountOptions = []string{fmt.Sprintf("AZURE\\%s", accountName), accountKey}
} else {
if err := os.MkdirAll(dir, 0700); err != nil {
return err
}
// parameters suggested by https://azure.microsoft.com/en-us/documentation/articles/storage-how-to-use-files-linux/
options := []string{fmt.Sprintf("username=%s,password=%s", accountName, accountKey)}
if b.readOnly {
options = append(options, "ro")
}
mountOptions = volume.JoinMountOptions(b.mountOptions, options)
mountOptions = appendDefaultMountOptions(mountOptions)
}
err = b.mounter.Mount(source, dir, "cifs", mountOptions)
if err != nil {
notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return err
}
if !notMnt {
if mntErr = b.mounter.Unmount(dir); mntErr != nil {
glog.Errorf("Failed to unmount: %v", mntErr)
return err
}
notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return err
}
if !notMnt {
// This is very odd, we don't expect it. We'll try again next sync loop.
glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir)
return err
}
}
os.Remove(dir)
return err
}
return nil
}
var _ volume.Unmounter = &azureFileUnmounter{}
type azureFileUnmounter struct {
*azureFile
}
func (c *azureFileUnmounter) TearDown() error {
return c.TearDownAt(c.GetPath())
}
func (c *azureFileUnmounter) TearDownAt(dir string) error {
return util.UnmountPath(dir, c.mounter)
}
func getVolumeSource(spec *volume.Spec) (string, bool, error) {
if spec.Volume != nil && spec.Volume.AzureFile != nil {
share := spec.Volume.AzureFile.ShareName
readOnly := spec.Volume.AzureFile.ReadOnly
return share, readOnly, nil
} else if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.AzureFile != nil {
share := spec.PersistentVolume.Spec.AzureFile.ShareName
readOnly := spec.ReadOnly
return share, readOnly, nil
}
return "", false, fmt.Errorf("Spec does not reference an AzureFile volume type")
}
func getSecretNameAndNamespace(spec *volume.Spec, defaultNamespace string) (string, string, error) {
secretName := ""
secretNamespace := ""
if spec.Volume != nil && spec.Volume.AzureFile != nil {
secretName = spec.Volume.AzureFile.SecretName
secretNamespace = defaultNamespace
} else if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.AzureFile != nil {
secretNamespace = defaultNamespace
if spec.PersistentVolume.Spec.AzureFile.SecretNamespace != nil {
secretNamespace = *spec.PersistentVolume.Spec.AzureFile.SecretNamespace
}
secretName = spec.PersistentVolume.Spec.AzureFile.SecretName
} else {
return "", "", fmt.Errorf("Spec does not reference an AzureFile volume type")
}
if len(secretNamespace) == 0 {
return "", "", fmt.Errorf("invalid Azure volume: nil namespace")
}
return secretName, secretNamespace, nil
}
func getAzureCloud(cloudProvider cloudprovider.Interface) (*azure.Cloud, error) {
azure, ok := cloudProvider.(*azure.Cloud)
if !ok || azure == nil {
return nil, fmt.Errorf("Failed to get Azure Cloud Provider. GetCloudProvider returned %v instead", cloudProvider)
}
return azure, nil
}
func getStorageEndpointSuffix(cloudprovider cloudprovider.Interface) string {
const publicCloudStorageEndpointSuffix = "core.windows.net"
azure, err := getAzureCloud(cloudprovider)
if err != nil {
glog.Warningf("No Azure cloud provider found. Using the Azure public cloud endpoint: %s", publicCloudStorageEndpointSuffix)
return publicCloudStorageEndpointSuffix
}
return azure.Environment.StorageEndpointSuffix
}
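
For orientation, a minimal sketch (not part of the vendored tree) of how a caller registers and looks up this plugin; it mirrors the test setup in azure_file_test.go below and assumes only that host is some volume.VolumeHost implementation:
// exampleProbeAndFind registers the Azure File plugin with a fresh plugin
// manager and looks it up by its canonical name, "kubernetes.io/azure-file".
func exampleProbeAndFind(host volume.VolumeHost) (volume.VolumePlugin, error) {
plugMgr := volume.VolumePluginMgr{}
if err := plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host); err != nil {
return nil, err
}
return plugMgr.FindPluginByName(azureFilePluginName)
}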

397
vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_file_test.go generated vendored Normal file
View File

@ -0,0 +1,397 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure_file
import (
"fmt"
"io/ioutil"
"os"
"path"
"reflect"
"strings"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
func TestCanSupport(t *testing.T) {
tmpDir, err := ioutil.TempDir(os.TempDir(), "azureFileTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/azure-file")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if plug.GetPluginName() != "kubernetes.io/azure-file" {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{AzureFile: &v1.AzureFileVolumeSource{}}}}) {
t.Errorf("Expected true")
}
if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{AzureFile: &v1.AzureFilePersistentVolumeSource{}}}}}) {
t.Errorf("Expected true")
}
}
func TestGetAccessModes(t *testing.T) {
tmpDir, err := ioutil.TempDir(os.TempDir(), "azureFileTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/azure-file")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if !volumetest.ContainsAccessMode(plug.GetAccessModes(), v1.ReadWriteOnce) || !volumetest.ContainsAccessMode(plug.GetAccessModes(), v1.ReadOnlyMany) || !volumetest.ContainsAccessMode(plug.GetAccessModes(), v1.ReadWriteMany) {
t.Errorf("Expected three AccessModeTypes: %s, %s, and %s", v1.ReadWriteOnce, v1.ReadOnlyMany, v1.ReadWriteMany)
}
}
func getAzureTestCloud(t *testing.T) *azure.Cloud {
config := `{
"aadClientId": "--aad-client-id--",
"aadClientSecret": "--aad-client-secret--"
}`
configReader := strings.NewReader(config)
cloud, err := azure.NewCloud(configReader)
if err != nil {
t.Error(err)
}
azureCloud, ok := cloud.(*azure.Cloud)
if !ok {
t.Error("NewCloud returned incorrect type")
}
return azureCloud
}
func getTestTempDir(t *testing.T) string {
tmpDir, err := ioutil.TempDir(os.TempDir(), "azurefileTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
return tmpDir
}
func TestPluginAzureCloudProvider(t *testing.T) {
tmpDir := getTestTempDir(t)
defer os.RemoveAll(tmpDir)
testPlugin(t, tmpDir, volumetest.NewFakeVolumeHostWithCloudProvider(tmpDir, nil, nil, getAzureTestCloud(t)))
}
func TestPluginWithoutCloudProvider(t *testing.T) {
tmpDir := getTestTempDir(t)
defer os.RemoveAll(tmpDir)
testPlugin(t, tmpDir, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
}
func TestPluginWithOtherCloudProvider(t *testing.T) {
tmpDir := getTestTempDir(t)
defer os.RemoveAll(tmpDir)
cloud := &fakecloud.FakeCloud{}
testPlugin(t, tmpDir, volumetest.NewFakeVolumeHostWithCloudProvider(tmpDir, nil, nil, cloud))
}
func testPlugin(t *testing.T, tmpDir string, volumeHost volume.VolumeHost) {
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumeHost)
plug, err := plugMgr.FindPluginByName("kubernetes.io/azure-file")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
spec := &v1.Volume{
Name: "vol1",
VolumeSource: v1.VolumeSource{
AzureFile: &v1.AzureFileVolumeSource{
SecretName: "secret",
ShareName: "share",
},
},
}
fake := &mount.FakeMounter{}
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
mounter, err := plug.(*azureFilePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), pod, &fakeAzureSvc{}, fake)
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Fatalf("Got a nil Mounter")
}
volPath := path.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~azure-file/vol1")
path := mounter.GetPath()
if path != volPath {
t.Errorf("Got unexpected path: %s", path)
}
if err := mounter.SetUp(nil); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", path)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
unmounter, err := plug.(*azureFilePlugin).newUnmounterInternal("vol1", types.UID("poduid"), &mount.FakeMounter{})
if err != nil {
t.Errorf("Failed to make a new Unmounter: %v", err)
}
if unmounter == nil {
t.Fatalf("Got a nil Unmounter")
}
if err := unmounter.TearDown(); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(path); err == nil {
t.Errorf("TearDown() failed, volume path still exists: %s", path)
} else if !os.IsNotExist(err) {
t.Errorf("TearDown() failed: %v", err)
}
}
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "pvA",
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
AzureFile: &v1.AzureFilePersistentVolumeSource{},
},
ClaimRef: &v1.ObjectReference{
Name: "claimA",
},
},
}
claim := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "claimA",
Namespace: "nsA",
},
Spec: v1.PersistentVolumeClaimSpec{
VolumeName: "pvA",
},
Status: v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimBound,
},
}
client := fake.NewSimpleClientset(pv, claim)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost("/tmp/fake", client, nil))
plug, _ := plugMgr.FindPluginByName(azureFilePluginName)
// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
spec := volume.NewSpecFromPersistentVolume(pv, true)
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
if mounter == nil {
t.Fatalf("Got a nil Mounter")
}
if !mounter.GetAttributes().ReadOnly {
t.Errorf("Expected true for mounter.IsReadOnly")
}
}
type fakeAzureSvc struct{}
func (s *fakeAzureSvc) GetAzureCredentials(host volume.VolumeHost, nameSpace, secretName string) (string, string, error) {
return "name", "key", nil
}
func (s *fakeAzureSvc) SetAzureCredentials(host volume.VolumeHost, nameSpace, accountName, accountKey string) (string, error) {
return "secret", nil
}
func TestMounterAndUnmounterTypeAssert(t *testing.T) {
tmpDir, err := ioutil.TempDir(os.TempDir(), "azurefileTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/azure-file")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
spec := &v1.Volume{
Name: "vol1",
VolumeSource: v1.VolumeSource{
AzureFile: &v1.AzureFileVolumeSource{
SecretName: "secret",
ShareName: "share",
},
},
}
fake := &mount.FakeMounter{}
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
mounter, err := plug.(*azureFilePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), pod, &fakeAzureSvc{}, fake)
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
if _, ok := mounter.(volume.Unmounter); ok {
t.Errorf("Volume Mounter can be type-assert to Unmounter")
}
unmounter, err := plug.(*azureFilePlugin).newUnmounterInternal("vol1", types.UID("poduid"), &mount.FakeMounter{})
if err != nil {
t.Errorf("Failed to make a new Unmounter: %v", err)
}
if _, ok := unmounter.(volume.Mounter); ok {
t.Errorf("Volume Unmounter can be type-assert to Mounter")
}
}
type testcase struct {
name string
defaultNs string
spec *volume.Spec
// Expected return of the test
expectedName string
expectedNs string
expectedError error
}
func TestGetSecretNameAndNamespaceForPV(t *testing.T) {
secretNs := "ns"
tests := []testcase{
{
name: "persistent volume source",
defaultNs: "default",
spec: &volume.Spec{
PersistentVolume: &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
AzureFile: &v1.AzureFilePersistentVolumeSource{
ShareName: "share",
SecretName: "name",
SecretNamespace: &secretNs,
},
},
},
},
},
expectedName: "name",
expectedNs: "ns",
expectedError: nil,
},
{
name: "persistent volume source without namespace",
defaultNs: "default",
spec: &volume.Spec{
PersistentVolume: &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
AzureFile: &v1.AzureFilePersistentVolumeSource{
ShareName: "share",
SecretName: "name",
},
},
},
},
},
expectedName: "name",
expectedNs: "default",
expectedError: nil,
},
{
name: "pod volume source",
defaultNs: "default",
spec: &volume.Spec{
Volume: &v1.Volume{
VolumeSource: v1.VolumeSource{
AzureFile: &v1.AzureFileVolumeSource{
ShareName: "share",
SecretName: "name",
},
},
},
},
expectedName: "name",
expectedNs: "default",
expectedError: nil,
},
}
for _, testcase := range tests {
resultName, resultNs, err := getSecretNameAndNamespace(testcase.spec, testcase.defaultNs)
if err != testcase.expectedError || resultName != testcase.expectedName || resultNs != testcase.expectedNs {
t.Errorf("%s failed: expected err=%v ns=%q name=%q, got %v/%q/%q", testcase.name, testcase.expectedError, testcase.expectedNs, testcase.expectedName,
err, resultNs, resultName)
}
}
}
func TestAppendDefaultMountOptions(t *testing.T) {
tests := []struct {
options []string
expected []string
}{
{
options: []string{"dir_mode=0777"},
expected: []string{"dir_mode=0777", fmt.Sprintf("%s=%s", fileMode, defaultFileMode), fmt.Sprintf("%s=%s", vers, defaultVers)},
},
{
options: []string{"file_mode=0777"},
expected: []string{"file_mode=0777", fmt.Sprintf("%s=%s", dirMode, defaultDirMode), fmt.Sprintf("%s=%s", vers, defaultVers)},
},
{
options: []string{"vers=2.1"},
expected: []string{"vers=2.1", fmt.Sprintf("%s=%s", fileMode, defaultFileMode), fmt.Sprintf("%s=%s", dirMode, defaultDirMode)},
},
{
options: []string{""},
expected: []string{"", fmt.Sprintf("%s=%s", fileMode, defaultFileMode), fmt.Sprintf("%s=%s", dirMode, defaultDirMode), fmt.Sprintf("%s=%s", vers, defaultVers)},
},
{
options: []string{"file_mode=0777", "dir_mode=0777"},
expected: []string{"file_mode=0777", "dir_mode=0777", fmt.Sprintf("%s=%s", vers, defaultVers)},
},
}
for _, test := range tests {
result := appendDefaultMountOptions(test.options)
if !reflect.DeepEqual(result, test.expected) {
t.Errorf("input: %q, appendDefaultMountOptions result: %q, expected: %q", test.options, result, test.expected)
}
}
}

213
vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_provision.go generated vendored Normal file
View File

@ -0,0 +1,213 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure_file
import (
"fmt"
"strings"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
utilstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
var _ volume.DeletableVolumePlugin = &azureFilePlugin{}
var _ volume.ProvisionableVolumePlugin = &azureFilePlugin{}
// Abstract interface to file share operations.
// The Azure cloud provider is expected to implement it.
type azureCloudProvider interface {
// create a file share
CreateFileShare(name, storageAccount, storageType, location string, requestGB int) (string, string, error)
// delete a file share
DeleteFileShare(accountName, key, name string) error
}
type azureFileDeleter struct {
*azureFile
accountName, accountKey, shareName string
azureProvider azureCloudProvider
}
func (plugin *azureFilePlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
azure, err := getAzureCloudProvider(plugin.host.GetCloudProvider())
if err != nil {
glog.V(4).Infof("failed to get azure provider")
return nil, err
}
return plugin.newDeleterInternal(spec, &azureSvc{}, azure)
}
func (plugin *azureFilePlugin) newDeleterInternal(spec *volume.Spec, util azureUtil, azure azureCloudProvider) (volume.Deleter, error) {
if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.AzureFile == nil {
return nil, fmt.Errorf("invalid PV spec")
}
secretName, secretNamespace, err := getSecretNameAndNamespace(spec, spec.PersistentVolume.Spec.ClaimRef.Namespace)
if err != nil {
return nil, err
}
shareName := spec.PersistentVolume.Spec.AzureFile.ShareName
if accountName, accountKey, err := util.GetAzureCredentials(plugin.host, secretNamespace, secretName); err != nil {
return nil, err
} else {
return &azureFileDeleter{
azureFile: &azureFile{
volName: spec.Name(),
plugin: plugin,
},
shareName: shareName,
accountName: accountName,
accountKey: accountKey,
azureProvider: azure,
}, nil
}
}
func (plugin *azureFilePlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
azure, err := getAzureCloudProvider(plugin.host.GetCloudProvider())
if err != nil {
glog.V(4).Infof("failed to get azure provider")
return nil, err
}
if len(options.PVC.Spec.AccessModes) == 0 {
options.PVC.Spec.AccessModes = plugin.GetAccessModes()
}
return plugin.newProvisionerInternal(options, azure)
}
func (plugin *azureFilePlugin) newProvisionerInternal(options volume.VolumeOptions, azure azureCloudProvider) (volume.Provisioner, error) {
return &azureFileProvisioner{
azureFile: &azureFile{
plugin: plugin,
},
azureProvider: azure,
util: &azureSvc{},
options: options,
}, nil
}
var _ volume.Deleter = &azureFileDeleter{}
func (f *azureFileDeleter) GetPath() string {
name := azureFilePluginName
return f.plugin.host.GetPodVolumeDir(f.podUID, utilstrings.EscapeQualifiedNameForDisk(name), f.volName)
}
func (f *azureFileDeleter) Delete() error {
glog.V(4).Infof("deleting volume %s", f.shareName)
return f.azureProvider.DeleteFileShare(f.accountName, f.accountKey, f.shareName)
}
type azureFileProvisioner struct {
*azureFile
azureProvider azureCloudProvider
util azureUtil
options volume.VolumeOptions
}
var _ volume.Provisioner = &azureFileProvisioner{}
func (a *azureFileProvisioner) Provision() (*v1.PersistentVolume, error) {
if !volume.AccessModesContainedInAll(a.plugin.GetAccessModes(), a.options.PVC.Spec.AccessModes) {
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", a.options.PVC.Spec.AccessModes, a.plugin.GetAccessModes())
}
var sku, location, account string
// File share name has a length limit of 63, and it cannot contain two consecutive '-'s.
name := volume.GenerateVolumeName(a.options.ClusterName, a.options.PVName, 63)
name = strings.Replace(name, "--", "-", -1)
capacity := a.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
requestBytes := capacity.Value()
requestGB := int(volume.RoundUpSize(requestBytes, 1024*1024*1024))
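// e.g. a 1.5Gi request (1610612736 bytes) rounds up to requestGB = 2.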
secretNamespace := a.options.PVC.Namespace
// Apply ProvisionerParameters (case-insensitive). We leave validation of
// the values to the cloud provider.
for k, v := range a.options.Parameters {
switch strings.ToLower(k) {
case "skuname":
sku = v
case "location":
location = v
case "storageaccount":
account = v
case "secretnamespace":
secretNamespace = v
default:
return nil, fmt.Errorf("invalid option %q for volume plugin %s", k, a.plugin.GetPluginName())
}
}
// TODO: implement c.options.ProvisionerSelector parsing
if a.options.PVC.Spec.Selector != nil {
return nil, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on Azure file")
}
account, key, err := a.azureProvider.CreateFileShare(name, account, sku, location, requestGB)
if err != nil {
return nil, err
}
// create a secret for storage account and key
secretName, err := a.util.SetAzureCredentials(a.plugin.host, secretNamespace, account, key)
if err != nil {
return nil, err
}
// create PV
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: a.options.PVName,
Labels: map[string]string{},
Annotations: map[string]string{
volumehelper.VolumeDynamicallyCreatedByKey: "azure-file-dynamic-provisioner",
},
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: a.options.PersistentVolumeReclaimPolicy,
AccessModes: a.options.PVC.Spec.AccessModes,
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", requestGB)),
},
PersistentVolumeSource: v1.PersistentVolumeSource{
AzureFile: &v1.AzureFilePersistentVolumeSource{
SecretName: secretName,
ShareName: name,
SecretNamespace: &secretNamespace,
},
},
MountOptions: a.options.MountOptions,
},
}
return pv, nil
}
// Return cloud provider
func getAzureCloudProvider(cloudProvider cloudprovider.Interface) (azureCloudProvider, error) {
azureCloudProvider, ok := cloudProvider.(*azure.Cloud)
if !ok || azureCloudProvider == nil {
return nil, fmt.Errorf("Failed to get Azure Cloud Provider. GetCloudProvider returned %v instead", cloudProvider)
}
return azureCloudProvider, nil
}
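
For reference, a short sketch (illustrative only, not part of the vendored tree) of the parameter keys this provisioner accepts through a.options.Parameters; keys are matched case-insensitively per the switch above, and all values below are assumed:
// Hypothetical parameters for dynamic provisioning with this plugin.
params := map[string]string{
"skuname": "Standard_LRS", // storage account SKU (assumed value)
"location": "eastus", // Azure region (assumed value)
"storageaccount": "myaccount", // existing storage account name (assumed value)
"secretnamespace": "default", // namespace for the generated credentials secret
}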

129
vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_util.go generated vendored Normal file
View File

@ -0,0 +1,129 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure_file
import (
"fmt"
"strings"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/volume"
)
const (
fileMode = "file_mode"
dirMode = "dir_mode"
vers = "vers"
defaultFileMode = "0755"
defaultDirMode = "0755"
defaultVers = "3.0"
)
// Abstract interface to azure file operations.
type azureUtil interface {
GetAzureCredentials(host volume.VolumeHost, nameSpace, secretName string) (string, string, error)
SetAzureCredentials(host volume.VolumeHost, nameSpace, accountName, accountKey string) (string, error)
}
type azureSvc struct{}
func (s *azureSvc) GetAzureCredentials(host volume.VolumeHost, nameSpace, secretName string) (string, string, error) {
var accountKey, accountName string
kubeClient := host.GetKubeClient()
if kubeClient == nil {
return "", "", fmt.Errorf("Cannot get kube client")
}
keys, err := kubeClient.Core().Secrets(nameSpace).Get(secretName, metav1.GetOptions{})
if err != nil {
return "", "", fmt.Errorf("Couldn't get secret %v/%v", nameSpace, secretName)
}
for name, data := range keys.Data {
if name == "azurestorageaccountname" {
accountName = string(data)
}
if name == "azurestorageaccountkey" {
accountKey = string(data)
}
}
if accountName == "" || accountKey == "" {
return "", "", fmt.Errorf("Invalid %v/%v, couldn't extract azurestorageaccountname or azurestorageaccountkey", nameSpace, secretName)
}
return accountName, accountKey, nil
}
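// The referenced secret must carry both the "azurestorageaccountname" and
// "azurestorageaccountkey" keys (extra keys are ignored).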
func (s *azureSvc) SetAzureCredentials(host volume.VolumeHost, nameSpace, accountName, accountKey string) (string, error) {
kubeClient := host.GetKubeClient()
if kubeClient == nil {
return "", fmt.Errorf("Cannot get kube client")
}
secretName := "azure-storage-account-" + accountName + "-secret"
secret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: nameSpace,
Name: secretName,
},
Data: map[string][]byte{
"azurestorageaccountname": []byte(accountName),
"azurestorageaccountkey": []byte(accountKey),
},
Type: "Opaque",
}
_, err := kubeClient.Core().Secrets(nameSpace).Create(secret)
if errors.IsAlreadyExists(err) {
err = nil
}
if err != nil {
return "", fmt.Errorf("Couldn't create secret %v", err)
}
return secretName, err
}
// check whether mountOptions contain file_mode and dir_mode, if not, append default mode
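// e.g. []string{"dir_mode=0777"} becomes
// []string{"dir_mode=0777", "file_mode=0755", "vers=3.0"}.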
func appendDefaultMountOptions(mountOptions []string) []string {
fileModeFlag := false
dirModeFlag := false
versFlag := false
for _, mountOption := range mountOptions {
if strings.HasPrefix(mountOption, fileMode) {
fileModeFlag = true
}
if strings.HasPrefix(mountOption, dirMode) {
dirModeFlag = true
}
if strings.HasPrefix(mountOption, vers) {
versFlag = true
}
}
allMountOptions := mountOptions
if !fileModeFlag {
allMountOptions = append(allMountOptions, fmt.Sprintf("%s=%s", fileMode, defaultFileMode))
}
if !dirModeFlag {
allMountOptions = append(allMountOptions, fmt.Sprintf("%s=%s", dirMode, defaultDirMode))
}
if !versFlag {
allMountOptions = append(allMountOptions, fmt.Sprintf("%s=%s", vers, defaultVers))
}
return allMountOptions
}

19
vendor/k8s.io/kubernetes/pkg/volume/azure_file/doc.go generated vendored Normal file
View File

@ -0,0 +1,19 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package azure_file contains the internal representation of
// Azure File Service volumes.
package azure_file // import "k8s.io/kubernetes/pkg/volume/azure_file"

54
vendor/k8s.io/kubernetes/pkg/volume/cephfs/BUILD generated vendored Normal file
View File

@ -0,0 +1,54 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"cephfs.go",
"doc.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/cephfs",
deps = [
"//pkg/util/mount:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["cephfs_test.go"],
importpath = "k8s.io/kubernetes/pkg/volume/cephfs",
library = ":go_default_library",
deps = [
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

9
vendor/k8s.io/kubernetes/pkg/volume/cephfs/OWNERS generated vendored Normal file
View File

@ -0,0 +1,9 @@
approvers:
- rootfs
- saad-ali
reviewers:
- rootfs
- saad-ali
- jsafrane
- jingxu97
- msau42

344
vendor/k8s.io/kubernetes/pkg/volume/cephfs/cephfs.go generated vendored Normal file
View File

@ -0,0 +1,344 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cephfs
import (
"fmt"
"os"
"strings"
"github.com/golang/glog"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
utilstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
)
// ProbeVolumePlugins is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&cephfsPlugin{nil}}
}
type cephfsPlugin struct {
host volume.VolumeHost
}
var _ volume.VolumePlugin = &cephfsPlugin{}
const (
cephfsPluginName = "kubernetes.io/cephfs"
)
func (plugin *cephfsPlugin) Init(host volume.VolumeHost) error {
plugin.host = host
return nil
}
func (plugin *cephfsPlugin) GetPluginName() string {
return cephfsPluginName
}
func (plugin *cephfsPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
mon, _, _, _, _, err := getVolumeSource(spec)
if err != nil {
return "", err
}
return fmt.Sprintf("%v", mon), nil
}
func (plugin *cephfsPlugin) CanSupport(spec *volume.Spec) bool {
return (spec.Volume != nil && spec.Volume.CephFS != nil) || (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.CephFS != nil)
}
func (plugin *cephfsPlugin) RequiresRemount() bool {
return false
}
func (plugin *cephfsPlugin) SupportsMountOption() bool {
return true
}
func (plugin *cephfsPlugin) SupportsBulkVolumeVerification() bool {
return false
}
func (plugin *cephfsPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
return []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
v1.ReadOnlyMany,
v1.ReadWriteMany,
}
}
func (plugin *cephfsPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
secretName, secretNs, err := getSecretNameAndNamespace(spec, pod.Namespace)
if err != nil {
return nil, err
}
secret := ""
if len(secretName) > 0 && len(secretNs) > 0 {
// if a secret is provided, retrieve it
kubeClient := plugin.host.GetKubeClient()
if kubeClient == nil {
return nil, fmt.Errorf("Cannot get kube client")
}
secrets, err := kubeClient.Core().Secrets(secretNs).Get(secretName, metav1.GetOptions{})
if err != nil {
err = fmt.Errorf("Couldn't get secret %v/%v err: %v", secretNs, secretName, err)
return nil, err
}
for name, data := range secrets.Data {
secret = string(data)
glog.V(4).Infof("found ceph secret info: %s", name)
}
}
return plugin.newMounterInternal(spec, pod.UID, plugin.host.GetMounter(plugin.GetPluginName()), secret)
}
func (plugin *cephfsPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, mounter mount.Interface, secret string) (volume.Mounter, error) {
mon, path, id, secretFile, readOnly, err := getVolumeSource(spec)
if err != nil {
return nil, err
}
if id == "" {
id = "admin"
}
if path == "" {
path = "/"
}
if !strings.HasPrefix(path, "/") {
path = "/" + path
}
if secretFile == "" {
secretFile = "/etc/ceph/" + id + ".secret"
}
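// With all three left unset, the mount runs as "admin" with secret file
// "/etc/ceph/admin.secret" against the share root "/".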
return &cephfsMounter{
cephfs: &cephfs{
podUID: podUID,
volName: spec.Name(),
mon: mon,
path: path,
secret: secret,
id: id,
secret_file: secretFile,
readonly: readOnly,
mounter: mounter,
plugin: plugin,
mountOptions: volume.MountOptionFromSpec(spec),
},
}, nil
}
func (plugin *cephfsPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
return plugin.newUnmounterInternal(volName, podUID, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *cephfsPlugin) newUnmounterInternal(volName string, podUID types.UID, mounter mount.Interface) (volume.Unmounter, error) {
return &cephfsUnmounter{
cephfs: &cephfs{
podUID: podUID,
volName: volName,
mounter: mounter,
plugin: plugin},
}, nil
}
func (plugin *cephfsPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
cephfsVolume := &v1.Volume{
Name: volumeName,
VolumeSource: v1.VolumeSource{
CephFS: &v1.CephFSVolumeSource{
Monitors: []string{},
Path: volumeName,
},
},
}
return volume.NewSpecFromVolume(cephfsVolume), nil
}
// CephFS volumes represent a bare host file or directory mount of a CephFS export.
type cephfs struct {
volName string
podUID types.UID
mon []string
path string
id string
secret string
secret_file string
readonly bool
mounter mount.Interface
plugin *cephfsPlugin
volume.MetricsNil
mountOptions []string
}
type cephfsMounter struct {
*cephfs
}
var _ volume.Mounter = &cephfsMounter{}
func (cephfsVolume *cephfsMounter) GetAttributes() volume.Attributes {
return volume.Attributes{
ReadOnly: cephfsVolume.readonly,
Managed: false,
SupportsSELinux: false,
}
}
// CanMount checks, prior to mount operations, that the required components
// (binaries, etc.) to mount the volume are available on the underlying node.
// If not, it returns an error.
func (cephfsMounter *cephfsMounter) CanMount() error {
return nil
}
// SetUp attaches the disk and bind mounts to the volume path.
func (cephfsVolume *cephfsMounter) SetUp(fsGroup *int64) error {
return cephfsVolume.SetUpAt(cephfsVolume.GetPath(), fsGroup)
}
// SetUpAt attaches the disk and bind mounts to the volume path.
func (cephfsVolume *cephfsMounter) SetUpAt(dir string, fsGroup *int64) error {
notMnt, err := cephfsVolume.mounter.IsLikelyNotMountPoint(dir)
glog.V(4).Infof("CephFS mount set up: %s %v %v", dir, !notMnt, err)
if err != nil && !os.IsNotExist(err) {
return err
}
if !notMnt {
return nil
}
if err := os.MkdirAll(dir, 0750); err != nil {
return err
}
err = cephfsVolume.execMount(dir)
if err == nil {
return nil
}
// cleanup upon failure
util.UnmountPath(dir, cephfsVolume.mounter)
// return error
return err
}
type cephfsUnmounter struct {
*cephfs
}
var _ volume.Unmounter = &cephfsUnmounter{}
// TearDown unmounts the bind mount
func (cephfsVolume *cephfsUnmounter) TearDown() error {
return cephfsVolume.TearDownAt(cephfsVolume.GetPath())
}
// TearDownAt unmounts the bind mount
func (cephfsVolume *cephfsUnmounter) TearDownAt(dir string) error {
return util.UnmountPath(dir, cephfsVolume.mounter)
}
// GetPath creates global mount path
func (cephfsVolume *cephfs) GetPath() string {
name := cephfsPluginName
return cephfsVolume.plugin.host.GetPodVolumeDir(cephfsVolume.podUID, utilstrings.EscapeQualifiedNameForDisk(name), cephfsVolume.volName)
}
func (cephfsVolume *cephfs) execMount(mountpoint string) error {
// cephfs mount option
ceph_opt := ""
// override secretfile if secret is provided
if cephfsVolume.secret != "" {
ceph_opt = "name=" + cephfsVolume.id + ",secret=" + cephfsVolume.secret
} else {
ceph_opt = "name=" + cephfsVolume.id + ",secretfile=" + cephfsVolume.secret_file
}
// build option array
opt := []string{}
if cephfsVolume.readonly {
opt = append(opt, "ro")
}
opt = append(opt, ceph_opt)
// build src like mon1:6789,mon2:6789,mon3:6789:/
hosts := cephfsVolume.mon
l := len(hosts)
// pass all monitors and let ceph randomize and fail over
i := 0
src := ""
for i = 0; i < l-1; i++ {
src += hosts[i] + ","
}
src += hosts[i] + ":" + cephfsVolume.path
mountOptions := volume.JoinMountOptions(cephfsVolume.mountOptions, opt)
if err := cephfsVolume.mounter.Mount(src, mountpoint, "ceph", mountOptions); err != nil {
return fmt.Errorf("CephFS: mount failed: %v", err)
}
return nil
}
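// For monitors {"mon1:6789", "mon2:6789", "mon3:6789"} and path "/", execMount
// above produces the source "mon1:6789,mon2:6789,mon3:6789:/".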
func getVolumeSource(spec *volume.Spec) ([]string, string, string, string, bool, error) {
if spec.Volume != nil && spec.Volume.CephFS != nil {
mon := spec.Volume.CephFS.Monitors
path := spec.Volume.CephFS.Path
user := spec.Volume.CephFS.User
secretFile := spec.Volume.CephFS.SecretFile
readOnly := spec.Volume.CephFS.ReadOnly
return mon, path, user, secretFile, readOnly, nil
} else if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.CephFS != nil {
mon := spec.PersistentVolume.Spec.CephFS.Monitors
path := spec.PersistentVolume.Spec.CephFS.Path
user := spec.PersistentVolume.Spec.CephFS.User
secretFile := spec.PersistentVolume.Spec.CephFS.SecretFile
readOnly := spec.PersistentVolume.Spec.CephFS.ReadOnly
return mon, path, user, secretFile, readOnly, nil
}
return nil, "", "", "", false, fmt.Errorf("Spec does not reference a CephFS volume type")
}
func getSecretNameAndNamespace(spec *volume.Spec, defaultNamespace string) (string, string, error) {
if spec.Volume != nil && spec.Volume.CephFS != nil {
localSecretRef := spec.Volume.CephFS.SecretRef
if localSecretRef != nil {
return localSecretRef.Name, defaultNamespace, nil
}
return "", "", nil
} else if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.CephFS != nil {
secretRef := spec.PersistentVolume.Spec.CephFS.SecretRef
secretNs := defaultNamespace
if secretRef != nil {
if len(secretRef.Namespace) != 0 {
secretNs = secretRef.Namespace
}
return secretRef.Name, secretNs, nil
}
return "", "", nil
}
return "", "", fmt.Errorf("Spec does not reference an CephFS volume type")
}

227
vendor/k8s.io/kubernetes/pkg/volume/cephfs/cephfs_test.go generated vendored Normal file
View File

@ -0,0 +1,227 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cephfs
import (
"os"
"path"
"testing"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
func TestCanSupport(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("cephTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/cephfs")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if plug.GetPluginName() != "kubernetes.io/cephfs" {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
t.Errorf("Expected false")
}
if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{CephFS: &v1.CephFSVolumeSource{}}}}) {
t.Errorf("Expected true")
}
}
func TestPlugin(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("cephTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/cephfs")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
spec := &v1.Volume{
Name: "vol1",
VolumeSource: v1.VolumeSource{
CephFS: &v1.CephFSVolumeSource{
Monitors: []string{"a", "b"},
User: "user",
SecretRef: nil,
SecretFile: "/etc/ceph/user.secret",
},
},
}
mounter, err := plug.(*cephfsPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &mount.FakeMounter{}, "secrets")
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Fatalf("Got a nil Mounter")
}
volumePath := mounter.GetPath()
volpath := path.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~cephfs/vol1")
if volumePath != volpath {
t.Errorf("Got unexpected path: %s", volumePath)
}
if err := mounter.SetUp(nil); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(volumePath); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", volumePath)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
unmounter, err := plug.(*cephfsPlugin).newUnmounterInternal("vol1", types.UID("poduid"), &mount.FakeMounter{})
if err != nil {
t.Errorf("Failed to make a new Unmounter: %v", err)
}
if unmounter == nil {
t.Fatalf("Got a nil Unmounter")
}
if err := unmounter.TearDown(); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(volumePath); err == nil {
t.Errorf("TearDown() failed, volume path still exists: %s", volumePath)
} else if !os.IsNotExist(err) {
t.Errorf("TearDown() failed: %v", err)
}
}
func TestConstructVolumeSpec(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("cephTest")
if err != nil {
t.Fatalf("Can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/cephfs")
if err != nil {
t.Errorf("can't find cephfs plugin by name")
}
cephfsSpec, err := plug.(*cephfsPlugin).ConstructVolumeSpec("cephfsVolume", "/cephfsVolume/")
if err != nil {
t.Errorf("ConstructVolumeSpec() failed: %v", err)
}
if cephfsSpec.Name() != "cephfsVolume" {
t.Errorf("Get wrong cephfs spec name, got: %s", cephfsSpec.Name())
}
}
type testcase struct {
name string
defaultNs string
spec *volume.Spec
// Expected return of the test
expectedName string
expectedNs string
expectedError error
}
func TestGetSecretNameAndNamespaceForPV(t *testing.T) {
tests := []testcase{
{
name: "persistent volume source",
defaultNs: "default",
spec: &volume.Spec{
PersistentVolume: &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
CephFS: &v1.CephFSPersistentVolumeSource{
Monitors: []string{"a", "b"},
User: "user",
SecretRef: &v1.SecretReference{
Name: "name",
Namespace: "ns",
},
SecretFile: "/etc/ceph/user.secret",
},
},
},
},
},
expectedName: "name",
expectedNs: "ns",
expectedError: nil,
},
{
name: "persistent volume source without namespace",
defaultNs: "default",
spec: &volume.Spec{
PersistentVolume: &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
CephFS: &v1.CephFSPersistentVolumeSource{
Monitors: []string{"a", "b"},
User: "user",
SecretRef: &v1.SecretReference{
Name: "name",
},
SecretFile: "/etc/ceph/user.secret",
},
},
},
},
},
expectedName: "name",
expectedNs: "default",
expectedError: nil,
},
{
name: "pod volume source",
defaultNs: "default",
spec: &volume.Spec{
Volume: &v1.Volume{
VolumeSource: v1.VolumeSource{
CephFS: &v1.CephFSVolumeSource{
Monitors: []string{"a", "b"},
User: "user",
SecretRef: &v1.LocalObjectReference{
Name: "name",
},
SecretFile: "/etc/ceph/user.secret",
},
},
},
},
expectedName: "name",
expectedNs: "default",
expectedError: nil,
},
}
for _, testcase := range tests {
resultName, resultNs, err := getSecretNameAndNamespace(testcase.spec, testcase.defaultNs)
if err != testcase.expectedError || resultName != testcase.expectedName || resultNs != testcase.expectedNs {
t.Errorf("%s failed: expected err=%v ns=%q name=%q, got %v/%q/%q", testcase.name, testcase.expectedError, testcase.expectedNs, testcase.expectedName,
err, resultNs, resultName)
}
}
}

19
vendor/k8s.io/kubernetes/pkg/volume/cephfs/doc.go generated vendored Normal file
View File

@ -0,0 +1,19 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package cephfs contains the internal representation of Ceph file system
// (CephFS) volumes.
package cephfs // import "k8s.io/kubernetes/pkg/volume/cephfs"

72
vendor/k8s.io/kubernetes/pkg/volume/cinder/BUILD generated vendored Normal file
View File

@ -0,0 +1,72 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"attacher.go",
"cinder.go",
"cinder_util.go",
"doc.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/cinder",
deps = [
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/openstack:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/util/keymutex:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"attacher_test.go",
"cinder_test.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/cinder",
library = ":go_default_library",
deps = [
"//pkg/cloudprovider:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

13
vendor/k8s.io/kubernetes/pkg/volume/cinder/OWNERS generated vendored Normal file
View File

@ -0,0 +1,13 @@
approvers:
- jsafrane
- anguslees
- dims
- FengyunPan
reviewers:
- anguslees
- rootfs
- saad-ali
- jsafrane
- jingxu97
- msau42
- FengyunPan

434
vendor/k8s.io/kubernetes/pkg/volume/cinder/attacher.go generated vendored Normal file
View File

@ -0,0 +1,434 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cinder
import (
"fmt"
"os"
"path"
"strings"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
type cinderDiskAttacher struct {
host volume.VolumeHost
cinderProvider CinderProvider
}
var _ volume.Attacher = &cinderDiskAttacher{}
var _ volume.AttachableVolumePlugin = &cinderPlugin{}
const (
checkSleepDuration = 1 * time.Second
operationFinishInitDealy = 1 * time.Second
operationFinishFactor = 1.1
operationFinishSteps = 10
diskAttachInitDealy = 1 * time.Second
diskAttachFactor = 1.2
diskAttachSteps = 15
diskDetachInitDealy = 1 * time.Second
diskDetachFactor = 1.2
diskDetachSteps = 13
)
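// With wait.ExponentialBackoff these parameters roughly bound the cumulative
// sleep at duration*(factor^steps-1)/(factor-1): ~16s for operation finish,
// ~72s for attach and ~48s for detach.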
func (plugin *cinderPlugin) NewAttacher() (volume.Attacher, error) {
cinder, err := getCloudProvider(plugin.host.GetCloudProvider())
if err != nil {
return nil, err
}
return &cinderDiskAttacher{
host: plugin.host,
cinderProvider: cinder,
}, nil
}
func (plugin *cinderPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
mounter := plugin.host.GetMounter(plugin.GetPluginName())
return mount.GetMountRefs(mounter, deviceMountPath)
}
func (attacher *cinderDiskAttacher) waitOperationFinished(volumeID string) error {
backoff := wait.Backoff{
Duration: operationFinishInitDealy,
Factor: operationFinishFactor,
Steps: operationFinishSteps,
}
var volumeStatus string
err := wait.ExponentialBackoff(backoff, func() (bool, error) {
var pending bool
var err error
pending, volumeStatus, err = attacher.cinderProvider.OperationPending(volumeID)
if err != nil {
return false, err
}
return !pending, nil
})
if err == wait.ErrWaitTimeout {
err = fmt.Errorf("Volume %q is %s, can't finish within the alloted time", volumeID, volumeStatus)
}
return err
}
func (attacher *cinderDiskAttacher) waitDiskAttached(instanceID, volumeID string) error {
backoff := wait.Backoff{
Duration: diskAttachInitDealy,
Factor: diskAttachFactor,
Steps: diskAttachSteps,
}
err := wait.ExponentialBackoff(backoff, func() (bool, error) {
attached, err := attacher.cinderProvider.DiskIsAttached(instanceID, volumeID)
if err != nil {
return false, err
}
return attached, nil
})
if err == wait.ErrWaitTimeout {
err = fmt.Errorf("Volume %q failed to be attached within the alloted time", volumeID)
}
return err
}
func (attacher *cinderDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return "", err
}
volumeID := volumeSource.VolumeID
instanceID, err := attacher.nodeInstanceID(nodeName)
if err != nil {
return "", err
}
if err := attacher.waitOperationFinished(volumeID); err != nil {
return "", err
}
attached, err := attacher.cinderProvider.DiskIsAttached(instanceID, volumeID)
if err != nil {
// Log error and continue with attach
glog.Warningf(
"Error checking if volume (%q) is already attached to current instance (%q). Will continue and try attach anyway. err=%v",
volumeID, instanceID, err)
}
if err == nil && attached {
// Volume is already attached to instance.
glog.Infof("Attach operation is successful. volume %q is already attached to instance %q.", volumeID, instanceID)
} else {
_, err = attacher.cinderProvider.AttachDisk(instanceID, volumeID)
if err == nil {
if err = attacher.waitDiskAttached(instanceID, volumeID); err != nil {
glog.Errorf("Error waiting for volume %q to be attached from node %q: %v", volumeID, nodeName, err)
return "", err
}
glog.Infof("Attach operation successful: volume %q attached to instance %q.", volumeID, instanceID)
} else {
glog.Infof("Attach volume %q to instance %q failed with: %v", volumeID, instanceID, err)
return "", err
}
}
devicePath, err := attacher.cinderProvider.GetAttachmentDiskPath(instanceID, volumeID)
if err != nil {
glog.Infof("Can not get device path of volume %q which be attached to instance %q, failed with: %v", volumeID, instanceID, err)
return "", err
}
return devicePath, nil
}
func (attacher *cinderDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
volumesAttachedCheck := make(map[*volume.Spec]bool)
volumeSpecMap := make(map[string]*volume.Spec)
volumeIDList := []string{}
for _, spec := range specs {
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
glog.Errorf("Error getting volume (%q) source : %v", spec.Name(), err)
continue
}
volumeIDList = append(volumeIDList, volumeSource.VolumeID)
volumesAttachedCheck[spec] = true
volumeSpecMap[volumeSource.VolumeID] = spec
}
instanceID, err := attacher.nodeInstanceID(nodeName)
if err != nil {
if err == cloudprovider.InstanceNotFound {
// If node doesn't exist, OpenStack Nova will assume the volumes are not attached to it.
// Mark the volumes as detached and return false without error.
glog.Warningf("VolumesAreAttached: node %q does not exist.", nodeName)
for spec := range volumesAttachedCheck {
volumesAttachedCheck[spec] = false
}
return volumesAttachedCheck, nil
}
return volumesAttachedCheck, err
}
attachedResult, err := attacher.cinderProvider.DisksAreAttached(instanceID, volumeIDList)
if err != nil {
// Log the error and return it; the volumes stay marked as attached so the caller can retry.
glog.Errorf(
"Error checking if volumes (%v) are attached to node (%q): %v",
volumeIDList, nodeName, err)
return volumesAttachedCheck, err
}
for volumeID, attached := range attachedResult {
if !attached {
spec := volumeSpecMap[volumeID]
volumesAttachedCheck[spec] = false
glog.V(2).Infof("VolumesAreAttached: check volume %q (specName: %q) is no longer attached", volumeID, spec.Name())
}
}
return volumesAttachedCheck, nil
}
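// WaitForAttach polls every checkSleepDuration until the attached volume's
// device path appears on the node, or fails when the timeout expires.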
func (attacher *cinderDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) {
// NOTE: devicePath is the path as reported by Cinder, which may be incorrect and should not be used. See Issue #33128
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return "", err
}
volumeID := volumeSource.VolumeID
if devicePath == "" {
return "", fmt.Errorf("WaitForAttach failed for Cinder disk %q: devicePath is empty.", volumeID)
}
ticker := time.NewTicker(checkSleepDuration)
defer ticker.Stop()
timer := time.NewTimer(timeout)
defer timer.Stop()
for {
select {
case <-ticker.C:
glog.V(5).Infof("Checking Cinder disk %q is attached.", volumeID)
probeAttachedVolume()
if !attacher.cinderProvider.ShouldTrustDevicePath() {
// Using the Cinder volume ID, find the real device path (See Issue #33128)
devicePath = attacher.cinderProvider.GetDevicePath(volumeID)
}
exists, err := volumeutil.PathExists(devicePath)
if exists && err == nil {
glog.Infof("Successfully found attached Cinder disk %q at %v.", volumeID, devicePath)
return devicePath, nil
} else {
// Log an error, and continue checking periodically
glog.Errorf("Error: could not find attached Cinder disk %q (path: %q): %v", volumeID, devicePath, err)
}
case <-timer.C:
return "", fmt.Errorf("Could not find attached Cinder disk %q. Timeout waiting for mount paths to be created.", volumeID)
}
}
}
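// GetDeviceMountPath returns the global mount path for the volume, derived
// from the plugin directory and the Cinder volume ID.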
func (attacher *cinderDiskAttacher) GetDeviceMountPath(
spec *volume.Spec) (string, error) {
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return "", err
}
return makeGlobalPDName(attacher.host, volumeSource.VolumeID), nil
}
// FIXME: this method can be further pruned.
func (attacher *cinderDiskAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
mounter := attacher.host.GetMounter(cinderVolumePluginName)
notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
if err != nil {
if os.IsNotExist(err) {
if err := os.MkdirAll(deviceMountPath, 0750); err != nil {
return err
}
notMnt = true
} else {
return err
}
}
volumeSource, readOnly, err := getVolumeSource(spec)
if err != nil {
return err
}
options := []string{}
if readOnly {
options = append(options, "ro")
}
if notMnt {
diskMounter := volumehelper.NewSafeFormatAndMountFromHost(cinderVolumePluginName, attacher.host)
mountOptions := volume.MountOptionFromSpec(spec, options...)
err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions)
if err != nil {
os.Remove(deviceMountPath)
return err
}
}
return nil
}
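// cinderDiskDetacher implements volume.Detacher for Cinder volumes.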
type cinderDiskDetacher struct {
mounter mount.Interface
cinderProvider CinderProvider
}
var _ volume.Detacher = &cinderDiskDetacher{}
func (plugin *cinderPlugin) NewDetacher() (volume.Detacher, error) {
cinder, err := getCloudProvider(plugin.host.GetCloudProvider())
if err != nil {
return nil, err
}
return &cinderDiskDetacher{
mounter: plugin.host.GetMounter(plugin.GetPluginName()),
cinderProvider: cinder,
}, nil
}
func (detacher *cinderDiskDetacher) waitOperationFinished(volumeID string) error {
backoff := wait.Backoff{
Duration: operationFinishInitDealy,
Factor: operationFinishFactor,
Steps: operationFinishSteps,
}
var volumeStatus string
err := wait.ExponentialBackoff(backoff, func() (bool, error) {
var pending bool
var err error
pending, volumeStatus, err = detacher.cinderProvider.OperationPending(volumeID)
if err != nil {
return false, err
}
return !pending, nil
})
if err == wait.ErrWaitTimeout {
err = fmt.Errorf("Volume %q is %s, can't finish within the alloted time", volumeID, volumeStatus)
}
return err
}
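// waitDiskDetached polls DiskIsAttached with exponential backoff until the
// volume is reported as detached from the instance.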
func (detacher *cinderDiskDetacher) waitDiskDetached(instanceID, volumeID string) error {
backoff := wait.Backoff{
Duration: diskDetachInitDealy,
Factor: diskDetachFactor,
Steps: diskDetachSteps,
}
err := wait.ExponentialBackoff(backoff, func() (bool, error) {
attached, err := detacher.cinderProvider.DiskIsAttached(instanceID, volumeID)
if err != nil {
return false, err
}
return !attached, nil
})
if err == wait.ErrWaitTimeout {
err = fmt.Errorf("Volume %q failed to detach within the alloted time", volumeID)
}
return err
}
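// Detach waits for any pending Cinder operation on the volume to finish, then
// detaches the volume from the node and waits until Cinder reports it
// detached. Detaching an already-detached volume is treated as success.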
func (detacher *cinderDiskDetacher) Detach(volumeName string, nodeName types.NodeName) error {
volumeID := path.Base(volumeName)
instances, res := detacher.cinderProvider.Instances()
if !res {
return fmt.Errorf("failed to list openstack instances")
}
instanceID, err := instances.InstanceID(nodeName)
if err != nil {
return err
}
if ind := strings.LastIndex(instanceID, "/"); ind >= 0 {
instanceID = instanceID[(ind + 1):]
}
if err := detacher.waitOperationFinished(volumeID); err != nil {
return err
}
attached, err := detacher.cinderProvider.DiskIsAttached(instanceID, volumeID)
if err != nil {
// Log error and continue with detach
glog.Errorf(
"Error checking if volume (%q) is already attached to current node (%q). Will continue and try detach anyway. err=%v",
volumeID, nodeName, err)
}
if err == nil && !attached {
// Volume is already detached from node.
glog.Infof("detach operation was successful. volume %q is already detached from node %q.", volumeID, nodeName)
return nil
}
if err = detacher.cinderProvider.DetachDisk(instanceID, volumeID); err != nil {
glog.Errorf("Error detaching volume %q from node %q: %v", volumeID, nodeName, err)
return err
}
if err = detacher.waitDiskDetached(instanceID, volumeID); err != nil {
glog.Errorf("Error waiting for volume %q to detach from node %q: %v", volumeID, nodeName, err)
return err
}
glog.Infof("detached volume %q from node %q", volumeID, nodeName)
return nil
}
func (detacher *cinderDiskDetacher) UnmountDevice(deviceMountPath string) error {
return volumeutil.UnmountPath(deviceMountPath, detacher.mounter)
}
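// nodeInstanceID resolves the OpenStack instance ID for the given node,
// keeping only the part after the last "/" of the provider-reported ID.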
func (attacher *cinderDiskAttacher) nodeInstanceID(nodeName types.NodeName) (string, error) {
instances, res := attacher.cinderProvider.Instances()
if !res {
return "", fmt.Errorf("failed to list openstack instances")
}
instanceID, err := instances.InstanceID(nodeName)
if err != nil {
return "", err
}
if ind := strings.LastIndex(instanceID, "/"); ind >= 0 {
instanceID = instanceID[(ind + 1):]
}
return instanceID, nil
}

672
vendor/k8s.io/kubernetes/pkg/volume/cinder/attacher_test.go generated vendored Normal file
View File

@ -0,0 +1,672 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cinder
import (
"errors"
"reflect"
"testing"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
"fmt"
"sort"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/types"
)
const (
VolumeStatusPending = "pending"
VolumeStatusDone = "done"
)
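// attachStatus and detachStatus record which fake provider call ran last; the
// fake DiskIsAttached consults them so attachment state stays consistent
// within a testcase.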
var attachStatus = "Attach"
var detachStatus = "Detach"
func TestGetDeviceName_Volume(t *testing.T) {
plugin := newPlugin()
name := "my-cinder-volume"
spec := createVolSpec(name, false)
deviceName, err := plugin.GetVolumeName(spec)
if err != nil {
t.Errorf("GetDeviceName error: %v", err)
}
if deviceName != name {
t.Errorf("GetDeviceName error: expected %s, got %s", name, deviceName)
}
}
func TestGetDeviceName_PersistentVolume(t *testing.T) {
plugin := newPlugin()
name := "my-cinder-pv"
spec := createPVSpec(name, true)
deviceName, err := plugin.GetVolumeName(spec)
if err != nil {
t.Errorf("GetDeviceName error: %v", err)
}
if deviceName != name {
t.Errorf("GetDeviceName error: expected %s, got %s", name, deviceName)
}
}
func TestGetDeviceMountPath(t *testing.T) {
name := "cinder-volume-id"
spec := createVolSpec(name, false)
rootDir := "/var/lib/kubelet/"
host := volumetest.NewFakeVolumeHost(rootDir, nil, nil)
attacher := &cinderDiskAttacher{
host: host,
}
//test the path
path, err := attacher.GetDeviceMountPath(spec)
if err != nil {
t.Errorf("Get device mount path error")
}
expectedPath := rootDir + "plugins/kubernetes.io/cinder/mounts/" + name
if path != expectedPath {
t.Errorf("Device mount path error: expected %s, got %s ", expectedPath, path)
}
}
// One testcase for TestAttachDetach table test below
type testcase struct {
name string
// Expected calls to the fake Cinder provider:
attach attachCall
detach detachCall
operationPending operationPendingCall
diskIsAttached diskIsAttachedCall
disksAreAttached disksAreAttachedCall
diskPath diskPathCall
t *testing.T
attachOrDetach *string
instanceID string
// Actual test to run
test func(test *testcase) (string, error)
// Expected return of the test
expectedResult string
expectedError error
}
func TestAttachDetach(t *testing.T) {
volumeID := "disk"
instanceID := "instance"
pending := VolumeStatusPending
done := VolumeStatusDone
nodeName := types.NodeName("nodeName")
readOnly := false
spec := createVolSpec(volumeID, readOnly)
attachError := errors.New("Fake attach error")
detachError := errors.New("Fake detach error")
diskCheckError := errors.New("Fake DiskIsAttached error")
diskPathError := errors.New("Fake GetAttachmentDiskPath error")
disksCheckError := errors.New("Fake DisksAreAttached error")
operationFinishTimeout := errors.New("Fake waitOperationFinished error")
tests := []testcase{
// Successful Attach call
{
name: "Attach_Positive",
instanceID: instanceID,
operationPending: operationPendingCall{volumeID, false, done, nil},
diskIsAttached: diskIsAttachedCall{instanceID, volumeID, false, nil},
attach: attachCall{instanceID, volumeID, "", nil},
diskPath: diskPathCall{instanceID, volumeID, "/dev/sda", nil},
test: func(testcase *testcase) (string, error) {
attacher := newAttacher(testcase)
return attacher.Attach(spec, nodeName)
},
expectedResult: "/dev/sda",
},
// Disk is already attached
{
name: "Attach_Positive_AlreadyAttached",
instanceID: instanceID,
operationPending: operationPendingCall{volumeID, false, done, nil},
diskIsAttached: diskIsAttachedCall{instanceID, volumeID, true, nil},
diskPath: diskPathCall{instanceID, volumeID, "/dev/sda", nil},
test: func(testcase *testcase) (string, error) {
attacher := newAttacher(testcase)
return attacher.Attach(spec, nodeName)
},
expectedResult: "/dev/sda",
},
// Disk is attaching
{
name: "Attach_is_attaching",
instanceID: instanceID,
operationPending: operationPendingCall{volumeID, true, pending, operationFinishTimeout},
test: func(testcase *testcase) (string, error) {
attacher := newAttacher(testcase)
return attacher.Attach(spec, nodeName)
},
expectedError: operationFinishTimeout,
},
// Attach call fails
{
name: "Attach_Negative",
instanceID: instanceID,
operationPending: operationPendingCall{volumeID, false, done, nil},
diskIsAttached: diskIsAttachedCall{instanceID, volumeID, false, diskCheckError},
attach: attachCall{instanceID, volumeID, "/dev/sda", attachError},
test: func(testcase *testcase) (string, error) {
attacher := newAttacher(testcase)
return attacher.Attach(spec, nodeName)
},
expectedError: attachError,
},
// GetAttachmentDiskPath call fails
{
name:             "Attach_Negative_DiskPathFails",
instanceID: instanceID,
operationPending: operationPendingCall{volumeID, false, done, nil},
diskIsAttached: diskIsAttachedCall{instanceID, volumeID, false, nil},
attach: attachCall{instanceID, volumeID, "", nil},
diskPath: diskPathCall{instanceID, volumeID, "", diskPathError},
test: func(testcase *testcase) (string, error) {
attacher := newAttacher(testcase)
return attacher.Attach(spec, nodeName)
},
expectedError: diskPathError,
},
// Successful VolumesAreAttached call, attached
{
name: "VolumesAreAttached_Positive",
instanceID: instanceID,
disksAreAttached: disksAreAttachedCall{instanceID, []string{volumeID}, map[string]bool{volumeID: true}, nil},
test: func(testcase *testcase) (string, error) {
attacher := newAttacher(testcase)
attachments, err := attacher.VolumesAreAttached([]*volume.Spec{spec}, nodeName)
return serializeAttachments(attachments), err
},
expectedResult: serializeAttachments(map[*volume.Spec]bool{spec: true}),
},
// Successful VolumesAreAttached call, not attached
{
name: "VolumesAreAttached_Negative",
instanceID: instanceID,
disksAreAttached: disksAreAttachedCall{instanceID, []string{volumeID}, map[string]bool{volumeID: false}, nil},
test: func(testcase *testcase) (string, error) {
attacher := newAttacher(testcase)
attachments, err := attacher.VolumesAreAttached([]*volume.Spec{spec}, nodeName)
return serializeAttachments(attachments), err
},
expectedResult: serializeAttachments(map[*volume.Spec]bool{spec: false}),
},
// Treat as attached when DisksAreAttached call fails
{
name: "VolumesAreAttached_CinderFailed",
instanceID: instanceID,
disksAreAttached: disksAreAttachedCall{instanceID, []string{volumeID}, nil, disksCheckError},
test: func(testcase *testcase) (string, error) {
attacher := newAttacher(testcase)
attachments, err := attacher.VolumesAreAttached([]*volume.Spec{spec}, nodeName)
return serializeAttachments(attachments), err
},
expectedResult: serializeAttachments(map[*volume.Spec]bool{spec: true}),
expectedError: disksCheckError,
},
// Detach succeeds
{
name: "Detach_Positive",
instanceID: instanceID,
operationPending: operationPendingCall{volumeID, false, done, nil},
diskIsAttached: diskIsAttachedCall{instanceID, volumeID, true, nil},
detach: detachCall{instanceID, volumeID, nil},
test: func(testcase *testcase) (string, error) {
detacher := newDetacher(testcase)
return "", detacher.Detach(volumeID, nodeName)
},
},
// Disk is already detached
{
name: "Detach_Positive_AlreadyDetached",
instanceID: instanceID,
operationPending: operationPendingCall{volumeID, false, done, nil},
diskIsAttached: diskIsAttachedCall{instanceID, volumeID, false, nil},
test: func(testcase *testcase) (string, error) {
detacher := newDetacher(testcase)
return "", detacher.Detach(volumeID, nodeName)
},
},
// Detach succeeds when DiskIsAttached fails
{
name: "Detach_Positive_CheckFails",
instanceID: instanceID,
operationPending: operationPendingCall{volumeID, false, done, nil},
diskIsAttached: diskIsAttachedCall{instanceID, volumeID, false, diskCheckError},
detach: detachCall{instanceID, volumeID, nil},
test: func(testcase *testcase) (string, error) {
detacher := newDetacher(testcase)
return "", detacher.Detach(volumeID, nodeName)
},
},
// Detach fails
{
name: "Detach_Negative",
instanceID: instanceID,
operationPending: operationPendingCall{volumeID, false, done, nil},
diskIsAttached: diskIsAttachedCall{instanceID, volumeID, false, diskCheckError},
detach: detachCall{instanceID, volumeID, detachError},
test: func(testcase *testcase) (string, error) {
detacher := newDetacher(testcase)
return "", detacher.Detach(volumeID, nodeName)
},
expectedError: detachError,
},
// Disk is detaching
{
name: "Detach_Is_Detaching",
instanceID: instanceID,
operationPending: operationPendingCall{volumeID, true, pending, operationFinishTimeout},
test: func(testcase *testcase) (string, error) {
detacher := newDetacher(testcase)
return "", detacher.Detach(volumeID, nodeName)
},
expectedError: operationFinishTimeout,
},
}
for _, testcase := range tests {
testcase.t = t
attachOrDetach := ""
testcase.attachOrDetach = &attachOrDetach
result, err := testcase.test(&testcase)
if err != testcase.expectedError {
t.Errorf("%s failed: expected err=%q, got %q", testcase.name, testcase.expectedError, err)
}
if result != testcase.expectedResult {
t.Errorf("%s failed: expected result=%q, got %q", testcase.name, testcase.expectedResult, result)
}
}
}
type volumeAttachmentFlag struct {
volumeID string
attached bool
}
type volumeAttachmentFlags []volumeAttachmentFlag
func (va volumeAttachmentFlags) Len() int {
return len(va)
}
func (va volumeAttachmentFlags) Swap(i, j int) {
va[i], va[j] = va[j], va[i]
}
func (va volumeAttachmentFlags) Less(i, j int) bool {
if va[i].volumeID < va[j].volumeID {
return true
}
if va[i].volumeID > va[j].volumeID {
return false
}
return va[j].attached
}
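// serializeAttachments renders an attachment map in a deterministic, sorted
// order so table tests can compare results as plain strings.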
func serializeAttachments(attachments map[*volume.Spec]bool) string {
var attachmentFlags volumeAttachmentFlags
for spec, attached := range attachments {
attachmentFlags = append(attachmentFlags, volumeAttachmentFlag{spec.Name(), attached})
}
sort.Sort(attachmentFlags)
return fmt.Sprint(attachmentFlags)
}
// newPlugin creates a new cinderPlugin with a fake volume host. NewAttacher
// and NewDetacher won't work, because there is no cloud provider behind them.
func newPlugin() *cinderPlugin {
host := volumetest.NewFakeVolumeHost("/tmp", nil, nil)
plugins := ProbeVolumePlugins()
plugin := plugins[0]
plugin.Init(host)
return plugin.(*cinderPlugin)
}
func newAttacher(testcase *testcase) *cinderDiskAttacher {
return &cinderDiskAttacher{
host: nil,
cinderProvider: testcase,
}
}
func newDetacher(testcase *testcase) *cinderDiskDetacher {
return &cinderDiskDetacher{
cinderProvider: testcase,
}
}
func createVolSpec(name string, readOnly bool) *volume.Spec {
return &volume.Spec{
Volume: &v1.Volume{
Name: name,
VolumeSource: v1.VolumeSource{
Cinder: &v1.CinderVolumeSource{
VolumeID: name,
ReadOnly: readOnly,
},
},
},
}
}
func createPVSpec(name string, readOnly bool) *volume.Spec {
return &volume.Spec{
PersistentVolume: &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
Cinder: &v1.CinderVolumeSource{
VolumeID: name,
ReadOnly: readOnly,
},
},
},
},
}
}
// Fake Cinder provider implementation
type attachCall struct {
instanceID string
volumeID string
retDeviceName string
ret error
}
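// Note: detachCall's devicePath field carries the volume ID under test; the
// fake DetachDisk below compares it against its volumeID argument.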
type detachCall struct {
instanceID string
devicePath string
ret error
}
type operationPendingCall struct {
diskName string
pending bool
volumeStatus string
ret error
}
type diskIsAttachedCall struct {
instanceID string
volumeID string
isAttached bool
ret error
}
type diskPathCall struct {
instanceID string
volumeID string
retPath string
ret error
}
type disksAreAttachedCall struct {
instanceID string
volumeIDs []string
areAttached map[string]bool
ret error
}
func (testcase *testcase) AttachDisk(instanceID, volumeID string) (string, error) {
expected := &testcase.attach
if expected.volumeID == "" && expected.instanceID == "" {
// testcase.attach looks uninitialized, test did not expect to call
// AttachDisk
testcase.t.Errorf("Unexpected AttachDisk call!")
return "", errors.New("Unexpected AttachDisk call!")
}
if expected.volumeID != volumeID {
testcase.t.Errorf("Unexpected AttachDisk call: expected volumeID %s, got %s", expected.volumeID, volumeID)
return "", errors.New("Unexpected AttachDisk call: wrong volumeID")
}
if expected.instanceID != instanceID {
testcase.t.Errorf("Unexpected AttachDisk call: expected instanceID %s, got %s", expected.instanceID, instanceID)
return "", errors.New("Unexpected AttachDisk call: wrong instanceID")
}
glog.V(4).Infof("AttachDisk call: %s, %s, returning %q, %v", volumeID, instanceID, expected.retDeviceName, expected.ret)
testcase.attachOrDetach = &attachStatus
return expected.retDeviceName, expected.ret
}
func (testcase *testcase) DetachDisk(instanceID, volumeID string) error {
expected := &testcase.detach
if expected.devicePath == "" && expected.instanceID == "" {
// testcase.detach looks uninitialized, test did not expect to call
// DetachDisk
testcase.t.Errorf("Unexpected DetachDisk call!")
return errors.New("Unexpected DetachDisk call!")
}
if expected.devicePath != volumeID {
testcase.t.Errorf("Unexpected DetachDisk call: expected volumeID %s, got %s", expected.devicePath, volumeID)
return errors.New("Unexpected DetachDisk call: wrong volumeID")
}
if expected.instanceID != instanceID {
testcase.t.Errorf("Unexpected DetachDisk call: expected instanceID %s, got %s", expected.instanceID, instanceID)
return errors.New("Unexpected DetachDisk call: wrong instanceID")
}
glog.V(4).Infof("DetachDisk call: %s, %s, returning %v", volumeID, instanceID, expected.ret)
testcase.attachOrDetach = &detachStatus
return expected.ret
}
func (testcase *testcase) OperationPending(diskName string) (bool, string, error) {
expected := &testcase.operationPending
if expected.volumeStatus == VolumeStatusPending {
glog.V(4).Infof("OperationPending call: %s, returning %v, %v, %v", diskName, expected.pending, expected.volumeStatus, expected.ret)
return true, expected.volumeStatus, expected.ret
}
glog.V(4).Infof("OperationPending call: %s, returning %v, %v, %v", diskName, expected.pending, expected.volumeStatus, expected.ret)
return false, expected.volumeStatus, expected.ret
}
func (testcase *testcase) DiskIsAttached(instanceID, volumeID string) (bool, error) {
expected := &testcase.diskIsAttached
// If the testcase invoked DetachDisk, report the volume as detached
if *testcase.attachOrDetach == detachStatus {
return false, nil
}
// If the testcase invoked AttachDisk, report the volume as attached
if *testcase.attachOrDetach == attachStatus {
return true, nil
}
if expected.volumeID == "" && expected.instanceID == "" {
// testcase.diskIsAttached looks uninitialized, test did not expect to
// call DiskIsAttached
testcase.t.Errorf("Unexpected DiskIsAttached call!")
return false, errors.New("Unexpected DiskIsAttached call!")
}
if expected.volumeID != volumeID {
testcase.t.Errorf("Unexpected DiskIsAttached call: expected volumeID %s, got %s", expected.volumeID, volumeID)
return false, errors.New("Unexpected DiskIsAttached call: wrong volumeID")
}
if expected.instanceID != instanceID {
testcase.t.Errorf("Unexpected DiskIsAttached call: expected instanceID %s, got %s", expected.instanceID, instanceID)
return false, errors.New("Unexpected DiskIsAttached call: wrong instanceID")
}
glog.V(4).Infof("DiskIsAttached call: %s, %s, returning %v, %v", volumeID, instanceID, expected.isAttached, expected.ret)
return expected.isAttached, expected.ret
}
func (testcase *testcase) GetAttachmentDiskPath(instanceID, volumeID string) (string, error) {
expected := &testcase.diskPath
if expected.volumeID == "" && expected.instanceID == "" {
// testcase.diskPath looks uninitialized, test did not expect to
// call GetAttachmentDiskPath
testcase.t.Errorf("Unexpected GetAttachmentDiskPath call!")
return "", errors.New("Unexpected GetAttachmentDiskPath call!")
}
if expected.volumeID != volumeID {
testcase.t.Errorf("Unexpected GetAttachmentDiskPath call: expected volumeID %s, got %s", expected.volumeID, volumeID)
return "", errors.New("Unexpected GetAttachmentDiskPath call: wrong volumeID")
}
if expected.instanceID != instanceID {
testcase.t.Errorf("Unexpected GetAttachmentDiskPath call: expected instanceID %s, got %s", expected.instanceID, instanceID)
return "", errors.New("Unexpected GetAttachmentDiskPath call: wrong instanceID")
}
glog.V(4).Infof("GetAttachmentDiskPath call: %s, %s, returning %v, %v", volumeID, instanceID, expected.retPath, expected.ret)
return expected.retPath, expected.ret
}
func (testcase *testcase) ShouldTrustDevicePath() bool {
return true
}
func (testcase *testcase) CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, bool, error) {
return "", "", false, errors.New("Not implemented")
}
func (testcase *testcase) GetDevicePath(volumeID string) string {
return ""
}
func (testcase *testcase) InstanceID() (string, error) {
return testcase.instanceID, nil
}
func (testcase *testcase) ExpandVolume(volumeID string, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error) {
return resource.Quantity{}, nil
}
func (testcase *testcase) DeleteVolume(volumeID string) error {
return errors.New("Not implemented")
}
func (testcase *testcase) GetAutoLabelsForPD(name string) (map[string]string, error) {
return map[string]string{}, errors.New("Not implemented")
}
func (testcase *testcase) Instances() (cloudprovider.Instances, bool) {
return &instances{testcase.instanceID}, true
}
func (testcase *testcase) DisksAreAttached(instanceID string, volumeIDs []string) (map[string]bool, error) {
expected := &testcase.disksAreAttached
areAttached := make(map[string]bool)
if len(expected.volumeIDs) == 0 && expected.instanceID == "" {
// testcase.volumeIDs looks uninitialized, test did not expect to call DisksAreAttached
testcase.t.Errorf("Unexpected DisksAreAttached call!")
return areAttached, errors.New("Unexpected DisksAreAttached call")
}
if !reflect.DeepEqual(expected.volumeIDs, volumeIDs) {
testcase.t.Errorf("Unexpected DisksAreAttached call: expected volumeIDs %v, got %v", expected.volumeIDs, volumeIDs)
return areAttached, errors.New("Unexpected DisksAreAttached call: wrong volumeID")
}
if expected.instanceID != instanceID {
testcase.t.Errorf("Unexpected DisksAreAttached call: expected instanceID %s, got %s", expected.instanceID, instanceID)
return areAttached, errors.New("Unexpected DisksAreAttached call: wrong instanceID")
}
glog.V(4).Infof("DisksAreAttached call: %v, %s, returning %v, %v", volumeIDs, instanceID, expected.areAttached, expected.ret)
return expected.areAttached, expected.ret
}
// Implementation of fake cloudprovider.Instances
type instances struct {
instanceID string
}
func (instances *instances) NodeAddresses(name types.NodeName) ([]v1.NodeAddress, error) {
return []v1.NodeAddress{}, errors.New("Not implemented")
}
func (instances *instances) NodeAddressesByProviderID(providerID string) ([]v1.NodeAddress, error) {
return []v1.NodeAddress{}, errors.New("Not implemented")
}
func (instances *instances) ExternalID(name types.NodeName) (string, error) {
return "", errors.New("Not implemented")
}
func (instances *instances) InstanceID(name types.NodeName) (string, error) {
return instances.instanceID, nil
}
func (instances *instances) InstanceType(name types.NodeName) (string, error) {
return "", errors.New("Not implemented")
}
func (instances *instances) InstanceTypeByProviderID(providerID string) (string, error) {
return "", errors.New("Not implemented")
}
func (instances *instances) InstanceExistsByProviderID(providerID string) (bool, error) {
return false, errors.New("unimplemented")
}
func (instances *instances) List(filter string) ([]types.NodeName, error) {
return []types.NodeName{}, errors.New("Not implemented")
}
func (instances *instances) AddSSHKeyToAllInstances(user string, keyData []byte) error {
return errors.New("Not implemented")
}
func (instances *instances) CurrentNodeName(hostname string) (types.NodeName, error) {
return "", errors.New("Not implemented")
}

541
vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder.go generated vendored Normal file
View File

@ -0,0 +1,541 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cinder
import (
"errors"
"fmt"
"os"
"path"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/openstack"
"k8s.io/kubernetes/pkg/util/keymutex"
"k8s.io/kubernetes/pkg/util/mount"
kstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
// This is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&cinderPlugin{}}
}
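// CinderProvider is the subset of the OpenStack cloud provider API that the
// Cinder volume plugin depends on. *openstack.OpenStack satisfies it (see
// getCloudProvider); tests substitute fakes.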
type CinderProvider interface {
AttachDisk(instanceID, volumeID string) (string, error)
DetachDisk(instanceID, volumeID string) error
DeleteVolume(volumeID string) error
CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, bool, error)
GetDevicePath(volumeID string) string
InstanceID() (string, error)
GetAttachmentDiskPath(instanceID, volumeID string) (string, error)
OperationPending(diskName string) (bool, string, error)
DiskIsAttached(instanceID, volumeID string) (bool, error)
DisksAreAttached(instanceID string, volumeIDs []string) (map[string]bool, error)
ShouldTrustDevicePath() bool
Instances() (cloudprovider.Instances, bool)
ExpandVolume(volumeID string, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error)
}
type cinderPlugin struct {
host volume.VolumeHost
// Guarding SetUp and TearDown operations
volumeLocks keymutex.KeyMutex
}
var _ volume.VolumePlugin = &cinderPlugin{}
var _ volume.PersistentVolumePlugin = &cinderPlugin{}
var _ volume.DeletableVolumePlugin = &cinderPlugin{}
var _ volume.ProvisionableVolumePlugin = &cinderPlugin{}
const (
cinderVolumePluginName = "kubernetes.io/cinder"
)
func (plugin *cinderPlugin) Init(host volume.VolumeHost) error {
plugin.host = host
plugin.volumeLocks = keymutex.NewKeyMutex()
return nil
}
func (plugin *cinderPlugin) GetPluginName() string {
return cinderVolumePluginName
}
func (plugin *cinderPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return "", err
}
return volumeSource.VolumeID, nil
}
func (plugin *cinderPlugin) CanSupport(spec *volume.Spec) bool {
return (spec.Volume != nil && spec.Volume.Cinder != nil) || (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.Cinder != nil)
}
func (plugin *cinderPlugin) RequiresRemount() bool {
return false
}
func (plugin *cinderPlugin) SupportsMountOption() bool {
return true
}
func (plugin *cinderPlugin) SupportsBulkVolumeVerification() bool {
return false
}
func (plugin *cinderPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
return []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
}
}
func (plugin *cinderPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
return plugin.newMounterInternal(spec, pod.UID, &CinderDiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *cinderPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager cdManager, mounter mount.Interface) (volume.Mounter, error) {
cinder, readOnly, err := getVolumeSource(spec)
if err != nil {
return nil, err
}
pdName := cinder.VolumeID
fsType := cinder.FSType
return &cinderVolumeMounter{
cinderVolume: &cinderVolume{
podUID: podUID,
volName: spec.Name(),
pdName: pdName,
mounter: mounter,
manager: manager,
plugin: plugin,
},
fsType: fsType,
readOnly: readOnly,
blockDeviceMounter: volumehelper.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil
}
func (plugin *cinderPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
return plugin.newUnmounterInternal(volName, podUID, &CinderDiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *cinderPlugin) newUnmounterInternal(volName string, podUID types.UID, manager cdManager, mounter mount.Interface) (volume.Unmounter, error) {
return &cinderVolumeUnmounter{
&cinderVolume{
podUID: podUID,
volName: volName,
manager: manager,
mounter: mounter,
plugin: plugin,
}}, nil
}
func (plugin *cinderPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
return plugin.newDeleterInternal(spec, &CinderDiskUtil{})
}
func (plugin *cinderPlugin) newDeleterInternal(spec *volume.Spec, manager cdManager) (volume.Deleter, error) {
if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.Cinder == nil {
return nil, fmt.Errorf("spec.PersistentVolumeSource.Cinder is nil")
}
return &cinderVolumeDeleter{
&cinderVolume{
volName: spec.Name(),
pdName: spec.PersistentVolume.Spec.Cinder.VolumeID,
manager: manager,
plugin: plugin,
}}, nil
}
func (plugin *cinderPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
return plugin.newProvisionerInternal(options, &CinderDiskUtil{})
}
func (plugin *cinderPlugin) newProvisionerInternal(options volume.VolumeOptions, manager cdManager) (volume.Provisioner, error) {
return &cinderVolumeProvisioner{
cinderVolume: &cinderVolume{
manager: manager,
plugin: plugin,
},
options: options,
}, nil
}
func getCloudProvider(cloudProvider cloudprovider.Interface) (CinderProvider, error) {
if cloud, ok := cloudProvider.(*openstack.OpenStack); ok && cloud != nil {
return cloud, nil
}
return nil, fmt.Errorf("wrong cloud type")
}
func (plugin *cinderPlugin) getCloudProvider() (CinderProvider, error) {
cloud := plugin.host.GetCloudProvider()
if cloud == nil {
glog.Errorf("Cloud provider not initialized properly")
return nil, errors.New("Cloud provider not initialized properly")
}
switch cloud := cloud.(type) {
case *openstack.OpenStack:
return cloud, nil
default:
return nil, errors.New("Invalid cloud provider: expected OpenStack.")
}
}
func (plugin *cinderPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
mounter := plugin.host.GetMounter(plugin.GetPluginName())
pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName())
sourceName, err := mounter.GetDeviceNameFromMount(mountPath, pluginDir)
if err != nil {
return nil, err
}
glog.V(4).Infof("Found volume %s mounted to %s", sourceName, mountPath)
cinderVolume := &v1.Volume{
Name: volumeName,
VolumeSource: v1.VolumeSource{
Cinder: &v1.CinderVolumeSource{
VolumeID: sourceName,
},
},
}
return volume.NewSpecFromVolume(cinderVolume), nil
}
var _ volume.ExpandableVolumePlugin = &cinderPlugin{}
func (plugin *cinderPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize resource.Quantity, oldSize resource.Quantity) (resource.Quantity, error) {
cinder, _, err := getVolumeSource(spec)
if err != nil {
return oldSize, err
}
cloud, err := plugin.getCloudProvider()
if err != nil {
return oldSize, err
}
expandedSize, err := cloud.ExpandVolume(cinder.VolumeID, oldSize, newSize)
if err != nil {
return oldSize, err
}
glog.V(2).Infof("volume %s expanded to new size %d successfully", cinder.VolumeID, int(newSize.Value()))
return expandedSize, nil
}
func (plugin *cinderPlugin) RequiresFSResize() bool {
return true
}
// Abstract interface to PD operations.
type cdManager interface {
// Attaches the disk to the kubelet's host machine.
AttachDisk(mounter *cinderVolumeMounter, globalPDPath string) error
// Detaches the disk from the kubelet's host machine.
DetachDisk(unmounter *cinderVolumeUnmounter) error
// Creates a volume
CreateVolume(provisioner *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, labels map[string]string, fstype string, err error)
// Deletes a volume
DeleteVolume(deleter *cinderVolumeDeleter) error
}
var _ volume.Mounter = &cinderVolumeMounter{}
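// cinderVolumeMounter mounts an attached Cinder volume into a pod directory
// by bind-mounting the volume's global mount path.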
type cinderVolumeMounter struct {
*cinderVolume
fsType string
readOnly bool
blockDeviceMounter *mount.SafeFormatAndMount
}
// cinderVolume represents a disk resource provided by OpenStack Cinder
// that is attached to the kubelet's host machine and exposed to the pod.
type cinderVolume struct {
volName string
podUID types.UID
// Unique identifier of the volume, used to find the disk resource in the provider.
pdName string
// Filesystem type, optional.
fsType string
// Specifies whether the disk will be attached as read-only.
readOnly bool
// Utility interface that provides API calls to the provider to attach/detach disks.
manager cdManager
// Mounter interface that provides system calls to mount the global path to the pod local path.
mounter mount.Interface
// diskMounter provides the interface that is used to mount the actual block device.
blockDeviceMounter mount.Interface
plugin *cinderPlugin
volume.MetricsNil
}
func (b *cinderVolumeMounter) GetAttributes() volume.Attributes {
return volume.Attributes{
ReadOnly: b.readOnly,
Managed: !b.readOnly,
SupportsSELinux: true,
}
}
// Checks prior to mount operations to verify that the required components (binaries, etc.)
// to mount the volume are available on the underlying node.
// If not, it returns an error
func (b *cinderVolumeMounter) CanMount() error {
return nil
}
func (b *cinderVolumeMounter) SetUp(fsGroup *int64) error {
return b.SetUpAt(b.GetPath(), fsGroup)
}
// SetUp bind mounts to the volume path.
func (b *cinderVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
glog.V(5).Infof("Cinder SetUp %s to %s", b.pdName, dir)
b.plugin.volumeLocks.LockKey(b.pdName)
defer b.plugin.volumeLocks.UnlockKey(b.pdName)
notmnt, err := b.mounter.IsLikelyNotMountPoint(dir)
if err != nil && !os.IsNotExist(err) {
glog.Errorf("Cannot validate mount point: %s %v", dir, err)
return err
}
if !notmnt {
glog.V(4).Infof("Something is already mounted to target %s", dir)
return nil
}
globalPDPath := makeGlobalPDName(b.plugin.host, b.pdName)
options := []string{"bind"}
if b.readOnly {
options = append(options, "ro")
}
if err := os.MkdirAll(dir, 0750); err != nil {
glog.V(4).Infof("Could not create directory %s: %v", dir, err)
return err
}
// Perform a bind mount to the full path to allow duplicate mounts of the same PD.
glog.V(4).Infof("Attempting to mount cinder volume %s to %s with options %v", b.pdName, dir, options)
err = b.mounter.Mount(globalPDPath, dir, "", options)
if err != nil {
glog.V(4).Infof("Mount failed: %v", err)
notmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return err
}
if !notmnt {
if mntErr = b.mounter.Unmount(dir); mntErr != nil {
glog.Errorf("Failed to unmount: %v", mntErr)
return err
}
notmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return err
}
if !notmnt {
// This is very odd, we don't expect it. We'll try again next sync loop.
glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", b.GetPath())
return err
}
}
os.Remove(dir)
glog.Errorf("Failed to mount %s: %v", dir, err)
return err
}
if !b.readOnly {
volume.SetVolumeOwnership(b, fsGroup)
}
glog.V(3).Infof("Cinder volume %s mounted to %s", b.pdName, dir)
return nil
}
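// makeGlobalPDName returns the global mount point for a Cinder volume, e.g.
// /var/lib/kubelet/plugins/kubernetes.io/cinder/mounts/<volumeID>.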
func makeGlobalPDName(host volume.VolumeHost, devName string) string {
return path.Join(host.GetPluginDir(cinderVolumePluginName), mount.MountsInGlobalPDPath, devName)
}
func (cd *cinderVolume) GetPath() string {
name := cinderVolumePluginName
return cd.plugin.host.GetPodVolumeDir(cd.podUID, kstrings.EscapeQualifiedNameForDisk(name), cd.volName)
}
type cinderVolumeUnmounter struct {
*cinderVolume
}
var _ volume.Unmounter = &cinderVolumeUnmounter{}
func (c *cinderVolumeUnmounter) TearDown() error {
return c.TearDownAt(c.GetPath())
}
// Unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (c *cinderVolumeUnmounter) TearDownAt(dir string) error {
if pathExists, pathErr := util.PathExists(dir); pathErr != nil {
return fmt.Errorf("Error checking if path exists: %v", pathErr)
} else if !pathExists {
glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir)
return nil
}
glog.V(5).Infof("Cinder TearDown of %s", dir)
notmnt, err := c.mounter.IsLikelyNotMountPoint(dir)
if err != nil {
glog.V(4).Infof("IsLikelyNotMountPoint check failed: %v", err)
return err
}
if notmnt {
glog.V(4).Infof("Nothing is mounted to %s, ignoring", dir)
return os.Remove(dir)
}
// Find Cinder volumeID to lock the right volume
// TODO: refactor VolumePlugin.NewUnmounter to get full volume.Spec just like
// NewMounter. We could then find volumeID there without probing MountRefs.
refs, err := mount.GetMountRefs(c.mounter, dir)
if err != nil {
glog.V(4).Infof("GetMountRefs failed: %v", err)
return err
}
if len(refs) == 0 {
glog.V(4).Infof("Directory %s is not mounted", dir)
return fmt.Errorf("directory %s is not mounted", dir)
}
c.pdName = path.Base(refs[0])
glog.V(4).Infof("Found volume %s mounted to %s", c.pdName, dir)
// lock the volume (and thus wait for any concurrent SetUpAt to finish)
c.plugin.volumeLocks.LockKey(c.pdName)
defer c.plugin.volumeLocks.UnlockKey(c.pdName)
// Reload list of references, there might be SetUpAt finished in the meantime
refs, err = mount.GetMountRefs(c.mounter, dir)
if err != nil {
glog.V(4).Infof("GetMountRefs failed: %v", err)
return err
}
if err := c.mounter.Unmount(dir); err != nil {
glog.V(4).Infof("Unmount failed: %v", err)
return err
}
glog.V(3).Infof("Successfully unmounted: %s\n", dir)
notmnt, mntErr := c.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return mntErr
}
if notmnt {
if err := os.Remove(dir); err != nil {
glog.V(4).Infof("Failed to remove directory after unmount: %v", err)
return err
}
}
return nil
}
type cinderVolumeDeleter struct {
*cinderVolume
}
var _ volume.Deleter = &cinderVolumeDeleter{}
func (r *cinderVolumeDeleter) GetPath() string {
name := cinderVolumePluginName
return r.plugin.host.GetPodVolumeDir(r.podUID, kstrings.EscapeQualifiedNameForDisk(name), r.volName)
}
func (r *cinderVolumeDeleter) Delete() error {
return r.manager.DeleteVolume(r)
}
type cinderVolumeProvisioner struct {
*cinderVolume
options volume.VolumeOptions
}
var _ volume.Provisioner = &cinderVolumeProvisioner{}
func (c *cinderVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
if !volume.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
}
volumeID, sizeGB, labels, fstype, err := c.manager.CreateVolume(c)
if err != nil {
return nil, err
}
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: c.options.PVName,
Labels: labels,
Annotations: map[string]string{
volumehelper.VolumeDynamicallyCreatedByKey: "cinder-dynamic-provisioner",
},
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy,
AccessModes: c.options.PVC.Spec.AccessModes,
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
},
PersistentVolumeSource: v1.PersistentVolumeSource{
Cinder: &v1.CinderVolumeSource{
VolumeID: volumeID,
FSType: fstype,
ReadOnly: false,
},
},
MountOptions: c.options.MountOptions,
},
}
if len(c.options.PVC.Spec.AccessModes) == 0 {
pv.Spec.AccessModes = c.plugin.GetAccessModes()
}
return pv, nil
}
func getVolumeSource(spec *volume.Spec) (*v1.CinderVolumeSource, bool, error) {
if spec.Volume != nil && spec.Volume.Cinder != nil {
return spec.Volume.Cinder, spec.Volume.Cinder.ReadOnly, nil
} else if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.Cinder != nil {
return spec.PersistentVolume.Spec.Cinder, spec.ReadOnly, nil
}
return nil, false, fmt.Errorf("Spec does not reference a Cinder volume type")
}
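// Example (illustrative sketch with hypothetical values, not part of the
// upstream file): getVolumeSource accepts either an inline pod volume or a
// PersistentVolume, so a caller could look like:
//
//	spec := volume.NewSpecFromVolume(&v1.Volume{
//		Name:         "vol",
//		VolumeSource: v1.VolumeSource{Cinder: &v1.CinderVolumeSource{VolumeID: "id", ReadOnly: true}},
//	})
//	src, readOnly, err := getVolumeSource(spec) // src.VolumeID == "id", readOnly == true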

229
vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_test.go generated vendored Normal file
View File

@ -0,0 +1,229 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cinder
import (
"fmt"
"os"
"path"
"testing"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
func TestCanSupport(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("cinderTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/cinder")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if plug.GetPluginName() != "kubernetes.io/cinder" {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{Cinder: &v1.CinderVolumeSource{}}}}) {
t.Errorf("Expected true")
}
if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{Cinder: &v1.CinderVolumeSource{}}}}}) {
t.Errorf("Expected true")
}
}
type fakePDManager struct {
// How long should AttachDisk/DetachDisk take - we need slower AttachDisk in a test.
attachDetachDuration time.Duration
}
func getFakeDeviceName(host volume.VolumeHost, pdName string) string {
return path.Join(host.GetPluginDir(cinderVolumePluginName), "device", pdName)
}
// Real Cinder AttachDisk attaches a cinder volume. If it is not yet mounted,
// it mounts it to globalPDPath.
// We create a dummy directory (="device") and bind-mount it to globalPDPath
func (fake *fakePDManager) AttachDisk(b *cinderVolumeMounter, globalPDPath string) error {
globalPath := makeGlobalPDName(b.plugin.host, b.pdName)
fakeDeviceName := getFakeDeviceName(b.plugin.host, b.pdName)
err := os.MkdirAll(fakeDeviceName, 0750)
if err != nil {
return err
}
// Attaching a Cinder volume can be slow...
time.Sleep(fake.attachDetachDuration)
// The volume is "attached", bind-mount it if it's not mounted yet.
notmnt, err := b.mounter.IsLikelyNotMountPoint(globalPath)
if err != nil {
if os.IsNotExist(err) {
if err := os.MkdirAll(globalPath, 0750); err != nil {
return err
}
notmnt = true
} else {
return err
}
}
if notmnt {
err = b.mounter.Mount(fakeDeviceName, globalPath, "", []string{"bind"})
if err != nil {
return err
}
}
return nil
}
func (fake *fakePDManager) DetachDisk(c *cinderVolumeUnmounter) error {
globalPath := makeGlobalPDName(c.plugin.host, c.pdName)
fakeDeviceName := getFakeDeviceName(c.plugin.host, c.pdName)
// unmount the bind-mount - should be fast
err := c.mounter.Unmount(globalPath)
if err != nil {
return err
}
// "Detach" the fake "device"
err = os.RemoveAll(fakeDeviceName)
if err != nil {
return err
}
return nil
}
func (fake *fakePDManager) CreateVolume(c *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, labels map[string]string, fstype string, err error) {
return "test-volume-name", 1, nil, "", nil
}
func (fake *fakePDManager) DeleteVolume(cd *cinderVolumeDeleter) error {
if cd.pdName != "test-volume-name" {
return fmt.Errorf("Deleter got unexpected volume name: %s", cd.pdName)
}
return nil
}
func TestPlugin(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("cinderTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/cinder")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
spec := &v1.Volume{
Name: "vol1",
VolumeSource: v1.VolumeSource{
Cinder: &v1.CinderVolumeSource{
VolumeID: "pd",
FSType: "ext4",
},
},
}
mounter, err := plug.(*cinderPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &fakePDManager{0}, &mount.FakeMounter{})
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Errorf("Got a nil Mounter")
}
volPath := path.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~cinder/vol1")
path := mounter.GetPath()
if path != volPath {
t.Errorf("Got unexpected path: %s", path)
}
if err := mounter.SetUp(nil); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", path)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
unmounter, err := plug.(*cinderPlugin).newUnmounterInternal("vol1", types.UID("poduid"), &fakePDManager{0}, &mount.FakeMounter{})
if err != nil {
t.Errorf("Failed to make a new Unmounter: %v", err)
}
if unmounter == nil {
t.Errorf("Got a nil Unmounter")
}
if err := unmounter.TearDown(); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(path); err == nil {
t.Errorf("TearDown() failed, volume path still exists: %s", path)
} else if !os.IsNotExist(err) {
t.Errorf("TearDown() failed: %v", err)
}
// Test Provisioner
options := volume.VolumeOptions{
PVC: volumetest.CreateTestPVC("100Mi", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}),
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
}
provisioner, err := plug.(*cinderPlugin).newProvisionerInternal(options, &fakePDManager{0})
if err != nil {
t.Errorf("newProvisionerInternal() failed: %v", err)
}
persistentSpec, err := provisioner.Provision()
if err != nil {
t.Errorf("Provision() failed: %v", err)
}
if persistentSpec.Spec.PersistentVolumeSource.Cinder.VolumeID != "test-volume-name" {
t.Errorf("Provision() returned unexpected volume ID: %s", persistentSpec.Spec.PersistentVolumeSource.Cinder.VolumeID)
}
cap := persistentSpec.Spec.Capacity[v1.ResourceStorage]
size := cap.Value()
if size != 1024*1024*1024 {
t.Errorf("Provision() returned unexpected volume size: %v", size)
}
// Test Deleter
volSpec := &volume.Spec{
PersistentVolume: persistentSpec,
}
deleter, err := plug.(*cinderPlugin).newDeleterInternal(volSpec, &fakePDManager{0})
if err != nil {
t.Errorf("newDeleterInternal() failed: %v", err)
}
err = deleter.Delete()
if err != nil {
t.Errorf("Deleter() failed: %v", err)
}
}

247
vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_util.go generated vendored Normal file
View File

@ -0,0 +1,247 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cinder
import (
"errors"
"fmt"
"io/ioutil"
"os"
"strings"
"time"
"github.com/golang/glog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/utils/exec"
)
type CinderDiskUtil struct{}
// AttachDisk attaches the disk specified by the mounter's Cinder volume ID to
// the current kubelet and mounts it to its global path.
func (util *CinderDiskUtil) AttachDisk(b *cinderVolumeMounter, globalPDPath string) error {
options := []string{}
if b.readOnly {
options = append(options, "ro")
}
cloud, err := b.plugin.getCloudProvider()
if err != nil {
return err
}
instanceid, err := cloud.InstanceID()
if err != nil {
return err
}
diskid, err := cloud.AttachDisk(instanceid, b.pdName)
if err != nil {
return err
}
var devicePath string
numTries := 0
for {
devicePath = cloud.GetDevicePath(diskid)
probeAttachedVolume()
_, err := os.Stat(devicePath)
if err == nil {
break
}
if err != nil && !os.IsNotExist(err) {
return err
}
numTries++
if numTries == 10 {
return errors.New("Could not attach disk: Timeout after 60s")
}
time.Sleep(time.Second * 6)
}
notmnt, err := b.mounter.IsLikelyNotMountPoint(globalPDPath)
if err != nil {
if os.IsNotExist(err) {
if err := os.MkdirAll(globalPDPath, 0750); err != nil {
return err
}
notmnt = true
} else {
return err
}
}
if notmnt {
err = b.blockDeviceMounter.FormatAndMount(devicePath, globalPDPath, b.fsType, options)
if err != nil {
os.Remove(globalPDPath)
return err
}
glog.V(2).Infof("Safe mount successful: %q\n", devicePath)
}
return nil
}
// Unmounts the device and detaches the disk from the kubelet's host machine.
func (util *CinderDiskUtil) DetachDisk(cd *cinderVolumeUnmounter) error {
globalPDPath := makeGlobalPDName(cd.plugin.host, cd.pdName)
if err := cd.mounter.Unmount(globalPDPath); err != nil {
return err
}
if err := os.Remove(globalPDPath); err != nil {
return err
}
glog.V(2).Infof("Successfully unmounted main device: %s\n", globalPDPath)
cloud, err := cd.plugin.getCloudProvider()
if err != nil {
return err
}
instanceid, err := cloud.InstanceID()
if err != nil {
return err
}
if err = cloud.DetachDisk(instanceid, cd.pdName); err != nil {
return err
}
glog.V(2).Infof("Successfully detached cinder volume %s", cd.pdName)
return nil
}
func (util *CinderDiskUtil) DeleteVolume(cd *cinderVolumeDeleter) error {
cloud, err := cd.plugin.getCloudProvider()
if err != nil {
return err
}
if err = cloud.DeleteVolume(cd.pdName); err != nil {
// OpenStack cloud provider returns volume.tryAgainError when necessary,
// no handling needed here.
glog.V(2).Infof("Error deleting cinder volume %s: %v", cd.pdName, err)
return err
}
glog.V(2).Infof("Successfully deleted cinder volume %s", cd.pdName)
return nil
}
func getZonesFromNodes(kubeClient clientset.Interface) (sets.String, error) {
// TODO: caching, currently it is overkill because it calls this function
// only when it creates dynamic PV
zones := make(sets.String)
nodes, err := kubeClient.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
glog.V(2).Infof("Error listing nodes")
return zones, err
}
for _, node := range nodes.Items {
if zone, ok := node.Labels[kubeletapis.LabelZoneFailureDomain]; ok {
zones.Insert(zone)
}
}
glog.V(4).Infof("zones found: %v", zones)
return zones, nil
}
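// CreateVolume provisions a Cinder volume for the provisioner's PVC: the
// requested capacity is rounded up to whole GiB, provisioner parameters
// ("type", "availability", fstype) are applied, and when no availability zone
// is given one is chosen from the zones of the cluster's nodes. A
// StorageClass could, for example, carry parameters such as type: "fast" or
// availability: "nova" (illustrative values).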
func (util *CinderDiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, volumeLabels map[string]string, fstype string, err error) {
cloud, err := c.plugin.getCloudProvider()
if err != nil {
return "", 0, nil, "", err
}
capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
volSizeBytes := capacity.Value()
// Cinder works with gigabytes, convert to GiB with rounding up
volSizeGB := int(volume.RoundUpSize(volSizeBytes, 1024*1024*1024))
name := volume.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 255) // Cinder volume name can have up to 255 characters
vtype := ""
availability := ""
// Apply ProvisionerParameters (case-insensitive). We leave validation of
// the values to the cloud provider.
for k, v := range c.options.Parameters {
switch strings.ToLower(k) {
case "type":
vtype = v
case "availability":
availability = v
case volume.VolumeParameterFSType:
fstype = v
default:
return "", 0, nil, "", fmt.Errorf("invalid option %q for volume plugin %s", k, c.plugin.GetPluginName())
}
}
// TODO: implement PVC.Selector parsing
if c.options.PVC.Spec.Selector != nil {
return "", 0, nil, "", fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on Cinder")
}
if availability == "" {
// No zone specified, choose one randomly in the same region
zones, err := getZonesFromNodes(c.plugin.host.GetKubeClient())
if err != nil {
glog.V(2).Infof("error getting zone information: %v", err)
return "", 0, nil, "", err
}
// If we did not get any zones, let's leave it blank; gophercloud will
// then use "nova" as the default zone.
if len(zones) > 0 {
availability = volume.ChooseZoneForVolume(zones, c.options.PVC.Name)
}
}
volumeID, volumeAZ, ignoreVolumeAZ, err := cloud.CreateVolume(name, volSizeGB, vtype, availability, c.options.CloudTags)
if err != nil {
glog.V(2).Infof("Error creating cinder volume: %v", err)
return "", 0, nil, "", err
}
glog.V(2).Infof("Successfully created cinder volume %s", volumeID)
// These labels are needed so that pods using the volume are scheduled to the same AZ as the volume.
volumeLabels = make(map[string]string)
if !ignoreVolumeAZ {
volumeLabels[kubeletapis.LabelZoneFailureDomain] = volumeAZ
}
return volumeID, volSizeGB, volumeLabels, fstype, nil
}
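// probeAttachedVolume makes a newly attached device visible to the node by
// rescanning the SCSI bus and replaying udev events.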
func probeAttachedVolume() error {
// rescan scsi bus
scsiHostRescan()
executor := exec.New()
args := []string{"trigger"}
cmd := executor.Command("udevadm", args...)
_, err := cmd.CombinedOutput()
if err != nil {
glog.Errorf("error running udevadm trigger %v\n", err)
return err
}
glog.V(4).Infof("Successfully probed all attachments")
return nil
}
func scsiHostRescan() {
	scsiPath := "/sys/class/scsi_host/"
	dirs, err := ioutil.ReadDir(scsiPath)
	if err != nil {
		return
	}
	for _, f := range dirs {
		name := scsiPath + f.Name() + "/scan"
		// "- - -" is the wildcard (channel target lun) triple: rescan all targets.
		data := []byte("- - -")
		// Best effort: a failed write to one host's scan file should not
		// abort the rescan of the remaining hosts.
		ioutil.WriteFile(name, data, 0666)
	}
}
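// exampleRescanHost is an illustrative sketch, not part of the vendored
// file: it shows the single sysfs write that scsiHostRescan performs per
// host. The "- - -" payload is the wildcard (channel target lun) triple,
// telling the kernel to rescan every target on that host. The host name
// argument is hypothetical.
func exampleRescanHost(host string) error {
	return ioutil.WriteFile("/sys/class/scsi_host/"+host+"/scan", []byte("- - -"), 0666)
}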

18
vendor/k8s.io/kubernetes/pkg/volume/cinder/doc.go generated vendored Normal file

@ -0,0 +1,18 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package cinder contains the internal representation of cinder volumes.
package cinder // import "k8s.io/kubernetes/pkg/volume/cinder"

59
vendor/k8s.io/kubernetes/pkg/volume/configmap/BUILD generated vendored Normal file

@ -0,0 +1,59 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"configmap.go",
"doc.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/configmap",
deps = [
"//pkg/util/io:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["configmap_test.go"],
importpath = "k8s.io/kubernetes/pkg/volume/configmap",
library = ":go_default_library",
deps = [
"//pkg/volume:go_default_library",
"//pkg/volume/empty_dir:go_default_library",
"//pkg/volume/testing:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

14
vendor/k8s.io/kubernetes/pkg/volume/configmap/OWNERS generated vendored Normal file

@ -0,0 +1,14 @@
approvers:
- pmorie
- saad-ali
- thockin
- matchstick
reviewers:
- ivan4th
- rata
- sjenning
- saad-ali
- jsafrane
- rootfs
- jingxu97
- msau42

319
vendor/k8s.io/kubernetes/pkg/volume/configmap/configmap.go generated vendored Normal file

@ -0,0 +1,319 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package configmap
import (
"fmt"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
ioutil "k8s.io/kubernetes/pkg/util/io"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)
// ProbeVolumePlugins is the entry point for plugin detection in a package.
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&configMapPlugin{}}
}
const (
configMapPluginName = "kubernetes.io/configmap"
)
// configMapPlugin implements the VolumePlugin interface.
type configMapPlugin struct {
host volume.VolumeHost
getConfigMap func(namespace, name string) (*v1.ConfigMap, error)
}
var _ volume.VolumePlugin = &configMapPlugin{}
func (plugin *configMapPlugin) Init(host volume.VolumeHost) error {
plugin.host = host
plugin.getConfigMap = host.GetConfigMapFunc()
return nil
}
func (plugin *configMapPlugin) GetPluginName() string {
return configMapPluginName
}
func (plugin *configMapPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
volumeSource, _ := getVolumeSource(spec)
if volumeSource == nil {
return "", fmt.Errorf("Spec does not reference a ConfigMap volume type")
}
return fmt.Sprintf(
"%v/%v",
spec.Name(),
volumeSource.Name), nil
}
func (plugin *configMapPlugin) CanSupport(spec *volume.Spec) bool {
return spec.Volume != nil && spec.Volume.ConfigMap != nil
}
func (plugin *configMapPlugin) RequiresRemount() bool {
return true
}
func (plugin *configMapPlugin) SupportsMountOption() bool {
return false
}
func (plugin *configMapPlugin) SupportsBulkVolumeVerification() bool {
return false
}
func (plugin *configMapPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
return &configMapVolumeMounter{
configMapVolume: &configMapVolume{
spec.Name(),
pod.UID,
plugin,
plugin.host.GetMounter(plugin.GetPluginName()),
plugin.host.GetWriter(),
volume.MetricsNil{},
},
source: *spec.Volume.ConfigMap,
pod: *pod,
opts: &opts,
getConfigMap: plugin.getConfigMap,
}, nil
}
func (plugin *configMapPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
return &configMapVolumeUnmounter{
&configMapVolume{
volName,
podUID,
plugin,
plugin.host.GetMounter(plugin.GetPluginName()),
plugin.host.GetWriter(),
volume.MetricsNil{},
},
}, nil
}
func (plugin *configMapPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
configMapVolume := &v1.Volume{
Name: volumeName,
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{},
},
}
return volume.NewSpecFromVolume(configMapVolume), nil
}
type configMapVolume struct {
volName string
podUID types.UID
plugin *configMapPlugin
mounter mount.Interface
writer ioutil.Writer
volume.MetricsNil
}
var _ volume.Volume = &configMapVolume{}
func (sv *configMapVolume) GetPath() string {
return sv.plugin.host.GetPodVolumeDir(sv.podUID, strings.EscapeQualifiedNameForDisk(configMapPluginName), sv.volName)
}
// configMapVolumeMounter handles retrieving configMaps from the API server
// and placing them into the volume on the host.
type configMapVolumeMounter struct {
*configMapVolume
source v1.ConfigMapVolumeSource
pod v1.Pod
opts *volume.VolumeOptions
getConfigMap func(namespace, name string) (*v1.ConfigMap, error)
}
var _ volume.Mounter = &configMapVolumeMounter{}
func (sv *configMapVolume) GetAttributes() volume.Attributes {
return volume.Attributes{
ReadOnly: true,
Managed: true,
SupportsSELinux: true,
}
}
func wrappedVolumeSpec() volume.Spec {
// This is the spec for the volume that this plugin wraps.
return volume.Spec{
// This should be on a tmpfs instead of the local disk; the problem is
// charging the memory for the tmpfs to the right cgroup. We should make
// this a tmpfs when we can do the accounting correctly.
Volume: &v1.Volume{VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}},
}
}
// CanMount checks, prior to mount operations, that the required components
// (binaries, etc.) for mounting the volume are available on the underlying
// node. If they are not, it returns an error.
func (b *configMapVolumeMounter) CanMount() error {
return nil
}
func (b *configMapVolumeMounter) SetUp(fsGroup *int64) error {
return b.SetUpAt(b.GetPath(), fsGroup)
}
func (b *configMapVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
glog.V(3).Infof("Setting up volume %v for pod %v at %v", b.volName, b.pod.UID, dir)
// Wrap EmptyDir, let it do the setup.
wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec(), &b.pod, *b.opts)
if err != nil {
return err
}
if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
return err
}
optional := b.source.Optional != nil && *b.source.Optional
configMap, err := b.getConfigMap(b.pod.Namespace, b.source.Name)
if err != nil {
if !(errors.IsNotFound(err) && optional) {
glog.Errorf("Couldn't get configMap %v/%v: %v", b.pod.Namespace, b.source.Name, err)
return err
}
configMap = &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: b.pod.Namespace,
Name: b.source.Name,
},
}
}
totalBytes := totalBytes(configMap)
glog.V(3).Infof("Received configMap %v/%v containing (%v) pieces of data, %v total bytes",
b.pod.Namespace,
b.source.Name,
len(configMap.Data),
totalBytes)
payload, err := MakePayload(b.source.Items, configMap, b.source.DefaultMode, optional)
if err != nil {
return err
}
writerContext := fmt.Sprintf("pod %v/%v volume %v", b.pod.Namespace, b.pod.Name, b.volName)
writer, err := volumeutil.NewAtomicWriter(dir, writerContext)
if err != nil {
glog.Errorf("Error creating atomic writer: %v", err)
return err
}
err = writer.Write(payload)
if err != nil {
glog.Errorf("Error writing payload to dir: %v", err)
return err
}
err = volume.SetVolumeOwnership(b, fsGroup)
if err != nil {
glog.Errorf("Error applying volume ownership settings for group: %v", fsGroup)
return err
}
return nil
}
// MakePayload is exported so that it can be called from the projection volume driver.
func MakePayload(mappings []v1.KeyToPath, configMap *v1.ConfigMap, defaultMode *int32, optional bool) (map[string]volumeutil.FileProjection, error) {
if defaultMode == nil {
return nil, fmt.Errorf("No defaultMode used, not even the default value for it")
}
payload := make(map[string]volumeutil.FileProjection, len(configMap.Data))
var fileProjection volumeutil.FileProjection
if len(mappings) == 0 {
for name, data := range configMap.Data {
fileProjection.Data = []byte(data)
fileProjection.Mode = *defaultMode
payload[name] = fileProjection
}
} else {
for _, ktp := range mappings {
content, ok := configMap.Data[ktp.Key]
if !ok {
if optional {
continue
}
return nil, fmt.Errorf("configmap references non-existent config key: %s", ktp.Key)
}
fileProjection.Data = []byte(content)
if ktp.Mode != nil {
fileProjection.Mode = *ktp.Mode
} else {
fileProjection.Mode = *defaultMode
}
payload[ktp.Path] = fileProjection
}
}
return payload, nil
}
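// examplePayload is an illustrative sketch, not part of the vendored file:
// it shows MakePayload projecting every ConfigMap key into a file named
// after the key when no mappings are given, with each file receiving the
// default mode. The ConfigMap contents here are hypothetical.
func examplePayload() (map[string]volumeutil.FileProjection, error) {
	cm := &v1.ConfigMap{
		Data: map[string]string{"game.properties": "lives=3"},
	}
	defaultMode := int32(0644)
	// Expected result: {"game.properties": {Data: []byte("lives=3"), Mode: 0644}}
	return MakePayload(nil, cm, &defaultMode, false)
}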
func totalBytes(configMap *v1.ConfigMap) int {
totalSize := 0
for _, value := range configMap.Data {
totalSize += len(value)
}
return totalSize
}
// configMapVolumeUnmounter handles cleaning up configMap volumes.
type configMapVolumeUnmounter struct {
*configMapVolume
}
var _ volume.Unmounter = &configMapVolumeUnmounter{}
func (c *configMapVolumeUnmounter) TearDown() error {
return c.TearDownAt(c.GetPath())
}
func (c *configMapVolumeUnmounter) TearDownAt(dir string) error {
return volume.UnmountViaEmptyDir(dir, c.plugin.host, c.volName, wrappedVolumeSpec(), c.podUID)
}
func getVolumeSource(spec *volume.Spec) (*v1.ConfigMapVolumeSource, bool) {
var readOnly bool
var volumeSource *v1.ConfigMapVolumeSource
if spec.Volume != nil && spec.Volume.ConfigMap != nil {
volumeSource = spec.Volume.ConfigMap
readOnly = spec.ReadOnly
}
return volumeSource, readOnly
}

611
vendor/k8s.io/kubernetes/pkg/volume/configmap/configmap_test.go generated vendored Normal file

@ -0,0 +1,611 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package configmap
import (
"fmt"
"io/ioutil"
"os"
"path"
"reflect"
"strings"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/empty_dir"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/kubernetes/pkg/volume/util"
)
func TestMakePayload(t *testing.T) {
caseMappingMode := int32(0400)
cases := []struct {
name string
mappings []v1.KeyToPath
configMap *v1.ConfigMap
mode int32
optional bool
payload map[string]util.FileProjection
success bool
}{
{
name: "no overrides",
configMap: &v1.ConfigMap{
Data: map[string]string{
"foo": "foo",
"bar": "bar",
},
},
mode: 0644,
payload: map[string]util.FileProjection{
"foo": {Data: []byte("foo"), Mode: 0644},
"bar": {Data: []byte("bar"), Mode: 0644},
},
success: true,
},
{
name: "basic 1",
mappings: []v1.KeyToPath{
{
Key: "foo",
Path: "path/to/foo.txt",
},
},
configMap: &v1.ConfigMap{
Data: map[string]string{
"foo": "foo",
"bar": "bar",
},
},
mode: 0644,
payload: map[string]util.FileProjection{
"path/to/foo.txt": {Data: []byte("foo"), Mode: 0644},
},
success: true,
},
{
name: "subdirs",
mappings: []v1.KeyToPath{
{
Key: "foo",
Path: "path/to/1/2/3/foo.txt",
},
},
configMap: &v1.ConfigMap{
Data: map[string]string{
"foo": "foo",
"bar": "bar",
},
},
mode: 0644,
payload: map[string]util.FileProjection{
"path/to/1/2/3/foo.txt": {Data: []byte("foo"), Mode: 0644},
},
success: true,
},
{
name: "subdirs 2",
mappings: []v1.KeyToPath{
{
Key: "foo",
Path: "path/to/1/2/3/foo.txt",
},
},
configMap: &v1.ConfigMap{
Data: map[string]string{
"foo": "foo",
"bar": "bar",
},
},
mode: 0644,
payload: map[string]util.FileProjection{
"path/to/1/2/3/foo.txt": {Data: []byte("foo"), Mode: 0644},
},
success: true,
},
{
name: "subdirs 3",
mappings: []v1.KeyToPath{
{
Key: "foo",
Path: "path/to/1/2/3/foo.txt",
},
{
Key: "bar",
Path: "another/path/to/the/esteemed/bar.bin",
},
},
configMap: &v1.ConfigMap{
Data: map[string]string{
"foo": "foo",
"bar": "bar",
},
},
mode: 0644,
payload: map[string]util.FileProjection{
"path/to/1/2/3/foo.txt": {Data: []byte("foo"), Mode: 0644},
"another/path/to/the/esteemed/bar.bin": {Data: []byte("bar"), Mode: 0644},
},
success: true,
},
{
name: "non existent key",
mappings: []v1.KeyToPath{
{
Key: "zab",
Path: "path/to/foo.txt",
},
},
configMap: &v1.ConfigMap{
Data: map[string]string{
"foo": "foo",
"bar": "bar",
},
},
mode: 0644,
success: false,
},
{
name: "mapping with Mode",
mappings: []v1.KeyToPath{
{
Key: "foo",
Path: "foo.txt",
Mode: &caseMappingMode,
},
{
Key: "bar",
Path: "bar.bin",
Mode: &caseMappingMode,
},
},
configMap: &v1.ConfigMap{
Data: map[string]string{
"foo": "foo",
"bar": "bar",
},
},
mode: 0644,
payload: map[string]util.FileProjection{
"foo.txt": {Data: []byte("foo"), Mode: caseMappingMode},
"bar.bin": {Data: []byte("bar"), Mode: caseMappingMode},
},
success: true,
},
{
name: "mapping with defaultMode",
mappings: []v1.KeyToPath{
{
Key: "foo",
Path: "foo.txt",
},
{
Key: "bar",
Path: "bar.bin",
},
},
configMap: &v1.ConfigMap{
Data: map[string]string{
"foo": "foo",
"bar": "bar",
},
},
mode: 0644,
payload: map[string]util.FileProjection{
"foo.txt": {Data: []byte("foo"), Mode: 0644},
"bar.bin": {Data: []byte("bar"), Mode: 0644},
},
success: true,
},
{
name: "optional non existent key",
mappings: []v1.KeyToPath{
{
Key: "zab",
Path: "path/to/foo.txt",
},
},
configMap: &v1.ConfigMap{
Data: map[string]string{
"foo": "foo",
"bar": "bar",
},
},
mode: 0644,
optional: true,
payload: map[string]util.FileProjection{},
success: true,
},
}
for _, tc := range cases {
actualPayload, err := MakePayload(tc.mappings, tc.configMap, &tc.mode, tc.optional)
if err != nil && tc.success {
t.Errorf("%v: unexpected failure making payload: %v", tc.name, err)
continue
}
if err == nil && !tc.success {
t.Errorf("%v: unexpected success making payload", tc.name)
continue
}
if !tc.success {
continue
}
if e, a := tc.payload, actualPayload; !reflect.DeepEqual(e, a) {
t.Errorf("%v: expected and actual payload do not match", tc.name)
}
}
}
func newTestHost(t *testing.T, clientset clientset.Interface) (string, volume.VolumeHost) {
tempDir, err := ioutil.TempDir("/tmp", "configmap_volume_test.")
if err != nil {
t.Fatalf("can't make a temp rootdir: %v", err)
}
return tempDir, volumetest.NewFakeVolumeHost(tempDir, clientset, empty_dir.ProbeVolumePlugins())
}
func TestCanSupport(t *testing.T) {
pluginMgr := volume.VolumePluginMgr{}
tempDir, host := newTestHost(t, nil)
defer os.RemoveAll(tempDir)
pluginMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host)
plugin, err := pluginMgr.FindPluginByName(configMapPluginName)
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if plugin.GetPluginName() != configMapPluginName {
t.Errorf("Wrong name: %s", plugin.GetPluginName())
}
if !plugin.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{ConfigMap: &v1.ConfigMapVolumeSource{LocalObjectReference: v1.LocalObjectReference{Name: ""}}}}}) {
t.Errorf("Expected true")
}
if plugin.CanSupport(&volume.Spec{}) {
t.Errorf("Expected false")
}
}
func TestPlugin(t *testing.T) {
var (
testPodUID = types.UID("test_pod_uid")
testVolumeName = "test_volume_name"
testNamespace = "test_configmap_namespace"
testName = "test_configmap_name"
volumeSpec = volumeSpec(testVolumeName, testName, 0644)
configMap = configMap(testNamespace, testName)
client = fake.NewSimpleClientset(&configMap)
pluginMgr = volume.VolumePluginMgr{}
tempDir, host = newTestHost(t, client)
)
defer os.RemoveAll(tempDir)
pluginMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host)
plugin, err := pluginMgr.FindPluginByName(configMapPluginName)
if err != nil {
t.Errorf("Can't find the plugin by name")
}
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, UID: testPodUID}}
mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Fatalf("Got a nil Mounter")
}
vName, err := plugin.GetVolumeName(volume.NewSpecFromVolume(volumeSpec))
if err != nil {
t.Errorf("Failed to GetVolumeName: %v", err)
}
if vName != "test_volume_name/test_configmap_name" {
t.Errorf("Got unexpect VolumeName %v", vName)
}
volumePath := mounter.GetPath()
if !strings.HasSuffix(volumePath, fmt.Sprintf("pods/test_pod_uid/volumes/kubernetes.io~configmap/test_volume_name")) {
t.Errorf("Got unexpected path: %s", volumePath)
}
fsGroup := int64(1001)
err = mounter.SetUp(&fsGroup)
if err != nil {
t.Errorf("Failed to setup volume: %v", err)
}
if _, err := os.Stat(volumePath); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", volumePath)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
doTestConfigMapDataInVolume(volumePath, configMap, t)
doTestCleanAndTeardown(plugin, testPodUID, testVolumeName, volumePath, t)
}
// Test the case where the plugin's ready file exists, but the volume dir is not a
// mountpoint, which is the state the system will be in after reboot. The dir
// should be mounted and the configMap data written to it.
func TestPluginReboot(t *testing.T) {
var (
testPodUID = types.UID("test_pod_uid3")
testVolumeName = "test_volume_name"
testNamespace = "test_configmap_namespace"
testName = "test_configmap_name"
volumeSpec = volumeSpec(testVolumeName, testName, 0644)
configMap = configMap(testNamespace, testName)
client = fake.NewSimpleClientset(&configMap)
pluginMgr = volume.VolumePluginMgr{}
rootDir, host = newTestHost(t, client)
)
defer os.RemoveAll(rootDir)
pluginMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host)
plugin, err := pluginMgr.FindPluginByName(configMapPluginName)
if err != nil {
t.Errorf("Can't find the plugin by name")
}
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, UID: testPodUID}}
mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Fatalf("Got a nil Mounter")
}
podMetadataDir := fmt.Sprintf("%v/pods/test_pod_uid3/plugins/kubernetes.io~configmap/test_volume_name", rootDir)
util.SetReady(podMetadataDir)
volumePath := mounter.GetPath()
if !strings.HasSuffix(volumePath, fmt.Sprintf("pods/test_pod_uid3/volumes/kubernetes.io~configmap/test_volume_name")) {
t.Errorf("Got unexpected path: %s", volumePath)
}
fsGroup := int64(1001)
err = mounter.SetUp(&fsGroup)
if err != nil {
t.Errorf("Failed to setup volume: %v", err)
}
if _, err := os.Stat(volumePath); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", volumePath)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
doTestConfigMapDataInVolume(volumePath, configMap, t)
doTestCleanAndTeardown(plugin, testPodUID, testVolumeName, volumePath, t)
}
func TestPluginOptional(t *testing.T) {
var (
testPodUID = types.UID("test_pod_uid")
testVolumeName = "test_volume_name"
testNamespace = "test_configmap_namespace"
testName = "test_configmap_name"
trueVal = true
volumeSpec = volumeSpec(testVolumeName, testName, 0644)
client = fake.NewSimpleClientset()
pluginMgr = volume.VolumePluginMgr{}
tempDir, host = newTestHost(t, client)
)
volumeSpec.VolumeSource.ConfigMap.Optional = &trueVal
defer os.RemoveAll(tempDir)
pluginMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host)
plugin, err := pluginMgr.FindPluginByName(configMapPluginName)
if err != nil {
t.Errorf("Can't find the plugin by name")
}
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, UID: testPodUID}}
mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Errorf("Got a nil Mounter")
}
vName, err := plugin.GetVolumeName(volume.NewSpecFromVolume(volumeSpec))
if err != nil {
t.Errorf("Failed to GetVolumeName: %v", err)
}
if vName != "test_volume_name/test_configmap_name" {
t.Errorf("Got unexpect VolumeName %v", vName)
}
volumePath := mounter.GetPath()
if !strings.HasSuffix(volumePath, fmt.Sprintf("pods/test_pod_uid/volumes/kubernetes.io~configmap/test_volume_name")) {
t.Errorf("Got unexpected path: %s", volumePath)
}
fsGroup := int64(1001)
err = mounter.SetUp(&fsGroup)
if err != nil {
t.Errorf("Failed to setup volume: %v", err)
}
if _, err := os.Stat(volumePath); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", volumePath)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
infos, err := ioutil.ReadDir(volumePath)
if err != nil {
t.Fatalf("couldn't find volume path, %s", volumePath)
}
if len(infos) != 0 {
t.Errorf("empty directory, %s, not found", volumePath)
}
doTestCleanAndTeardown(plugin, testPodUID, testVolumeName, volumePath, t)
}
func TestPluginKeysOptional(t *testing.T) {
var (
testPodUID = types.UID("test_pod_uid")
testVolumeName = "test_volume_name"
testNamespace = "test_configmap_namespace"
testName = "test_configmap_name"
trueVal = true
volumeSpec = volumeSpec(testVolumeName, testName, 0644)
configMap = configMap(testNamespace, testName)
client = fake.NewSimpleClientset(&configMap)
pluginMgr = volume.VolumePluginMgr{}
tempDir, host = newTestHost(t, client)
)
volumeSpec.VolumeSource.ConfigMap.Items = []v1.KeyToPath{
{Key: "data-1", Path: "data-1"},
{Key: "data-2", Path: "data-2"},
{Key: "data-3", Path: "data-3"},
{Key: "missing", Path: "missing"},
}
volumeSpec.VolumeSource.ConfigMap.Optional = &trueVal
defer os.RemoveAll(tempDir)
pluginMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host)
plugin, err := pluginMgr.FindPluginByName(configMapPluginName)
if err != nil {
t.Errorf("Can't find the plugin by name")
}
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, UID: testPodUID}}
mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Errorf("Got a nil Mounter")
}
vName, err := plugin.GetVolumeName(volume.NewSpecFromVolume(volumeSpec))
if err != nil {
t.Errorf("Failed to GetVolumeName: %v", err)
}
if vName != "test_volume_name/test_configmap_name" {
t.Errorf("Got unexpect VolumeName %v", vName)
}
volumePath := mounter.GetPath()
if !strings.HasSuffix(volumePath, fmt.Sprintf("pods/test_pod_uid/volumes/kubernetes.io~configmap/test_volume_name")) {
t.Errorf("Got unexpected path: %s", volumePath)
}
fsGroup := int64(1001)
err = mounter.SetUp(&fsGroup)
if err != nil {
t.Errorf("Failed to setup volume: %v", err)
}
if _, err := os.Stat(volumePath); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", volumePath)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
doTestConfigMapDataInVolume(volumePath, configMap, t)
doTestCleanAndTeardown(plugin, testPodUID, testVolumeName, volumePath, t)
}
func volumeSpec(volumeName, configMapName string, defaultMode int32) *v1.Volume {
return &v1.Volume{
Name: volumeName,
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: configMapName,
},
DefaultMode: &defaultMode,
},
},
}
}
func configMap(namespace, name string) v1.ConfigMap {
return v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: name,
},
Data: map[string]string{
"data-1": "value-1",
"data-2": "value-2",
"data-3": "value-3",
},
}
}
func doTestConfigMapDataInVolume(volumePath string, configMap v1.ConfigMap, t *testing.T) {
for key, value := range configMap.Data {
configMapDataHostPath := path.Join(volumePath, key)
if _, err := os.Stat(configMapDataHostPath); err != nil {
t.Fatalf("SetUp() failed, couldn't find configMap data on disk: %v", configMapDataHostPath)
} else {
actualValue, err := ioutil.ReadFile(configMapDataHostPath)
if err != nil {
t.Fatalf("Couldn't read configMap data from: %v", configMapDataHostPath)
}
if value != string(actualValue) {
t.Errorf("Unexpected value; expected %q, got %q", value, actualValue)
}
}
}
}
func doTestCleanAndTeardown(plugin volume.VolumePlugin, podUID types.UID, testVolumeName, volumePath string, t *testing.T) {
unmounter, err := plugin.NewUnmounter(testVolumeName, podUID)
if err != nil {
t.Errorf("Failed to make a new Unmounter: %v", err)
}
if unmounter == nil {
t.Fatalf("Got a nil Unmounter")
}
if err := unmounter.TearDown(); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(volumePath); err == nil {
t.Errorf("TearDown() failed, volume path still exists: %s", volumePath)
} else if !os.IsNotExist(err) {
t.Errorf("TearDown() failed: %v", err)
}
}

18
vendor/k8s.io/kubernetes/pkg/volume/configmap/doc.go generated vendored Normal file

@ -0,0 +1,18 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package configmap contains the internal representation of configMap volumes.
package configmap // import "k8s.io/kubernetes/pkg/volume/configmap"

74
vendor/k8s.io/kubernetes/pkg/volume/csi/BUILD generated vendored Normal file

@ -0,0 +1,74 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"csi_attacher.go",
"csi_client.go",
"csi_mounter.go",
"csi_plugin.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/csi",
visibility = ["//visibility:public"],
deps = [
"//pkg/util/mount:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/github.com/container-storage-interface/spec/lib/go/csi:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/storage/v1alpha1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"csi_attacher_test.go",
"csi_client_test.go",
"csi_mounter_test.go",
"csi_plugin_test.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/csi",
library = ":go_default_library",
deps = [
"//pkg/volume:go_default_library",
"//pkg/volume/csi/fake:go_default_library",
"//pkg/volume/testing:go_default_library",
"//vendor/github.com/container-storage-interface/spec/lib/go/csi:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/storage/v1alpha1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/volume/csi/fake:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

4
vendor/k8s.io/kubernetes/pkg/volume/csi/OWNERS generated vendored Normal file

@ -0,0 +1,4 @@
approvers:
- jsafrane
- saad-ali
- vladimirvivien

272
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_attacher.go generated vendored Normal file

@ -0,0 +1,272 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package csi
import (
"crypto/sha256"
"errors"
"fmt"
"strings"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1alpha1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/volume"
)
type csiAttacher struct {
plugin *csiPlugin
k8s kubernetes.Interface
waitSleepTime time.Duration
}
// volume.Attacher methods
var _ volume.Attacher = &csiAttacher{}
func (c *csiAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
if spec == nil {
glog.Error(log("attacher.Attach missing volume.Spec"))
return "", errors.New("missing spec")
}
csiSource, err := getCSISourceFromSpec(spec)
if err != nil {
glog.Error(log("attacher.Attach failed to get CSI persistent source: %v", err))
return "", err
}
node := string(nodeName)
pvName := spec.PersistentVolume.GetName()
attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, node)
attachment := &storage.VolumeAttachment{
ObjectMeta: meta.ObjectMeta{
Name: attachID,
},
Spec: storage.VolumeAttachmentSpec{
NodeName: node,
Attacher: csiSource.Driver,
Source: storage.VolumeAttachmentSource{
PersistentVolumeName: &pvName,
},
},
Status: storage.VolumeAttachmentStatus{Attached: false},
}
_, err = c.k8s.StorageV1alpha1().VolumeAttachments().Create(attachment)
alreadyExist := false
if err != nil {
if !apierrs.IsAlreadyExists(err) {
glog.Error(log("attacher.Attach failed: %v", err))
return "", err
}
alreadyExist = true
}
if alreadyExist {
glog.V(4).Info(log("attachment [%v] for volume [%v] already exists (will not be recreated)", attachID, csiSource.VolumeHandle))
} else {
glog.V(4).Info(log("attachment [%v] for volume [%v] created successfully", attachID, csiSource.VolumeHandle))
}
	// Probe for attachment updates here.
	// NOTE: any error from waiting for the attachment is only logged, because
	// the primary intent of this method is to create the VolumeAttachment.
	// Do NOT return that error here; it is mitigated in attacher.WaitForAttach.
volAttachmentOK := true
if _, err := c.waitForVolumeAttachment(csiSource.VolumeHandle, attachID, csiTimeout); err != nil {
volAttachmentOK = false
glog.Error(log("attacher.Attach attempted to wait for attachment to be ready, but failed with: %v", err))
}
glog.V(4).Info(log("attacher.Attach finished OK with VolumeAttachment verified=%t: attachment object [%s]", volAttachmentOK, attachID))
return attachID, nil
}
func (c *csiAttacher) WaitForAttach(spec *volume.Spec, attachID string, pod *v1.Pod, timeout time.Duration) (string, error) {
source, err := getCSISourceFromSpec(spec)
if err != nil {
glog.Error(log("attacher.WaitForAttach failed to extract CSI volume source: %v", err))
return "", err
}
return c.waitForVolumeAttachment(source.VolumeHandle, attachID, timeout)
}
func (c *csiAttacher) waitForVolumeAttachment(volumeHandle, attachID string, timeout time.Duration) (string, error) {
glog.V(4).Info(log("probing for updates from CSI driver for [attachment.ID=%v]", attachID))
ticker := time.NewTicker(c.waitSleepTime)
defer ticker.Stop()
timer := time.NewTimer(timeout) // TODO (vladimirvivien) investigate making this configurable
defer timer.Stop()
	//TODO (vladimirvivien) instead of polling the API server, change this to an API server watch
for {
select {
case <-ticker.C:
glog.V(4).Info(log("probing VolumeAttachment [id=%v]", attachID))
attach, err := c.k8s.StorageV1alpha1().VolumeAttachments().Get(attachID, meta.GetOptions{})
if err != nil {
glog.Error(log("attacher.WaitForAttach failed (will continue to try): %v", err))
continue
}
// if being deleted, fail fast
if attach.GetDeletionTimestamp() != nil {
glog.Error(log("VolumeAttachment [%s] has deletion timestamp, will not continue to wait for attachment", attachID))
return "", errors.New("volume attachment is being deleted")
}
// attachment OK
if attach.Status.Attached {
return attachID, nil
}
// driver reports attach error
attachErr := attach.Status.AttachError
if attachErr != nil {
glog.Error(log("attachment for %v failed: %v", volumeHandle, attachErr.Message))
return "", errors.New(attachErr.Message)
}
case <-timer.C:
glog.Error(log("attacher.WaitForAttach timeout after %v [volume=%v; attachment.ID=%v]", timeout, volumeHandle, attachID))
return "", fmt.Errorf("attachment timeout for volume %v", volumeHandle)
}
}
}
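// examplePollUntil is an illustrative sketch, not part of the vendored
// file: it isolates the ticker/timer select pattern used by the wait loops
// in this file. Work happens on each tick; the timer bounds the total wait.
// Unlike the loops above, this simplified probe treats every error as fatal.
func examplePollUntil(probe func() (bool, error), interval, timeout time.Duration) error {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	timer := time.NewTimer(timeout)
	defer timer.Stop()
	for {
		select {
		case <-ticker.C:
			done, err := probe()
			if err != nil {
				return err
			}
			if done {
				return nil
			}
		case <-timer.C:
			return fmt.Errorf("timed out after %v", timeout)
		}
	}
}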
func (c *csiAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
glog.V(4).Info(log("probing attachment status for %d volume(s) ", len(specs)))
attached := make(map[*volume.Spec]bool)
for _, spec := range specs {
if spec == nil {
glog.Error(log("attacher.VolumesAreAttached missing volume.Spec"))
return nil, errors.New("missing spec")
}
source, err := getCSISourceFromSpec(spec)
if err != nil {
glog.Error(log("attacher.VolumesAreAttached failed: %v", err))
continue
}
attachID := getAttachmentName(source.VolumeHandle, source.Driver, string(nodeName))
glog.V(4).Info(log("probing attachment status for VolumeAttachment %v", attachID))
attach, err := c.k8s.StorageV1alpha1().VolumeAttachments().Get(attachID, meta.GetOptions{})
if err != nil {
glog.Error(log("attacher.VolumesAreAttached failed for attach.ID=%v: %v", attachID, err))
continue
}
glog.V(4).Info(log("attacher.VolumesAreAttached attachment [%v] has status.attached=%t", attachID, attach.Status.Attached))
attached[spec] = attach.Status.Attached
}
return attached, nil
}
func (c *csiAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) {
glog.V(4).Info(log("attacher.GetDeviceMountPath is not implemented"))
return "", nil
}
func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
glog.V(4).Info(log("attacher.MountDevice is not implemented"))
return nil
}
var _ volume.Detacher = &csiAttacher{}
func (c *csiAttacher) Detach(volumeName string, nodeName types.NodeName) error {
// volumeName in format driverName<SEP>volumeHandle generated by plugin.GetVolumeName()
if volumeName == "" {
glog.Error(log("detacher.Detach missing value for parameter volumeName"))
return errors.New("missing exepected parameter volumeName")
}
parts := strings.Split(volumeName, volNameSep)
if len(parts) != 2 {
glog.Error(log("detacher.Detach insufficient info encoded in volumeName"))
return errors.New("volumeName missing expected data")
}
driverName := parts[0]
volID := parts[1]
attachID := getAttachmentName(volID, driverName, string(nodeName))
if err := c.k8s.StorageV1alpha1().VolumeAttachments().Delete(attachID, nil); err != nil {
glog.Error(log("detacher.Detach failed to delete VolumeAttachment [%s]: %v", attachID, err))
return err
}
glog.V(4).Info(log("detacher deleted ok VolumeAttachment.ID=%s", attachID))
return c.waitForVolumeDetachment(volID, attachID)
}
func (c *csiAttacher) waitForVolumeDetachment(volumeHandle, attachID string) error {
glog.V(4).Info(log("probing for updates from CSI driver for [attachment.ID=%v]", attachID))
ticker := time.NewTicker(c.waitSleepTime)
defer ticker.Stop()
timeout := c.waitSleepTime * 10
timer := time.NewTimer(timeout) // TODO (vladimirvivien) investigate making this configurable
defer timer.Stop()
	//TODO (vladimirvivien) instead of polling the API server, change this to an API server watch
for {
select {
case <-ticker.C:
glog.V(4).Info(log("probing VolumeAttachment [id=%v]", attachID))
attach, err := c.k8s.StorageV1alpha1().VolumeAttachments().Get(attachID, meta.GetOptions{})
if err != nil {
if apierrs.IsNotFound(err) {
//object deleted or never existed, done
glog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] not found, object deleted", attachID, volumeHandle))
return nil
}
glog.Error(log("detacher.WaitForDetach failed for volume [%s] (will continue to try): %v", volumeHandle, err))
continue
}
// driver reports attach error
detachErr := attach.Status.DetachError
if detachErr != nil {
glog.Error(log("detachment for VolumeAttachment [%v] for volume [%s] failed: %v", attachID, volumeHandle, detachErr.Message))
return errors.New(detachErr.Message)
}
case <-timer.C:
glog.Error(log("detacher.WaitForDetach timeout after %v [volume=%v; attachment.ID=%v]", timeout, volumeHandle, attachID))
return fmt.Errorf("detachment timed out for volume %v", volumeHandle)
}
}
}
func (c *csiAttacher) UnmountDevice(deviceMountPath string) error {
glog.V(4).Info(log("detacher.UnmountDevice is not implemented"))
return nil
}
// getAttachmentName returns csi-<sha252(volName,csiDriverName,NodeName>
func getAttachmentName(volName, csiDriverName, nodeName string) string {
result := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", volName, csiDriverName, nodeName)))
return fmt.Sprintf("csi-%x", result)
}
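// exampleAttachmentName is an illustrative sketch, not part of the vendored
// file: because getAttachmentName is a pure hash of its inputs, the same
// (volume, driver, node) triple always yields the same VolumeAttachment
// name. This determinism is what lets Attach above treat an AlreadyExists
// error as success. The inputs are hypothetical.
func exampleAttachmentName() bool {
	a := getAttachmentName("vol-0001", "com.example.csi", "node-1")
	b := getAttachmentName("vol-0001", "com.example.csi", "node-1")
	return a == b // always true
}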

321
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_attacher_test.go generated vendored Normal file

@ -0,0 +1,321 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package csi
import (
"fmt"
"os"
"testing"
"time"
storage "k8s.io/api/storage/v1alpha1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/volume"
)
func makeTestAttachment(attachID, nodeName, pvName string) *storage.VolumeAttachment {
return &storage.VolumeAttachment{
ObjectMeta: meta.ObjectMeta{
Name: attachID,
},
Spec: storage.VolumeAttachmentSpec{
NodeName: nodeName,
Attacher: "mock",
Source: storage.VolumeAttachmentSource{
PersistentVolumeName: &pvName,
},
},
Status: storage.VolumeAttachmentStatus{
Attached: false,
AttachError: nil,
DetachError: nil,
},
}
}
func TestAttacherAttach(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer os.RemoveAll(tmpDir)
attacher, err := plug.NewAttacher()
if err != nil {
t.Fatalf("failed to create new attacher: %v", err)
}
csiAttacher := attacher.(*csiAttacher)
testCases := []struct {
name string
nodeName string
driverName string
volumeName string
attachID string
shouldFail bool
}{
{
name: "test ok 1",
nodeName: "testnode-01",
driverName: "testdriver-01",
volumeName: "testvol-01",
attachID: getAttachmentName("testvol-01", "testdriver-01", "testnode-01"),
},
{
name: "test ok 2",
nodeName: "node02",
driverName: "driver02",
volumeName: "vol02",
attachID: getAttachmentName("vol02", "driver02", "node02"),
},
{
name: "mismatch vol",
nodeName: "node02",
driverName: "driver02",
volumeName: "vol01",
attachID: getAttachmentName("vol02", "driver02", "node02"),
shouldFail: true,
},
{
name: "mismatch driver",
nodeName: "node02",
driverName: "driver000",
volumeName: "vol02",
attachID: getAttachmentName("vol02", "driver02", "node02"),
shouldFail: true,
},
{
name: "mismatch node",
nodeName: "node000",
driverName: "driver000",
volumeName: "vol02",
attachID: getAttachmentName("vol02", "driver02", "node02"),
shouldFail: true,
},
}
// attacher loop
for i, tc := range testCases {
t.Log("test case: ", tc.name)
spec := volume.NewSpecFromPersistentVolume(makeTestPV(fmt.Sprintf("test-pv%d", i), 10, tc.driverName, tc.volumeName), false)
go func(id, nodename string, fail bool) {
attachID, err := csiAttacher.Attach(spec, types.NodeName(nodename))
if !fail && err != nil {
t.Error("was not expecting failure, but got err: ", err)
}
if attachID != id && !fail {
t.Errorf("expecting attachID %v, got %v", id, attachID)
}
}(tc.attachID, tc.nodeName, tc.shouldFail)
// update attachment to avoid long waitForAttachment
ticker := time.NewTicker(10 * time.Millisecond)
defer ticker.Stop()
// wait for attachment to be saved
var attach *storage.VolumeAttachment
for i := 0; i < 100; i++ {
attach, err = csiAttacher.k8s.StorageV1alpha1().VolumeAttachments().Get(tc.attachID, meta.GetOptions{})
if err != nil {
if apierrs.IsNotFound(err) {
<-ticker.C
continue
}
t.Error(err)
}
if attach != nil {
break
}
}
if attach == nil {
t.Error("attachment not found")
}
attach.Status.Attached = true
_, err = csiAttacher.k8s.StorageV1alpha1().VolumeAttachments().Update(attach)
if err != nil {
t.Error(err)
}
}
}
func TestAttacherWaitForVolumeAttachment(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer os.RemoveAll(tmpDir)
attacher, err := plug.NewAttacher()
if err != nil {
t.Fatalf("failed to create new attacher: %v", err)
}
csiAttacher := attacher.(*csiAttacher)
nodeName := "test-node"
testCases := []struct {
name string
attached bool
attachErr *storage.VolumeError
sleepTime time.Duration
timeout time.Duration
shouldFail bool
}{
{name: "attach ok", attached: true, sleepTime: 10 * time.Millisecond, timeout: 50 * time.Millisecond},
{name: "attachment error", attachErr: &storage.VolumeError{Message: "missing volume"}, sleepTime: 10 * time.Millisecond, timeout: 30 * time.Millisecond},
{name: "time ran out", attached: false, sleepTime: 5 * time.Millisecond},
}
for i, tc := range testCases {
t.Logf("running test: %v", tc.name)
pvName := fmt.Sprintf("test-pv-%d", i)
volID := fmt.Sprintf("test-vol-%d", i)
attachID := getAttachmentName(volID, testDriver, nodeName)
attachment := makeTestAttachment(attachID, nodeName, pvName)
attachment.Status.Attached = tc.attached
attachment.Status.AttachError = tc.attachErr
csiAttacher.waitSleepTime = tc.sleepTime
		go func() {
			if _, err := csiAttacher.k8s.StorageV1alpha1().VolumeAttachments().Create(attachment); err != nil {
				// t.Fatalf must not be called from a goroutine other than the
				// one running the test; report the error with t.Errorf instead.
				t.Errorf("failed to attach: %v", err)
			}
		}()
retID, err := csiAttacher.waitForVolumeAttachment(volID, attachID, tc.timeout)
if tc.shouldFail && err == nil {
t.Error("expecting failure, but err is nil")
}
if tc.attachErr != nil {
if tc.attachErr.Message != err.Error() {
t.Errorf("expecting error [%v], got [%v]", tc.attachErr.Message, err.Error())
}
}
if err == nil && retID != attachID {
t.Errorf("attacher.WaitForAttach not returning attachment ID")
}
}
}
func TestAttacherVolumesAreAttached(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer os.RemoveAll(tmpDir)
attacher, err := plug.NewAttacher()
if err != nil {
t.Fatalf("failed to create new attacher: %v", err)
}
csiAttacher := attacher.(*csiAttacher)
nodeName := "test-node"
testCases := []struct {
name string
attachedStats map[string]bool
}{
{"attach + detach", map[string]bool{"vol-01": true, "vol-02": true, "vol-03": false, "vol-04": false, "vol-05": true}},
{"all detached", map[string]bool{"vol-11": false, "vol-12": false, "vol-13": false, "vol-14": false, "vol-15": false}},
{"all attached", map[string]bool{"vol-21": true, "vol-22": true, "vol-23": true, "vol-24": true, "vol-25": true}},
}
for _, tc := range testCases {
var specs []*volume.Spec
		// create and save volume attachments
for volName, stat := range tc.attachedStats {
pv := makeTestPV("test-pv", 10, testDriver, volName)
spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
specs = append(specs, spec)
attachID := getAttachmentName(volName, testDriver, nodeName)
attachment := makeTestAttachment(attachID, nodeName, pv.GetName())
attachment.Status.Attached = stat
_, err := csiAttacher.k8s.StorageV1alpha1().VolumeAttachments().Create(attachment)
if err != nil {
t.Fatalf("failed to attach: %v", err)
}
}
// retrieve attached status
stats, err := csiAttacher.VolumesAreAttached(specs, types.NodeName(nodeName))
if err != nil {
t.Fatal(err)
}
if len(tc.attachedStats) != len(stats) {
t.Errorf("expecting %d attachment status, got %d", len(tc.attachedStats), len(stats))
}
// compare attachment status for each spec
for spec, stat := range stats {
source, err := getCSISourceFromSpec(spec)
if err != nil {
t.Error(err)
}
if stat != tc.attachedStats[source.VolumeHandle] {
t.Errorf("expecting volume attachment %t, got %t", tc.attachedStats[source.VolumeHandle], stat)
}
}
}
}
func TestAttacherDetach(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer os.RemoveAll(tmpDir)
attacher, err := plug.NewAttacher()
if err != nil {
t.Fatalf("failed to create new attacher: %v", err)
}
csiAttacher := attacher.(*csiAttacher)
nodeName := "test-node"
testCases := []struct {
name string
volID string
attachID string
shouldFail bool
}{
{name: "normal test", volID: "vol-001", attachID: getAttachmentName("vol-001", testDriver, nodeName)},
{name: "normal test 2", volID: "vol-002", attachID: getAttachmentName("vol-002", testDriver, nodeName)},
{name: "object not found", volID: "vol-001", attachID: getAttachmentName("vol-002", testDriver, nodeName), shouldFail: true},
}
for _, tc := range testCases {
pv := makeTestPV("test-pv", 10, testDriver, tc.volID)
spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
attachment := makeTestAttachment(tc.attachID, nodeName, "test-pv")
_, err := csiAttacher.k8s.StorageV1alpha1().VolumeAttachments().Create(attachment)
if err != nil {
t.Fatalf("failed to attach: %v", err)
}
volumeName, err := plug.GetVolumeName(spec)
if err != nil {
t.Errorf("test case %s failed: %v", tc.name, err)
}
err = csiAttacher.Detach(volumeName, types.NodeName(nodeName))
if tc.shouldFail && err == nil {
t.Fatal("expecting failure, but err = nil")
}
if !tc.shouldFail && err != nil {
t.Fatalf("unexpected err: %v", err)
}
attach, err := csiAttacher.k8s.StorageV1alpha1().VolumeAttachments().Get(tc.attachID, meta.GetOptions{})
if err != nil {
if !apierrs.IsNotFound(err) {
t.Fatalf("unexpected err: %v", err)
}
} else {
if attach == nil {
t.Errorf("expecting attachment not to be nil, but it is")
}
}
}
}

244
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_client.go generated vendored Normal file

@ -0,0 +1,244 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package csi
import (
"bytes"
"errors"
"fmt"
"net"
"time"
csipb "github.com/container-storage-interface/spec/lib/go/csi"
"github.com/golang/glog"
grpctx "golang.org/x/net/context"
"google.golang.org/grpc"
api "k8s.io/api/core/v1"
)
type csiClient interface {
AssertSupportedVersion(ctx grpctx.Context, ver *csipb.Version) error
NodeProbe(ctx grpctx.Context, ver *csipb.Version) error
NodePublishVolume(
ctx grpctx.Context,
volumeid string,
readOnly bool,
targetPath string,
accessMode api.PersistentVolumeAccessMode,
volumeInfo map[string]string,
volumeAttribs map[string]string,
fsType string,
) error
NodeUnpublishVolume(ctx grpctx.Context, volID string, targetPath string) error
}
// csiDriverClient encapsulates all csi-plugin methods
type csiDriverClient struct {
network string
addr string
conn *grpc.ClientConn
idClient csipb.IdentityClient
nodeClient csipb.NodeClient
ctrlClient csipb.ControllerClient
versionAsserted bool
versionSupported bool
publishAsserted bool
publishCapable bool
}
func newCsiDriverClient(network, addr string) *csiDriverClient {
return &csiDriverClient{network: network, addr: addr}
}
// assertConnection ensures a valid connection has been established;
// if not, it creates a new connection and the associated clients.
func (c *csiDriverClient) assertConnection() error {
if c.conn == nil {
conn, err := grpc.Dial(
c.addr,
grpc.WithInsecure(),
grpc.WithDialer(func(target string, timeout time.Duration) (net.Conn, error) {
return net.Dial(c.network, target)
}),
)
if err != nil {
return err
}
c.conn = conn
c.idClient = csipb.NewIdentityClient(conn)
c.nodeClient = csipb.NewNodeClient(conn)
c.ctrlClient = csipb.NewControllerClient(conn)
// set supported version
}
return nil
}
// AssertSupportedVersion ensures driver supports specified spec version.
// If version is not supported, the assertion fails with an error.
// This test should be done early during the storage operation flow to avoid
// unnecessary calls later.
func (c *csiDriverClient) AssertSupportedVersion(ctx grpctx.Context, ver *csipb.Version) error {
if c.versionAsserted {
if !c.versionSupported {
return fmt.Errorf("version %s not supported", verToStr(ver))
}
return nil
}
if err := c.assertConnection(); err != nil {
c.versionAsserted = false
return err
}
glog.V(4).Info(log("asserting version supported by driver"))
rsp, err := c.idClient.GetSupportedVersions(ctx, &csipb.GetSupportedVersionsRequest{})
if err != nil {
c.versionAsserted = false
return err
}
supported := false
vers := rsp.GetSupportedVersions()
glog.V(4).Info(log("driver reports %d versions supported: %s", len(vers), versToStr(vers)))
for _, v := range vers {
		//TODO (vladimirvivien) use a more lenient heuristic: exact match or version-range match, etc.
if verToStr(v) == verToStr(ver) {
supported = true
break
}
}
c.versionAsserted = true
c.versionSupported = supported
if !supported {
return fmt.Errorf("version %s not supported", verToStr(ver))
}
glog.V(4).Info(log("version %s supported", verToStr(ver)))
return nil
}
func (c *csiDriverClient) NodeProbe(ctx grpctx.Context, ver *csipb.Version) error {
glog.V(4).Info(log("sending NodeProbe rpc call to csi driver: [version %v]", ver))
req := &csipb.NodeProbeRequest{Version: ver}
_, err := c.nodeClient.NodeProbe(ctx, req)
return err
}
func (c *csiDriverClient) NodePublishVolume(
ctx grpctx.Context,
volID string,
readOnly bool,
targetPath string,
accessMode api.PersistentVolumeAccessMode,
volumeInfo map[string]string,
volumeAttribs map[string]string,
fsType string,
) error {
glog.V(4).Info(log("calling NodePublishVolume rpc [volid=%s,target_path=%s]", volID, targetPath))
if volID == "" {
return errors.New("missing volume id")
}
if targetPath == "" {
return errors.New("missing target path")
}
if err := c.assertConnection(); err != nil {
glog.Errorf("%v: failed to assert a connection: %v", csiPluginName, err)
return err
}
req := &csipb.NodePublishVolumeRequest{
Version: csiVersion,
VolumeId: volID,
TargetPath: targetPath,
Readonly: readOnly,
PublishVolumeInfo: volumeInfo,
VolumeAttributes: volumeAttribs,
VolumeCapability: &csipb.VolumeCapability{
AccessMode: &csipb.VolumeCapability_AccessMode{
Mode: asCSIAccessMode(accessMode),
},
AccessType: &csipb.VolumeCapability_Mount{
Mount: &csipb.VolumeCapability_MountVolume{
FsType: fsType,
},
},
},
}
_, err := c.nodeClient.NodePublishVolume(ctx, req)
return err
}
func (c *csiDriverClient) NodeUnpublishVolume(ctx grpctx.Context, volID string, targetPath string) error {
glog.V(4).Info(log("calling NodeUnpublishVolume rpc: [volid=%s, target_path=%s", volID, targetPath))
if volID == "" {
return errors.New("missing volume id")
}
if targetPath == "" {
return errors.New("missing target path")
}
if err := c.assertConnection(); err != nil {
glog.Error(log("failed to assert a connection: %v", err))
return err
}
req := &csipb.NodeUnpublishVolumeRequest{
Version: csiVersion,
VolumeId: volID,
TargetPath: targetPath,
}
_, err := c.nodeClient.NodeUnpublishVolume(ctx, req)
return err
}
func asCSIAccessMode(am api.PersistentVolumeAccessMode) csipb.VolumeCapability_AccessMode_Mode {
switch am {
case api.ReadWriteOnce:
return csipb.VolumeCapability_AccessMode_SINGLE_NODE_WRITER
case api.ReadOnlyMany:
return csipb.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY
case api.ReadWriteMany:
return csipb.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER
}
return csipb.VolumeCapability_AccessMode_UNKNOWN
}
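// exampleAccessMode is an illustrative sketch, not part of the vendored
// file: a Kubernetes ReadWriteOnce access mode maps to the CSI
// SINGLE_NODE_WRITER capability, which is what NodePublishVolume above
// sends in VolumeCapability.AccessMode.
func exampleAccessMode() bool {
	return asCSIAccessMode(api.ReadWriteOnce) == csipb.VolumeCapability_AccessMode_SINGLE_NODE_WRITER
}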
func verToStr(ver *csipb.Version) string {
if ver == nil {
return ""
}
return fmt.Sprintf("%d.%d.%d", ver.GetMajor(), ver.GetMinor(), ver.GetPatch())
}
func versToStr(vers []*csipb.Version) string {
if vers == nil {
return ""
}
str := bytes.NewBufferString("[")
for _, v := range vers {
str.WriteString(fmt.Sprintf("{%s};", verToStr(v)))
}
str.WriteString("]")
return str.String()
}

149
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_client_test.go generated vendored Normal file

@ -0,0 +1,149 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package csi
import (
"errors"
"testing"
csipb "github.com/container-storage-interface/spec/lib/go/csi"
grpctx "golang.org/x/net/context"
"google.golang.org/grpc"
api "k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/volume/csi/fake"
)
func setupClient(t *testing.T) *csiDriverClient {
client := newCsiDriverClient("unix", "/tmp/test.sock")
client.conn = new(grpc.ClientConn) //avoids creating conn object
// setup mock grpc clients
client.idClient = fake.NewIdentityClient()
client.nodeClient = fake.NewNodeClient()
client.ctrlClient = fake.NewControllerClient()
return client
}
func TestClientAssertSupportedVersion(t *testing.T) {
testCases := []struct {
testName string
ver *csipb.Version
mustFail bool
err error
}{
{testName: "supported version", ver: &csipb.Version{Major: 0, Minor: 1, Patch: 0}},
{testName: "unsupported version", ver: &csipb.Version{Major: 0, Minor: 0, Patch: 0}, mustFail: true},
{testName: "grpc error", ver: &csipb.Version{Major: 0, Minor: 1, Patch: 0}, mustFail: true, err: errors.New("grpc error")},
}
for _, tc := range testCases {
t.Log("case: ", tc.testName)
client := setupClient(t)
client.idClient.(*fake.IdentityClient).SetNextError(tc.err)
err := client.AssertSupportedVersion(grpctx.Background(), tc.ver)
if tc.mustFail && err == nil {
t.Error("must fail, but err = nil")
}
}
}
func TestClientNodeProbe(t *testing.T) {
testCases := []struct {
testName string
ver *csipb.Version
mustFail bool
err error
}{
{testName: "supported version", ver: &csipb.Version{Major: 0, Minor: 1, Patch: 0}},
{testName: "grpc error", ver: &csipb.Version{Major: 0, Minor: 1, Patch: 0}, mustFail: true, err: errors.New("grpc error")},
}
for _, tc := range testCases {
t.Log("case: ", tc.testName)
client := setupClient(t)
client.nodeClient.(*fake.NodeClient).SetNextError(tc.err)
err := client.NodeProbe(grpctx.Background(), tc.ver)
if tc.mustFail && err == nil {
t.Error("must fail, but err = nil")
}
}
}
func TestClientNodePublishVolume(t *testing.T) {
testCases := []struct {
name string
volID string
targetPath string
fsType string
mustFail bool
err error
}{
{name: "test ok", volID: "vol-test", targetPath: "/test/path"},
{name: "missing volID", targetPath: "/test/path", mustFail: true},
{name: "missing target path", volID: "vol-test", mustFail: true},
{name: "bad fs", volID: "vol-test", targetPath: "/test/path", fsType: "badfs", mustFail: true},
{name: "grpc error", volID: "vol-test", targetPath: "/test/path", mustFail: true, err: errors.New("grpc error")},
}
client := setupClient(t)
for _, tc := range testCases {
t.Log("case: ", tc.name)
client.nodeClient.(*fake.NodeClient).SetNextError(tc.err)
err := client.NodePublishVolume(
grpctx.Background(),
tc.volID,
false,
tc.targetPath,
api.ReadWriteOnce,
map[string]string{"device": "/dev/null"},
map[string]string{"attr0": "val0"},
tc.fsType,
)
if tc.mustFail && err == nil {
t.Error("must fail, but err is nil: ", err)
}
}
}
func TestClientNodeUnpublishVolume(t *testing.T) {
testCases := []struct {
name string
volID string
targetPath string
mustFail bool
err error
}{
{name: "test ok", volID: "vol-test", targetPath: "/test/path"},
{name: "missing volID", targetPath: "/test/path", mustFail: true},
{name: "missing target path", volID: "vol-test", mustFail: true},
{name: "grpc error", volID: "vol-test", targetPath: "/test/path", mustFail: true, err: errors.New("grpc error")},
}
client := setupClient(t)
for _, tc := range testCases {
t.Log("case: ", tc.name)
client.nodeClient.(*fake.NodeClient).SetNextError(tc.err)
err := client.NodeUnpublishVolume(grpctx.Background(), tc.volID, tc.targetPath)
if tc.mustFail && err == nil {
t.Error("must fail, but err is nil: ", err)
}
}
}

401
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter.go generated vendored Normal file

@ -0,0 +1,401 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package csi
import (
"encoding/json"
"errors"
"fmt"
"os"
"path"
"github.com/golang/glog"
grpctx "golang.org/x/net/context"
api "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
kstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
)
//TODO (vladimirvivien) move this in a central loc later
var (
volDataKey = struct {
specVolID,
volHandle,
driverName,
nodeName,
attachmentID string
}{
"specVolID",
"volumeHandle",
"driverName",
"nodeName",
"attachmentID",
}
)
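// Illustrative only (not part of the original source): volDataKey fixes the
// JSON field names persisted in vol_data.json, so a saved file looks like
//
//	{"specVolID":"pv-1","volumeHandle":"vol-0","driverName":"test-driver","nodeName":"node-1","attachmentID":"attach-1"}
//
// with hypothetical values shown here.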
type csiMountMgr struct {
k8s kubernetes.Interface
csiClient csiClient
plugin *csiPlugin
driverName string
volumeID string
specVolumeID string
readOnly bool
spec *volume.Spec
pod *api.Pod
podUID types.UID
options volume.VolumeOptions
volumeInfo map[string]string
volume.MetricsNil
}
// volume.Volume methods
var _ volume.Volume = &csiMountMgr{}
func (c *csiMountMgr) GetPath() string {
dir := path.Join(getTargetPath(c.podUID, c.specVolumeID, c.plugin.host), "/mount")
glog.V(4).Info(log("mounter.GetPath generated [%s]", dir))
return dir
}
func getTargetPath(uid types.UID, specVolumeID string, host volume.VolumeHost) string {
specVolID := kstrings.EscapeQualifiedNameForDisk(specVolumeID)
return host.GetPodVolumeDir(uid, kstrings.EscapeQualifiedNameForDisk(csiPluginName), specVolID)
}
// volume.Mounter methods
var _ volume.Mounter = &csiMountMgr{}
func (c *csiMountMgr) CanMount() error {
//TODO (vladimirvivien) use this method to probe controller using CSI.NodeProbe() call
// to ensure Node service is ready in the CSI plugin
return nil
}
func (c *csiMountMgr) SetUp(fsGroup *int64) error {
return c.SetUpAt(c.GetPath(), fsGroup)
}
func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error {
glog.V(4).Infof(log("Mounter.SetUpAt(%s)", dir))
mounted, err := isDirMounted(c.plugin, dir)
if err != nil {
glog.Error(log("mounter.SetUpAt failed while checking mount status for dir [%s]", dir))
return err
}
if mounted {
glog.V(4).Info(log("mounter.SetUpAt skipping mount, dir already mounted [%s]", dir))
return nil
}
csiSource, err := getCSISourceFromSpec(c.spec)
if err != nil {
glog.Error(log("mounter.SetupAt failed to get CSI persistent source: %v", err))
return err
}
ctx, cancel := grpctx.WithTimeout(grpctx.Background(), csiTimeout)
defer cancel()
csi := c.csiClient
nodeName := string(c.plugin.host.GetNodeName())
attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, nodeName)
// ensure version is supported
if err := csi.AssertSupportedVersion(ctx, csiVersion); err != nil {
glog.Error(log("mounter.SetUpAt failed to assert version: %v", err))
return err
}
// probe driver
// TODO (vladimirvivien) move probe call where it is done only when it is needed.
if err := csi.NodeProbe(ctx, csiVersion); err != nil {
glog.Error(log("mounter.SetUpAt failed to probe driver: %v", err))
return err
}
// search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
if c.volumeInfo == nil {
attachment, err := c.k8s.StorageV1alpha1().VolumeAttachments().Get(attachID, meta.GetOptions{})
if err != nil {
glog.Error(log("mounter.SetupAt failed while getting volume attachment [id=%v]: %v", attachID, err))
return err
}
if attachment == nil {
glog.Error(log("unable to find VolumeAttachment [id=%s]", attachID))
return errors.New("no existing VolumeAttachment found")
}
c.volumeInfo = attachment.Status.AttachmentMetadata
}
// get volume attributes
// TODO: for alpha, vol attributes are passed via PV.Annotations
// Beta will fix that
attribs, err := getVolAttribsFromSpec(c.spec)
if err != nil {
glog.Error(log("mounter.SetUpAt failed to extract volume attributes from PV annotations: %v", err))
return err
}
// create target_dir before call to NodePublish
if err := os.MkdirAll(dir, 0750); err != nil {
glog.Error(log("mouter.SetUpAt failed to create dir %#v: %v", dir, err))
return err
}
glog.V(4).Info(log("created target path successfully [%s]", dir))
// persist volume info data for teardown
volData := map[string]string{
volDataKey.specVolID: c.spec.Name(),
volDataKey.volHandle: csiSource.VolumeHandle,
volDataKey.driverName: csiSource.Driver,
volDataKey.nodeName: nodeName,
volDataKey.attachmentID: attachID,
}
if err := saveVolumeData(c.plugin, c.podUID, c.spec.Name(), volData); err != nil {
glog.Error(log("mounter.SetUpAt failed to save volume info data: %v", err))
if err := removeMountDir(c.plugin, dir); err != nil {
glog.Error(log("mounter.SetUpAt failed to remove mount dir after a saveVolumeData() error [%s]: %v", dir, err))
return err
}
return err
}
//TODO (vladimirvivien) implement better AccessModes mapping between k8s and CSI
accessMode := api.ReadWriteOnce
if c.spec.PersistentVolume.Spec.AccessModes != nil {
accessMode = c.spec.PersistentVolume.Spec.AccessModes[0]
}
err = csi.NodePublishVolume(
ctx,
c.volumeID,
c.readOnly,
dir,
accessMode,
c.volumeInfo,
attribs,
"ext4", //TODO needs to be sourced from PV or somewhere else
)
if err != nil {
glog.Errorf(log("mounter.SetupAt failed: %v", err))
if err := removeMountDir(c.plugin, dir); err != nil {
glog.Error(log("mounter.SetuAt failed to remove mount dir after a NodePublish() error [%s]: %v", dir, err))
return err
}
return err
}
glog.V(4).Infof(log("mounter.SetUp successfully requested NodePublish [%s]", dir))
return nil
}
func (c *csiMountMgr) GetAttributes() volume.Attributes {
return volume.Attributes{
ReadOnly: c.readOnly,
Managed: !c.readOnly,
SupportsSELinux: false,
}
}
// volume.Unmounter methods
var _ volume.Unmounter = &csiMountMgr{}
func (c *csiMountMgr) TearDown() error {
return c.TearDownAt(c.GetPath())
}
func (c *csiMountMgr) TearDownAt(dir string) error {
glog.V(4).Infof(log("Unmounter.TearDown(%s)", dir))
// is dir even mounted ?
// TODO (vladimirvivien) this check may not work for an emptyDir or local storage
// see https://github.com/kubernetes/kubernetes/pull/56836#discussion_r155834524
mounted, err := isDirMounted(c.plugin, dir)
if err != nil {
glog.Error(log("unmounter.Teardown failed while checking mount status for dir [%s]: %v", dir, err))
return err
}
if !mounted {
glog.V(4).Info(log("unmounter.Teardown skipping unmout, dir not mounted [%s]", dir))
return nil
}
// load volume info from file
dataDir := path.Dir(dir) // dropoff /mount at end
data, err := loadVolumeData(dataDir, volDataFileName)
if err != nil {
glog.Error(log("unmounter.Teardown failed to load volume data file using dir [%s]: %v", dir, err))
return err
}
volID := data[volDataKey.volHandle]
driverName := data[volDataKey.driverName]
if c.csiClient == nil {
addr := fmt.Sprintf(csiAddrTemplate, driverName)
client := newCsiDriverClient("unix", addr)
glog.V(4).Infof(log("unmounter csiClient setup [volume=%v,driver=%v]", volID, driverName))
c.csiClient = client
}
ctx, cancel := grpctx.WithTimeout(grpctx.Background(), csiTimeout)
defer cancel()
csi := c.csiClient
// TODO make all assertion calls private within the client itself
if err := csi.AssertSupportedVersion(ctx, csiVersion); err != nil {
glog.Errorf(log("mounter.SetUpAt failed to assert version: %v", err))
return err
}
if err := csi.NodeUnpublishVolume(ctx, volID, dir); err != nil {
glog.Errorf(log("mounter.SetUpAt failed: %v", err))
return err
}
// clean mount point dir
if err := removeMountDir(c.plugin, dir); err != nil {
glog.Error(log("mounter.SetUpAt failed to clean mount dir [%s]: %v", dir, err))
return err
}
glog.V(4).Infof(log("mounte.SetUpAt successfully unmounted dir [%s]", dir))
return nil
}
// getVolAttribsFromSpec extracts CSI VolumeAttributes information from PV.Annotations
// using key csi.kubernetes.io/volume-attributes. The annotation value is expected
// to be a JSON-encoded object of form {"key0":"val0",...,"keyN":"valN"}
func getVolAttribsFromSpec(spec *volume.Spec) (map[string]string, error) {
if spec == nil {
return nil, errors.New("missing volume spec")
}
annotations := spec.PersistentVolume.GetAnnotations()
if annotations == nil {
return nil, nil // no annotations found
}
jsonAttribs := annotations[csiVolAttribsAnnotationKey]
if jsonAttribs == "" {
return nil, nil // csi annotation not found
}
attribs := map[string]string{}
if err := json.Unmarshal([]byte(jsonAttribs), &attribs); err != nil {
glog.Error(log("error parsing csi PV.Annotation [%s]=%s: %v", csiVolAttribsAnnotationKey, jsonAttribs, err))
return nil, err
}
return attribs, nil
}
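// Illustrative only (not part of the original source): given a PV annotated with
//
//	csi.volume.kubernetes.io/volume-attributes: {"zone":"us-east-1a","iops":"100"}
//
// getVolAttribsFromSpec returns map[string]string{"zone": "us-east-1a", "iops": "100"}.
// A missing annotation yields (nil, nil); malformed JSON yields a non-nil error.
// The attribute names shown are hypothetical.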
// saveVolumeData persists parameter data as a JSON file at the location
// /var/lib/kubelet/pods/<podID>/volumes/kubernetes.io~csi/<specVolId>/vol_data.json
func saveVolumeData(p *csiPlugin, podUID types.UID, specVolID string, data map[string]string) error {
dir := getTargetPath(podUID, specVolID, p.host)
dataFilePath := path.Join(dir, volDataFileName)
file, err := os.Create(dataFilePath)
if err != nil {
glog.Error(log("failed to save volume data file %s: %v", dataFilePath, err))
return err
}
defer file.Close()
if err := json.NewEncoder(file).Encode(data); err != nil {
glog.Error(log("failed to save volume data file %s: %v", dataFilePath, err))
return err
}
glog.V(4).Info(log("volume data file saved successfully [%s]", dataFilePath))
return nil
}
// loadVolumeData loads the JSON volume data file named fileName from dir,
// typically /var/lib/kubelet/pods/<podID>/volumes/kubernetes.io~csi/<specVolId>/vol_data.json.
// Callers holding a mount path (which ends in /mount) strip that last element first.
func loadVolumeData(dir string, fileName string) (map[string]string, error) {
dataFileName := path.Join(dir, fileName)
glog.V(4).Info(log("loading volume data file [%s]", dataFileName))
file, err := os.Open(dataFileName)
if err != nil {
glog.Error(log("failed to open volume data file [%s]: %v", dataFileName, err))
return nil, err
}
defer file.Close()
data := map[string]string{}
if err := json.NewDecoder(file).Decode(&data); err != nil {
glog.Error(log("failed to parse volume data file [%s]: %v", dataFileName, err))
return nil, err
}
return data, nil
}
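// Illustrative only (not part of the original source): saveVolumeData and
// loadVolumeData round-trip the same file. For a (hypothetical) podUID "pod-1"
// and specVolID "pv-1", saveVolumeData writes
//
//	/var/lib/kubelet/pods/pod-1/volumes/kubernetes.io~csi/pv-1/vol_data.json
//
// and TearDownAt later reads it back with
// loadVolumeData(path.Dir(dir), volDataFileName), where dir ends in "/mount".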
// isDirMounted returns the !notMounted result from IsLikelyNotMountPoint check
func isDirMounted(plug *csiPlugin, dir string) (bool, error) {
mounter := plug.host.GetMounter(plug.GetPluginName())
notMnt, err := mounter.IsLikelyNotMountPoint(dir)
if err != nil && !os.IsNotExist(err) {
glog.Error(log("isDirMounted IsLikelyNotMountPoint test failed for dir [%v]", dir))
return false, err
}
return !notMnt, nil
}
// removeMountDir cleans the mount dir when dir is not mounted and removes the volume data file in dir
func removeMountDir(plug *csiPlugin, mountPath string) error {
glog.V(4).Info(log("removing mount path [%s]", mountPath))
if pathExists, pathErr := util.PathExists(mountPath); pathErr != nil {
glog.Error(log("failed while checking mount path stat [%s]", pathErr))
return pathErr
} else if !pathExists {
glog.Warning(log("skipping mount dir removal, path does not exist [%v]", mountPath))
return nil
}
mounter := plug.host.GetMounter(plug.GetPluginName())
notMnt, err := mounter.IsLikelyNotMountPoint(mountPath)
if err != nil {
glog.Error(log("mount dir removal failed [%s]: %v", mountPath, err))
return err
}
if notMnt {
glog.V(4).Info(log("dir not mounted, deleting it [%s]", mountPath))
if err := os.Remove(mountPath); err != nil && !os.IsNotExist(err) {
glog.Error(log("failed to remove dir [%s]: %v", mountPath, err))
return err
}
// remove volume data file as well
dataFile := path.Join(path.Dir(mountPath), volDataFileName)
glog.V(4).Info(log("also deleting volume info data file [%s]", dataFile))
if err := os.Remove(dataFile); err != nil && !os.IsNotExist(err) {
glog.Error(log("failed to delete volume data file [%s]: %v", dataFile, err))
return err
}
}
return nil
}

298
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter_test.go generated vendored Normal file

@ -0,0 +1,298 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package csi
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path"
"testing"
api "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1alpha1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
fakeclient "k8s.io/client-go/kubernetes/fake"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/csi/fake"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
var (
testDriver = "test-driver"
testVol = "vol-123"
testns = "test-ns"
testPodUID = types.UID("test-pod")
)
func TestMounterGetPath(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer os.RemoveAll(tmpDir)
// TODO (vladimirvivien) specName with slashes will not work
testCases := []struct {
name string
specVolumeName string
path string
}{
{
name: "simple specName",
specVolumeName: "spec-0",
path: path.Join(tmpDir, fmt.Sprintf("pods/%s/volumes/kubernetes.io~csi/%s/%s", testPodUID, "spec-0", "/mount")),
},
{
name: "specName with dots",
specVolumeName: "test.spec.1",
path: path.Join(tmpDir, fmt.Sprintf("pods/%s/volumes/kubernetes.io~csi/%s/%s", testPodUID, "test.spec.1", "/mount")),
},
}
for _, tc := range testCases {
t.Log("test case:", tc.name)
pv := makeTestPV(tc.specVolumeName, 10, testDriver, testVol)
spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
mounter, err := plug.NewMounter(
spec,
&api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
volume.VolumeOptions{},
)
if err != nil {
t.Fatalf("Failed to make a new Mounter: %v", err)
}
csiMounter := mounter.(*csiMountMgr)
path := csiMounter.GetPath()
t.Log("*** GetPath: ", path)
if tc.path != path {
t.Errorf("expecting path %s, got %s", tc.path, path)
}
}
}
func TestMounterSetUp(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer os.RemoveAll(tmpDir)
fakeClient := fakeclient.NewSimpleClientset()
host := volumetest.NewFakeVolumeHostWithNodeName(
tmpDir,
fakeClient,
nil,
"fakeNode",
)
plug.host = host
pv := makeTestPV("test-pv", 10, testDriver, testVol)
pvName := pv.GetName()
mounter, err := plug.NewMounter(
volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly),
&api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
volume.VolumeOptions{},
)
if err != nil {
t.Fatalf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Fatal("failed to create CSI mounter")
}
csiMounter := mounter.(*csiMountMgr)
csiMounter.csiClient = setupClient(t)
attachID := getAttachmentName(csiMounter.volumeID, csiMounter.driverName, string(plug.host.GetNodeName()))
attachment := &storage.VolumeAttachment{
ObjectMeta: meta.ObjectMeta{
Name: attachID,
},
Spec: storage.VolumeAttachmentSpec{
NodeName: "test-node",
Attacher: csiPluginName,
Source: storage.VolumeAttachmentSource{
PersistentVolumeName: &pvName,
},
},
Status: storage.VolumeAttachmentStatus{
Attached: false,
AttachError: nil,
DetachError: nil,
},
}
_, err = csiMounter.k8s.StorageV1alpha1().VolumeAttachments().Create(attachment)
if err != nil {
t.Fatalf("failed to setup VolumeAttachment: %v", err)
}
// Mounter.SetUp()
if err := csiMounter.SetUp(nil); err != nil {
t.Fatalf("mounter.Setup failed: %v", err)
}
path := csiMounter.GetPath()
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", path)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
// ensure call went all the way
pubs := csiMounter.csiClient.(*csiDriverClient).nodeClient.(*fake.NodeClient).GetNodePublishedVolumes()
if pubs[csiMounter.volumeID] != csiMounter.GetPath() {
t.Error("csi server may not have received NodePublishVolume call")
}
}
func TestUnmounterTeardown(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer os.RemoveAll(tmpDir)
pv := makeTestPV("test-pv", 10, testDriver, testVol)
unmounter, err := plug.NewUnmounter(pv.ObjectMeta.Name, testPodUID)
if err != nil {
t.Fatalf("failed to make a new Unmounter: %v", err)
}
csiUnmounter := unmounter.(*csiMountMgr)
csiUnmounter.csiClient = setupClient(t)
dir := csiUnmounter.GetPath()
// save the data file prior to unmount
if err := os.MkdirAll(dir, 0755); err != nil && !os.IsNotExist(err) {
t.Errorf("failed to create dir [%s]: %v", dir, err)
}
if err := saveVolumeData(
plug,
testPodUID,
"test-pv",
map[string]string{volDataKey.specVolID: "test-pv", volDataKey.driverName: "driver", volDataKey.volHandle: "vol-handle"},
); err != nil {
t.Fatal("failed to save volume data:", err)
}
err = csiUnmounter.TearDownAt(dir)
if err != nil {
t.Fatal(err)
}
// ensure csi client call
pubs := csiUnmounter.csiClient.(*csiDriverClient).nodeClient.(*fake.NodeClient).GetNodePublishedVolumes()
if _, ok := pubs[csiUnmounter.volumeID]; ok {
t.Error("csi server may not have received NodeUnpublishVolume call")
}
}
func TestGetVolAttribsFromSpec(t *testing.T) {
testCases := []struct {
name string
annotations map[string]string
attribs map[string]string
shouldFail bool
}{
{
name: "attribs ok",
annotations: map[string]string{"key0": "val0", csiVolAttribsAnnotationKey: `{"k0":"attr0","k1":"attr1","k2":"attr2"}`, "keyN": "valN"},
attribs: map[string]string{"k0": "attr0", "k1": "attr1", "k2": "attr2"},
},
{
name: "missing attribs",
annotations: map[string]string{"key0": "val0", "keyN": "valN"},
},
{
name: "missing annotations",
},
{
name: "bad json",
annotations: map[string]string{"key0": "val0", csiVolAttribsAnnotationKey: `{"k0""attr0","k1":"attr1,"k2":"attr2"`, "keyN": "valN"},
attribs: map[string]string{"k0": "attr0", "k1": "attr1", "k2": "attr2"},
shouldFail: true,
},
}
spec := volume.NewSpecFromPersistentVolume(makeTestPV("test-pv", 10, testDriver, testVol), false)
for _, tc := range testCases {
t.Log("test case:", tc.name)
spec.PersistentVolume.Annotations = tc.annotations
attribs, err := getVolAttribsFromSpec(spec)
if !tc.shouldFail && err != nil {
t.Error("test case should not fail, but err != nil", err)
}
eq := true
for k, v := range attribs {
if tc.attribs[k] != v {
eq = false
}
}
if !eq {
t.Errorf("expecting attribs %#v, but got %#v", tc.attribs, attribs)
}
}
}
func TestSaveVolumeData(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer os.RemoveAll(tmpDir)
testCases := []struct {
name string
data map[string]string
shouldFail bool
}{
{name: "test with data ok", data: map[string]string{"key0": "val0", "_key1": "val1", "key2": "val2"}},
{name: "test with data ok 2 ", data: map[string]string{"_key0_": "val0", "&key1": "val1", "key2": "val2"}},
}
for i, tc := range testCases {
t.Log("test case:", tc.name)
specVolID := fmt.Sprintf("spec-volid-%d", i)
mountDir := path.Join(getTargetPath(testPodUID, specVolID, plug.host), "/mount")
if err := os.MkdirAll(mountDir, 0755); err != nil && !os.IsNotExist(err) {
t.Errorf("failed to create dir [%s]: %v", mountDir, err)
}
err := saveVolumeData(plug, testPodUID, specVolID, tc.data)
if !tc.shouldFail && err != nil {
t.Error("unexpected failure: ", err)
}
// did file get created
dataDir := getTargetPath(testPodUID, specVolID, plug.host)
file := path.Join(dataDir, volDataFileName)
if _, err := os.Stat(file); err != nil {
t.Error("failed to create data dir:", err)
}
// validate content
data, err := ioutil.ReadFile(file)
if !tc.shouldFail && err != nil {
t.Error("failed to read data file:", err)
}
jsonData := new(bytes.Buffer)
if err := json.NewEncoder(jsonData).Encode(tc.data); err != nil {
t.Error("failed to encode json:", err)
}
if string(data) != jsonData.String() {
t.Errorf("expecting encoded data %v, got %v", string(data), jsonData)
}
}
}

253
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_plugin.go generated vendored Normal file

@ -0,0 +1,253 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package csi
import (
"errors"
"fmt"
"regexp"
"time"
csipb "github.com/container-storage-interface/spec/lib/go/csi"
"github.com/golang/glog"
api "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
)
const (
csiPluginName = "kubernetes.io/csi"
csiVolAttribsAnnotationKey = "csi.volume.kubernetes.io/volume-attributes"
// TODO (vladimirvivien) implement a more dynamic way to discover
// the unix domain socket path for each installed csi driver.
// TODO (vladimirvivien) would be nice to name socket with a .sock extension
// for consistency.
csiAddrTemplate = "/var/lib/kubelet/plugins/%v/csi.sock"
csiTimeout = 15 * time.Second
volNameSep = "^"
volDataFileName = "vol_data.json"
)
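// Illustrative only (not part of the original source): the driver's socket
// address is derived from csiAddrTemplate; for a (hypothetical) driver named
// "io.example.flex":
//
//	addr := fmt.Sprintf(csiAddrTemplate, "io.example.flex")
//	// addr == "/var/lib/kubelet/plugins/io.example.flex/csi.sock"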
var (
// csiVersion is the supported csi version
csiVersion = &csipb.Version{Major: 0, Minor: 1, Patch: 0}
driverNameRexp = regexp.MustCompile(`^[A-Za-z]+(\.?-?_?[A-Za-z0-9-])+$`)
)
type csiPlugin struct {
host volume.VolumeHost
}
// ProbeVolumePlugins returns implemented plugins
func ProbeVolumePlugins() []volume.VolumePlugin {
p := &csiPlugin{
host: nil,
}
return []volume.VolumePlugin{p}
}
// volume.VolumePlugin methods
var _ volume.VolumePlugin = &csiPlugin{}
func (p *csiPlugin) Init(host volume.VolumeHost) error {
glog.Info(log("plugin initializing..."))
p.host = host
return nil
}
func (p *csiPlugin) GetPluginName() string {
return csiPluginName
}
// GetVolumeName returns a concatenated string of CSIVolumeSource.Driver<volNameSep>CSIVolumeSource.VolumeHandle
// That string value is used in Detach() to extract driver name and volumeName.
func (p *csiPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
csi, err := getCSISourceFromSpec(spec)
if err != nil {
glog.Error(log("plugin.GetVolumeName failed to extract volume source from spec: %v", err))
return "", err
}
//TODO (vladimirvivien) this validation should be done at the API validation check
if !isDriverNameValid(csi.Driver) {
glog.Error(log("plugin.GetVolumeName failed to create volume name: invalid csi driver name %s", csi.Driver))
return "", errors.New("invalid csi driver name")
}
// return driverName<separator>volumeHandle
return fmt.Sprintf("%s%s%s", csi.Driver, volNameSep, csi.VolumeHandle), nil
}
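// Illustrative only (not part of the original source): for a CSI source with
// Driver "test-driver" and VolumeHandle "vol-123", GetVolumeName returns
// "test-driver^vol-123"; Detach() splits on volNameSep ("^") to recover the
// driver name and volume handle.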
func (p *csiPlugin) CanSupport(spec *volume.Spec) bool {
// TODO (vladimirvivien) CanSupport should also take into account
// the availability/registration of specified Driver in the volume source
return spec.PersistentVolume != nil && spec.PersistentVolume.Spec.CSI != nil
}
func (p *csiPlugin) RequiresRemount() bool {
return false
}
func (p *csiPlugin) NewMounter(
spec *volume.Spec,
pod *api.Pod,
_ volume.VolumeOptions) (volume.Mounter, error) {
pvSource, err := getCSISourceFromSpec(spec)
if err != nil {
return nil, err
}
// TODO (vladimirvivien) consider moving this check in API validation
// check Driver name to conform to CSI spec
if !isDriverNameValid(pvSource.Driver) {
glog.Error(log("driver name does not conform to CSI spec: %s", pvSource.Driver))
return nil, errors.New("driver name is invalid")
}
// the driver name is validated above before it is used in any paths such as the socket address
addr := fmt.Sprintf(csiAddrTemplate, pvSource.Driver)
glog.V(4).Infof(log("setting up mounter for [volume=%v,driver=%v]", pvSource.VolumeHandle, pvSource.Driver))
client := newCsiDriverClient("unix", addr)
k8s := p.host.GetKubeClient()
if k8s == nil {
glog.Error(log("failed to get a kubernetes client"))
return nil, errors.New("failed to get a Kubernetes client")
}
mounter := &csiMountMgr{
plugin: p,
k8s: k8s,
spec: spec,
pod: pod,
podUID: pod.UID,
driverName: pvSource.Driver,
volumeID: pvSource.VolumeHandle,
specVolumeID: spec.Name(),
csiClient: client,
}
return mounter, nil
}
func (p *csiPlugin) NewUnmounter(specName string, podUID types.UID) (volume.Unmounter, error) {
glog.V(4).Infof(log("setting up unmounter for [name=%v, podUID=%v]", specName, podUID))
unmounter := &csiMountMgr{
plugin: p,
podUID: podUID,
specVolumeID: specName,
}
return unmounter, nil
}
func (p *csiPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
glog.V(4).Info(log("plugin.ConstructVolumeSpec [pv.Name=%v, path=%v]", volumeName, mountPath))
volData, err := loadVolumeData(mountPath, volDataFileName)
if err != nil {
glog.Error(log("plugin.ConstructVolumeSpec failed loading volume data using [%s]: %v", mountPath, err))
return nil, err
}
glog.V(4).Info(log("plugin.ConstructVolumeSpec extracted [%#v]", volData))
pv := &api.PersistentVolume{
ObjectMeta: meta.ObjectMeta{
Name: volData[volDataKey.specVolID],
},
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
CSI: &api.CSIPersistentVolumeSource{
Driver: volData[volDataKey.driverName],
VolumeHandle: volData[volDataKey.volHandle],
},
},
},
}
return volume.NewSpecFromPersistentVolume(pv, false), nil
}
func (p *csiPlugin) SupportsMountOption() bool {
// TODO (vladimirvivien) use CSI VolumeCapability.MountVolume.mount_flags
// to probe for the result of this method
return false
}
func (p *csiPlugin) SupportsBulkVolumeVerification() bool {
return false
}
// volume.AttachableVolumePlugin methods
var _ volume.AttachableVolumePlugin = &csiPlugin{}
func (p *csiPlugin) NewAttacher() (volume.Attacher, error) {
k8s := p.host.GetKubeClient()
if k8s == nil {
glog.Error(log("unable to get kubernetes client from host"))
return nil, errors.New("unable to get Kubernetes client")
}
return &csiAttacher{
plugin: p,
k8s: k8s,
waitSleepTime: 1 * time.Second,
}, nil
}
func (p *csiPlugin) NewDetacher() (volume.Detacher, error) {
k8s := p.host.GetKubeClient()
if k8s == nil {
glog.Error(log("unable to get kubernetes client from host"))
return nil, errors.New("unable to get Kubernetes client")
}
return &csiAttacher{
plugin: p,
k8s: k8s,
waitSleepTime: 1 * time.Second,
}, nil
}
func (p *csiPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
m := p.host.GetMounter(p.GetPluginName())
return mount.GetMountRefs(m, deviceMountPath)
}
func getCSISourceFromSpec(spec *volume.Spec) (*api.CSIPersistentVolumeSource, error) {
if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.CSI != nil {
return spec.PersistentVolume.Spec.CSI, nil
}
return nil, fmt.Errorf("CSIPersistentVolumeSource not defined in spec")
}
// log prepends log string with `kubernetes.io/csi`
func log(msg string, parts ...interface{}) string {
return fmt.Sprintf(fmt.Sprintf("%s: %s", csiPluginName, msg), parts...)
}
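// Illustrative only (not part of the original source):
//
//	log("failed to probe driver: %v", err)
//	// produces "kubernetes.io/csi: failed to probe driver: <err>"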
// isDriverNameValid validates the driverName using CSI spec
func isDriverNameValid(name string) bool {
if len(name) == 0 || len(name) > 63 {
return false
}
return driverNameRexp.MatchString(name)
}

310
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_plugin_test.go generated vendored Normal file

@ -0,0 +1,310 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package csi
import (
"fmt"
"os"
"path"
"testing"
api "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
fakeclient "k8s.io/client-go/kubernetes/fake"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
// create a plugin mgr to load plugins and setup a fake client
func newTestPlugin(t *testing.T) (*csiPlugin, string) {
tmpDir, err := utiltesting.MkTmpdir("csi-test")
if err != nil {
t.Fatalf("can't create temp dir: %v", err)
}
fakeClient := fakeclient.NewSimpleClientset()
host := volumetest.NewFakeVolumeHost(
tmpDir,
fakeClient,
nil,
)
plugMgr := &volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host)
plug, err := plugMgr.FindPluginByName(csiPluginName)
if err != nil {
t.Fatalf("can't find plugin %v", csiPluginName)
}
csiPlug, ok := plug.(*csiPlugin)
if !ok {
t.Fatalf("cannot assert plugin to be type csiPlugin")
}
return csiPlug, tmpDir
}
func makeTestPV(name string, sizeGig int, driverName, volID string) *api.PersistentVolume {
return &api.PersistentVolume{
ObjectMeta: meta.ObjectMeta{
Name: name,
Namespace: testns,
},
Spec: api.PersistentVolumeSpec{
AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
Capacity: api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse(
fmt.Sprintf("%dGi", sizeGig),
),
},
PersistentVolumeSource: api.PersistentVolumeSource{
CSI: &api.CSIPersistentVolumeSource{
Driver: driverName,
VolumeHandle: volID,
ReadOnly: false,
},
},
},
}
}
func TestPluginGetPluginName(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer os.RemoveAll(tmpDir)
if plug.GetPluginName() != "kubernetes.io/csi" {
t.Errorf("unexpected plugin name %v", plug.GetPluginName())
}
}
func TestPluginGetVolumeName(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer os.RemoveAll(tmpDir)
testCases := []struct {
name string
driverName string
volName string
shouldFail bool
}{
{"alphanum names", "testdr", "testvol", false},
{"mixchar driver", "test.dr.cc", "testvol", false},
{"mixchar volume", "testdr", "test-vol-name", false},
{"mixchars all", "test-driver", "test.vol.name", false},
}
for _, tc := range testCases {
t.Logf("testing: %s", tc.name)
pv := makeTestPV("test-pv", 10, tc.driverName, tc.volName)
spec := volume.NewSpecFromPersistentVolume(pv, false)
name, err := plug.GetVolumeName(spec)
if tc.shouldFail && err == nil {
t.Fatal("GetVolumeName should fail, but got err=nil")
}
if name != fmt.Sprintf("%s%s%s", tc.driverName, volNameSep, tc.volName) {
t.Errorf("unexpected volume name %s", name)
}
}
}
func TestPluginCanSupport(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer os.RemoveAll(tmpDir)
pv := makeTestPV("test-pv", 10, testDriver, testVol)
spec := volume.NewSpecFromPersistentVolume(pv, false)
if !plug.CanSupport(spec) {
t.Errorf("should support CSI spec")
}
}
func TestPluginConstructVolumeSpec(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer os.RemoveAll(tmpDir)
testCases := []struct {
name string
specVolID string
data map[string]string
shouldFail bool
}{
{
name: "valid spec name",
specVolID: "test.vol.id",
data: map[string]string{volDataKey.specVolID: "test.vol.id", volDataKey.volHandle: "test-vol0", volDataKey.driverName: "test-driver0"},
},
}
for _, tc := range testCases {
t.Logf("test case: %s", tc.name)
dir := getTargetPath(testPodUID, tc.specVolID, plug.host)
// create the data file
if tc.data != nil {
mountDir := path.Join(getTargetPath(testPodUID, tc.specVolID, plug.host), "/mount")
if err := os.MkdirAll(mountDir, 0755); err != nil && !os.IsNotExist(err) {
t.Errorf("failed to create dir [%s]: %v", mountDir, err)
}
if err := saveVolumeData(plug, testPodUID, tc.specVolID, tc.data); err != nil {
t.Fatal(err)
}
}
// rebuild spec
spec, err := plug.ConstructVolumeSpec("test-pv", dir)
if tc.shouldFail {
if err == nil {
t.Fatal("expecting ConstructVolumeSpec to fail, but got nil error")
}
continue
}
volHandle := spec.PersistentVolume.Spec.CSI.VolumeHandle
if volHandle != tc.data[volDataKey.volHandle] {
t.Errorf("expected volID %s, got volID %s", tc.data[volDataKey.volHandle], volHandle)
}
if spec.Name() != tc.specVolID {
t.Errorf("Unexpected spec name %s", spec.Name())
}
}
}
func TestPluginNewMounter(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer os.RemoveAll(tmpDir)
pv := makeTestPV("test-pv", 10, testDriver, testVol)
mounter, err := plug.NewMounter(
volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly),
&api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
volume.VolumeOptions{},
)
if err != nil {
t.Fatalf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Fatal("failed to create CSI mounter")
}
csiMounter := mounter.(*csiMountMgr)
// validate mounter fields
if csiMounter.driverName != testDriver {
t.Error("mounter driver name not set")
}
if csiMounter.volumeID != testVol {
t.Error("mounter volume id not set")
}
if csiMounter.pod == nil {
t.Error("mounter pod not set")
}
if csiMounter.podUID == types.UID("") {
t.Error("mounter podUID mot set")
}
}
func TestPluginNewUnmounter(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer os.RemoveAll(tmpDir)
pv := makeTestPV("test-pv", 10, testDriver, testVol)
unmounter, err := plug.NewUnmounter(pv.ObjectMeta.Name, testPodUID)
if err != nil {
t.Fatalf("Failed to make a new Unmounter: %v", err)
}
csiUnmounter := unmounter.(*csiMountMgr)
if csiUnmounter == nil {
t.Fatal("failed to create CSI unmounter")
}
if csiUnmounter.podUID != testPodUID {
t.Error("podUID not set")
}
}
func TestValidateDriverName(t *testing.T) {
testCases := []struct {
name string
driverName string
valid bool
}{
{"ok no punctuations", "comgooglestoragecsigcepd", true},
{"ok dot only", "io.kubernetes.storage.csi.flex", true},
{"ok dash only", "io-kubernetes-storage-csi-flex", true},
{"ok underscore only", "io_kubernetes_storage_csi_flex", true},
{"ok dot underscores", "io.kubernetes.storage_csi.flex", true},
{"ok dot dash underscores", "io.kubernetes-storage.csi_flex", true},
{"invalid length 0", "", false},
{"invalid length > 63", "comgooglestoragecsigcepdcomgooglestoragecsigcepdcomgooglestoragecsigcepdcomgooglestoragecsigcepd", false},
{"invalid start char", "_comgooglestoragecsigcepd", false},
{"invalid end char", "comgooglestoragecsigcepd/", false},
{"invalid separators", "com/google/storage/csi~gcepd", false},
}
for _, tc := range testCases {
t.Logf("test case: %v", tc.name)
drValid := isDriverNameValid(tc.driverName)
if tc.valid != drValid {
t.Errorf("expecting driverName %s as valid=%t, but got valid=%t", tc.driverName, tc.valid, drValid)
}
}
}
func TestPluginNewAttacher(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer os.RemoveAll(tmpDir)
attacher, err := plug.NewAttacher()
if err != nil {
t.Fatalf("failed to create new attacher: %v", err)
}
csiAttacher := attacher.(*csiAttacher)
if csiAttacher.plugin == nil {
t.Error("plugin not set for attacher")
}
if csiAttacher.k8s == nil {
t.Error("Kubernetes client not set for attacher")
}
}
func TestPluginNewDetacher(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer os.RemoveAll(tmpDir)
detacher, err := plug.NewDetacher()
if err != nil {
t.Fatalf("failed to create new detacher: %v", err)
}
csiDetacher := detacher.(*csiAttacher)
if csiDetacher.plugin == nil {
t.Error("plugin not set for detacher")
}
if csiDetacher.k8s == nil {
t.Error("Kubernetes client not set for attacher")
}
}

27
vendor/k8s.io/kubernetes/pkg/volume/csi/fake/BUILD generated vendored Normal file

@ -0,0 +1,27 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["fake_client.go"],
importpath = "k8s.io/kubernetes/pkg/volume/csi/fake",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/container-storage-interface/spec/lib/go/csi:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

230
vendor/k8s.io/kubernetes/pkg/volume/csi/fake/fake_client.go generated vendored Normal file

@ -0,0 +1,230 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fake
import (
"context"
"errors"
"strings"
"google.golang.org/grpc"
csipb "github.com/container-storage-interface/spec/lib/go/csi"
grpctx "golang.org/x/net/context"
)
// IdentityClient is a CSI identity client used for testing
type IdentityClient struct {
nextErr error
}
// NewIdentityClient returns a new IdentityClient
func NewIdentityClient() *IdentityClient {
return &IdentityClient{}
}
// SetNextError injects expected error
func (f *IdentityClient) SetNextError(err error) {
f.nextErr = err
}
// GetSupportedVersions returns the supported versions
func (f *IdentityClient) GetSupportedVersions(ctx grpctx.Context, req *csipb.GetSupportedVersionsRequest, opts ...grpc.CallOption) (*csipb.GetSupportedVersionsResponse, error) {
// short circuit with an error
if f.nextErr != nil {
return nil, f.nextErr
}
rsp := &csipb.GetSupportedVersionsResponse{
SupportedVersions: []*csipb.Version{
{Major: 0, Minor: 0, Patch: 1},
{Major: 0, Minor: 1, Patch: 0},
{Major: 1, Minor: 0, Patch: 0},
{Major: 1, Minor: 0, Patch: 1},
{Major: 1, Minor: 1, Patch: 1},
},
}
return rsp, nil
}
// GetPluginInfo returns plugin info
func (f *IdentityClient) GetPluginInfo(ctx context.Context, in *csipb.GetPluginInfoRequest, opts ...grpc.CallOption) (*csipb.GetPluginInfoResponse, error) {
return nil, nil
}
// NodeClient is a CSI node client used for testing
type NodeClient struct {
nodePublishedVolumes map[string]string
nextErr error
}
// NewNodeClient returns fake node client
func NewNodeClient() *NodeClient {
return &NodeClient{nodePublishedVolumes: make(map[string]string)}
}
// SetNextError injects next expected error
func (f *NodeClient) SetNextError(err error) {
f.nextErr = err
}
// GetNodePublishedVolumes returns node published volumes
func (f *NodeClient) GetNodePublishedVolumes() map[string]string {
return f.nodePublishedVolumes
}
// NodePublishVolume implements CSI NodePublishVolume
func (f *NodeClient) NodePublishVolume(ctx grpctx.Context, req *csipb.NodePublishVolumeRequest, opts ...grpc.CallOption) (*csipb.NodePublishVolumeResponse, error) {
if f.nextErr != nil {
return nil, f.nextErr
}
if req.GetVolumeId() == "" {
return nil, errors.New("missing volume id")
}
if req.GetTargetPath() == "" {
return nil, errors.New("missing target path")
}
fsTypes := "ext4|xfs|zfs"
fsType := req.GetVolumeCapability().GetMount().GetFsType()
if !strings.Contains(fsTypes, fsType) {
return nil, errors.New("invlid fstype")
}
f.nodePublishedVolumes[req.GetVolumeId()] = req.GetTargetPath()
return &csipb.NodePublishVolumeResponse{}, nil
}
// NodeProbe implements csi NodeProbe
func (f *NodeClient) NodeProbe(ctx context.Context, req *csipb.NodeProbeRequest, opts ...grpc.CallOption) (*csipb.NodeProbeResponse, error) {
if f.nextErr != nil {
return nil, f.nextErr
}
if req.Version == nil {
return nil, errors.New("missing version")
}
return &csipb.NodeProbeResponse{}, nil
}
// NodeUnpublishVolume implements csi method
func (f *NodeClient) NodeUnpublishVolume(ctx context.Context, req *csipb.NodeUnpublishVolumeRequest, opts ...grpc.CallOption) (*csipb.NodeUnpublishVolumeResponse, error) {
if f.nextErr != nil {
return nil, f.nextErr
}
if req.GetVolumeId() == "" {
return nil, errors.New("missing volume id")
}
if req.GetTargetPath() == "" {
return nil, errors.New("missing target path")
}
delete(f.nodePublishedVolumes, req.GetVolumeId())
return &csipb.NodeUnpublishVolumeResponse{}, nil
}
// GetNodeID implements csi method
func (f *NodeClient) GetNodeID(ctx context.Context, in *csipb.GetNodeIDRequest, opts ...grpc.CallOption) (*csipb.GetNodeIDResponse, error) {
return nil, nil
}
// NodeGetCapabilities implements csi method
func (f *NodeClient) NodeGetCapabilities(ctx context.Context, in *csipb.NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*csipb.NodeGetCapabilitiesResponse, error) {
return nil, nil
}
// ControllerClient represents a CSI Controller client
type ControllerClient struct {
nextCapabilities []*csipb.ControllerServiceCapability
nextErr error
}
// NewControllerClient returns a ControllerClient
func NewControllerClient() *ControllerClient {
return &ControllerClient{}
}
// SetNextError injects next expected error
func (f *ControllerClient) SetNextError(err error) {
f.nextErr = err
}
// SetNextCapabilities injects next expected capabilities
func (f *ControllerClient) SetNextCapabilities(caps []*csipb.ControllerServiceCapability) {
f.nextCapabilities = caps
}
// ControllerGetCapabilities implements csi method
func (f *ControllerClient) ControllerGetCapabilities(ctx context.Context, in *csipb.ControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*csipb.ControllerGetCapabilitiesResponse, error) {
if f.nextErr != nil {
return nil, f.nextErr
}
if f.nextCapabilities == nil {
f.nextCapabilities = []*csipb.ControllerServiceCapability{
{
Type: &csipb.ControllerServiceCapability_Rpc{
Rpc: &csipb.ControllerServiceCapability_RPC{
Type: csipb.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME,
},
},
},
}
}
return &csipb.ControllerGetCapabilitiesResponse{
Capabilities: f.nextCapabilities,
}, nil
}
// CreateVolume implements csi method
func (f *ControllerClient) CreateVolume(ctx context.Context, in *csipb.CreateVolumeRequest, opts ...grpc.CallOption) (*csipb.CreateVolumeResponse, error) {
return nil, nil
}
// DeleteVolume implements csi method
func (f *ControllerClient) DeleteVolume(ctx context.Context, in *csipb.DeleteVolumeRequest, opts ...grpc.CallOption) (*csipb.DeleteVolumeResponse, error) {
return nil, nil
}
// ControllerPublishVolume implements csi method
func (f *ControllerClient) ControllerPublishVolume(ctx context.Context, in *csipb.ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*csipb.ControllerPublishVolumeResponse, error) {
return nil, nil
}
// ControllerUnpublishVolume implements csi method
func (f *ControllerClient) ControllerUnpublishVolume(ctx context.Context, in *csipb.ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) (*csipb.ControllerUnpublishVolumeResponse, error) {
return nil, nil
}
// ValidateVolumeCapabilities implements csi method
func (f *ControllerClient) ValidateVolumeCapabilities(ctx context.Context, in *csipb.ValidateVolumeCapabilitiesRequest, opts ...grpc.CallOption) (*csipb.ValidateVolumeCapabilitiesResponse, error) {
return nil, nil
}
// ListVolumes implements csi method
func (f *ControllerClient) ListVolumes(ctx context.Context, in *csipb.ListVolumesRequest, opts ...grpc.CallOption) (*csipb.ListVolumesResponse, error) {
return nil, nil
}
// GetCapacity implements csi method
func (f *ControllerClient) GetCapacity(ctx context.Context, in *csipb.GetCapacityRequest, opts ...grpc.CallOption) (*csipb.GetCapacityResponse, error) {
return nil, nil
}
// ControllerProbe implements csi method
func (f *ControllerClient) ControllerProbe(ctx context.Context, in *csipb.ControllerProbeRequest, opts ...grpc.CallOption) (*csipb.ControllerProbeResponse, error) {
return nil, nil
}

19
vendor/k8s.io/kubernetes/pkg/volume/doc.go generated vendored Normal file

@ -0,0 +1,19 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package volume includes internal representations of external volume types
// as well as utility methods required to mount/unmount volumes to kubelets.
package volume // import "k8s.io/kubernetes/pkg/volume"

56
vendor/k8s.io/kubernetes/pkg/volume/downwardapi/BUILD generated vendored Normal file

@ -0,0 +1,56 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = ["downwardapi.go"],
importpath = "k8s.io/kubernetes/pkg/volume/downwardapi",
deps = [
"//pkg/api/v1/resource:go_default_library",
"//pkg/fieldpath:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["downwardapi_test.go"],
importpath = "k8s.io/kubernetes/pkg/volume/downwardapi",
library = ":go_default_library",
deps = [
"//pkg/fieldpath:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/empty_dir:go_default_library",
"//pkg/volume/testing:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

14
vendor/k8s.io/kubernetes/pkg/volume/downwardapi/OWNERS generated vendored Normal file

@ -0,0 +1,14 @@
approvers:
- pmorie
- saad-ali
- thockin
- matchstick
reviewers:
- ivan4th
- rata
- sjenning
- saad-ali
- jsafrane
- rootfs
- jingxu97
- msau42

303
vendor/k8s.io/kubernetes/pkg/volume/downwardapi/downwardapi.go generated vendored Normal file

@ -0,0 +1,303 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package downwardapi
import (
"fmt"
"path"
"sort"
"strings"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/kubernetes/pkg/api/v1/resource"
"k8s.io/kubernetes/pkg/fieldpath"
utilstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"github.com/golang/glog"
)
// ProbeVolumePlugins is the entry point for plugin detection in a package.
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&downwardAPIPlugin{}}
}
const (
downwardAPIPluginName = "kubernetes.io/downward-api"
)
// downwardAPIPlugin implements the VolumePlugin interface.
type downwardAPIPlugin struct {
host volume.VolumeHost
}
var _ volume.VolumePlugin = &downwardAPIPlugin{}
func wrappedVolumeSpec() volume.Spec {
return volume.Spec{
Volume: &v1.Volume{VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumMemory}}},
}
}
func (plugin *downwardAPIPlugin) Init(host volume.VolumeHost) error {
plugin.host = host
return nil
}
func (plugin *downwardAPIPlugin) GetPluginName() string {
return downwardAPIPluginName
}
func (plugin *downwardAPIPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
volumeSource, _ := getVolumeSource(spec)
if volumeSource == nil {
return "", fmt.Errorf("Spec does not reference a DownwardAPI volume type")
}
// Return user defined volume name, since this is an ephemeral volume type
return spec.Name(), nil
}
func (plugin *downwardAPIPlugin) CanSupport(spec *volume.Spec) bool {
return spec.Volume != nil && spec.Volume.DownwardAPI != nil
}
func (plugin *downwardAPIPlugin) RequiresRemount() bool {
return true
}
func (plugin *downwardAPIPlugin) SupportsMountOption() bool {
return false
}
func (plugin *downwardAPIPlugin) SupportsBulkVolumeVerification() bool {
return false
}
func (plugin *downwardAPIPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
v := &downwardAPIVolume{
volName: spec.Name(),
items: spec.Volume.DownwardAPI.Items,
pod: pod,
podUID: pod.UID,
plugin: plugin,
}
return &downwardAPIVolumeMounter{
downwardAPIVolume: v,
source: *spec.Volume.DownwardAPI,
opts: &opts,
}, nil
}
func (plugin *downwardAPIPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
return &downwardAPIVolumeUnmounter{
&downwardAPIVolume{
volName: volName,
podUID: podUID,
plugin: plugin,
},
}, nil
}
func (plugin *downwardAPIPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
downwardAPIVolume := &v1.Volume{
Name: volumeName,
VolumeSource: v1.VolumeSource{
DownwardAPI: &v1.DownwardAPIVolumeSource{},
},
}
return volume.NewSpecFromVolume(downwardAPIVolume), nil
}
// downwardAPIVolume retrieves downward API data and places it into the volume on the host.
type downwardAPIVolume struct {
volName string
items []v1.DownwardAPIVolumeFile
pod *v1.Pod
podUID types.UID // TODO: remove this redundancy as soon as the NewUnmounter func has *v1.Pod and not only types.UID
plugin *downwardAPIPlugin
volume.MetricsNil
}
// downwardAPIVolumeMounter fetches info from downward API from the pod
// and dumps it in files
type downwardAPIVolumeMounter struct {
*downwardAPIVolume
source v1.DownwardAPIVolumeSource
opts *volume.VolumeOptions
}
// downwardAPIVolumeMounter implements volume.Mounter interface
var _ volume.Mounter = &downwardAPIVolumeMounter{}
// downward API volumes are always read-only and managed
func (d *downwardAPIVolume) GetAttributes() volume.Attributes {
return volume.Attributes{
ReadOnly: true,
Managed: true,
SupportsSELinux: true,
}
}
// Checks prior to mount operations to verify that the required components (binaries, etc.)
// to mount the volume are available on the underlying node.
// If not, it returns an error
func (b *downwardAPIVolumeMounter) CanMount() error {
return nil
}
// SetUp puts in place the volume plugin.
// This function is not idempotent by design. We want the data to be refreshed periodically.
// The internal sync interval of kubelet will drive the refresh of data.
// TODO: Add volume specific ticker and refresh loop
func (b *downwardAPIVolumeMounter) SetUp(fsGroup *int64) error {
return b.SetUpAt(b.GetPath(), fsGroup)
}
func (b *downwardAPIVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
glog.V(3).Infof("Setting up a downwardAPI volume %v for pod %v/%v at %v", b.volName, b.pod.Namespace, b.pod.Name, dir)
// Wrap EmptyDir. Here we rely on the idempotency of the wrapped plugin to avoid repeatedly mounting
wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec(), b.pod, *b.opts)
if err != nil {
glog.Errorf("Couldn't setup downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error())
return err
}
if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
glog.Errorf("Unable to setup downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error())
return err
}
data, err := CollectData(b.source.Items, b.pod, b.plugin.host, b.source.DefaultMode)
if err != nil {
glog.Errorf("Error preparing data for downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error())
return err
}
writerContext := fmt.Sprintf("pod %v/%v volume %v", b.pod.Namespace, b.pod.Name, b.volName)
writer, err := volumeutil.NewAtomicWriter(dir, writerContext)
if err != nil {
glog.Errorf("Error creating atomic writer: %v", err)
return err
}
err = writer.Write(data)
if err != nil {
glog.Errorf("Error writing payload to dir: %v", err)
return err
}
err = volume.SetVolumeOwnership(b, fsGroup)
if err != nil {
glog.Errorf("Error applying volume ownership settings for group: %v", fsGroup)
return err
}
return nil
}
// CollectData collects requested downwardAPI in data map.
// Map's key is the requested name of file to dump
// Map's value is the (sorted) content of the field to be dumped in the file.
//
// Note: this function is exported so that it can be called from the projection volume driver
func CollectData(items []v1.DownwardAPIVolumeFile, pod *v1.Pod, host volume.VolumeHost, defaultMode *int32) (map[string]volumeutil.FileProjection, error) {
if defaultMode == nil {
return nil, fmt.Errorf("No defaultMode used, not even the default value for it")
}
errlist := []error{}
data := make(map[string]volumeutil.FileProjection)
for _, fileInfo := range items {
var fileProjection volumeutil.FileProjection
fPath := path.Clean(fileInfo.Path)
if fileInfo.Mode != nil {
fileProjection.Mode = *fileInfo.Mode
} else {
fileProjection.Mode = *defaultMode
}
if fileInfo.FieldRef != nil {
// TODO: unify with Kubelet.podFieldSelectorRuntimeValue
if values, err := fieldpath.ExtractFieldPathAsString(pod, fileInfo.FieldRef.FieldPath); err != nil {
glog.Errorf("Unable to extract field %s: %s", fileInfo.FieldRef.FieldPath, err.Error())
errlist = append(errlist, err)
} else {
fileProjection.Data = []byte(sortLines(values))
}
} else if fileInfo.ResourceFieldRef != nil {
containerName := fileInfo.ResourceFieldRef.ContainerName
nodeAllocatable, err := host.GetNodeAllocatable()
if err != nil {
errlist = append(errlist, err)
} else if values, err := resource.ExtractResourceValueByContainerNameAndNodeAllocatable(fileInfo.ResourceFieldRef, pod, containerName, nodeAllocatable); err != nil {
glog.Errorf("Unable to extract field %s: %s", fileInfo.ResourceFieldRef.Resource, err.Error())
errlist = append(errlist, err)
} else {
fileProjection.Data = []byte(sortLines(values))
}
}
data[fPath] = fileProjection
}
return data, utilerrors.NewAggregate(errlist)
}
// sortLines sorts the strings generated from map based data
// (annotations and labels)
func sortLines(values string) string {
splitted := strings.Split(values, "\n")
sort.Strings(splitted)
return strings.Join(splitted, "\n")
}
func (d *downwardAPIVolume) GetPath() string {
return d.plugin.host.GetPodVolumeDir(d.podUID, utilstrings.EscapeQualifiedNameForDisk(downwardAPIPluginName), d.volName)
}
// downwardAPIVolumeUnmounter handles cleaning up downwardAPI volumes
type downwardAPIVolumeUnmounter struct {
*downwardAPIVolume
}
// downwardAPIVolumeUnmounter implements volume.Unmounter interface
var _ volume.Unmounter = &downwardAPIVolumeUnmounter{}
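// TearDown cleans up the volume at its default path.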
func (c *downwardAPIVolumeUnmounter) TearDown() error {
return c.TearDownAt(c.GetPath())
}
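// TearDownAt removes the volume at dir by unmounting it via the wrapped emptyDir volume.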
func (c *downwardAPIVolumeUnmounter) TearDownAt(dir string) error {
return volume.UnmountViaEmptyDir(dir, c.plugin.host, c.volName, wrappedVolumeSpec(), c.podUID)
}
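// getMetaDir returns the directory where the plugin keeps per-volume metadata for this pod.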
func (b *downwardAPIVolumeMounter) getMetaDir() string {
return path.Join(b.plugin.host.GetPodPluginDir(b.podUID, utilstrings.EscapeQualifiedNameForDisk(downwardAPIPluginName)), b.volName)
}
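// getVolumeSource extracts the DownwardAPIVolumeSource and the read-only flag from a volume spec.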
func getVolumeSource(spec *volume.Spec) (*v1.DownwardAPIVolumeSource, bool) {
var readOnly bool
var volumeSource *v1.DownwardAPIVolumeSource
if spec.Volume != nil && spec.Volume.DownwardAPI != nil {
volumeSource = spec.Volume.DownwardAPI
readOnly = spec.ReadOnly
}
return volumeSource, readOnly
}


@ -0,0 +1,399 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package downwardapi
import (
"fmt"
"io/ioutil"
"os"
"path"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/fieldpath"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/empty_dir"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
const (
downwardAPIDir = "..data"
testPodUID = types.UID("test_pod_uid")
testNamespace = "test_metadata_namespace"
testName = "test_metadata_name"
)
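// newTestHost creates a temporary root directory and a fake VolumeHost backed by it for the tests.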
func newTestHost(t *testing.T, clientset clientset.Interface) (string, volume.VolumeHost) {
tempDir, err := utiltesting.MkTmpdir("downwardApi_volume_test.")
if err != nil {
t.Fatalf("can't make a temp rootdir: %v", err)
}
return tempDir, volumetest.NewFakeVolumeHost(tempDir, clientset, empty_dir.ProbeVolumePlugins())
}
func TestCanSupport(t *testing.T) {
pluginMgr := volume.VolumePluginMgr{}
tmpDir, host := newTestHost(t, nil)
defer os.RemoveAll(tmpDir)
pluginMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host)
plugin, err := pluginMgr.FindPluginByName(downwardAPIPluginName)
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if plugin.GetPluginName() != downwardAPIPluginName {
t.Errorf("Wrong name: %s", plugin.GetPluginName())
}
if !plugin.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{DownwardAPI: &v1.DownwardAPIVolumeSource{}}}}) {
t.Errorf("Expected true")
}
if plugin.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
t.Errorf("Expected false")
}
}
func TestDownwardAPI(t *testing.T) {
labels1 := map[string]string{
"key1": "value1",
"key2": "value2",
}
labels2 := map[string]string{
"key1": "value1",
"key2": "value2",
"key3": "value3",
}
annotations := map[string]string{
"a1": "value1",
"a2": "value2",
}
testCases := []struct {
name string
files map[string]string
modes map[string]int32
podLabels map[string]string
podAnnotations map[string]string
steps []testStep
}{
{
name: "test_labels",
files: map[string]string{"labels": "metadata.labels"},
podLabels: labels1,
steps: []testStep{
// for steps that involve files, stepName is also
// used as the name of the file to verify
verifyMapInFile{stepName{"labels"}, labels1},
},
},
{
name: "test_annotations",
files: map[string]string{"annotations": "metadata.annotations"},
podAnnotations: annotations,
steps: []testStep{
verifyMapInFile{stepName{"annotations"}, annotations},
},
},
{
name: "test_name",
files: map[string]string{"name_file_name": "metadata.name"},
steps: []testStep{
verifyLinesInFile{stepName{"name_file_name"}, testName},
},
},
{
name: "test_namespace",
files: map[string]string{"namespace_file_name": "metadata.namespace"},
steps: []testStep{
verifyLinesInFile{stepName{"namespace_file_name"}, testNamespace},
},
},
{
name: "test_write_twice_no_update",
files: map[string]string{"labels": "metadata.labels"},
podLabels: labels1,
steps: []testStep{
reSetUp{stepName{"resetup"}, false, nil},
verifyMapInFile{stepName{"labels"}, labels1},
},
},
{
name: "test_write_twice_with_update",
files: map[string]string{"labels": "metadata.labels"},
podLabels: labels1,
steps: []testStep{
reSetUp{stepName{"resetup"}, true, labels2},
verifyMapInFile{stepName{"labels"}, labels2},
},
},
{
name: "test_write_with_unix_path",
files: map[string]string{
"these/are/my/labels": "metadata.labels",
"these/are/your/annotations": "metadata.annotations",
},
podLabels: labels1,
podAnnotations: annotations,
steps: []testStep{
verifyMapInFile{stepName{"these/are/my/labels"}, labels1},
verifyMapInFile{stepName{"these/are/your/annotations"}, annotations},
},
},
{
name: "test_write_with_two_consecutive_slashes_in_the_path",
files: map[string]string{"this//labels": "metadata.labels"},
podLabels: labels1,
steps: []testStep{
verifyMapInFile{stepName{"this/labels"}, labels1},
},
},
{
name: "test_default_mode",
files: map[string]string{"name_file_name": "metadata.name"},
steps: []testStep{
verifyMode{stepName{"name_file_name"}, 0644},
},
},
{
name: "test_item_mode",
files: map[string]string{"name_file_name": "metadata.name"},
modes: map[string]int32{"name_file_name": 0400},
steps: []testStep{
verifyMode{stepName{"name_file_name"}, 0400},
},
},
}
for _, testCase := range testCases {
test := newDownwardAPITest(t, testCase.name, testCase.files, testCase.podLabels, testCase.podAnnotations, testCase.modes)
for _, step := range testCase.steps {
test.t.Logf("Test case: %q Step: %q", testCase.name, step.getName())
step.run(test)
}
test.tearDown()
}
}
type downwardAPITest struct {
t *testing.T
name string
plugin volume.VolumePlugin
pod *v1.Pod
mounter volume.Mounter
volumePath string
rootDir string
}
func newDownwardAPITest(t *testing.T, name string, volumeFiles, podLabels, podAnnotations map[string]string, modes map[string]int32) *downwardAPITest {
defaultMode := int32(0644)
var files []v1.DownwardAPIVolumeFile
for path, fieldPath := range volumeFiles {
file := v1.DownwardAPIVolumeFile{
Path: path,
FieldRef: &v1.ObjectFieldSelector{
FieldPath: fieldPath,
},
}
if mode, found := modes[path]; found {
file.Mode = &mode
}
files = append(files, file)
}
podMeta := metav1.ObjectMeta{
Name: testName,
Namespace: testNamespace,
Labels: podLabels,
Annotations: podAnnotations,
}
clientset := fake.NewSimpleClientset(&v1.Pod{ObjectMeta: podMeta})
pluginMgr := volume.VolumePluginMgr{}
rootDir, host := newTestHost(t, clientset)
pluginMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host)
plugin, err := pluginMgr.FindPluginByName(downwardAPIPluginName)
if err != nil {
t.Errorf("Can't find the plugin by name")
}
volumeSpec := &v1.Volume{
Name: name,
VolumeSource: v1.VolumeSource{
DownwardAPI: &v1.DownwardAPIVolumeSource{
DefaultMode: &defaultMode,
Items: files,
},
},
}
podMeta.UID = testPodUID
pod := &v1.Pod{ObjectMeta: podMeta}
mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Fatalf("Got a nil Mounter")
}
volumePath := mounter.GetPath()
err = mounter.SetUp(nil)
if err != nil {
t.Errorf("Failed to setup volume: %v", err)
}
// downwardAPI volume should create its own empty wrapper path
podWrapperMetadataDir := fmt.Sprintf("%v/pods/%v/plugins/kubernetes.io~empty-dir/wrapped_%v", rootDir, testPodUID, name)
if _, err := os.Stat(podWrapperMetadataDir); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, empty-dir wrapper path was not created: %s", podWrapperMetadataDir)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
return &downwardAPITest{
t: t,
plugin: plugin,
pod: pod,
mounter: mounter,
volumePath: volumePath,
rootDir: rootDir,
}
}
func (test *downwardAPITest) tearDown() {
unmounter, err := test.plugin.NewUnmounter(test.name, testPodUID)
if err != nil {
test.t.Errorf("Failed to make a new Unmounter: %v", err)
}
if unmounter == nil {
test.t.Fatalf("Got a nil Unmounter")
}
if err := unmounter.TearDown(); err != nil {
test.t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(test.volumePath); err == nil {
test.t.Errorf("TearDown() failed, volume path still exists: %s", test.volumePath)
} else if !os.IsNotExist(err) {
test.t.Errorf("TearDown() failed: %v", err)
}
os.RemoveAll(test.rootDir)
}
// testStep represents a named step of downwardAPITest.
// For steps that deal with files, step name also serves
// as the name of the file that's used by the step.
type testStep interface {
getName() string
run(*downwardAPITest)
}
type stepName struct {
name string
}
func (step stepName) getName() string { return step.name }
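// doVerifyLinesInFile reads filename under volumePath and compares its sorted lines with the sorted expected content.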
func doVerifyLinesInFile(t *testing.T, volumePath, filename string, expected string) {
data, err := ioutil.ReadFile(path.Join(volumePath, filename))
if err != nil {
t.Error(err)
return
}
actualStr := sortLines(string(data))
expectedStr := sortLines(expected)
if actualStr != expectedStr {
t.Errorf("Found `%s`, expected `%s`", actualStr, expectedStr)
}
}
type verifyLinesInFile struct {
stepName
expected string
}
func (step verifyLinesInFile) run(test *downwardAPITest) {
doVerifyLinesInFile(test.t, test.volumePath, step.name, step.expected)
}
type verifyMapInFile struct {
stepName
expected map[string]string
}
func (step verifyMapInFile) run(test *downwardAPITest) {
doVerifyLinesInFile(test.t, test.volumePath, step.name, fieldpath.FormatMap(step.expected))
}
type verifyMode struct {
stepName
expectedMode int32
}
func (step verifyMode) run(test *downwardAPITest) {
fileInfo, err := os.Stat(path.Join(test.volumePath, step.name))
if err != nil {
test.t.Error(err)
return
}
actualMode := fileInfo.Mode()
expectedMode := os.FileMode(step.expectedMode)
if actualMode != expectedMode {
test.t.Errorf("Found mode `%v` expected %v", actualMode, expectedMode)
}
}
type reSetUp struct {
stepName
linkShouldChange bool
newLabels map[string]string
}
func (step reSetUp) run(test *downwardAPITest) {
if step.newLabels != nil {
test.pod.ObjectMeta.Labels = step.newLabels
}
currentTarget, err := os.Readlink(path.Join(test.volumePath, downwardAPIDir))
if err != nil {
test.t.Errorf("labels file should be a link... %s\n", err.Error())
}
// now re-run Setup
if err = test.mounter.SetUp(nil); err != nil {
test.t.Errorf("Failed to re-setup volume: %v", err)
}
// get the link of the link
currentTarget2, err := os.Readlink(path.Join(test.volumePath, downwardAPIDir))
if err != nil {
test.t.Errorf(".current should be a link... %s\n", err.Error())
}
switch {
case step.linkShouldChange && currentTarget2 == currentTarget:
test.t.Errorf("Got and update between the two Setup... Target link should NOT be the same\n")
case !step.linkShouldChange && currentTarget2 != currentTarget:
test.t.Errorf("No update between the two Setup... Target link should be the same %s %s\n",
currentTarget, currentTarget2)
}
}

78
vendor/k8s.io/kubernetes/pkg/volume/empty_dir/BUILD generated vendored Normal file

@ -0,0 +1,78 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"empty_dir.go",
"empty_dir_unsupported.go",
] + select({
"@io_bazel_rules_go//go/platform:linux_amd64": [
"empty_dir_linux.go",
],
"//conditions:default": [],
}),
importpath = "k8s.io/kubernetes/pkg/volume/empty_dir",
deps = [
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
] + select({
"@io_bazel_rules_go//go/platform:linux_amd64": [
"//vendor/golang.org/x/sys/unix:go_default_library",
],
"//conditions:default": [],
}),
)
go_test(
name = "go_default_test",
srcs = select({
"@io_bazel_rules_go//go/platform:linux_amd64": [
"empty_dir_test.go",
],
"//conditions:default": [],
}),
importpath = "k8s.io/kubernetes/pkg/volume/empty_dir",
library = ":go_default_library",
deps = select({
"@io_bazel_rules_go//go/platform:linux_amd64": [
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
],
"//conditions:default": [],
}),
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

14
vendor/k8s.io/kubernetes/pkg/volume/empty_dir/OWNERS generated vendored Normal file

@ -0,0 +1,14 @@
approvers:
- pmorie
- saad-ali
- thockin
- matchstick
reviewers:
- ivan4th
- rata
- sjenning
- saad-ali
- jsafrane
- rootfs
- jingxu97
- msau42

19
vendor/k8s.io/kubernetes/pkg/volume/empty_dir/doc.go generated vendored Normal file

@ -0,0 +1,19 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package empty_dir contains the internal representation of emptyDir
// volumes.
package empty_dir // import "k8s.io/kubernetes/pkg/volume/empty_dir"


@ -0,0 +1,440 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package empty_dir
import (
"fmt"
"os"
"path"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/util/mount"
stringsutil "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)
// TODO: in the near future, this will be changed to be more restrictive
// and the group will be set to allow containers to use emptyDir volumes
// from the group attribute.
//
// http://issue.k8s.io/2630
const perm os.FileMode = 0777
// This is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{
&emptyDirPlugin{nil},
}
}
type emptyDirPlugin struct {
host volume.VolumeHost
}
var _ volume.VolumePlugin = &emptyDirPlugin{}
const (
emptyDirPluginName = "kubernetes.io/empty-dir"
hugePagesPageSizeMountOption = "pagesize"
)
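// getPath returns the directory backing the named emptyDir volume for the given pod UID.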
func getPath(uid types.UID, volName string, host volume.VolumeHost) string {
return host.GetPodVolumeDir(uid, stringsutil.EscapeQualifiedNameForDisk(emptyDirPluginName), volName)
}
func (plugin *emptyDirPlugin) Init(host volume.VolumeHost) error {
plugin.host = host
return nil
}
func (plugin *emptyDirPlugin) GetPluginName() string {
return emptyDirPluginName
}
func (plugin *emptyDirPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
volumeSource, _ := getVolumeSource(spec)
if volumeSource == nil {
return "", fmt.Errorf("Spec does not reference an EmptyDir volume type")
}
// Return user defined volume name, since this is an ephemeral volume type
return spec.Name(), nil
}
func (plugin *emptyDirPlugin) CanSupport(spec *volume.Spec) bool {
if spec.Volume != nil && spec.Volume.EmptyDir != nil {
return true
}
return false
}
func (plugin *emptyDirPlugin) RequiresRemount() bool {
return false
}
func (plugin *emptyDirPlugin) SupportsMountOption() bool {
return false
}
func (plugin *emptyDirPlugin) SupportsBulkVolumeVerification() bool {
return false
}
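// NewMounter injects the real mounter and mount detector and defers to newMounterInternal.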
func (plugin *emptyDirPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
return plugin.newMounterInternal(spec, pod, plugin.host.GetMounter(plugin.GetPluginName()), &realMountDetector{plugin.host.GetMounter(plugin.GetPluginName())}, opts)
}
func (plugin *emptyDirPlugin) newMounterInternal(spec *volume.Spec, pod *v1.Pod, mounter mount.Interface, mountDetector mountDetector, opts volume.VolumeOptions) (volume.Mounter, error) {
medium := v1.StorageMediumDefault
if spec.Volume.EmptyDir != nil { // Support a non-specified source as EmptyDir.
medium = spec.Volume.EmptyDir.Medium
}
return &emptyDir{
pod: pod,
volName: spec.Name(),
medium: medium,
mounter: mounter,
mountDetector: mountDetector,
plugin: plugin,
MetricsProvider: volume.NewMetricsDu(getPath(pod.UID, spec.Name(), plugin.host)),
}, nil
}
func (plugin *emptyDirPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
// Inject real implementations here, test through the internal function.
return plugin.newUnmounterInternal(volName, podUID, plugin.host.GetMounter(plugin.GetPluginName()), &realMountDetector{plugin.host.GetMounter(plugin.GetPluginName())})
}
func (plugin *emptyDirPlugin) newUnmounterInternal(volName string, podUID types.UID, mounter mount.Interface, mountDetector mountDetector) (volume.Unmounter, error) {
ed := &emptyDir{
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: podUID}},
volName: volName,
medium: v1.StorageMediumDefault, // might be changed later
mounter: mounter,
mountDetector: mountDetector,
plugin: plugin,
MetricsProvider: volume.NewMetricsDu(getPath(podUID, volName, plugin.host)),
}
return ed, nil
}
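// ConstructVolumeSpec rebuilds a bare emptyDir volume spec from the volume name; the mount path carries no extra state for this plugin.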
func (plugin *emptyDirPlugin) ConstructVolumeSpec(volName, mountPath string) (*volume.Spec, error) {
emptyDirVolume := &v1.Volume{
Name: volName,
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
}
return volume.NewSpecFromVolume(emptyDirVolume), nil
}
// mountDetector abstracts how to find what kind of mount a path is backed by.
type mountDetector interface {
// GetMountMedium determines what type of medium a given path is backed
// by and whether that path is a mount point. For example, if this
// returns (mediumMemory, false, nil), the caller knows that the path is
// on a memory FS (tmpfs on Linux) but is not the root mountpoint of
// that tmpfs.
GetMountMedium(path string) (storageMedium, bool, error)
}
type storageMedium int
const (
mediumUnknown storageMedium = 0 // assume anything we don't explicitly handle is this
mediumMemory storageMedium = 1 // memory (e.g. tmpfs on linux)
mediumHugepages storageMedium = 2 // hugepages
)
// EmptyDir volumes are temporary directories exposed to the pod.
// These do not persist beyond the lifetime of a pod.
type emptyDir struct {
pod *v1.Pod
volName string
medium v1.StorageMedium
mounter mount.Interface
mountDetector mountDetector
plugin *emptyDirPlugin
volume.MetricsProvider
}
func (ed *emptyDir) GetAttributes() volume.Attributes {
return volume.Attributes{
ReadOnly: false,
Managed: true,
SupportsSELinux: true,
}
}
// CanMount checks, prior to mount operations, that the required components
// (binaries, etc.) to mount the volume are available on the underlying node.
// If not, it returns an error.
func (b *emptyDir) CanMount() error {
return nil
}
// SetUp creates new directory.
func (ed *emptyDir) SetUp(fsGroup *int64) error {
return ed.SetUpAt(ed.GetPath(), fsGroup)
}
// SetUpAt creates new directory.
func (ed *emptyDir) SetUpAt(dir string, fsGroup *int64) error {
notMnt, err := ed.mounter.IsLikelyNotMountPoint(dir)
// Getting an os.IsNotExist err here is expected: the directory
// may not exist yet, in which case setup should run.
if err != nil && !os.IsNotExist(err) {
return err
}
// If the plugin readiness file is present for this volume, and the
// storage medium is the default, then the volume is ready. If the
// medium is memory, and a mountpoint is present, then the volume is
// ready.
if volumeutil.IsReady(ed.getMetaDir()) {
if ed.medium == v1.StorageMediumMemory && !notMnt {
return nil
} else if ed.medium == v1.StorageMediumDefault {
return nil
}
}
switch ed.medium {
case v1.StorageMediumDefault:
err = ed.setupDir(dir)
case v1.StorageMediumMemory:
err = ed.setupTmpfs(dir)
case v1.StorageMediumHugePages:
err = ed.setupHugepages(dir)
default:
err = fmt.Errorf("unknown storage medium %q", ed.medium)
}
volume.SetVolumeOwnership(ed, fsGroup)
if err == nil {
volumeutil.SetReady(ed.getMetaDir())
}
return err
}
// setupTmpfs creates a tmpfs mount at the specified directory.
func (ed *emptyDir) setupTmpfs(dir string) error {
if ed.mounter == nil {
return fmt.Errorf("memory storage requested, but mounter is nil")
}
if err := ed.setupDir(dir); err != nil {
return err
}
// Make SetUp idempotent.
medium, isMnt, err := ed.mountDetector.GetMountMedium(dir)
if err != nil {
return err
}
// If the directory is a mountpoint with medium memory, there is no
// work to do since we are already in the desired state.
if isMnt && medium == mediumMemory {
return nil
}
glog.V(3).Infof("pod %v: mounting tmpfs for volume %v", ed.pod.UID, ed.volName)
return ed.mounter.Mount("tmpfs", dir, "tmpfs", nil /* options */)
}
// setupHugepages creates a hugepage mount at the specified directory.
func (ed *emptyDir) setupHugepages(dir string) error {
if ed.mounter == nil {
return fmt.Errorf("memory storage requested, but mounter is nil")
}
if err := ed.setupDir(dir); err != nil {
return err
}
// Make SetUp idempotent.
medium, isMnt, err := ed.mountDetector.GetMountMedium(dir)
if err != nil {
return err
}
// If the directory is a mountpoint with medium hugepages, there is no
// work to do since we are already in the desired state.
if isMnt && medium == mediumHugepages {
return nil
}
pageSizeMountOption, err := getPageSizeMountOptionFromPod(ed.pod)
if err != nil {
return err
}
glog.V(3).Infof("pod %v: mounting hugepages for volume %v", ed.pod.UID, ed.volName)
return ed.mounter.Mount("nodev", dir, "hugetlbfs", []string{pageSizeMountOption})
}
// getPageSizeMountOptionFromPod retrieves pageSize mount option from Pod's resources
// and validates pageSize options in all containers of given Pod.
func getPageSizeMountOptionFromPod(pod *v1.Pod) (string, error) {
pageSizeFound := false
pageSize := resource.Quantity{}
// In some rare cases init containers can also consume Huge pages.
containers := append(pod.Spec.Containers, pod.Spec.InitContainers...)
for _, container := range containers {
// We can take request because limit and requests must match.
for requestName := range container.Resources.Requests {
if v1helper.IsHugePageResourceName(requestName) {
currentPageSize, err := v1helper.HugePageSizeFromResourceName(requestName)
if err != nil {
return "", err
}
// The page size must be the same for every huge pages request in the Pod; remember the first size discovered and compare the rest against it.
if pageSizeFound && pageSize.Cmp(currentPageSize) != 0 {
return "", fmt.Errorf("multiple pageSizes for huge pages in a single PodSpec")
}
pageSize = currentPageSize
pageSizeFound = true
}
}
}
if !pageSizeFound {
return "", fmt.Errorf("hugePages storage requested, but there is no resource request for huge pages.")
}
return fmt.Sprintf("%s=%s", hugePagesPageSizeMountOption, pageSize.String()), nil
}
// setupDir creates the directory with the default permissions specified by the perm constant.
func (ed *emptyDir) setupDir(dir string) error {
// Create the directory if it doesn't already exist.
if err := os.MkdirAll(dir, perm); err != nil {
return err
}
// stat the directory to read permission bits
fileinfo, err := os.Lstat(dir)
if err != nil {
return err
}
if fileinfo.Mode().Perm() != perm.Perm() {
// If the permissions on the created directory are wrong, the
// kubelet is probably running with a umask set. In order to
// avoid clearing the umask for the entire process or locking
// the thread, clearing the umask, creating the dir, restoring
// the umask, and unlocking the thread, we do a chmod to set
// the specific bits we need.
err := os.Chmod(dir, perm)
if err != nil {
return err
}
fileinfo, err = os.Lstat(dir)
if err != nil {
return err
}
if fileinfo.Mode().Perm() != perm.Perm() {
glog.Errorf("Expected directory %q permissions to be: %s; got: %s", dir, perm.Perm(), fileinfo.Mode().Perm())
}
}
return nil
}
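// GetPath returns the path at which this emptyDir volume is exposed to the pod.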
func (ed *emptyDir) GetPath() string {
return getPath(ed.pod.UID, ed.volName, ed.plugin.host)
}
// TearDown simply discards everything in the directory.
func (ed *emptyDir) TearDown() error {
return ed.TearDownAt(ed.GetPath())
}
// TearDownAt simply discards everything in the directory.
func (ed *emptyDir) TearDownAt(dir string) error {
if pathExists, pathErr := volumeutil.PathExists(dir); pathErr != nil {
return fmt.Errorf("Error checking if path exists: %v", pathErr)
} else if !pathExists {
glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir)
return nil
}
// Figure out the medium.
medium, isMnt, err := ed.mountDetector.GetMountMedium(dir)
if err != nil {
return err
}
if isMnt {
if medium == mediumMemory {
ed.medium = v1.StorageMediumMemory
return ed.teardownTmpfsOrHugetlbfs(dir)
} else if medium == mediumHugepages {
ed.medium = v1.StorageMediumHugePages
return ed.teardownTmpfsOrHugetlbfs(dir)
}
}
// assume StorageMediumDefault
return ed.teardownDefault(dir)
}
func (ed *emptyDir) teardownDefault(dir string) error {
// Renaming the directory is not required anymore because the operation executor
// now handles duplicate operations on the same volume
err := os.RemoveAll(dir)
if err != nil {
return err
}
return nil
}
func (ed *emptyDir) teardownTmpfsOrHugetlbfs(dir string) error {
if ed.mounter == nil {
return fmt.Errorf("memory storage requested, but mounter is nil")
}
if err := ed.mounter.Unmount(dir); err != nil {
return err
}
if err := os.RemoveAll(dir); err != nil {
return err
}
return nil
}
func (ed *emptyDir) getMetaDir() string {
return path.Join(ed.plugin.host.GetPodPluginDir(ed.pod.UID, stringsutil.EscapeQualifiedNameForDisk(emptyDirPluginName)), ed.volName)
}
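// getVolumeSource extracts the EmptyDirVolumeSource and the read-only flag from a volume spec.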
func getVolumeSource(spec *volume.Spec) (*v1.EmptyDirVolumeSource, bool) {
var readOnly bool
var volumeSource *v1.EmptyDirVolumeSource
if spec.Volume != nil && spec.Volume.EmptyDir != nil {
volumeSource = spec.Volume.EmptyDir
readOnly = spec.ReadOnly
}
return volumeSource, readOnly
}


@ -0,0 +1,58 @@
// +build linux
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package empty_dir
import (
"fmt"
"github.com/golang/glog"
"golang.org/x/sys/unix"
"k8s.io/kubernetes/pkg/util/mount"
)
// Defined by Linux: the filesystem type magic numbers for tmpfs and hugetlbfs mounts.
const (
linuxTmpfsMagic = 0x01021994
linuxHugetlbfsMagic = 0x958458f6
)
// realMountDetector implements mountDetector in terms of syscalls.
type realMountDetector struct {
mounter mount.Interface
}
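// GetMountMedium checks whether path is a mount point and classifies its backing filesystem by its statfs magic number.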
func (m *realMountDetector) GetMountMedium(path string) (storageMedium, bool, error) {
glog.V(5).Infof("Determining mount medium of %v", path)
notMnt, err := m.mounter.IsLikelyNotMountPoint(path)
if err != nil {
return 0, false, fmt.Errorf("IsLikelyNotMountPoint(%q): %v", path, err)
}
buf := unix.Statfs_t{}
if err := unix.Statfs(path, &buf); err != nil {
return 0, false, fmt.Errorf("statfs(%q): %v", path, err)
}
glog.V(5).Infof("Statfs_t of %v: %+v", path, buf)
if buf.Type == linuxTmpfsMagic {
return mediumMemory, !notMnt, nil
} else if int64(buf.Type) == linuxHugetlbfsMagic {
return mediumHugepages, !notMnt, nil
}
return mediumUnknown, !notMnt, nil
}


@ -0,0 +1,441 @@
// +build linux
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package empty_dir
import (
"os"
"path"
"testing"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/kubernetes/pkg/volume/util"
)
// Construct an instance of a plugin, by name.
func makePluginUnderTest(t *testing.T, plugName, basePath string) volume.VolumePlugin {
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(basePath, nil, nil))
plug, err := plugMgr.FindPluginByName(plugName)
if err != nil {
t.Errorf("Can't find the plugin by name")
}
return plug
}
func TestCanSupport(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("emptydirTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plug := makePluginUnderTest(t, "kubernetes.io/empty-dir", tmpDir)
if plug.GetPluginName() != "kubernetes.io/empty-dir" {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}}}) {
t.Errorf("Expected true")
}
if plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
t.Errorf("Expected false")
}
}
type fakeMountDetector struct {
medium storageMedium
isMount bool
}
func (fake *fakeMountDetector) GetMountMedium(path string) (storageMedium, bool, error) {
return fake.medium, fake.isMount, nil
}
func TestPluginEmptyRootContext(t *testing.T) {
doTestPlugin(t, pluginTestConfig{
medium: v1.StorageMediumDefault,
expectedSetupMounts: 0,
expectedTeardownMounts: 0})
}
func TestPluginHugetlbfs(t *testing.T) {
doTestPlugin(t, pluginTestConfig{
medium: v1.StorageMediumHugePages,
expectedSetupMounts: 1,
expectedTeardownMounts: 0,
shouldBeMountedBeforeTeardown: true,
})
}
type pluginTestConfig struct {
medium v1.StorageMedium
idempotent bool
expectedSetupMounts int
shouldBeMountedBeforeTeardown bool
expectedTeardownMounts int
}
// doTestPlugin sets up a volume and tears it back down.
func doTestPlugin(t *testing.T, config pluginTestConfig) {
basePath, err := utiltesting.MkTmpdir("emptydir_volume_test")
if err != nil {
t.Fatalf("can't make a temp rootdir: %v", err)
}
defer os.RemoveAll(basePath)
var (
volumePath = path.Join(basePath, "pods/poduid/volumes/kubernetes.io~empty-dir/test-volume")
metadataDir = path.Join(basePath, "pods/poduid/plugins/kubernetes.io~empty-dir/test-volume")
plug = makePluginUnderTest(t, "kubernetes.io/empty-dir", basePath)
volumeName = "test-volume"
spec = &v1.Volume{
Name: volumeName,
VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{Medium: config.medium}},
}
physicalMounter = mount.FakeMounter{}
mountDetector = fakeMountDetector{}
pod = &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: types.UID("poduid"),
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName("hugepages-2Mi"): resource.MustParse("100Mi"),
},
},
},
},
},
}
)
if config.idempotent {
physicalMounter.MountPoints = []mount.MountPoint{
{
Path: volumePath,
},
}
util.SetReady(metadataDir)
}
mounter, err := plug.(*emptyDirPlugin).newMounterInternal(volume.NewSpecFromVolume(spec),
pod,
&physicalMounter,
&mountDetector,
volume.VolumeOptions{})
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Errorf("Got a nil Mounter")
}
volPath := mounter.GetPath()
if volPath != volumePath {
t.Errorf("Got unexpected path: %s", volPath)
}
if err := mounter.SetUp(nil); err != nil {
t.Errorf("Expected success, got: %v", err)
}
// Stat the directory and check the permission bits
fileinfo, err := os.Stat(volPath)
if !config.idempotent {
if err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", volPath)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
if e, a := perm, fileinfo.Mode().Perm(); e != a {
t.Errorf("Unexpected file mode for %v: expected: %v, got: %v", volPath, e, a)
}
} else if err == nil {
// If this test is for idempotency and we were able
// to stat the volume path, it's an error.
t.Errorf("Volume directory was created unexpectedly")
}
// Check the number of mounts performed during setup
if e, a := config.expectedSetupMounts, len(physicalMounter.Log); e != a {
t.Errorf("Expected %v physicalMounter calls during setup, got %v", e, a)
} else if config.expectedSetupMounts == 1 &&
(physicalMounter.Log[0].Action != mount.FakeActionMount || (physicalMounter.Log[0].FSType != "tmpfs" && physicalMounter.Log[0].FSType != "hugetlbfs")) {
t.Errorf("Unexpected physicalMounter action during setup: %#v", physicalMounter.Log[0])
}
physicalMounter.ResetLog()
// Make an unmounter for the volume
teardownMedium := mediumUnknown
if config.medium == v1.StorageMediumMemory {
teardownMedium = mediumMemory
}
unmounterMountDetector := &fakeMountDetector{medium: teardownMedium, isMount: config.shouldBeMountedBeforeTeardown}
unmounter, err := plug.(*emptyDirPlugin).newUnmounterInternal(volumeName, types.UID("poduid"), &physicalMounter, unmounterMountDetector)
if err != nil {
t.Errorf("Failed to make a new Unmounter: %v", err)
}
if unmounter == nil {
t.Errorf("Got a nil Unmounter")
}
// Tear down the volume
if err := unmounter.TearDown(); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(volPath); err == nil {
t.Errorf("TearDown() failed, volume path still exists: %s", volPath)
} else if !os.IsNotExist(err) {
t.Errorf("TearDown() failed: %v", err)
}
// Check the number of physicalMounter calls during teardown
if e, a := config.expectedTeardownMounts, len(physicalMounter.Log); e != a {
t.Errorf("Expected %v physicalMounter calls during teardown, got %v", e, a)
} else if config.expectedTeardownMounts == 1 && physicalMounter.Log[0].Action != mount.FakeActionUnmount {
t.Errorf("Unexpected physicalMounter action during teardown: %#v", physicalMounter.Log[0])
}
physicalMounter.ResetLog()
}
func TestPluginBackCompat(t *testing.T) {
basePath, err := utiltesting.MkTmpdir("emptydirTest")
if err != nil {
t.Fatalf("can't make a temp dir %v", err)
}
defer os.RemoveAll(basePath)
plug := makePluginUnderTest(t, "kubernetes.io/empty-dir", basePath)
spec := &v1.Volume{
Name: "vol1",
}
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
mounter, err := plug.NewMounter(volume.NewSpecFromVolume(spec), pod, volume.VolumeOptions{})
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Fatalf("Got a nil Mounter")
}
volPath := mounter.GetPath()
if volPath != path.Join(basePath, "pods/poduid/volumes/kubernetes.io~empty-dir/vol1") {
t.Errorf("Got unexpected path: %s", volPath)
}
}
// TestMetrics tests that MetricsProvider methods return sane values.
func TestMetrics(t *testing.T) {
// Create an empty temp directory for the volume
tmpDir, err := utiltesting.MkTmpdir("empty_dir_test")
if err != nil {
t.Fatalf("Can't make a tmp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plug := makePluginUnderTest(t, "kubernetes.io/empty-dir", tmpDir)
spec := &v1.Volume{
Name: "vol1",
}
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
mounter, err := plug.NewMounter(volume.NewSpecFromVolume(spec), pod, volume.VolumeOptions{})
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Fatalf("Got a nil Mounter")
}
// Need to create the subdirectory
os.MkdirAll(mounter.GetPath(), 0755)
expectedEmptyDirUsage, err := volumetest.FindEmptyDirectoryUsageOnTmpfs()
if err != nil {
t.Errorf("Unexpected error finding expected empty directory usage on tmpfs: %v", err)
}
// TODO(pwittroc): Move this into a reusable testing utility
metrics, err := mounter.GetMetrics()
if err != nil {
t.Errorf("Unexpected error when calling GetMetrics %v", err)
}
if e, a := expectedEmptyDirUsage.Value(), metrics.Used.Value(); e != a {
t.Errorf("Unexpected value for empty directory; expected %v, got %v", e, a)
}
if metrics.Capacity.Value() <= 0 {
t.Errorf("Expected Capacity to be greater than 0")
}
if metrics.Available.Value() <= 0 {
t.Errorf("Expected Available to be greater than 0")
}
}
func TestGetHugePagesMountOptions(t *testing.T) {
testCases := map[string]struct {
pod *v1.Pod
shouldFail bool
expectedResult string
}{
"testWithProperValues": {
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName("hugepages-2Mi"): resource.MustParse("100Mi"),
},
},
},
},
},
},
shouldFail: false,
expectedResult: "pagesize=2Mi",
},
"testWithProperValuesAndDifferentPageSize": {
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName("hugepages-1Gi"): resource.MustParse("2Gi"),
},
},
},
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName("hugepages-1Gi"): resource.MustParse("4Gi"),
},
},
},
},
},
},
shouldFail: false,
expectedResult: "pagesize=1Gi",
},
"InitContainerAndContainerHasProperValues": {
pod: &v1.Pod{
Spec: v1.PodSpec{
InitContainers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName("hugepages-1Gi"): resource.MustParse("2Gi"),
},
},
},
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName("hugepages-1Gi"): resource.MustParse("4Gi"),
},
},
},
},
},
},
shouldFail: false,
expectedResult: "pagesize=1Gi",
},
"InitContainerAndContainerHasDifferentPageSizes": {
pod: &v1.Pod{
Spec: v1.PodSpec{
InitContainers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName("hugepages-2Mi"): resource.MustParse("2Gi"),
},
},
},
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName("hugepages-1Gi"): resource.MustParse("4Gi"),
},
},
},
},
},
},
shouldFail: true,
expectedResult: "",
},
"ContainersWithMultiplePageSizes": {
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName("hugepages-1Gi"): resource.MustParse("2Gi"),
},
},
},
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName("hugepages-2Mi"): resource.MustParse("100Mi"),
},
},
},
},
},
},
shouldFail: true,
expectedResult: "",
},
"PodWithNoHugePagesRequest": {
pod: &v1.Pod{},
shouldFail: true,
expectedResult: "",
},
}
for testCaseName, testCase := range testCases {
value, err := getPageSizeMountOptionFromPod(testCase.pod)
if testCase.shouldFail && err == nil {
t.Errorf("Expected an error in %v", testCaseName)
} else if !testCase.shouldFail && err != nil {
t.Errorf("Unexpected error in %v, got %v", testCaseName, err)
} else if testCase.expectedResult != value {
t.Errorf("Unexpected mountOptions for Pod. Expected %v, got %v", testCase.expectedResult, value)
}
}
}


@ -0,0 +1,32 @@
// +build !linux
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package empty_dir
import (
"k8s.io/kubernetes/pkg/util/mount"
)
// realMountDetector pretends to implement mountDetector; on non-Linux platforms it always reports an unknown, unmounted medium.
type realMountDetector struct {
mounter mount.Interface
}
func (m *realMountDetector) GetMountMedium(path string) (storageMedium, bool, error) {
return mediumUnknown, false, nil
}

65
vendor/k8s.io/kubernetes/pkg/volume/fc/BUILD generated vendored Normal file

@ -0,0 +1,65 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"attacher.go",
"disk_manager.go",
"doc.go",
"fc.go",
"fc_util.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/fc",
deps = [
"//pkg/features:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"fc_test.go",
"fc_util_test.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/fc",
library = ":go_default_library",
deps = [
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

10
vendor/k8s.io/kubernetes/pkg/volume/fc/OWNERS generated vendored Normal file

@ -0,0 +1,10 @@
approvers:
- rootfs
- saad-ali
reviewers:
- saad-ali
- jsafrane
- rootfs
- jingxu97
- msau42
- mtanino

220
vendor/k8s.io/kubernetes/pkg/volume/fc/attacher.go generated vendored Normal file

@ -0,0 +1,220 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fc
import (
"fmt"
"os"
"strconv"
"strings"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
type fcAttacher struct {
host volume.VolumeHost
manager diskManager
}
var _ volume.Attacher = &fcAttacher{}
var _ volume.AttachableVolumePlugin = &fcPlugin{}
func (plugin *fcPlugin) NewAttacher() (volume.Attacher, error) {
return &fcAttacher{
host: plugin.host,
manager: &FCUtil{},
}, nil
}
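// GetDeviceMountRefs returns all mount references to the given device mount path.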
func (plugin *fcPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
mounter := plugin.host.GetMounter(plugin.GetPluginName())
return mount.GetMountRefs(mounter, deviceMountPath)
}
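// Attach is a no-op for FC: there is no control-plane attach operation; the disk is discovered on the node in WaitForAttach.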
func (attacher *fcAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
return "", nil
}
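// VolumesAreAttached reports every given spec as attached, since FC exposes no centralized attach state to query.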
func (attacher *fcAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
volumesAttachedCheck := make(map[*volume.Spec]bool)
for _, spec := range specs {
volumesAttachedCheck[spec] = true
}
return volumesAttachedCheck, nil
}
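// WaitForAttach builds an fc mounter from the spec and asks the disk manager to attach (discover) the disk, returning its device path.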
func (attacher *fcAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) {
mounter, err := volumeSpecToMounter(spec, attacher.host)
if err != nil {
glog.Warningf("failed to get fc mounter: %v", err)
return "", err
}
return attacher.manager.AttachDisk(*mounter)
}
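// GetDeviceMountPath returns the global mount path for the disk described by spec.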
func (attacher *fcAttacher) GetDeviceMountPath(
spec *volume.Spec) (string, error) {
mounter, err := volumeSpecToMounter(spec, attacher.host)
if err != nil {
glog.Warningf("failed to get fc mounter: %v", err)
return "", err
}
return attacher.manager.MakeGlobalPDName(*mounter.fcDisk), nil
}
func (attacher *fcAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
mounter := attacher.host.GetMounter(fcPluginName)
notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
if err != nil {
if os.IsNotExist(err) {
if err := os.MkdirAll(deviceMountPath, 0750); err != nil {
return err
}
notMnt = true
} else {
return err
}
}
volumeSource, readOnly, err := getVolumeSource(spec)
if err != nil {
return err
}
options := []string{}
if readOnly {
options = append(options, "ro")
}
if notMnt {
diskMounter := &mount.SafeFormatAndMount{Interface: mounter, Exec: attacher.host.GetExec(fcPluginName)}
mountOptions := volume.MountOptionFromSpec(spec, options...)
err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions)
if err != nil {
os.Remove(deviceMountPath)
return err
}
}
return nil
}
type fcDetacher struct {
mounter mount.Interface
manager diskManager
}
var _ volume.Detacher = &fcDetacher{}
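// NewDetacher injects the host mounter and the real FC disk manager.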
func (plugin *fcPlugin) NewDetacher() (volume.Detacher, error) {
return &fcDetacher{
mounter: plugin.host.GetMounter(plugin.GetPluginName()),
manager: &FCUtil{},
}, nil
}
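// Detach is a no-op for FC; device teardown happens on the node in UnmountDevice.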
func (detacher *fcDetacher) Detach(volumeName string, nodeName types.NodeName) error {
return nil
}
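// UnmountDevice unmounts the global mount point and detaches the underlying FC device from the node.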
func (detacher *fcDetacher) UnmountDevice(deviceMountPath string) error {
// Specify device name for DetachDisk later
devName, _, err := mount.GetDeviceNameFromMount(detacher.mounter, deviceMountPath)
if err != nil {
glog.Errorf("fc: failed to get device from mnt: %s\nError: %v", deviceMountPath, err)
return err
}
// Unmount for deviceMountPath(=globalPDPath)
err = volumeutil.UnmountPath(deviceMountPath, detacher.mounter)
if err != nil {
return fmt.Errorf("fc: failed to unmount: %s\nError: %v", deviceMountPath, err)
}
unMounter := volumeSpecToUnmounter(detacher.mounter)
err = detacher.manager.DetachDisk(*unMounter, devName)
if err != nil {
return fmt.Errorf("fc: failed to detach disk: %s\nError: %v", devName, err)
}
glog.V(4).Infof("fc: successfully detached disk: %s", devName)
return nil
}
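// volumeSpecToMounter builds an fcDiskMounter from a volume spec, identifying the disk by either TargetWWNs plus Lun or by WWIDs.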
func volumeSpecToMounter(spec *volume.Spec, host volume.VolumeHost) (*fcDiskMounter, error) {
fc, readOnly, err := getVolumeSource(spec)
if err != nil {
return nil, err
}
var lun string
var wwids []string
if fc.Lun != nil && len(fc.TargetWWNs) != 0 {
lun = strconv.Itoa(int(*fc.Lun))
} else if len(fc.WWIDs) != 0 {
for _, wwid := range fc.WWIDs {
wwids = append(wwids, strings.Replace(wwid, " ", "_", -1))
}
} else {
return nil, fmt.Errorf("fc: no fc disk information found. failed to make a new mounter")
}
fcDisk := &fcDisk{
plugin: &fcPlugin{
host: host,
},
wwns: fc.TargetWWNs,
lun: lun,
wwids: wwids,
io: &osIOHandler{},
}
// TODO: remove the feature gate check once it is no longer needed
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
volumeMode, err := volumehelper.GetVolumeMode(spec)
if err != nil {
return nil, err
}
glog.V(5).Infof("fc: volumeSpecToMounter volumeMode %s", volumeMode)
return &fcDiskMounter{
fcDisk: fcDisk,
fsType: fc.FSType,
volumeMode: volumeMode,
readOnly: readOnly,
mounter: volumehelper.NewSafeFormatAndMountFromHost(fcPluginName, host),
}, nil
}
return &fcDiskMounter{
fcDisk: fcDisk,
fsType: fc.FSType,
readOnly: readOnly,
mounter: volumehelper.NewSafeFormatAndMountFromHost(fcPluginName, host),
}, nil
}
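// volumeSpecToUnmounter builds a minimal fcDiskUnmounter around the given mounter.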
func volumeSpecToUnmounter(mounter mount.Interface) *fcDiskUnmounter {
return &fcDiskUnmounter{
fcDisk: &fcDisk{
io: &osIOHandler{},
},
mounter: mounter,
}
}

92
vendor/k8s.io/kubernetes/pkg/volume/fc/disk_manager.go generated vendored Normal file

@ -0,0 +1,92 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fc
import (
"os"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
)
// Abstract interface to disk operations.
type diskManager interface {
MakeGlobalPDName(disk fcDisk) string
MakeGlobalVDPDName(disk fcDisk) string
// Attaches the disk to the kubelet's host machine.
AttachDisk(b fcDiskMounter) (string, error)
// Detaches the disk from the kubelet's host machine.
DetachDisk(disk fcDiskUnmounter, devName string) error
}
// utility to mount a disk based filesystem
func diskSetUp(manager diskManager, b fcDiskMounter, volPath string, mounter mount.Interface, fsGroup *int64) error {
globalPDPath := manager.MakeGlobalPDName(*b.fcDisk)
noMnt, err := mounter.IsLikelyNotMountPoint(volPath)
if err != nil && !os.IsNotExist(err) {
glog.Errorf("cannot validate mountpoint: %s", volPath)
return err
}
if !noMnt {
return nil
}
if err := os.MkdirAll(volPath, 0750); err != nil {
glog.Errorf("failed to mkdir:%s", volPath)
return err
}
// Perform a bind mount to the full path to allow duplicate mounts of the same disk.
options := []string{"bind"}
if b.readOnly {
options = append(options, "ro")
}
err = mounter.Mount(globalPDPath, volPath, "", options)
if err != nil {
glog.Errorf("Failed to bind mount: source:%s, target:%s, err:%v", globalPDPath, volPath, err)
noMnt, mntErr := b.mounter.IsLikelyNotMountPoint(volPath)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return err
}
if !noMnt {
if mntErr = b.mounter.Unmount(volPath); mntErr != nil {
glog.Errorf("Failed to unmount: %v", mntErr)
return err
}
noMnt, mntErr = b.mounter.IsLikelyNotMountPoint(volPath)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return err
}
if !noMnt {
// will most likely retry on next sync loop.
glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", volPath)
return err
}
}
os.Remove(volPath)
return err
}
if !b.readOnly {
volume.SetVolumeOwnership(&b, fsGroup)
}
return nil
}

19
vendor/k8s.io/kubernetes/pkg/volume/fc/doc.go generated vendored Normal file

@ -0,0 +1,19 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package fc contains the internal representation of
// Fibre Channel (fc) volumes.
package fc // import "k8s.io/kubernetes/pkg/volume/fc"

465
vendor/k8s.io/kubernetes/pkg/volume/fc/fc.go generated vendored Normal file

@ -0,0 +1,465 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fc
import (
"fmt"
"strconv"
"strings"
"github.com/golang/glog"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/util/mount"
utilstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
// This is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&fcPlugin{nil}}
}
type fcPlugin struct {
host volume.VolumeHost
}
var _ volume.VolumePlugin = &fcPlugin{}
var _ volume.PersistentVolumePlugin = &fcPlugin{}
var _ volume.BlockVolumePlugin = &fcPlugin{}
const (
fcPluginName = "kubernetes.io/fc"
)
func (plugin *fcPlugin) Init(host volume.VolumeHost) error {
plugin.host = host
return nil
}
func (plugin *fcPlugin) GetPluginName() string {
return fcPluginName
}
func (plugin *fcPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return "", err
}
// API server validates these parameters beforehand but attach/detach
// controller creates volumespec without validation. They may be nil
// or zero length. We should check again to avoid unexpected conditions.
if len(volumeSource.TargetWWNs) != 0 && volumeSource.Lun != nil {
// TargetWWNs are the FibreChannel target worldwide names
return fmt.Sprintf("%v:%v", volumeSource.TargetWWNs, *volumeSource.Lun), nil
} else if len(volumeSource.WWIDs) != 0 {
// WWIDs are the FibreChannel World Wide Identifiers
return fmt.Sprintf("%v", volumeSource.WWIDs), nil
}
return "", err
}
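// CanSupport reports whether the spec references an FC volume, either inline or through a PersistentVolume.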
func (plugin *fcPlugin) CanSupport(spec *volume.Spec) bool {
if (spec.Volume != nil && spec.Volume.FC == nil) || (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.FC == nil) {
return false
}
return true
}
func (plugin *fcPlugin) RequiresRemount() bool {
return false
}
func (plugin *fcPlugin) SupportsMountOption() bool {
return false
}
func (plugin *fcPlugin) SupportsBulkVolumeVerification() bool {
return false
}
func (plugin *fcPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
return []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
v1.ReadOnlyMany,
}
}
func (plugin *fcPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
// Inject real implementations here, test through the internal function.
return plugin.newMounterInternal(spec, pod.UID, &FCUtil{}, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName()))
}
func (plugin *fcPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface, exec mount.Exec) (volume.Mounter, error) {
// fc volumes used directly in a pod have a ReadOnly flag set by the pod author.
// fc volumes used as a PersistentVolume get the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
fc, readOnly, err := getVolumeSource(spec)
if err != nil {
return nil, err
}
wwns, lun, wwids, err := getWwnsLunWwids(fc)
if err != nil {
return nil, fmt.Errorf("fc: no fc disk information found. failed to make a new mounter")
}
fcDisk := &fcDisk{
podUID: podUID,
volName: spec.Name(),
wwns: wwns,
lun: lun,
wwids: wwids,
manager: manager,
io: &osIOHandler{},
plugin: plugin,
}
// TODO: remove feature gate check after no longer needed
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
volumeMode, err := volumehelper.GetVolumeMode(spec)
if err != nil {
return nil, err
}
glog.V(5).Infof("fc: newMounterInternal volumeMode %s", volumeMode)
return &fcDiskMounter{
fcDisk: fcDisk,
fsType: fc.FSType,
volumeMode: volumeMode,
readOnly: readOnly,
mounter: &mount.SafeFormatAndMount{Interface: mounter, Exec: exec},
}, nil
}
return &fcDiskMounter{
fcDisk: fcDisk,
fsType: fc.FSType,
readOnly: readOnly,
mounter: &mount.SafeFormatAndMount{Interface: mounter, Exec: exec},
}, nil
}
func (plugin *fcPlugin) NewBlockVolumeMapper(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.BlockVolumeMapper, error) {
// If this is called via GenerateUnmapDeviceFunc(), pod is nil.
// Pass an empty string as a dummy UID, since the UID isn't used in that case.
var uid types.UID
if pod != nil {
uid = pod.UID
}
return plugin.newBlockVolumeMapperInternal(spec, uid, &FCUtil{}, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName()))
}
func (plugin *fcPlugin) newBlockVolumeMapperInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface, exec mount.Exec) (volume.BlockVolumeMapper, error) {
fc, readOnly, err := getVolumeSource(spec)
if err != nil {
return nil, err
}
wwns, lun, wwids, err := getWwnsLunWwids(fc)
if err != nil {
return nil, fmt.Errorf("fc: no fc disk information found. failed to make a new mapper")
}
return &fcDiskMapper{
fcDisk: &fcDisk{
podUID: podUID,
volName: spec.Name(),
wwns: wwns,
lun: lun,
wwids: wwids,
manager: manager,
io: &osIOHandler{},
plugin: plugin},
readOnly: readOnly,
mounter: &mount.SafeFormatAndMount{Interface: mounter, Exec: exec},
}, nil
}
func (plugin *fcPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
// Inject real implementations here, test through the internal function.
return plugin.newUnmounterInternal(volName, podUID, &FCUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *fcPlugin) newUnmounterInternal(volName string, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Unmounter, error) {
return &fcDiskUnmounter{
fcDisk: &fcDisk{
podUID: podUID,
volName: volName,
manager: manager,
plugin: plugin,
io: &osIOHandler{},
},
mounter: mounter,
}, nil
}
func (plugin *fcPlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (volume.BlockVolumeUnmapper, error) {
return plugin.newUnmapperInternal(volName, podUID, &FCUtil{})
}
func (plugin *fcPlugin) newUnmapperInternal(volName string, podUID types.UID, manager diskManager) (volume.BlockVolumeUnmapper, error) {
return &fcDiskUnmapper{
fcDisk: &fcDisk{
podUID: podUID,
volName: volName,
manager: manager,
plugin: plugin,
io: &osIOHandler{},
},
}, nil
}
func (plugin *fcPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
fcVolume := &v1.Volume{
Name: volumeName,
VolumeSource: v1.VolumeSource{
FC: &v1.FCVolumeSource{},
},
}
return volume.NewSpecFromVolume(fcVolume), nil
}
// ConstructBlockVolumeSpec creates a new volume.Spec with the following steps.
// - Searches for a file whose name is {pod uuid} under the volume plugin directory.
// - If a file is found, then retrieves volumePluginDependentPath from globalMapPathUUID.
// - Once volumePluginDependentPath is obtained, stores the volume information in a VolumeSource.
// examples:
// mapPath: pods/{podUid}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName}
// globalMapPathUUID : plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{pod uuid}
func (plugin *fcPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, mapPath string) (*volume.Spec, error) {
pluginDir := plugin.host.GetVolumeDevicePluginDir(fcPluginName)
blkutil := util.NewBlockVolumePathHandler()
globalMapPathUUID, err := blkutil.FindGlobalMapPathUUIDFromPod(pluginDir, mapPath, podUID)
if err != nil {
return nil, err
}
glog.V(5).Infof("globalMapPathUUID: %v, err: %v", globalMapPathUUID, err)
// Retrieve volumePluginDependentPath from globalMapPathUUID
// globalMapPathUUID examples:
// wwn+lun: plugins/kubernetes.io/fc/volumeDevices/50060e801049cfd1-lun-0/{pod uuid}
// wwid: plugins/kubernetes.io/fc/volumeDevices/3600508b400105e210000900000490000/{pod uuid}
arr := strings.Split(globalMapPathUUID, "/")
if len(arr) < 2 {
return nil, fmt.Errorf("Fail to retreive volume plugin information from globalMapPathUUID: %v", globalMapPathUUID)
}
l := len(arr) - 2
volumeInfo := arr[l]
// Create volume from wwn+lun or wwid
var fcPV *v1.PersistentVolume
if strings.Contains(volumeInfo, "-lun-") {
wwnLun := strings.Split(volumeInfo, "-lun-")
lun, err := strconv.Atoi(wwnLun[1])
if err != nil {
return nil, err
}
lun32 := int32(lun)
fcPV = createPersistentVolumeFromFCVolumeSource(volumeName,
v1.FCVolumeSource{TargetWWNs: []string{wwnLun[0]}, Lun: &lun32})
glog.V(5).Infof("ConstructBlockVolumeSpec: TargetWWNs: %v, Lun: %v",
fcPV.Spec.PersistentVolumeSource.FC.TargetWWNs,
fcPV.Spec.PersistentVolumeSource.FC.Lun)
} else {
fcPV = createPersistentVolumeFromFCVolumeSource(volumeName,
v1.FCVolumeSource{WWIDs: []string{volumeInfo}})
glog.V(5).Infof("ConstructBlockVolumeSpec: WWIDs: %v", fcPV.Spec.PersistentVolumeSource.FC.WWIDs)
}
return volume.NewSpecFromPersistentVolume(fcPV, false), nil
}
type fcDisk struct {
volName string
podUID types.UID
portal string
wwns []string
lun string
wwids []string
plugin *fcPlugin
// Utility interface that provides API calls to the provider to attach/detach disks.
manager diskManager
// io handler interface
io ioHandler
volume.MetricsNil
}
func (fc *fcDisk) GetPath() string {
name := fcPluginName
// safe to use PodVolumeDir now: volume teardown occurs before pod is cleaned up
return fc.plugin.host.GetPodVolumeDir(fc.podUID, utilstrings.EscapeQualifiedNameForDisk(name), fc.volName)
}
func (fc *fcDisk) fcGlobalMapPath(spec *volume.Spec) (string, error) {
mounter, err := volumeSpecToMounter(spec, fc.plugin.host)
if err != nil {
glog.Warningf("failed to get fc mounter: %v", err)
return "", err
}
return fc.manager.MakeGlobalVDPDName(*mounter.fcDisk), nil
}
func (fc *fcDisk) fcPodDeviceMapPath() (string, string) {
name := fcPluginName
return fc.plugin.host.GetPodVolumeDeviceDir(fc.podUID, utilstrings.EscapeQualifiedNameForDisk(name)), fc.volName
}
type fcDiskMounter struct {
*fcDisk
readOnly bool
fsType string
volumeMode v1.PersistentVolumeMode
mounter *mount.SafeFormatAndMount
}
var _ volume.Mounter = &fcDiskMounter{}
func (b *fcDiskMounter) GetAttributes() volume.Attributes {
return volume.Attributes{
ReadOnly: b.readOnly,
Managed: !b.readOnly,
SupportsSELinux: true,
}
}
// Checks prior to mount operations to verify that the required components (binaries, etc.)
// to mount the volume are available on the underlying node.
// If not, it returns an error
func (b *fcDiskMounter) CanMount() error {
return nil
}
func (b *fcDiskMounter) SetUp(fsGroup *int64) error {
return b.SetUpAt(b.GetPath(), fsGroup)
}
func (b *fcDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
// diskSetUp checks mountpoints and prevents repeated calls
err := diskSetUp(b.manager, *b, dir, b.mounter, fsGroup)
if err != nil {
glog.Errorf("fc: failed to setup")
}
return err
}
type fcDiskUnmounter struct {
*fcDisk
mounter mount.Interface
}
var _ volume.Unmounter = &fcDiskUnmounter{}
// Unmounts the bind mount, and detaches the disk only if the disk
// resource was the last reference to that disk on the kubelet.
func (c *fcDiskUnmounter) TearDown() error {
return c.TearDownAt(c.GetPath())
}
func (c *fcDiskUnmounter) TearDownAt(dir string) error {
return util.UnmountPath(dir, c.mounter)
}
// Block Volumes Support
type fcDiskMapper struct {
*fcDisk
readOnly bool
mounter mount.Interface
}
var _ volume.BlockVolumeMapper = &fcDiskMapper{}
func (b *fcDiskMapper) SetUpDevice() (string, error) {
return "", nil
}
type fcDiskUnmapper struct {
*fcDisk
}
var _ volume.BlockVolumeUnmapper = &fcDiskUnmapper{}
func (c *fcDiskUnmapper) TearDownDevice(_, devicePath string) error {
// Remove scsi device from the node.
if !strings.HasPrefix(devicePath, "/dev/") {
return fmt.Errorf("fc detach disk: invalid device name: %s", devicePath)
}
arr := strings.Split(devicePath, "/")
dev := arr[len(arr)-1]
removeFromScsiSubsystem(dev, c.io)
return nil
}
// GetGlobalMapPath returns global map path and error
// path: plugins/kubernetes.io/{PluginName}/volumeDevices/{WWID}/{podUid}
func (fc *fcDisk) GetGlobalMapPath(spec *volume.Spec) (string, error) {
return fc.fcGlobalMapPath(spec)
}
// GetPodDeviceMapPath returns pod device map path and volume name
// path: pods/{podUid}/volumeDevices/kubernetes.io~fc
// volumeName: pv0001
func (fc *fcDisk) GetPodDeviceMapPath() (string, string) {
return fc.fcPodDeviceMapPath()
}
func getVolumeSource(spec *volume.Spec) (*v1.FCVolumeSource, bool, error) {
// fc volumes used directly in a pod have a ReadOnly flag set by the pod author.
// fc volumes used as a PersistentVolume get the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
if spec.Volume != nil && spec.Volume.FC != nil {
return spec.Volume.FC, spec.Volume.FC.ReadOnly, nil
} else if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.FC != nil {
return spec.PersistentVolume.Spec.FC, spec.ReadOnly, nil
}
return nil, false, fmt.Errorf("Spec does not reference a FibreChannel volume type")
}
func createPersistentVolumeFromFCVolumeSource(volumeName string, fc v1.FCVolumeSource) *v1.PersistentVolume {
block := v1.PersistentVolumeBlock
return &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: volumeName,
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
FC: &fc,
},
VolumeMode: &block,
},
}
}
func getWwnsLunWwids(fc *v1.FCVolumeSource) ([]string, string, []string, error) {
var lun string
var wwids []string
if fc.Lun != nil && len(fc.TargetWWNs) != 0 {
lun = strconv.Itoa(int(*fc.Lun))
return fc.TargetWWNs, lun, wwids, nil
}
if len(fc.WWIDs) != 0 {
for _, wwid := range fc.WWIDs {
wwids = append(wwids, strings.Replace(wwid, " ", "_", -1))
}
return fc.TargetWWNs, lun, wwids, nil
}
return nil, "", nil, fmt.Errorf("fc: no fc disk information found")
}

405
vendor/k8s.io/kubernetes/pkg/volume/fc/fc_test.go generated vendored Normal file
View File

@ -0,0 +1,405 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fc
import (
"fmt"
"os"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/fake"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
func TestCanSupport(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("fc_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/fc")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if plug.GetPluginName() != "kubernetes.io/fc" {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
t.Errorf("Expected false")
}
}
func TestGetAccessModes(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("fc_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/fc")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if !volumetest.ContainsAccessMode(plug.GetAccessModes(), v1.ReadWriteOnce) || !volumetest.ContainsAccessMode(plug.GetAccessModes(), v1.ReadOnlyMany) {
t.Errorf("Expected two AccessModeTypes: %s and %s", v1.ReadWriteOnce, v1.ReadOnlyMany)
}
}
type fakeDiskManager struct {
tmpDir string
attachCalled bool
detachCalled bool
}
func NewFakeDiskManager() *fakeDiskManager {
return &fakeDiskManager{
tmpDir: utiltesting.MkTmpdirOrDie("fc_test"),
}
}
func (fake *fakeDiskManager) Cleanup() {
os.RemoveAll(fake.tmpDir)
}
func (fake *fakeDiskManager) MakeGlobalPDName(disk fcDisk) string {
return fake.tmpDir
}
func (fake *fakeDiskManager) MakeGlobalVDPDName(disk fcDisk) string {
return fake.tmpDir
}
func (fake *fakeDiskManager) AttachDisk(b fcDiskMounter) (string, error) {
globalPath := b.manager.MakeGlobalPDName(*b.fcDisk)
err := os.MkdirAll(globalPath, 0750)
if err != nil {
return "", err
}
// Simulate the global mount so that the fakeMounter returns the
// expected number of mounts for the attached disk.
b.mounter.Mount(globalPath, globalPath, b.fsType, nil)
fake.attachCalled = true
return "", nil
}
func (fake *fakeDiskManager) DetachDisk(c fcDiskUnmounter, mntPath string) error {
globalPath := c.manager.MakeGlobalPDName(*c.fcDisk)
err := os.RemoveAll(globalPath)
if err != nil {
return err
}
fake.detachCalled = true
return nil
}
func doTestPlugin(t *testing.T, spec *volume.Spec) {
tmpDir, err := utiltesting.MkTmpdir("fc_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/fc")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
fakeManager := NewFakeDiskManager()
defer fakeManager.Cleanup()
fakeMounter := &mount.FakeMounter{}
fakeExec := mount.NewFakeExec(nil)
mounter, err := plug.(*fcPlugin).newMounterInternal(spec, types.UID("poduid"), fakeManager, fakeMounter, fakeExec)
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Errorf("Got a nil Mounter: %v", err)
}
path := mounter.GetPath()
expectedPath := fmt.Sprintf("%s/pods/poduid/volumes/kubernetes.io~fc/vol1", tmpDir)
if path != expectedPath {
t.Errorf("Unexpected path, expected %q, got: %q", expectedPath, path)
}
if err := mounter.SetUp(nil); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", path)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
fakeManager2 := NewFakeDiskManager()
defer fakeManager2.Cleanup()
unmounter, err := plug.(*fcPlugin).newUnmounterInternal("vol1", types.UID("poduid"), fakeManager2, fakeMounter)
if err != nil {
t.Errorf("Failed to make a new Unmounter: %v", err)
}
if unmounter == nil {
t.Errorf("Got a nil Unmounter: %v", err)
}
if err := unmounter.TearDown(); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(path); err == nil {
t.Errorf("TearDown() failed, volume path still exists: %s", path)
} else if !os.IsNotExist(err) {
t.Errorf("TearDown() failed: %v", err)
}
}
func doTestPluginNilMounter(t *testing.T, spec *volume.Spec) {
tmpDir, err := utiltesting.MkTmpdir("fc_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/fc")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
fakeManager := NewFakeDiskManager()
defer fakeManager.Cleanup()
fakeMounter := &mount.FakeMounter{}
fakeExec := mount.NewFakeExec(nil)
mounter, err := plug.(*fcPlugin).newMounterInternal(spec, types.UID("poduid"), fakeManager, fakeMounter, fakeExec)
if err == nil {
t.Errorf("Expected to fail making a new Mounter, but got no error")
}
if mounter != nil {
t.Errorf("Expected a nil Mounter, got: %v", mounter)
}
}
func TestPluginVolume(t *testing.T) {
lun := int32(0)
vol := &v1.Volume{
Name: "vol1",
VolumeSource: v1.VolumeSource{
FC: &v1.FCVolumeSource{
TargetWWNs: []string{"500a0981891b8dc5"},
FSType: "ext4",
Lun: &lun,
},
},
}
doTestPlugin(t, volume.NewSpecFromVolume(vol))
}
func TestPluginPersistentVolume(t *testing.T) {
lun := int32(0)
vol := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "vol1",
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
FC: &v1.FCVolumeSource{
TargetWWNs: []string{"500a0981891b8dc5"},
FSType: "ext4",
Lun: &lun,
},
},
},
}
doTestPlugin(t, volume.NewSpecFromPersistentVolume(vol, false))
}
func TestPluginVolumeWWIDs(t *testing.T) {
vol := &v1.Volume{
Name: "vol1",
VolumeSource: v1.VolumeSource{
FC: &v1.FCVolumeSource{
WWIDs: []string{"3600508b400105e210000900000490000"},
FSType: "ext4",
},
},
}
doTestPlugin(t, volume.NewSpecFromVolume(vol))
}
func TestPluginPersistentVolumeWWIDs(t *testing.T) {
vol := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "vol1",
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
FC: &v1.FCVolumeSource{
WWIDs: []string{"3600508b400105e21 000900000490000"},
FSType: "ext4",
},
},
},
}
doTestPlugin(t, volume.NewSpecFromPersistentVolume(vol, false))
}
func TestPluginVolumeNoDiskInfo(t *testing.T) {
vol := &v1.Volume{
Name: "vol1",
VolumeSource: v1.VolumeSource{
FC: &v1.FCVolumeSource{
FSType: "ext4",
},
},
}
doTestPluginNilMounter(t, volume.NewSpecFromVolume(vol))
}
func TestPluginPersistentVolumeNoDiskInfo(t *testing.T) {
vol := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "vol1",
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
FC: &v1.FCVolumeSource{
FSType: "ext4",
},
},
},
}
doTestPluginNilMounter(t, volume.NewSpecFromPersistentVolume(vol, false))
}
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("fc_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
lun := int32(0)
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "pvA",
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
FC: &v1.FCVolumeSource{
TargetWWNs: []string{"some_wwn"},
FSType: "ext4",
Lun: &lun,
},
},
ClaimRef: &v1.ObjectReference{
Name: "claimA",
},
},
}
claim := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "claimA",
Namespace: "nsA",
},
Spec: v1.PersistentVolumeClaimSpec{
VolumeName: "pvA",
},
Status: v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimBound,
},
}
client := fake.NewSimpleClientset(pv, claim)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, client, nil))
plug, _ := plugMgr.FindPluginByName(fcPluginName)
// The readOnly bool is supplied by the persistent-claim volume source when its mounter creates other volumes
spec := volume.NewSpecFromPersistentVolume(pv, true)
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
if mounter == nil {
t.Fatalf("Got a nil Mounter")
}
if !mounter.GetAttributes().ReadOnly {
t.Errorf("Expected true for mounter.IsReadOnly")
}
}
func Test_getWwnsLun(t *testing.T) {
num := int32(0)
fc := &v1.FCVolumeSource{
TargetWWNs: []string{"500a0981891b8dc5"},
FSType: "ext4",
Lun: &num,
}
wwn, lun, _, err := getWwnsLunWwids(fc)
// if no wwn and lun, exit
if (len(wwn) == 0 && lun != "0") || err != nil {
t.Errorf("no fc disk found")
}
}
func Test_getWwids(t *testing.T) {
fc := &v1.FCVolumeSource{
FSType: "ext4",
WWIDs: []string{"3600508b400105e210000900000490000"},
}
_, _, wwid, err := getWwnsLunWwids(fc)
// if no wwn and lun, exit
if len(wwid) == 0 || err != nil {
t.Errorf("no fc disk found")
}
}
func Test_getWwnsLunWwidsError(t *testing.T) {
fc := &v1.FCVolumeSource{
FSType: "ext4",
}
wwn, lun, wwid, err := getWwnsLunWwids(fc)
// expected no wwn and lun and wwid
if (len(wwn) != 0 && lun != "" && len(wwid) != 0) || err == nil {
t.Errorf("unexpected fc disk found")
}
}

277
vendor/k8s.io/kubernetes/pkg/volume/fc/fc_util.go generated vendored Normal file
View File

@ -0,0 +1,277 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fc
import (
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
"github.com/golang/glog"
"k8s.io/api/core/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/volume"
)
type ioHandler interface {
ReadDir(dirname string) ([]os.FileInfo, error)
Lstat(name string) (os.FileInfo, error)
EvalSymlinks(path string) (string, error)
WriteFile(filename string, data []byte, perm os.FileMode) error
}
type osIOHandler struct{}
func (handler *osIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) {
return ioutil.ReadDir(dirname)
}
func (handler *osIOHandler) Lstat(name string) (os.FileInfo, error) {
return os.Lstat(name)
}
func (handler *osIOHandler) EvalSymlinks(path string) (string, error) {
return filepath.EvalSymlinks(path)
}
func (handler *osIOHandler) WriteFile(filename string, data []byte, perm os.FileMode) error {
return ioutil.WriteFile(filename, data, perm)
}
// given a disk path like /dev/sdx, find the devicemapper parent
// TODO #23192 Convert this code to use the generic code in ../util
// which is used by the iSCSI implementation
func findMultipathDeviceMapper(disk string, io ioHandler) string {
sys_path := "/sys/block/"
if dirs, err := io.ReadDir(sys_path); err == nil {
for _, f := range dirs {
name := f.Name()
if strings.HasPrefix(name, "dm-") {
if _, err1 := io.Lstat(sys_path + name + "/slaves/" + disk); err1 == nil {
return "/dev/" + name
}
}
}
}
return ""
}
// given a wwn and lun, find the device and associated devicemapper parent
func findDisk(wwn, lun string, io ioHandler) (string, string) {
fc_path := "-fc-0x" + wwn + "-lun-" + lun
dev_path := "/dev/disk/by-path/"
if dirs, err := io.ReadDir(dev_path); err == nil {
for _, f := range dirs {
name := f.Name()
if strings.Contains(name, fc_path) {
if disk, err1 := io.EvalSymlinks(dev_path + name); err1 == nil {
arr := strings.Split(disk, "/")
l := len(arr) - 1
dev := arr[l]
dm := findMultipathDeviceMapper(dev, io)
return disk, dm
}
}
}
}
return "", ""
}
// given a wwid, find the device and associated devicemapper parent
func findDiskWWIDs(wwid string, io ioHandler) (string, string) {
// Example wwid format:
// 3600508b400105e210000900000490000
// <VENDOR NAME> <IDENTIFIER NUMBER>
// Example of symlink under by-id:
// /dev/disk/by-id/scsi-3600508b400105e210000900000490000
// /dev/disk/by-id/scsi-<VENDOR NAME>_<IDENTIFIER NUMBER>
// Any whitespace in the wwid is replaced by an underscore when the
// wwid is exposed under /dev/disk/by-id.
fc_path := "scsi-" + wwid
dev_id := "/dev/disk/by-id/"
if dirs, err := io.ReadDir(dev_id); err == nil {
for _, f := range dirs {
name := f.Name()
if name == fc_path {
disk, err := io.EvalSymlinks(dev_id + name)
if err != nil {
glog.V(2).Infof("fc: failed to find a corresponding disk from symlink[%s], error %v", dev_id+name, err)
return "", ""
}
arr := strings.Split(disk, "/")
l := len(arr) - 1
dev := arr[l]
dm := findMultipathDeviceMapper(dev, io)
return disk, dm
}
}
}
glog.V(2).Infof("fc: failed to find a disk [%s]", dev_id+fc_path)
return "", ""
}
// Removes a scsi device based upon /dev/sdX name
func removeFromScsiSubsystem(deviceName string, io ioHandler) {
fileName := "/sys/block/" + deviceName + "/device/delete"
glog.V(4).Infof("fc: remove device from scsi-subsystem: path: %s", fileName)
data := []byte("1")
io.WriteFile(fileName, data, 0666)
}
// rescan scsi bus
func scsiHostRescan(io ioHandler) {
scsi_path := "/sys/class/scsi_host/"
if dirs, err := io.ReadDir(scsi_path); err == nil {
for _, f := range dirs {
name := scsi_path + f.Name() + "/scan"
data := []byte("- - -")
io.WriteFile(name, data, 0666)
}
}
}
// make a directory like /var/lib/kubelet/plugins/kubernetes.io/fc/target-lun-0
func makePDNameInternal(host volume.VolumeHost, wwns []string, lun string, wwids []string) string {
if len(wwns) != 0 {
return path.Join(host.GetPluginDir(fcPluginName), wwns[0]+"-lun-"+lun)
} else {
return path.Join(host.GetPluginDir(fcPluginName), wwids[0])
}
}
// make a directory like /var/lib/kubelet/plugins/kubernetes.io/fc/volumeDevices/target-lun-0
func makeVDPDNameInternal(host volume.VolumeHost, wwns []string, lun string, wwids []string) string {
if len(wwns) != 0 {
return path.Join(host.GetVolumeDevicePluginDir(fcPluginName), wwns[0]+"-lun-"+lun)
} else {
return path.Join(host.GetVolumeDevicePluginDir(fcPluginName), wwids[0])
}
}
type FCUtil struct{}
func (util *FCUtil) MakeGlobalPDName(fc fcDisk) string {
return makePDNameInternal(fc.plugin.host, fc.wwns, fc.lun, fc.wwids)
}
// Global volume device plugin dir
func (util *FCUtil) MakeGlobalVDPDName(fc fcDisk) string {
return makeVDPDNameInternal(fc.plugin.host, fc.wwns, fc.lun, fc.wwids)
}
func searchDisk(b fcDiskMounter) (string, error) {
var diskIds []string
var disk string
var dm string
io := b.io
wwids := b.wwids
wwns := b.wwns
lun := b.lun
if len(wwns) != 0 {
diskIds = wwns
} else {
diskIds = wwids
}
rescanned := false
// two-phase search:
// first phase, search existing device path, if a multipath dm is found, exit loop
// otherwise, in second phase, rescan scsi bus and search again, return with any findings
for {
for _, diskId := range diskIds {
if len(wwns) != 0 {
disk, dm = findDisk(diskId, lun, io)
} else {
disk, dm = findDiskWWIDs(diskId, io)
}
// if multipath device is found, break
if dm != "" {
break
}
}
// if a dm is found, exit loop
if rescanned || dm != "" {
break
}
// rescan and search again
// rescan scsi bus
scsiHostRescan(io)
rescanned = true
}
// if no disk matches input wwn and lun, exit
if disk == "" && dm == "" {
return "", fmt.Errorf("no fc disk found")
}
// if multipath devicemapper device is found, use it; otherwise use raw disk
if dm != "" {
return dm, nil
}
return disk, nil
}
func (util *FCUtil) AttachDisk(b fcDiskMounter) (string, error) {
devicePath, err := searchDisk(b)
if err != nil {
return "", err
}
// TODO: remove feature gate check after no longer needed
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
// If the volumeMode is 'Block', the plugin doesn't have to format the volume.
// The globalPDPath will be created by operationexecutor. Just return devicePath here.
glog.V(5).Infof("fc: AttachDisk volumeMode: %s, devicePath: %s", b.volumeMode, devicePath)
if b.volumeMode == v1.PersistentVolumeBlock {
return devicePath, nil
}
}
// mount it
globalPDPath := util.MakeGlobalPDName(*b.fcDisk)
if err := os.MkdirAll(globalPDPath, 0750); err != nil {
return devicePath, fmt.Errorf("fc: failed to mkdir %s, error", globalPDPath)
}
noMnt, err := b.mounter.IsLikelyNotMountPoint(globalPDPath)
if err != nil {
return devicePath, fmt.Errorf("Heuristic determination of mount point failed:%v", err)
}
if !noMnt {
glog.Infof("fc: %s already mounted", globalPDPath)
return devicePath, nil
}
err = b.mounter.FormatAndMount(devicePath, globalPDPath, b.fsType, nil)
if err != nil {
return devicePath, fmt.Errorf("fc: failed to mount fc volume %s [%s] to %s, error %v", devicePath, b.fsType, globalPDPath, err)
}
return devicePath, err
}
func (util *FCUtil) DetachDisk(c fcDiskUnmounter, devName string) error {
// Remove scsi device from the node.
if !strings.HasPrefix(devName, "/dev/") {
return fmt.Errorf("fc detach disk: invalid device name: %s", devName)
}
arr := strings.Split(devName, "/")
dev := arr[len(arr)-1]
removeFromScsiSubsystem(dev, c.io)
return nil
}

114
vendor/k8s.io/kubernetes/pkg/volume/fc/fc_util_test.go generated vendored Normal file
View File

@ -0,0 +1,114 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fc
import (
"os"
"testing"
"time"
)
type fakeFileInfo struct {
name string
}
func (fi *fakeFileInfo) Name() string {
return fi.name
}
func (fi *fakeFileInfo) Size() int64 {
return 0
}
func (fi *fakeFileInfo) Mode() os.FileMode {
return 777
}
func (fi *fakeFileInfo) ModTime() time.Time {
return time.Now()
}
func (fi *fakeFileInfo) IsDir() bool {
return false
}
func (fi *fakeFileInfo) Sys() interface{} {
return nil
}
type fakeIOHandler struct{}
func (handler *fakeIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) {
switch dirname {
case "/dev/disk/by-path/":
f := &fakeFileInfo{
name: "pci-0000:41:00.0-fc-0x500a0981891b8dc5-lun-0",
}
return []os.FileInfo{f}, nil
case "/sys/block/":
f := &fakeFileInfo{
name: "dm-1",
}
return []os.FileInfo{f}, nil
case "/dev/disk/by-id/":
f := &fakeFileInfo{
name: "scsi-3600508b400105e210000900000490000",
}
return []os.FileInfo{f}, nil
}
return nil, nil
}
func (handler *fakeIOHandler) Lstat(name string) (os.FileInfo, error) {
return nil, nil
}
func (handler *fakeIOHandler) EvalSymlinks(path string) (string, error) {
return "/dev/sda", nil
}
func (handler *fakeIOHandler) WriteFile(filename string, data []byte, perm os.FileMode) error {
return nil
}
func TestSearchDisk(t *testing.T) {
fakeMounter := fcDiskMounter{
fcDisk: &fcDisk{
wwns: []string{"500a0981891b8dc5"},
lun: "0",
io: &fakeIOHandler{},
},
}
devicePath, err := searchDisk(fakeMounter)
// if no disk matches input wwn and lun, exit
if devicePath == "" || err != nil {
t.Errorf("no fc disk found")
}
}
func TestSearchDiskWWID(t *testing.T) {
fakeMounter := fcDiskMounter{
fcDisk: &fcDisk{
wwids: []string{"3600508b400105e210000900000490000"},
io: &fakeIOHandler{},
},
}
devicePath, err := searchDisk(fakeMounter)
// if no disk matches input wwid, exit
if devicePath == "" || err != nil {
t.Errorf("no fc disk found")
}
}

86
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/BUILD generated vendored Normal file
View File

@ -0,0 +1,86 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"attacher.go",
"attacher-defaults.go",
"detacher.go",
"detacher-defaults.go",
"driver-call.go",
"fake_watcher.go",
"mounter.go",
"mounter-defaults.go",
"plugin.go",
"plugin-defaults.go",
"probe.go",
"unmounter.go",
"unmounter-defaults.go",
"util.go",
"volume.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/flexvolume",
deps = [
"//pkg/util/filesystem:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/github.com/fsnotify/fsnotify:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"attacher_test.go",
"common_test.go",
"detacher_test.go",
"driver-call_test.go",
"flexvolume_test.go",
"mounter_test.go",
"plugin_test.go",
"probe_test.go",
"unmounter_test.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/flexvolume",
library = ":go_default_library",
deps = [
"//pkg/util/filesystem:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//vendor/github.com/fsnotify/fsnotify:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
"//vendor/k8s.io/utils/exec/testing:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

15
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/OWNERS generated vendored Normal file
View File

@ -0,0 +1,15 @@
approvers:
- chakri-nelluri
- saad-ali
- MikaelCluseau
reviewers:
- ivan4th
- rata
- sjenning
- saad-ali
- jsafrane
- rootfs
- jingxu97
- msau42
- chakri-nelluri
- MikaelCluseau

64
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/attacher-defaults.go generated vendored Normal file
View File

@ -0,0 +1,64 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flexvolume
import (
"time"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
)
type attacherDefaults flexVolumeAttacher
// Attach is part of the volume.Attacher interface
func (a *attacherDefaults) Attach(spec *volume.Spec, hostName types.NodeName) (string, error) {
glog.Warning(logPrefix(a.plugin.flexVolumePlugin), "using default Attach for volume ", spec.Name, ", host ", hostName)
return "", nil
}
// WaitForAttach is part of the volume.Attacher interface
func (a *attacherDefaults) WaitForAttach(spec *volume.Spec, devicePath string, timeout time.Duration) (string, error) {
glog.Warning(logPrefix(a.plugin.flexVolumePlugin), "using default WaitForAttach for volume ", spec.Name, ", device ", devicePath)
return devicePath, nil
}
// GetDeviceMountPath is part of the volume.Attacher interface
func (a *attacherDefaults) GetDeviceMountPath(spec *volume.Spec, mountsDir string) (string, error) {
return a.plugin.getDeviceMountPath(spec)
}
// MountDevice is part of the volume.Attacher interface
func (a *attacherDefaults) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string, mounter mount.Interface) error {
glog.Warning(logPrefix(a.plugin.flexVolumePlugin), "using default MountDevice for volume ", spec.Name, ", device ", devicePath, ", deviceMountPath ", deviceMountPath)
volSource, readOnly := getVolumeSource(spec)
options := make([]string, 0)
if readOnly {
options = append(options, "ro")
} else {
options = append(options, "rw")
}
diskMounter := &mount.SafeFormatAndMount{Interface: mounter, Exec: a.plugin.host.GetExec(a.plugin.GetPluginName())}
return diskMounter.FormatAndMount(devicePath, deviceMountPath, volSource.FSType, options)
}

121
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/attacher.go generated vendored Normal file
View File

@ -0,0 +1,121 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flexvolume
import (
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/volume"
)
type flexVolumeAttacher struct {
plugin *flexVolumeAttachablePlugin
}
var _ volume.Attacher = &flexVolumeAttacher{}
// Attach is part of the volume.Attacher interface
func (a *flexVolumeAttacher) Attach(spec *volume.Spec, hostName types.NodeName) (string, error) {
call := a.plugin.NewDriverCall(attachCmd)
call.AppendSpec(spec, a.plugin.host, nil)
call.Append(string(hostName))
status, err := call.Run()
if isCmdNotSupportedErr(err) {
return (*attacherDefaults)(a).Attach(spec, hostName)
} else if err != nil {
return "", err
}
return status.DevicePath, err
}
// WaitForAttach is part of the volume.Attacher interface
func (a *flexVolumeAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) {
call := a.plugin.NewDriverCallWithTimeout(waitForAttachCmd, timeout)
call.Append(devicePath)
call.AppendSpec(spec, a.plugin.host, nil)
status, err := call.Run()
if isCmdNotSupportedErr(err) {
return (*attacherDefaults)(a).WaitForAttach(spec, devicePath, timeout)
} else if err != nil {
return "", err
}
return status.DevicePath, nil
}
// GetDeviceMountPath is part of the volume.Attacher interface
func (a *flexVolumeAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) {
return a.plugin.getDeviceMountPath(spec)
}
// MountDevice is part of the volume.Attacher interface
func (a *flexVolumeAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
// Mount only once.
alreadyMounted, err := prepareForMount(a.plugin.host.GetMounter(a.plugin.GetPluginName()), deviceMountPath)
if err != nil {
return err
}
if alreadyMounted {
return nil
}
call := a.plugin.NewDriverCall(mountDeviceCmd)
call.Append(deviceMountPath)
call.Append(devicePath)
call.AppendSpec(spec, a.plugin.host, nil)
_, err = call.Run()
if isCmdNotSupportedErr(err) {
// DevicePath is empty if the plugin does not support attach calls. Ignore mountDevice calls if the
// plugin does not implement the attach interface.
if devicePath != "" {
return (*attacherDefaults)(a).MountDevice(spec, devicePath, deviceMountPath, a.plugin.host.GetMounter(a.plugin.GetPluginName()))
} else {
return nil
}
}
return err
}
func (a *flexVolumeAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
volumesAttachedCheck := make(map[*volume.Spec]bool)
for _, spec := range specs {
volumesAttachedCheck[spec] = true
call := a.plugin.NewDriverCall(isAttached)
call.AppendSpec(spec, a.plugin.host, nil)
call.Append(string(nodeName))
status, err := call.Run()
if isCmdNotSupportedErr(err) {
return nil, nil
} else if err == nil {
if !status.Attached {
volumesAttachedCheck[spec] = false
glog.V(2).Infof("VolumesAreAttached: check volume (%q) is no longer attached", spec.Name())
}
} else {
return nil, err
}
}
return volumesAttachedCheck, nil
}

76
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/attacher_test.go generated vendored Normal file
View File

@ -0,0 +1,76 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flexvolume
import (
"testing"
"time"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/volume"
)
func TestAttach(t *testing.T) {
spec := fakeVolumeSpec()
plugin, _ := testPlugin()
plugin.runner = fakeRunner(
assertDriverCall(t, notSupportedOutput(), attachCmd,
specJson(plugin, spec, nil), "localhost"),
)
a, _ := plugin.NewAttacher()
a.Attach(spec, "localhost")
}
func TestWaitForAttach(t *testing.T) {
spec := fakeVolumeSpec()
var pod *v1.Pod
plugin, _ := testPlugin()
plugin.runner = fakeRunner(
assertDriverCall(t, notSupportedOutput(), waitForAttachCmd, "/dev/sdx",
specJson(plugin, spec, nil)),
)
a, _ := plugin.NewAttacher()
a.WaitForAttach(spec, "/dev/sdx", pod, 1*time.Second)
}
func TestMountDevice(t *testing.T) {
spec := fakeVolumeSpec()
plugin, rootDir := testPlugin()
plugin.runner = fakeRunner(
assertDriverCall(t, notSupportedOutput(), mountDeviceCmd, rootDir+"/mount-dir", "/dev/sdx",
specJson(plugin, spec, nil)),
)
a, _ := plugin.NewAttacher()
a.MountDevice(spec, "/dev/sdx", rootDir+"/mount-dir")
}
func TestIsVolumeAttached(t *testing.T) {
spec := fakeVolumeSpec()
plugin, _ := testPlugin()
plugin.runner = fakeRunner(
assertDriverCall(t, notSupportedOutput(), isAttached, specJson(plugin, spec, nil), "localhost"),
)
a, _ := plugin.NewAttacher()
specs := []*volume.Spec{spec}
a.VolumesAreAttached(specs, "localhost")
}

142
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/common_test.go generated vendored Normal file
View File

@ -0,0 +1,142 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flexvolume
import (
"encoding/json"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/volume"
volumetesting "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/utils/exec"
fakeexec "k8s.io/utils/exec/testing"
)
func testPlugin() (*flexVolumeAttachablePlugin, string) {
rootDir, err := utiltesting.MkTmpdir("flexvolume_test")
if err != nil {
panic("error creating temp dir: " + err.Error())
}
return &flexVolumeAttachablePlugin{
flexVolumePlugin: &flexVolumePlugin{
driverName: "test",
execPath: "/plugin",
host: volumetesting.NewFakeVolumeHost(rootDir, nil, nil),
unsupportedCommands: []string{},
},
}, rootDir
}
func assertDriverCall(t *testing.T, output fakeexec.FakeCombinedOutputAction, expectedCommand string, expectedArgs ...string) fakeexec.FakeCommandAction {
return func(cmd string, args ...string) exec.Cmd {
if cmd != "/plugin/test" {
t.Errorf("Wrong executable called: got %v, expected %v", cmd, "/plugin/test")
}
if args[0] != expectedCommand {
t.Errorf("Wrong command called: got %v, expected %v", args[0], expectedCommand)
}
cmdArgs := args[1:]
if !sameArgs(cmdArgs, expectedArgs) {
t.Errorf("Wrong args for %s: got %v, expected %v", args[0], cmdArgs, expectedArgs)
}
return &fakeexec.FakeCmd{
Argv: args,
CombinedOutputScript: []fakeexec.FakeCombinedOutputAction{output},
}
}
}
func fakeRunner(fakeCommands ...fakeexec.FakeCommandAction) exec.Interface {
return &fakeexec.FakeExec{
CommandScript: fakeCommands,
}
}
func fakeResultOutput(result interface{}) fakeexec.FakeCombinedOutputAction {
return func() ([]byte, error) {
bytes, err := json.Marshal(result)
if err != nil {
panic("Unable to marshal result: " + err.Error())
}
return bytes, nil
}
}
func successOutput() fakeexec.FakeCombinedOutputAction {
return fakeResultOutput(&DriverStatus{StatusSuccess, "", "", "", true, nil})
}
func notSupportedOutput() fakeexec.FakeCombinedOutputAction {
return fakeResultOutput(&DriverStatus{StatusNotSupported, "", "", "", false, nil})
}
func sameArgs(args, expectedArgs []string) bool {
if len(args) != len(expectedArgs) {
return false
}
for i, v := range args {
if v != expectedArgs[i] {
return false
}
}
return true
}
func fakeVolumeSpec() *volume.Spec {
vol := &v1.Volume{
Name: "vol1",
VolumeSource: v1.VolumeSource{
FlexVolume: &v1.FlexVolumeSource{
Driver: "kubernetes.io/fakeAttacher",
ReadOnly: false,
},
},
}
return volume.NewSpecFromVolume(vol)
}
func fakePersistentVolumeSpec() *volume.Spec {
vol := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "vol1",
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
FlexVolume: &v1.FlexVolumeSource{
Driver: "kubernetes.io/fakeAttacher",
ReadOnly: false,
},
},
},
}
return volume.NewSpecFromPersistentVolume(vol, false)
}
func specJson(plugin *flexVolumeAttachablePlugin, spec *volume.Spec, extraOptions map[string]string) string {
o, err := NewOptionsForDriver(spec, plugin.host, extraOptions)
if err != nil {
panic("Failed to convert spec: " + err.Error())
}
bytes, err := json.Marshal(o)
if err != nil {
panic("Unable to marshal result: " + err.Error())
}
return string(bytes)
}

45
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/detacher-defaults.go generated vendored Normal file
View File

@ -0,0 +1,45 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flexvolume
import (
"time"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/volume/util"
)
type detacherDefaults flexVolumeDetacher
// Detach is part of the volume.Detacher interface.
func (d *detacherDefaults) Detach(volumeName string, hostName types.NodeName) error {
glog.Warning(logPrefix(d.plugin.flexVolumePlugin), "using default Detach for volume ", volumeName, ", host ", hostName)
return nil
}
// WaitForDetach is part of the volume.Detacher interface.
func (d *detacherDefaults) WaitForDetach(devicePath string, timeout time.Duration) error {
glog.Warning(logPrefix(d.plugin.flexVolumePlugin), "using default WaitForDetach for device ", devicePath)
return nil
}
// UnmountDevice is part of the volume.Detacher interface.
func (d *detacherDefaults) UnmountDevice(deviceMountPath string) error {
glog.Warning(logPrefix(d.plugin.flexVolumePlugin), "using default UnmountDevice for device mount path ", deviceMountPath)
return util.UnmountPath(deviceMountPath, d.plugin.host.GetMounter(d.plugin.GetPluginName()))
}

98
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/detacher.go generated vendored Normal file
View File

@ -0,0 +1,98 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flexvolume
import (
"fmt"
"os"
"time"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
)
type flexVolumeDetacher struct {
plugin *flexVolumeAttachablePlugin
}
var _ volume.Detacher = &flexVolumeDetacher{}
// Detach is part of the volume.Detacher interface.
func (d *flexVolumeDetacher) Detach(volumeName string, hostName types.NodeName) error {
call := d.plugin.NewDriverCall(detachCmd)
call.Append(volumeName)
call.Append(string(hostName))
_, err := call.Run()
if isCmdNotSupportedErr(err) {
return (*detacherDefaults)(d).Detach(volumeName, hostName)
}
return err
}
// WaitForDetach is part of the volume.Detacher interface.
func (d *flexVolumeDetacher) WaitForDetach(devicePath string, timeout time.Duration) error {
call := d.plugin.NewDriverCallWithTimeout(waitForDetachCmd, timeout)
call.Append(devicePath)
_, err := call.Run()
if isCmdNotSupportedErr(err) {
return (*detacherDefaults)(d).WaitForDetach(devicePath, timeout)
}
return err
}
// UnmountDevice is part of the volume.Detacher interface.
func (d *flexVolumeDetacher) UnmountDevice(deviceMountPath string) error {
if pathExists, pathErr := util.PathExists(deviceMountPath); pathErr != nil {
return fmt.Errorf("Error checking if path exists: %v", pathErr)
} else if !pathExists {
glog.Warningf("Warning: Unmount skipped because path does not exist: %v", deviceMountPath)
return nil
}
notmnt, err := isNotMounted(d.plugin.host.GetMounter(d.plugin.GetPluginName()), deviceMountPath)
if err != nil {
return err
}
if notmnt {
glog.Warningf("Warning: Path: %v already unmounted", deviceMountPath)
} else {
call := d.plugin.NewDriverCall(unmountDeviceCmd)
call.Append(deviceMountPath)
_, err := call.Run()
if isCmdNotSupportedErr(err) {
err = (*detacherDefaults)(d).UnmountDevice(deviceMountPath)
}
if err != nil {
return err
}
}
// Flexvolume driver may remove the directory. Ignore if it does.
if pathExists, pathErr := util.PathExists(deviceMountPath); pathErr != nil {
return fmt.Errorf("Error checking if path exists: %v", pathErr)
} else if !pathExists {
return nil
}
return os.Remove(deviceMountPath)
}

43
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/detacher_test.go generated vendored Normal file
View File

@ -0,0 +1,43 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flexvolume
import (
"testing"
)
func TestDetach(t *testing.T) {
plugin, _ := testPlugin()
plugin.runner = fakeRunner(
assertDriverCall(t, notSupportedOutput(), detachCmd,
"sdx", "localhost"),
)
d, _ := plugin.NewDetacher()
d.Detach("sdx", "localhost")
}
func TestUnmountDevice(t *testing.T) {
plugin, rootDir := testPlugin()
plugin.runner = fakeRunner(
assertDriverCall(t, notSupportedOutput(), unmountDeviceCmd,
rootDir+"/mount-dir"),
)
d, _ := plugin.NewDetacher()
d.UnmountDevice(rootDir + "/mount-dir")
}

249
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/driver-call.go generated vendored Normal file
View File

@ -0,0 +1,249 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flexvolume
import (
"encoding/json"
"errors"
"fmt"
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/volume"
)
const (
// Driver calls
initCmd = "init"
getVolumeNameCmd = "getvolumename"
isAttached = "isattached"
attachCmd = "attach"
waitForAttachCmd = "waitforattach"
mountDeviceCmd = "mountdevice"
detachCmd = "detach"
waitForDetachCmd = "waitfordetach"
unmountDeviceCmd = "unmountdevice"
mountCmd = "mount"
unmountCmd = "unmount"
// Option keys
optionFSType = "kubernetes.io/fsType"
optionReadWrite = "kubernetes.io/readwrite"
optionKeySecret = "kubernetes.io/secret"
optionFSGroup = "kubernetes.io/fsGroup"
optionMountsDir = "kubernetes.io/mountsDir"
optionPVorVolumeName = "kubernetes.io/pvOrVolumeName"
optionKeyPodName = "kubernetes.io/pod.name"
optionKeyPodNamespace = "kubernetes.io/pod.namespace"
optionKeyPodUID = "kubernetes.io/pod.uid"
optionKeyServiceAccountName = "kubernetes.io/serviceAccount.name"
)
const (
// StatusSuccess represents the successful completion of command.
StatusSuccess = "Success"
// StatusNotSupported represents that the command is not supported.
StatusNotSupported = "Not supported"
)
var (
TimeoutError = fmt.Errorf("Timeout")
)
// DriverCall implements the basic contract between FlexVolume and its driver.
// The caller is responsible for providing the required args.
type DriverCall struct {
Command string
Timeout time.Duration
plugin *flexVolumePlugin
args []string
}
func (plugin *flexVolumePlugin) NewDriverCall(command string) *DriverCall {
return plugin.NewDriverCallWithTimeout(command, 0)
}
func (plugin *flexVolumePlugin) NewDriverCallWithTimeout(command string, timeout time.Duration) *DriverCall {
return &DriverCall{
Command: command,
Timeout: timeout,
plugin: plugin,
args: []string{command},
}
}
func (dc *DriverCall) Append(arg string) {
dc.args = append(dc.args, arg)
}
func (dc *DriverCall) AppendSpec(spec *volume.Spec, host volume.VolumeHost, extraOptions map[string]string) error {
optionsForDriver, err := NewOptionsForDriver(spec, host, extraOptions)
if err != nil {
return err
}
jsonBytes, err := json.Marshal(optionsForDriver)
if err != nil {
return fmt.Errorf("Failed to marshal spec, error: %s", err.Error())
}
dc.Append(string(jsonBytes))
return nil
}
func (dc *DriverCall) Run() (*DriverStatus, error) {
if dc.plugin.isUnsupported(dc.Command) {
return nil, errors.New(StatusNotSupported)
}
execPath := dc.plugin.getExecutable()
cmd := dc.plugin.runner.Command(execPath, dc.args...)
timeout := false
if dc.Timeout > 0 {
timer := time.AfterFunc(dc.Timeout, func() {
timeout = true
cmd.Stop()
})
defer timer.Stop()
}
output, execErr := cmd.CombinedOutput()
if execErr != nil {
if timeout {
return nil, TimeoutError
}
_, err := handleCmdResponse(dc.Command, output)
if err == nil {
glog.Errorf("FlexVolume: driver bug: %s: exec error (%s) but no error in response.", execPath, execErr)
return nil, execErr
}
if isCmdNotSupportedErr(err) {
dc.plugin.unsupported(dc.Command)
} else {
glog.Warningf("FlexVolume: driver call failed: executable: %s, args: %s, error: %s, output: %q", execPath, dc.args, execErr.Error(), output)
}
return nil, err
}
status, err := handleCmdResponse(dc.Command, output)
if err != nil {
if isCmdNotSupportedErr(err) {
dc.plugin.unsupported(dc.Command)
}
return nil, err
}
return status, nil
}
// OptionsForDriver represents the spec given to the driver.
type OptionsForDriver map[string]string
func NewOptionsForDriver(spec *volume.Spec, host volume.VolumeHost, extraOptions map[string]string) (OptionsForDriver, error) {
volSource, readOnly := getVolumeSource(spec)
options := map[string]string{}
options[optionFSType] = volSource.FSType
if readOnly {
options[optionReadWrite] = "ro"
} else {
options[optionReadWrite] = "rw"
}
options[optionPVorVolumeName] = spec.Name()
for key, value := range extraOptions {
options[key] = value
}
for key, value := range volSource.Options {
options[key] = value
}
return OptionsForDriver(options), nil
}
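// Example (illustrative): for a read-only ext4 volume whose spec name is
// "pv0001", with no extra or driver-specific options, the resulting map
// marshals to roughly:
//
//	{
//		"kubernetes.io/fsType": "ext4",
//		"kubernetes.io/readwrite": "ro",
//		"kubernetes.io/pvOrVolumeName": "pv0001"
//	}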
// DriverStatus represents the return value of the driver callout.
type DriverStatus struct {
// Status of the callout. One of "Success", "Failure" or "Not supported".
Status string `json:"status"`
// Reason for success/failure.
Message string `json:"message,omitempty"`
	// Path of the attached device, e.g. /dev/sdx.
	// This field is valid only for attach calls.
	DevicePath string `json:"device,omitempty"`
	// Cluster-wide unique name of the volume.
	VolumeName string `json:"volumeName,omitempty"`
	// Whether the volume is attached on the node.
	Attached bool `json:"attached,omitempty"`
	// Capabilities reported by the driver.
	// By default all capabilities are assumed to be supported.
	// If the driver does not support a capability, it can return false for it.
Capabilities *DriverCapabilities `json:",omitempty"`
}
type DriverCapabilities struct {
Attach bool `json:"attach"`
SELinuxRelabel bool `json:"selinuxRelabel"`
}
func defaultCapabilities() *DriverCapabilities {
return &DriverCapabilities{
Attach: true,
SELinuxRelabel: true,
}
}
// isCmdNotSupportedErr checks whether the error corresponds to a command
// not supported by the driver.
func isCmdNotSupportedErr(err error) bool {
if err != nil && err.Error() == StatusNotSupported {
return true
}
return false
}
// handleCmdResponse processes the command output and returns the appropriate
// error code or message.
func handleCmdResponse(cmd string, output []byte) (*DriverStatus, error) {
status := DriverStatus{
Capabilities: defaultCapabilities(),
}
if err := json.Unmarshal(output, &status); err != nil {
glog.Errorf("Failed to unmarshal output for command: %s, output: %q, error: %s", cmd, string(output), err.Error())
return nil, err
} else if status.Status == StatusNotSupported {
glog.V(5).Infof("%s command is not supported by the driver", cmd)
return nil, errors.New(status.Status)
} else if status.Status != StatusSuccess {
errMsg := fmt.Sprintf("%s command failed, status: %s, reason: %s", cmd, status.Status, status.Message)
glog.Error(errMsg)
return nil, fmt.Errorf("%s", errMsg)
}
return &status, nil
}
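// Example (illustrative): a driver "init" response that disables the attach
// capability. handleCmdResponse starts from defaultCapabilities(), so only
// the fields present in the JSON are overridden:
//
//	{"status": "Success", "capabilities": {"attach": false}}
//
// yields Capabilities.Attach == false and Capabilities.SELinuxRelabel == true.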

32
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/driver-call_test.go generated vendored Normal file
View File

@ -0,0 +1,32 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flexvolume
import (
"testing"
)
func TestHandleResponseDefaults(t *testing.T) {
ds, err := handleCmdResponse("test", []byte(`{"status": "Success"}`))
if err != nil {
t.Error("error: ", err)
}
if *ds.Capabilities != *defaultCapabilities() {
t.Error("wrong default capabilities: ", *ds.Capabilities)
}
}
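// A minimal companion test (illustrative, not in the upstream file): a
// "Failure" status should surface as an error carrying the driver's status
// and message.
func TestHandleResponseFailure(t *testing.T) {
	_, err := handleCmdResponse("test", []byte(`{"status": "Failure", "message": "no such device"}`))
	if err == nil {
		t.Error("expected an error for a Failure status")
	}
}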

53
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/fake_watcher.go generated vendored Normal file
View File

@ -0,0 +1,53 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flexvolume
import (
"github.com/fsnotify/fsnotify"
utilfs "k8s.io/kubernetes/pkg/util/filesystem"
)
// Mock filesystem watcher
type fakeWatcher struct {
watches []string // List of watches added by the prober, ordered from least recent to most recent.
eventHandler utilfs.FSEventHandler
}
var _ utilfs.FSWatcher = &fakeWatcher{}
func NewFakeWatcher() *fakeWatcher {
return &fakeWatcher{
watches: nil,
}
}
func (w *fakeWatcher) Init(eventHandler utilfs.FSEventHandler, _ utilfs.FSErrorHandler) error {
w.eventHandler = eventHandler
return nil
}
func (w *fakeWatcher) Run() { /* no-op */ }
func (w *fakeWatcher) AddWatch(path string) error {
w.watches = append(w.watches, path)
return nil
}
// Triggers a mock filesystem event.
func (w *fakeWatcher) TriggerEvent(op fsnotify.Op, filename string) {
w.eventHandler(fsnotify.Event{Op: op, Name: filename})
}
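// Example (illustrative): a prober test can inject filesystem events without
// touching the real filesystem. The handler and errorHandler below are
// hypothetical callbacks, standing in for whatever a test passes to Init:
//
//	w := NewFakeWatcher()
//	_ = w.Init(handler, errorHandler)
//	w.TriggerEvent(fsnotify.Create, "/flexvolume/vendor~cifs/cifs")
//	// handler is now invoked synchronously with the Create event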

214
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/flexvolume_test.go generated vendored Normal file
View File

@ -0,0 +1,214 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flexvolume
import (
"bytes"
"fmt"
"os"
"path"
"testing"
"text/template"
"k8s.io/api/core/v1"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
const execScriptTempl1 = `#!/bin/bash
if [ "$1" == "init" -a $# -eq 1 ]; then
echo -n '{
"status": "Success"
}'
exit 0
fi
PATH=$2
if [ "$1" == "attach" -a $# -eq 2 ]; then
echo -n '{
"device": "{{.DevicePath}}",
"status": "Success"
}'
exit 0
elif [ "$1" == "detach" -a $# -eq 2 ]; then
echo -n '{
"status": "Success"
}'
exit 0
elif [ "$1" == "getvolumename" -a $# -eq 4 ]; then
echo -n '{
"status": "Success",
"volume": "fakevolume"
}'
exit 0
elif [ "$1" == "isattached" -a $# -eq 2 ]; then
echo -n '{
"status": "Success",
"attached": true
}'
exit 0
fi
echo -n '{
"status": "Not supported"
}'
exit 1
# Direct the arguments to a file to be tested against later
echo -n $@ &> {{.OutputFile}}
`
const execScriptTempl2 = `#!/bin/bash
if [ "$1" == "init" -a $# -eq 1 ]; then
echo -n '{
"status": "Success"
}'
exit 0
fi
if [ "$1" == "getvolumename" -a $# -eq 2 ]; then
echo -n '{
"status": "Success",
"volumeName": "fakevolume"
}'
exit 0
elif [ "$1" == "mount" -a $# -eq 4 ]; then
PATH=$2
/bin/mkdir -p $PATH
if [ $? -ne 0 ]; then
echo -n '{
"status": "Failure",
"reason": "Failed to create $PATH"
}'
exit 1
fi
echo -n '{
"status": "Success"
}'
exit 0
elif [ "$1" == "unmount" -a $# -eq 2 ]; then
PATH=$2
/bin/rm -r $PATH
if [ $? -ne 0 ]; then
echo -n '{
"status": "Failure",
"reason": "Failed to cleanup $PATH"
}'
exit 1
fi
echo -n '{
"status": "Success"
}'
exit 0
fi
echo -n '{
"status": "Not Supported"
}'
exit 1
# Direct the arguments to a file to be tested against later
echo -n $@ &> {{.OutputFile}}
`
func installPluginUnderTest(t *testing.T, vendorName, plugName, tmpDir string, execScriptTempl string, execTemplateData *map[string]interface{}) {
vendoredName := plugName
if vendorName != "" {
vendoredName = fmt.Sprintf("%s~%s", vendorName, plugName)
}
pluginDir := path.Join(tmpDir, vendoredName)
err := os.MkdirAll(pluginDir, 0777)
if err != nil {
t.Errorf("Failed to create plugin: %v", err)
}
pluginExec := path.Join(pluginDir, plugName)
f, err := os.Create(pluginExec)
if err != nil {
t.Errorf("Failed to install plugin")
}
err = f.Chmod(0777)
if err != nil {
t.Errorf("Failed to set exec perms on plugin")
}
if execTemplateData == nil {
execTemplateData = &map[string]interface{}{
"DevicePath": "/dev/sdx",
"OutputFile": path.Join(pluginDir, plugName+".out"),
}
}
tObj := template.Must(template.New("test").Parse(execScriptTempl))
buf := &bytes.Buffer{}
if err := tObj.Execute(buf, *execTemplateData); err != nil {
t.Errorf("Error in executing script template - %v", err)
}
execScript := buf.String()
_, err = f.WriteString(execScript)
if err != nil {
t.Errorf("Failed to write plugin exec")
}
f.Close()
}
func TestCanSupport(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("flexvolume_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
installPluginUnderTest(t, "kubernetes.io", "fakeAttacher", tmpDir, execScriptTempl1, nil)
plugMgr.InitPlugins(nil, GetDynamicPluginProber(tmpDir), volumetest.NewFakeVolumeHost("fake", nil, nil))
plugin, err := plugMgr.FindPluginByName("flexvolume-kubernetes.io/fakeAttacher")
if err != nil {
t.Fatalf("Can't find the plugin by name")
}
if plugin.GetPluginName() != "flexvolume-kubernetes.io/fakeAttacher" {
t.Errorf("Wrong name: %s", plugin.GetPluginName())
}
if !plugin.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{FlexVolume: &v1.FlexVolumeSource{Driver: "kubernetes.io/fakeAttacher"}}}}) {
t.Errorf("Expected true")
}
if !plugin.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{FlexVolume: &v1.FlexVolumeSource{Driver: "kubernetes.io/fakeAttacher"}}}}}) {
t.Errorf("Expected true")
}
if plugin.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
t.Errorf("Expected false")
}
}
func TestGetAccessModes(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("flexvolume_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
installPluginUnderTest(t, "kubernetes.io", "fakeAttacher", tmpDir, execScriptTempl1, nil)
plugMgr.InitPlugins(nil, GetDynamicPluginProber(tmpDir), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plugin, err := plugMgr.FindPersistentPluginByName("flexvolume-kubernetes.io/fakeAttacher")
if err != nil {
t.Fatalf("Can't find the plugin by name")
}
if !volumetest.ContainsAccessMode(plugin.GetAccessModes(), v1.ReadWriteOnce) || !volumetest.ContainsAccessMode(plugin.GetAccessModes(), v1.ReadOnlyMany) {
t.Errorf("Expected two AccessModeTypes: %s and %s", v1.ReadWriteOnce, v1.ReadOnlyMany)
}
}

56
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/mounter-defaults.go generated vendored Normal file
View File

@ -0,0 +1,56 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flexvolume
import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/volume"
)
type mounterDefaults flexVolumeMounter
// SetUpAt is part of the volume.Mounter interface.
// This implementation relies on the attacher's device mount path and does a bind mount to dir.
func (f *mounterDefaults) SetUpAt(dir string, fsGroup *int64) error {
glog.Warning(logPrefix(f.plugin), "using default SetUpAt to ", dir)
src, err := f.plugin.getDeviceMountPath(f.spec)
if err != nil {
return err
}
if err := doMount(f.mounter, src, dir, "auto", []string{"bind"}); err != nil {
return err
}
return nil
}
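// In shell terms, the default SetUpAt above is roughly equivalent to the
// following (illustrative):
//
//	mount -o bind <device-mount-path> <pod-volume-dir>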
// Returns the default volume attributes.
func (f *mounterDefaults) GetAttributes() volume.Attributes {
glog.V(5).Info(logPrefix(f.plugin), "using default GetAttributes")
return volume.Attributes{
ReadOnly: f.readOnly,
Managed: !f.readOnly,
SupportsSELinux: f.flexVolume.plugin.capabilities.SELinuxRelabel,
}
}
func (f *mounterDefaults) CanMount() error {
return nil
}

110
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/mounter.go generated vendored Normal file
View File

@ -0,0 +1,110 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flexvolume
import (
"os"
"strconv"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/utils/exec"
)
// flexVolumeMounter is the disk that will be exposed by this plugin.
type flexVolumeMounter struct {
*flexVolume
// Runner used to set up the volume.
runner exec.Interface
// The spec of the volume being mounted.
spec *volume.Spec
readOnly bool
volume.MetricsNil
}
var _ volume.Mounter = &flexVolumeMounter{}
// Mounter interface
// SetUp creates a new directory.
func (f *flexVolumeMounter) SetUp(fsGroup *int64) error {
return f.SetUpAt(f.GetPath(), fsGroup)
}
// SetUpAt creates a new directory.
func (f *flexVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
// Mount only once.
alreadyMounted, err := prepareForMount(f.mounter, dir)
if err != nil {
return err
}
if alreadyMounted {
return nil
}
call := f.plugin.NewDriverCall(mountCmd)
// Interface parameters
call.Append(dir)
extraOptions := make(map[string]string)
// pod metadata
extraOptions[optionKeyPodName] = f.podName
extraOptions[optionKeyPodNamespace] = f.podNamespace
extraOptions[optionKeyPodUID] = string(f.podUID)
// service account metadata
extraOptions[optionKeyServiceAccountName] = f.podServiceAccountName
// Extract secret and pass it as options.
if err := addSecretsToOptions(extraOptions, f.spec, f.podNamespace, f.driverName, f.plugin.host); err != nil {
os.Remove(dir)
return err
}
// Implicit parameters
if fsGroup != nil {
extraOptions[optionFSGroup] = strconv.FormatInt(int64(*fsGroup), 10)
}
if err := call.AppendSpec(f.spec, f.plugin.host, extraOptions); err != nil {
	os.Remove(dir)
	return err
}
_, err = call.Run()
if isCmdNotSupportedErr(err) {
err = (*mounterDefaults)(f).SetUpAt(dir, fsGroup)
}
if err != nil {
os.Remove(dir)
return err
}
if !f.readOnly {
volume.SetVolumeOwnership(f, fsGroup)
}
return nil
}
// GetAttributes gets the flex volume attributes. The attributes will be
// queried via a plugin callout once the callout syntax is finalized.
func (f *flexVolumeMounter) GetAttributes() volume.Attributes {
return (*mounterDefaults)(f).GetAttributes()
}
func (f *flexVolumeMounter) CanMount() error {
return nil
}
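// Example (illustrative): for a pod named "my-pod" in namespace "my-ns" with
// UID "my-uid", service account "my-sa", and fsGroup 42 (all hypothetical
// values), the extra options merged into the driver's mount call marshal to
// roughly:
//
//	{
//		"kubernetes.io/pod.name": "my-pod",
//		"kubernetes.io/pod.namespace": "my-ns",
//		"kubernetes.io/pod.uid": "my-uid",
//		"kubernetes.io/serviceAccount.name": "my-sa",
//		"kubernetes.io/fsGroup": "42"
//	}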

72
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/mounter_test.go generated vendored Normal file
View File

@ -0,0 +1,72 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flexvolume
import (
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
)
func TestSetUpAt(t *testing.T) {
spec := fakeVolumeSpec()
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "my-pod",
Namespace: "my-ns",
UID: types.UID("my-uid"),
},
Spec: v1.PodSpec{
ServiceAccountName: "my-sa",
},
}
mounter := &mount.FakeMounter{}
plugin, rootDir := testPlugin()
plugin.unsupportedCommands = []string{"unsupportedCmd"}
plugin.runner = fakeRunner(
// first call without fsGroup
assertDriverCall(t, successOutput(), mountCmd, rootDir+"/mount-dir",
specJson(plugin, spec, map[string]string{
optionKeyPodName: "my-pod",
optionKeyPodNamespace: "my-ns",
optionKeyPodUID: "my-uid",
optionKeyServiceAccountName: "my-sa",
})),
// second test has fsGroup
assertDriverCall(t, notSupportedOutput(), mountCmd, rootDir+"/mount-dir",
specJson(plugin, spec, map[string]string{
optionFSGroup: "42",
optionKeyPodName: "my-pod",
optionKeyPodNamespace: "my-ns",
optionKeyPodUID: "my-uid",
optionKeyServiceAccountName: "my-sa",
})),
assertDriverCall(t, fakeVolumeNameOutput("sdx"), getVolumeNameCmd,
specJson(plugin, spec, nil)),
)
m, _ := plugin.newMounterInternal(spec, pod, mounter, plugin.runner)
m.SetUpAt(rootDir+"/mount-dir", nil)
fsGroup := int64(42)
m.SetUpAt(rootDir+"/mount-dir", &fsGroup)
}

34
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/plugin-defaults.go generated vendored Normal file
View File

@ -0,0 +1,34 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flexvolume
import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/volume"
)
type pluginDefaults flexVolumePlugin
func logPrefix(plugin *flexVolumePlugin) string {
return "flexVolume driver " + plugin.driverName + ": "
}
func (plugin *pluginDefaults) GetVolumeName(spec *volume.Spec) (string, error) {
glog.Warning(logPrefix((*flexVolumePlugin)(plugin)), "using default GetVolumeName for volume ", spec.Name())
return spec.Name(), nil
}

266
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/plugin.go generated vendored Normal file
View File

@ -0,0 +1,266 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flexvolume
import (
"fmt"
"path"
"runtime"
"strings"
"sync"
"github.com/golang/glog"
api "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
utilstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/utils/exec"
)
const (
flexVolumePluginName = "kubernetes.io/flexvolume"
flexVolumePluginNamePrefix = "flexvolume-"
)
// FlexVolumePlugin object.
type flexVolumePlugin struct {
driverName string
execPath string
host volume.VolumeHost
runner exec.Interface
sync.Mutex
unsupportedCommands []string
capabilities DriverCapabilities
}
type flexVolumeAttachablePlugin struct {
*flexVolumePlugin
}
var _ volume.AttachableVolumePlugin = &flexVolumeAttachablePlugin{}
var _ volume.PersistentVolumePlugin = &flexVolumePlugin{}
type PluginFactory interface {
NewFlexVolumePlugin(pluginDir, driverName string) (volume.VolumePlugin, error)
}
type pluginFactory struct{}
func (pluginFactory) NewFlexVolumePlugin(pluginDir, name string) (volume.VolumePlugin, error) {
execPath := path.Join(pluginDir, name)
driverName := utilstrings.UnescapePluginName(name)
flexPlugin := &flexVolumePlugin{
driverName: driverName,
execPath: execPath,
runner: exec.New(),
unsupportedCommands: []string{},
}
// Initialize the plugin and probe the capabilities
call := flexPlugin.NewDriverCall(initCmd)
ds, err := call.Run()
if err != nil {
return nil, err
}
flexPlugin.capabilities = *ds.Capabilities
if flexPlugin.capabilities.Attach {
// Plugin supports attach/detach, so return flexVolumeAttachablePlugin
return &flexVolumeAttachablePlugin{flexVolumePlugin: flexPlugin}, nil
} else {
return flexPlugin, nil
}
}
// Init is part of the volume.VolumePlugin interface.
func (plugin *flexVolumePlugin) Init(host volume.VolumeHost) error {
plugin.host = host
// Hardwired 'success' as any errors from calling init() will be caught by NewFlexVolumePlugin()
return nil
}
func (plugin *flexVolumePlugin) getExecutable() string {
parts := strings.Split(plugin.driverName, "/")
execName := parts[len(parts)-1]
execPath := path.Join(plugin.execPath, execName)
if runtime.GOOS == "windows" {
execPath = volume.GetWindowsPath(execPath)
}
return execPath
}
// GetPluginName is part of the volume.VolumePlugin interface.
func (plugin *flexVolumePlugin) GetPluginName() string {
return flexVolumePluginNamePrefix + plugin.driverName
}
// GetVolumeName is part of the volume.VolumePlugin interface.
func (plugin *flexVolumePlugin) GetVolumeName(spec *volume.Spec) (string, error) {
call := plugin.NewDriverCall(getVolumeNameCmd)
if err := call.AppendSpec(spec, plugin.host, nil); err != nil {
	return "", err
}
_, err := call.Run()
if isCmdNotSupportedErr(err) {
return (*pluginDefaults)(plugin).GetVolumeName(spec)
} else if err != nil {
return "", err
}
name, err := (*pluginDefaults)(plugin).GetVolumeName(spec)
if err != nil {
return "", err
}
glog.Warning(logPrefix(plugin), "GetVolumeName is not supported yet. Defaulting to PV or volume name: ", name)
return name, nil
}
// CanSupport is part of the volume.VolumePlugin interface.
func (plugin *flexVolumePlugin) CanSupport(spec *volume.Spec) bool {
source, _ := getVolumeSource(spec)
return (source != nil) && (source.Driver == plugin.driverName)
}
// RequiresRemount is part of the volume.VolumePlugin interface.
func (plugin *flexVolumePlugin) RequiresRemount() bool {
return false
}
// GetAccessModes gets the allowed access modes for this plugin.
func (plugin *flexVolumePlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
return []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
api.ReadOnlyMany,
}
}
// NewMounter is part of the volume.VolumePlugin interface.
func (plugin *flexVolumePlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
return plugin.newMounterInternal(spec, pod, plugin.host.GetMounter(plugin.GetPluginName()), plugin.runner)
}
// newMounterInternal is the internal mounter routine to build the volume.
func (plugin *flexVolumePlugin) newMounterInternal(spec *volume.Spec, pod *api.Pod, mounter mount.Interface, runner exec.Interface) (volume.Mounter, error) {
source, readOnly := getVolumeSource(spec)
return &flexVolumeMounter{
flexVolume: &flexVolume{
driverName: source.Driver,
execPath: plugin.getExecutable(),
mounter: mounter,
plugin: plugin,
podName: pod.Name,
podUID: pod.UID,
podNamespace: pod.Namespace,
podServiceAccountName: pod.Spec.ServiceAccountName,
volName: spec.Name(),
},
runner: runner,
spec: spec,
readOnly: readOnly,
}, nil
}
// NewUnmounter is part of the volume.VolumePlugin interface.
func (plugin *flexVolumePlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
return plugin.newUnmounterInternal(volName, podUID, plugin.host.GetMounter(plugin.GetPluginName()), plugin.runner)
}
// newUnmounterInternal is the internal unmounter routine to clean the volume.
func (plugin *flexVolumePlugin) newUnmounterInternal(volName string, podUID types.UID, mounter mount.Interface, runner exec.Interface) (volume.Unmounter, error) {
return &flexVolumeUnmounter{
flexVolume: &flexVolume{
driverName: plugin.driverName,
execPath: plugin.getExecutable(),
mounter: mounter,
plugin: plugin,
podUID: podUID,
volName: volName,
},
runner: runner,
}, nil
}
// NewAttacher is part of the volume.AttachableVolumePlugin interface.
func (plugin *flexVolumeAttachablePlugin) NewAttacher() (volume.Attacher, error) {
return &flexVolumeAttacher{plugin}, nil
}
// NewDetacher is part of the volume.AttachableVolumePlugin interface.
func (plugin *flexVolumeAttachablePlugin) NewDetacher() (volume.Detacher, error) {
return &flexVolumeDetacher{plugin}, nil
}
// ConstructVolumeSpec is part of the volume.VolumePlugin interface.
func (plugin *flexVolumePlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
flexVolume := &api.Volume{
Name: volumeName,
VolumeSource: api.VolumeSource{
FlexVolume: &api.FlexVolumeSource{
Driver: plugin.driverName,
},
},
}
return volume.NewSpecFromVolume(flexVolume), nil
}
func (plugin *flexVolumePlugin) SupportsMountOption() bool {
return false
}
// Mark the given commands as unsupported.
func (plugin *flexVolumePlugin) unsupported(commands ...string) {
plugin.Lock()
defer plugin.Unlock()
plugin.unsupportedCommands = append(plugin.unsupportedCommands, commands...)
}
func (plugin *flexVolumePlugin) SupportsBulkVolumeVerification() bool {
return false
}
// Returns true iff the given command is known to be unsupported.
func (plugin *flexVolumePlugin) isUnsupported(command string) bool {
plugin.Lock()
defer plugin.Unlock()
for _, unsupportedCommand := range plugin.unsupportedCommands {
if command == unsupportedCommand {
return true
}
}
return false
}
func (plugin *flexVolumePlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
mounter := plugin.host.GetMounter(plugin.GetPluginName())
return mount.GetMountRefs(mounter, deviceMountPath)
}
func (plugin *flexVolumePlugin) getDeviceMountPath(spec *volume.Spec) (string, error) {
volumeName, err := plugin.GetVolumeName(spec)
if err != nil {
return "", fmt.Errorf("GetVolumeName failed from getDeviceMountPath: %s", err)
}
mountsDir := path.Join(plugin.host.GetPluginDir(flexVolumePluginName), plugin.driverName, "mounts")
return path.Join(mountsDir, volumeName), nil
}

56
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/plugin_test.go generated vendored Normal file
View File

@ -0,0 +1,56 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flexvolume
import (
"testing"
exec "k8s.io/utils/exec/testing"
)
func TestInit(t *testing.T) {
plugin, _ := testPlugin()
plugin.runner = fakeRunner(
assertDriverCall(t, successOutput(), "init"),
)
plugin.Init(plugin.host)
}
func fakeVolumeNameOutput(name string) exec.FakeCombinedOutputAction {
return fakeResultOutput(&DriverStatus{
Status: StatusSuccess,
VolumeName: name,
})
}
func TestGetVolumeName(t *testing.T) {
spec := fakeVolumeSpec()
plugin, _ := testPlugin()
plugin.runner = fakeRunner(
assertDriverCall(t, fakeVolumeNameOutput(spec.Name()), getVolumeNameCmd,
specJson(plugin, spec, nil)),
)
name, err := plugin.GetVolumeName(spec)
if err != nil {
t.Errorf("GetVolumeName() failed: %v", err)
}
expectedName := spec.Name()
if name != expectedName {
t.Errorf("GetVolumeName() returned %v instead of %v", name, expectedName)
}
}

234
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/probe.go generated vendored Normal file
View File

@ -0,0 +1,234 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flexvolume
import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/volume"
"os"
"fmt"
"path/filepath"
"sync"
"time"
"github.com/fsnotify/fsnotify"
"k8s.io/apimachinery/pkg/util/errors"
utilfs "k8s.io/kubernetes/pkg/util/filesystem"
)
type flexVolumeProber struct {
mutex sync.Mutex
pluginDir string // Flexvolume driver directory
watcher utilfs.FSWatcher
probeNeeded bool // Must only read and write this through testAndSetProbeNeeded.
lastUpdated time.Time // Last time probeNeeded was updated.
watchEventCount int
factory PluginFactory
fs utilfs.Filesystem
}
const (
// TODO (cxing) Tune these params based on test results.
// watchEventLimit is the max allowable number of processed watches within watchEventInterval.
watchEventInterval = 5 * time.Second
watchEventLimit = 20
)
func GetDynamicPluginProber(pluginDir string) volume.DynamicPluginProber {
return &flexVolumeProber{
pluginDir: pluginDir,
watcher: utilfs.NewFsnotifyWatcher(),
factory: pluginFactory{},
fs: &utilfs.DefaultFs{},
}
}
func (prober *flexVolumeProber) Init() error {
prober.testAndSetProbeNeeded(true)
prober.lastUpdated = time.Now()
if err := prober.createPluginDir(); err != nil {
return err
}
if err := prober.initWatcher(); err != nil {
return err
}
return nil
}
// Probes for Flexvolume drivers.
// If a filesystem update has occurred since the last probe, updated = true
// and the list of probed plugins is returned.
// Otherwise, updated = false and plugins = nil.
//
// If an error occurs, updated and plugins are set arbitrarily.
func (prober *flexVolumeProber) Probe() (updated bool, plugins []volume.VolumePlugin, err error) {
probeNeeded := prober.testAndSetProbeNeeded(false)
if !probeNeeded {
return false, nil, nil
}
files, err := prober.fs.ReadDir(prober.pluginDir)
if err != nil {
return false, nil, fmt.Errorf("Error reading the Flexvolume directory: %s", err)
}
plugins = []volume.VolumePlugin{}
allErrs := []error{}
for _, f := range files {
// Only directories whose names do not begin with '.' are counted as plugins,
// and pluginDir/dirname/dirname is expected to be the driver executable,
// unless dirname contains '~' as a vendor-namespace escape:
// e.g. for dirname = vendor~cifs, the executable is pluginDir/dirname/cifs.
if f.IsDir() && filepath.Base(f.Name())[0] != '.' {
plugin, pluginErr := prober.factory.NewFlexVolumePlugin(prober.pluginDir, f.Name())
if pluginErr != nil {
pluginErr = fmt.Errorf(
"Error creating Flexvolume plugin from directory %s, skipping. Error: %s",
f.Name(), pluginErr)
allErrs = append(allErrs, pluginErr)
continue
}
plugins = append(plugins, plugin)
}
}
return true, plugins, errors.NewAggregate(allErrs)
}
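// Example (illustrative): given a plugin directory laid out as
//
//	flexvolume/
//	├── foo.example.com~cifs/cifs   <- probed, plugin name "flexvolume-foo.example.com/cifs"
//	└── .hidden/                    <- skipped: name begins with '.'
//
// Probe returns updated = true and a single VolumePlugin.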
func (prober *flexVolumeProber) handleWatchEvent(event fsnotify.Event) error {
// event.Name is the watched path.
if filepath.Base(event.Name)[0] == '.' {
// Ignore files beginning with '.'
return nil
}
eventPathAbs, err := filepath.Abs(event.Name)
if err != nil {
return err
}
pluginDirAbs, err := filepath.Abs(prober.pluginDir)
if err != nil {
return err
}
// If the Flexvolume plugin directory is removed, need to recreate it
// in order to keep it under watch.
if eventOpIs(event, fsnotify.Remove) && eventPathAbs == pluginDirAbs {
if err := prober.createPluginDir(); err != nil {
return err
}
if err := prober.addWatchRecursive(pluginDirAbs); err != nil {
return err
}
} else if eventOpIs(event, fsnotify.Create) {
if err := prober.addWatchRecursive(eventPathAbs); err != nil {
return err
}
}
prober.updateProbeNeeded()
return nil
}
func (prober *flexVolumeProber) updateProbeNeeded() {
// Within each watchEventInterval window, at most watchEventLimit watch
// events are processed; further events in that window are not registered.
// This prevents increased disk usage from Probe() being triggered too
// frequently (either accidentally or maliciously).
if time.Since(prober.lastUpdated) > watchEventInterval {
// Update, then reset the timer and watch count.
prober.testAndSetProbeNeeded(true)
prober.lastUpdated = time.Now()
prober.watchEventCount = 1
} else if prober.watchEventCount < watchEventLimit {
prober.testAndSetProbeNeeded(true)
prober.watchEventCount++
}
}
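// Worked example (illustrative): a burst of 100 events arriving inside a
// single 5-second window sets probeNeeded on at most the first 20 of them;
// the remaining 80 are dropped until watchEventInterval has elapsed and the
// counter resets.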
// Recursively adds watches for all directories inside and including the file specified by the given filename.
// If the file is a symlink to a directory, it will watch the symlink but not any of the subdirectories.
//
// Each file or directory change triggers two events: one from the watch on itself, another from the watch
// on its parent directory.
func (prober *flexVolumeProber) addWatchRecursive(filename string) error {
addWatch := func(path string, info os.FileInfo, err error) error {
	if err != nil {
		return err
	}
	if info.IsDir() {
if err := prober.watcher.AddWatch(path); err != nil {
glog.Errorf("Error recursively adding watch: %v", err)
}
}
return nil
}
return prober.fs.Walk(filename, addWatch)
}
// Creates a new filesystem watcher and adds watches for the plugin directory
// and all of its subdirectories.
func (prober *flexVolumeProber) initWatcher() error {
err := prober.watcher.Init(func(event fsnotify.Event) {
if err := prober.handleWatchEvent(event); err != nil {
glog.Errorf("Flexvolume prober watch: %s", err)
}
}, func(err error) {
glog.Errorf("Received an error from watcher: %s", err)
})
if err != nil {
return fmt.Errorf("Error initializing watcher: %s", err)
}
if err := prober.addWatchRecursive(prober.pluginDir); err != nil {
return fmt.Errorf("Error adding watch on Flexvolume directory: %s", err)
}
prober.watcher.Run()
return nil
}
// Creates the plugin directory, if it doesn't already exist.
func (prober *flexVolumeProber) createPluginDir() error {
if _, err := prober.fs.Stat(prober.pluginDir); os.IsNotExist(err) {
glog.Warningf("Flexvolume plugin directory at %s does not exist. Recreating.", prober.pluginDir)
err := prober.fs.MkdirAll(prober.pluginDir, 0755)
if err != nil {
return fmt.Errorf("Error (re-)creating driver directory: %s", err)
}
}
return nil
}
func (prober *flexVolumeProber) testAndSetProbeNeeded(newval bool) (oldval bool) {
prober.mutex.Lock()
defer prober.mutex.Unlock()
oldval, prober.probeNeeded = prober.probeNeeded, newval
return
}
func eventOpIs(event fsnotify.Event, op fsnotify.Op) bool {
return event.Op&op == op
}

Some files were not shown because too many files have changed in this diff