Mirror of https://github.com/ceph/ceph-csi.git
vendor cleanup: remove unused, non-go and test files
152  vendor/k8s.io/kubernetes/pkg/volume/BUILD (generated, vendored)
@@ -1,152 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "doc.go",
        "metrics_cached.go",
        "metrics_du.go",
        "metrics_errors.go",
        "metrics_nil.go",
        "metrics_statfs.go",
        "plugins.go",
        "volume.go",
    ] + select({
        "@io_bazel_rules_go//go/platform:android": [
            "volume_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:darwin": [
            "volume_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:dragonfly": [
            "volume_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:freebsd": [
            "volume_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:linux": [
            "volume_linux.go",
        ],
        "@io_bazel_rules_go//go/platform:nacl": [
            "volume_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:netbsd": [
            "volume_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:openbsd": [
            "volume_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:plan9": [
            "volume_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:solaris": [
            "volume_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:windows": [
            "volume_unsupported.go",
        ],
        "//conditions:default": [],
    }),
    importpath = "k8s.io/kubernetes/pkg/volume",
    visibility = ["//visibility:public"],
    deps = [
        "//pkg/cloudprovider:go_default_library",
        "//pkg/util/io:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/volume/util/fs:go_default_library",
        "//pkg/volume/util/recyclerclient:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/authentication/v1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/tools/record:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "metrics_nil_test.go",
        "plugins_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
    ],
)

go_test(
    name = "go_default_xtest",
    srcs = [
        "metrics_statfs_test.go",
    ] + select({
        "@io_bazel_rules_go//go/platform:linux": [
            "metrics_du_test.go",
        ],
        "//conditions:default": [],
    }),
    deps = [
        ":go_default_library",
        "//pkg/volume/testing:go_default_library",
        "//vendor/k8s.io/client-go/util/testing:go_default_library",
    ] + select({
        "@io_bazel_rules_go//go/platform:linux": [
            "//vendor/golang.org/x/sys/unix:go_default_library",
        ],
        "//conditions:default": [],
    }),
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//pkg/volume/aws_ebs:all-srcs",
        "//pkg/volume/azure_dd:all-srcs",
        "//pkg/volume/azure_file:all-srcs",
        "//pkg/volume/cephfs:all-srcs",
        "//pkg/volume/cinder:all-srcs",
        "//pkg/volume/configmap:all-srcs",
        "//pkg/volume/csi:all-srcs",
        "//pkg/volume/downwardapi:all-srcs",
        "//pkg/volume/empty_dir:all-srcs",
        "//pkg/volume/fc:all-srcs",
        "//pkg/volume/flexvolume:all-srcs",
        "//pkg/volume/flocker:all-srcs",
        "//pkg/volume/gce_pd:all-srcs",
        "//pkg/volume/git_repo:all-srcs",
        "//pkg/volume/glusterfs:all-srcs",
        "//pkg/volume/host_path:all-srcs",
        "//pkg/volume/iscsi:all-srcs",
        "//pkg/volume/local:all-srcs",
        "//pkg/volume/nfs:all-srcs",
        "//pkg/volume/photon_pd:all-srcs",
        "//pkg/volume/portworx:all-srcs",
        "//pkg/volume/projected:all-srcs",
        "//pkg/volume/quobyte:all-srcs",
        "//pkg/volume/rbd:all-srcs",
        "//pkg/volume/scaleio:all-srcs",
        "//pkg/volume/secret:all-srcs",
        "//pkg/volume/storageos:all-srcs",
        "//pkg/volume/testing:all-srcs",
        "//pkg/volume/util:all-srcs",
        "//pkg/volume/validation:all-srcs",
        "//pkg/volume/vsphere_volume:all-srcs",
    ],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
17  vendor/k8s.io/kubernetes/pkg/volume/OWNERS (generated, vendored)
@@ -1,17 +0,0 @@
approvers:
- saad-ali
- thockin
- jsafrane
- matchstick
- rootfs
- gnufied
- childsb
reviewers:
- saad-ali
- jsafrane
- rootfs
- jingxu97
- msau42
- gnufied
- verult
- davidz627
71  vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/BUILD (generated, vendored)
@@ -1,71 +0,0 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = [
        "attacher.go",
        "aws_ebs.go",
        "aws_ebs_block.go",
        "aws_util.go",
        "doc.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/volume/aws_ebs",
    deps = [
        "//pkg/cloudprovider:go_default_library",
        "//pkg/cloudprovider/providers/aws:go_default_library",
        "//pkg/features:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/util/strings:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//pkg/volume/util/volumepathhandler:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "attacher_test.go",
        "aws_ebs_block_test.go",
        "aws_ebs_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//pkg/cloudprovider/providers/aws:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/testing:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
        "//vendor/k8s.io/client-go/util/testing:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
13  vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/OWNERS (generated, vendored)
@@ -1,13 +0,0 @@
approvers:
- gnufied
- jingxu97
- jsafrane
- justinsb
reviewers:
- chrislovecnm
- gnufied
- jingxu97
- justinsb
- jsafrane
- rootfs
- saad-ali
277  vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/attacher.go (generated, vendored)
@@ -1,277 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package aws_ebs

import (
	"fmt"
	"os"
	"path"
	"strconv"
	"time"

	"github.com/golang/glog"
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
	"k8s.io/kubernetes/pkg/util/mount"
	"k8s.io/kubernetes/pkg/volume"
	volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

type awsElasticBlockStoreAttacher struct {
	host       volume.VolumeHost
	awsVolumes aws.Volumes
}

var _ volume.Attacher = &awsElasticBlockStoreAttacher{}

var _ volume.AttachableVolumePlugin = &awsElasticBlockStorePlugin{}

func (plugin *awsElasticBlockStorePlugin) NewAttacher() (volume.Attacher, error) {
	awsCloud, err := getCloudProvider(plugin.host.GetCloudProvider())
	if err != nil {
		return nil, err
	}

	return &awsElasticBlockStoreAttacher{
		host:       plugin.host,
		awsVolumes: awsCloud,
	}, nil
}

func (plugin *awsElasticBlockStorePlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
	mounter := plugin.host.GetMounter(plugin.GetPluginName())
	return mount.GetMountRefs(mounter, deviceMountPath)
}

func (attacher *awsElasticBlockStoreAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
	volumeSource, _, err := getVolumeSource(spec)
	if err != nil {
		return "", err
	}

	volumeID := aws.KubernetesVolumeID(volumeSource.VolumeID)

	// awsCloud.AttachDisk checks if disk is already attached to node and
	// succeeds in that case, so no need to do that separately.
	devicePath, err := attacher.awsVolumes.AttachDisk(volumeID, nodeName)
	if err != nil {
		glog.Errorf("Error attaching volume %q to node %q: %+v", volumeID, nodeName, err)
		return "", err
	}

	return devicePath, nil
}

func (attacher *awsElasticBlockStoreAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {

	glog.Warningf("Attacher.VolumesAreAttached called for node %q - Please use BulkVerifyVolumes for AWS", nodeName)
	volumeNodeMap := map[types.NodeName][]*volume.Spec{
		nodeName: specs,
	}
	nodeVolumesResult := make(map[*volume.Spec]bool)
	nodesVerificationMap, err := attacher.BulkVerifyVolumes(volumeNodeMap)
	if err != nil {
		glog.Errorf("Attacher.VolumesAreAttached - error checking volumes for node %q with %v", nodeName, err)
		return nodeVolumesResult, err
	}

	if result, ok := nodesVerificationMap[nodeName]; ok {
		return result, nil
	}
	return nodeVolumesResult, nil
}

func (attacher *awsElasticBlockStoreAttacher) BulkVerifyVolumes(volumesByNode map[types.NodeName][]*volume.Spec) (map[types.NodeName]map[*volume.Spec]bool, error) {
	volumesAttachedCheck := make(map[types.NodeName]map[*volume.Spec]bool)
	diskNamesByNode := make(map[types.NodeName][]aws.KubernetesVolumeID)
	volumeSpecMap := make(map[aws.KubernetesVolumeID]*volume.Spec)

	for nodeName, volumeSpecs := range volumesByNode {
		for _, volumeSpec := range volumeSpecs {
			volumeSource, _, err := getVolumeSource(volumeSpec)

			if err != nil {
				glog.Errorf("Error getting volume (%q) source : %v", volumeSpec.Name(), err)
				continue
			}

			name := aws.KubernetesVolumeID(volumeSource.VolumeID)
			diskNamesByNode[nodeName] = append(diskNamesByNode[nodeName], name)

			nodeDisk, nodeDiskExists := volumesAttachedCheck[nodeName]

			if !nodeDiskExists {
				nodeDisk = make(map[*volume.Spec]bool)
			}
			nodeDisk[volumeSpec] = true
			volumeSpecMap[name] = volumeSpec
			volumesAttachedCheck[nodeName] = nodeDisk
		}
	}
	attachedResult, err := attacher.awsVolumes.DisksAreAttached(diskNamesByNode)

	if err != nil {
		glog.Errorf("Error checking if volumes are attached to nodes err = %v", err)
		return volumesAttachedCheck, err
	}

	for nodeName, nodeDisks := range attachedResult {
		for diskName, attached := range nodeDisks {
			if !attached {
				spec := volumeSpecMap[diskName]
				setNodeDisk(volumesAttachedCheck, spec, nodeName, false)
			}
		}
	}

	return volumesAttachedCheck, nil
}

func (attacher *awsElasticBlockStoreAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) {
	volumeSource, _, err := getVolumeSource(spec)
	if err != nil {
		return "", err
	}

	volumeID := volumeSource.VolumeID
	partition := ""
	if volumeSource.Partition != 0 {
		partition = strconv.Itoa(int(volumeSource.Partition))
	}

	if devicePath == "" {
		return "", fmt.Errorf("WaitForAttach failed for AWS Volume %q: devicePath is empty.", volumeID)
	}

	ticker := time.NewTicker(checkSleepDuration)
	defer ticker.Stop()
	timer := time.NewTimer(timeout)
	defer timer.Stop()

	for {
		select {
		case <-ticker.C:
			glog.V(5).Infof("Checking AWS Volume %q is attached.", volumeID)
			devicePaths := getDiskByIdPaths(aws.KubernetesVolumeID(volumeSource.VolumeID), partition, devicePath)
			path, err := verifyDevicePath(devicePaths)
			if err != nil {
				// Log error, if any, and continue checking periodically. See issue #11321
				glog.Errorf("Error verifying AWS Volume (%q) is attached: %v", volumeID, err)
			} else if path != "" {
				// A device path has successfully been created for the PD
				glog.Infof("Successfully found attached AWS Volume %q.", volumeID)
				return path, nil
			}
		case <-timer.C:
			return "", fmt.Errorf("Could not find attached AWS Volume %q. Timeout waiting for mount paths to be created.", volumeID)
		}
	}
}

func (attacher *awsElasticBlockStoreAttacher) GetDeviceMountPath(
	spec *volume.Spec) (string, error) {
	volumeSource, _, err := getVolumeSource(spec)
	if err != nil {
		return "", err
	}

	return makeGlobalPDPath(attacher.host, aws.KubernetesVolumeID(volumeSource.VolumeID)), nil
}

// FIXME: this method can be further pruned.
func (attacher *awsElasticBlockStoreAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
	mounter := attacher.host.GetMounter(awsElasticBlockStorePluginName)
	notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
	if err != nil {
		if os.IsNotExist(err) {
			if err := os.MkdirAll(deviceMountPath, 0750); err != nil {
				return err
			}
			notMnt = true
		} else {
			return err
		}
	}

	volumeSource, readOnly, err := getVolumeSource(spec)
	if err != nil {
		return err
	}

	options := []string{}
	if readOnly {
		options = append(options, "ro")
	}
	if notMnt {
		diskMounter := volumeutil.NewSafeFormatAndMountFromHost(awsElasticBlockStorePluginName, attacher.host)
		mountOptions := volumeutil.MountOptionFromSpec(spec, options...)
		err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions)
		if err != nil {
			os.Remove(deviceMountPath)
			return err
		}
	}
	return nil
}

type awsElasticBlockStoreDetacher struct {
	mounter    mount.Interface
	awsVolumes aws.Volumes
}

var _ volume.Detacher = &awsElasticBlockStoreDetacher{}

func (plugin *awsElasticBlockStorePlugin) NewDetacher() (volume.Detacher, error) {
	awsCloud, err := getCloudProvider(plugin.host.GetCloudProvider())
	if err != nil {
		return nil, err
	}

	return &awsElasticBlockStoreDetacher{
		mounter:    plugin.host.GetMounter(plugin.GetPluginName()),
		awsVolumes: awsCloud,
	}, nil
}

func (detacher *awsElasticBlockStoreDetacher) Detach(volumeName string, nodeName types.NodeName) error {
	volumeID := aws.KubernetesVolumeID(path.Base(volumeName))

	if _, err := detacher.awsVolumes.DetachDisk(volumeID, nodeName); err != nil {
		glog.Errorf("Error detaching volumeID %q: %v", volumeID, err)
		return err
	}
	return nil
}

func (detacher *awsElasticBlockStoreDetacher) UnmountDevice(deviceMountPath string) error {
	return volumeutil.UnmountPath(deviceMountPath, detacher.mounter)
}

func setNodeDisk(
	nodeDiskMap map[types.NodeName]map[*volume.Spec]bool,
	volumeSpec *volume.Spec,
	nodeName types.NodeName,
	check bool) {

	volumeMap := nodeDiskMap[nodeName]
	if volumeMap == nil {
		volumeMap = make(map[*volume.Spec]bool)
		nodeDiskMap[nodeName] = volumeMap
	}
	volumeMap[volumeSpec] = check
}
295  vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/attacher_test.go (generated, vendored)
@@ -1,295 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package aws_ebs

import (
	"errors"
	"testing"

	"k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
	"k8s.io/kubernetes/pkg/volume"
	volumetest "k8s.io/kubernetes/pkg/volume/testing"

	"github.com/golang/glog"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/types"
)

func TestGetVolumeName_Volume(t *testing.T) {
	plugin := newPlugin()
	name := aws.KubernetesVolumeID("my-aws-volume")
	spec := createVolSpec(name, false)

	volumeName, err := plugin.GetVolumeName(spec)
	if err != nil {
		t.Errorf("GetVolumeName error: %v", err)
	}
	if volumeName != string(name) {
		t.Errorf("GetVolumeName error: expected %s, got %s", name, volumeName)
	}
}

func TestGetVolumeName_PersistentVolume(t *testing.T) {
	plugin := newPlugin()
	name := aws.KubernetesVolumeID("my-aws-pv")
	spec := createPVSpec(name, true)

	volumeName, err := plugin.GetVolumeName(spec)
	if err != nil {
		t.Errorf("GetVolumeName error: %v", err)
	}
	if volumeName != string(name) {
		t.Errorf("GetVolumeName error: expected %s, got %s", name, volumeName)
	}
}

// One testcase for TestAttachDetach table test below
type testcase struct {
	name aws.KubernetesVolumeID
	// For fake AWS:
	attach attachCall
	detach detachCall
	t      *testing.T

	// Actual test to run
	test func(test *testcase) (string, error)
	// Expected return of the test
	expectedDevice string
	expectedError  error
}

func TestAttachDetach(t *testing.T) {
	diskName := aws.KubernetesVolumeID("disk")
	nodeName := types.NodeName("instance")
	spec := createVolSpec(diskName, false)
	attachError := errors.New("Fake attach error")
	detachError := errors.New("Fake detach error")
	tests := []testcase{
		// Successful Attach call
		{
			name:   "Attach_Positive",
			attach: attachCall{diskName, nodeName, "/dev/sda", nil},
			test: func(testcase *testcase) (string, error) {
				attacher := newAttacher(testcase)
				return attacher.Attach(spec, nodeName)
			},
			expectedDevice: "/dev/sda",
		},

		// Attach call fails
		{
			name:   "Attach_Negative",
			attach: attachCall{diskName, nodeName, "", attachError},
			test: func(testcase *testcase) (string, error) {
				attacher := newAttacher(testcase)
				return attacher.Attach(spec, nodeName)
			},
			expectedError: attachError,
		},

		// Detach succeeds
		{
			name:   "Detach_Positive",
			detach: detachCall{diskName, nodeName, "/dev/sda", nil},
			test: func(testcase *testcase) (string, error) {
				detacher := newDetacher(testcase)
				mountPath := "/mnt/" + string(diskName)
				return "", detacher.Detach(mountPath, nodeName)
			},
		},
		// Detach fails
		{
			name:   "Detach_Negative",
			detach: detachCall{diskName, nodeName, "", detachError},
			test: func(testcase *testcase) (string, error) {
				detacher := newDetacher(testcase)
				mountPath := "/mnt/" + string(diskName)
				return "", detacher.Detach(mountPath, nodeName)
			},
			expectedError: detachError,
		},
	}

	for _, testcase := range tests {
		testcase.t = t
		device, err := testcase.test(&testcase)
		if err != testcase.expectedError {
			t.Errorf("%s failed: expected err=%q, got %q", testcase.name, testcase.expectedError.Error(), err.Error())
		}
		if device != testcase.expectedDevice {
			t.Errorf("%s failed: expected device=%q, got %q", testcase.name, testcase.expectedDevice, device)
		}
		t.Logf("Test %q succeeded", testcase.name)
	}
}

// newPlugin creates a new gcePersistentDiskPlugin with fake cloud, NewAttacher
// and NewDetacher won't work.
func newPlugin() *awsElasticBlockStorePlugin {
	host := volumetest.NewFakeVolumeHost("/tmp", nil, nil)
	plugins := ProbeVolumePlugins()
	plugin := plugins[0]
	plugin.Init(host)
	return plugin.(*awsElasticBlockStorePlugin)
}

func newAttacher(testcase *testcase) *awsElasticBlockStoreAttacher {
	return &awsElasticBlockStoreAttacher{
		host:       nil,
		awsVolumes: testcase,
	}
}

func newDetacher(testcase *testcase) *awsElasticBlockStoreDetacher {
	return &awsElasticBlockStoreDetacher{
		awsVolumes: testcase,
	}
}

func createVolSpec(name aws.KubernetesVolumeID, readOnly bool) *volume.Spec {
	return &volume.Spec{
		Volume: &v1.Volume{
			VolumeSource: v1.VolumeSource{
				AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
					VolumeID: string(name),
					ReadOnly: readOnly,
				},
			},
		},
	}
}

func createPVSpec(name aws.KubernetesVolumeID, readOnly bool) *volume.Spec {
	return &volume.Spec{
		PersistentVolume: &v1.PersistentVolume{
			Spec: v1.PersistentVolumeSpec{
				PersistentVolumeSource: v1.PersistentVolumeSource{
					AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
						VolumeID: string(name),
						ReadOnly: readOnly,
					},
				},
			},
		},
	}
}

// Fake AWS implementation

type attachCall struct {
	diskName      aws.KubernetesVolumeID
	nodeName      types.NodeName
	retDeviceName string
	ret           error
}

type detachCall struct {
	diskName      aws.KubernetesVolumeID
	nodeName      types.NodeName
	retDeviceName string
	ret           error
}

type diskIsAttachedCall struct {
	diskName   aws.KubernetesVolumeID
	nodeName   types.NodeName
	isAttached bool
	ret        error
}

func (testcase *testcase) AttachDisk(diskName aws.KubernetesVolumeID, nodeName types.NodeName) (string, error) {
	expected := &testcase.attach

	if expected.diskName == "" && expected.nodeName == "" {
		// testcase.attach looks uninitialized, test did not expect to call
		// AttachDisk
		testcase.t.Errorf("Unexpected AttachDisk call!")
		return "", errors.New("Unexpected AttachDisk call!")
	}

	if expected.diskName != diskName {
		testcase.t.Errorf("Unexpected AttachDisk call: expected diskName %s, got %s", expected.diskName, diskName)
		return "", errors.New("Unexpected AttachDisk call: wrong diskName")
	}

	if expected.nodeName != nodeName {
		testcase.t.Errorf("Unexpected AttachDisk call: expected nodeName %s, got %s", expected.nodeName, nodeName)
		return "", errors.New("Unexpected AttachDisk call: wrong nodeName")
	}

	glog.V(4).Infof("AttachDisk call: %s, %s, returning %q, %v", diskName, nodeName, expected.retDeviceName, expected.ret)

	return expected.retDeviceName, expected.ret
}

func (testcase *testcase) DetachDisk(diskName aws.KubernetesVolumeID, nodeName types.NodeName) (string, error) {
	expected := &testcase.detach

	if expected.diskName == "" && expected.nodeName == "" {
		// testcase.detach looks uninitialized, test did not expect to call
		// DetachDisk
		testcase.t.Errorf("Unexpected DetachDisk call!")
		return "", errors.New("Unexpected DetachDisk call!")
	}

	if expected.diskName != diskName {
		testcase.t.Errorf("Unexpected DetachDisk call: expected diskName %s, got %s", expected.diskName, diskName)
		return "", errors.New("Unexpected DetachDisk call: wrong diskName")
	}

	if expected.nodeName != nodeName {
		testcase.t.Errorf("Unexpected DetachDisk call: expected nodeName %s, got %s", expected.nodeName, nodeName)
		return "", errors.New("Unexpected DetachDisk call: wrong nodeName")
	}

	glog.V(4).Infof("DetachDisk call: %s, %s, returning %q, %v", diskName, nodeName, expected.retDeviceName, expected.ret)

	return expected.retDeviceName, expected.ret
}

func (testcase *testcase) DiskIsAttached(diskName aws.KubernetesVolumeID, nodeName types.NodeName) (bool, error) {
	// DetachDisk no longer relies on DiskIsAttached api call
	return false, nil
}

func (testcase *testcase) DisksAreAttached(nodeDisks map[types.NodeName][]aws.KubernetesVolumeID) (map[types.NodeName]map[aws.KubernetesVolumeID]bool, error) {
	return nil, errors.New("Not implemented")
}

func (testcase *testcase) CreateDisk(volumeOptions *aws.VolumeOptions) (volumeName aws.KubernetesVolumeID, err error) {
	return "", errors.New("Not implemented")
}

func (testcase *testcase) DeleteDisk(volumeName aws.KubernetesVolumeID) (bool, error) {
	return false, errors.New("Not implemented")
}

func (testcase *testcase) GetVolumeLabels(volumeName aws.KubernetesVolumeID) (map[string]string, error) {
	return map[string]string{}, errors.New("Not implemented")
}

func (testcase *testcase) GetDiskPath(volumeName aws.KubernetesVolumeID) (string, error) {
	return "", errors.New("Not implemented")
}

func (testcase *testcase) ResizeDisk(
	volumeName aws.KubernetesVolumeID,
	oldSize resource.Quantity,
	newSize resource.Quantity) (resource.Quantity, error) {
	return oldSize, errors.New("Not implemented")
}
561  vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_ebs.go (generated, vendored)
@@ -1,561 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package aws_ebs

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"regexp"
	"strconv"
	"strings"

	"github.com/golang/glog"
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
	"k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/pkg/util/mount"
	kstrings "k8s.io/kubernetes/pkg/util/strings"
	"k8s.io/kubernetes/pkg/volume"
	"k8s.io/kubernetes/pkg/volume/util"
)

// This is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
	return []volume.VolumePlugin{&awsElasticBlockStorePlugin{nil}}
}

type awsElasticBlockStorePlugin struct {
	host volume.VolumeHost
}

var _ volume.VolumePlugin = &awsElasticBlockStorePlugin{}
var _ volume.PersistentVolumePlugin = &awsElasticBlockStorePlugin{}
var _ volume.DeletableVolumePlugin = &awsElasticBlockStorePlugin{}
var _ volume.ProvisionableVolumePlugin = &awsElasticBlockStorePlugin{}

const (
	awsElasticBlockStorePluginName = "kubernetes.io/aws-ebs"
	awsURLNamePrefix               = "aws://"
)

func getPath(uid types.UID, volName string, host volume.VolumeHost) string {
	return host.GetPodVolumeDir(uid, kstrings.EscapeQualifiedNameForDisk(awsElasticBlockStorePluginName), volName)
}

func (plugin *awsElasticBlockStorePlugin) Init(host volume.VolumeHost) error {
	plugin.host = host
	return nil
}

func (plugin *awsElasticBlockStorePlugin) GetPluginName() string {
	return awsElasticBlockStorePluginName
}

func (plugin *awsElasticBlockStorePlugin) GetVolumeName(spec *volume.Spec) (string, error) {
	volumeSource, _, err := getVolumeSource(spec)
	if err != nil {
		return "", err
	}

	return volumeSource.VolumeID, nil
}

func (plugin *awsElasticBlockStorePlugin) CanSupport(spec *volume.Spec) bool {
	return (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AWSElasticBlockStore != nil) ||
		(spec.Volume != nil && spec.Volume.AWSElasticBlockStore != nil)
}

func (plugin *awsElasticBlockStorePlugin) RequiresRemount() bool {
	return false
}

func (plugin *awsElasticBlockStorePlugin) SupportsMountOption() bool {
	return true
}

func (plugin *awsElasticBlockStorePlugin) SupportsBulkVolumeVerification() bool {
	return true
}

func (plugin *awsElasticBlockStorePlugin) GetVolumeLimits() (map[string]int64, error) {
	volumeLimits := map[string]int64{
		util.EBSVolumeLimitKey: 39,
	}
	cloud := plugin.host.GetCloudProvider()

	// if we can't fetch cloudprovider we return an error
	// hoping external CCM or admin can set it. Returning
	// default values from here will mean, no one can
	// override them.
	if cloud == nil {
		return nil, fmt.Errorf("No cloudprovider present")
	}

	if cloud.ProviderName() != aws.ProviderName {
		return nil, fmt.Errorf("Expected aws cloud, found %s", cloud.ProviderName())
	}

	instances, ok := cloud.Instances()
	if !ok {
		glog.V(3).Infof("Failed to get instances from cloud provider")
		return volumeLimits, nil
	}

	instanceType, err := instances.InstanceType(context.TODO(), plugin.host.GetNodeName())
	if err != nil {
		glog.Errorf("Failed to get instance type from AWS cloud provider")
		return volumeLimits, nil
	}

	if ok, _ := regexp.MatchString("^[cm]5.*", instanceType); ok {
		volumeLimits[util.EBSVolumeLimitKey] = 25
	}

	return volumeLimits, nil
}

func (plugin *awsElasticBlockStorePlugin) VolumeLimitKey(spec *volume.Spec) string {
	return util.EBSVolumeLimitKey
}

func (plugin *awsElasticBlockStorePlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
	return []v1.PersistentVolumeAccessMode{
		v1.ReadWriteOnce,
	}
}

func (plugin *awsElasticBlockStorePlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
	// Inject real implementations here, test through the internal function.
	return plugin.newMounterInternal(spec, pod.UID, &AWSDiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}

func (plugin *awsElasticBlockStorePlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager ebsManager, mounter mount.Interface) (volume.Mounter, error) {
	// EBSs used directly in a pod have a ReadOnly flag set by the pod author.
	// EBSs used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
	ebs, readOnly, err := getVolumeSource(spec)
	if err != nil {
		return nil, err
	}

	volumeID := aws.KubernetesVolumeID(ebs.VolumeID)
	fsType := ebs.FSType
	partition := ""
	if ebs.Partition != 0 {
		partition = strconv.Itoa(int(ebs.Partition))
	}

	return &awsElasticBlockStoreMounter{
		awsElasticBlockStore: &awsElasticBlockStore{
			podUID:          podUID,
			volName:         spec.Name(),
			volumeID:        volumeID,
			partition:       partition,
			manager:         manager,
			mounter:         mounter,
			plugin:          plugin,
			MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, spec.Name(), plugin.host)),
		},
		fsType:      fsType,
		readOnly:    readOnly,
		diskMounter: util.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil
}

func (plugin *awsElasticBlockStorePlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
	// Inject real implementations here, test through the internal function.
	return plugin.newUnmounterInternal(volName, podUID, &AWSDiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}

func (plugin *awsElasticBlockStorePlugin) newUnmounterInternal(volName string, podUID types.UID, manager ebsManager, mounter mount.Interface) (volume.Unmounter, error) {
	return &awsElasticBlockStoreUnmounter{&awsElasticBlockStore{
		podUID:          podUID,
		volName:         volName,
		manager:         manager,
		mounter:         mounter,
		plugin:          plugin,
		MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, volName, plugin.host)),
	}}, nil
}

func (plugin *awsElasticBlockStorePlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
	return plugin.newDeleterInternal(spec, &AWSDiskUtil{})
}

func (plugin *awsElasticBlockStorePlugin) newDeleterInternal(spec *volume.Spec, manager ebsManager) (volume.Deleter, error) {
	if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AWSElasticBlockStore == nil {
		glog.Errorf("spec.PersistentVolumeSource.AWSElasticBlockStore is nil")
		return nil, fmt.Errorf("spec.PersistentVolumeSource.AWSElasticBlockStore is nil")
	}
	return &awsElasticBlockStoreDeleter{
		awsElasticBlockStore: &awsElasticBlockStore{
			volName:  spec.Name(),
			volumeID: aws.KubernetesVolumeID(spec.PersistentVolume.Spec.AWSElasticBlockStore.VolumeID),
			manager:  manager,
			plugin:   plugin,
		}}, nil
}

func (plugin *awsElasticBlockStorePlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
	return plugin.newProvisionerInternal(options, &AWSDiskUtil{})
}

func (plugin *awsElasticBlockStorePlugin) newProvisionerInternal(options volume.VolumeOptions, manager ebsManager) (volume.Provisioner, error) {
	return &awsElasticBlockStoreProvisioner{
		awsElasticBlockStore: &awsElasticBlockStore{
			manager: manager,
			plugin:  plugin,
		},
		options: options,
	}, nil
}

func getVolumeSource(
	spec *volume.Spec) (*v1.AWSElasticBlockStoreVolumeSource, bool, error) {
	if spec.Volume != nil && spec.Volume.AWSElasticBlockStore != nil {
		return spec.Volume.AWSElasticBlockStore, spec.Volume.AWSElasticBlockStore.ReadOnly, nil
	} else if spec.PersistentVolume != nil &&
		spec.PersistentVolume.Spec.AWSElasticBlockStore != nil {
		return spec.PersistentVolume.Spec.AWSElasticBlockStore, spec.ReadOnly, nil
	}

	return nil, false, fmt.Errorf("Spec does not reference an AWS EBS volume type")
}

func (plugin *awsElasticBlockStorePlugin) ConstructVolumeSpec(volName, mountPath string) (*volume.Spec, error) {
	mounter := plugin.host.GetMounter(plugin.GetPluginName())
	pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName())
	volumeID, err := mounter.GetDeviceNameFromMount(mountPath, pluginDir)
	if err != nil {
		return nil, err
	}
	// This is a workaround to fix the issue in converting aws volume id from globalPDPath
	// There are three aws volume id formats and their volumeID from GetDeviceNameFromMount() are:
	// aws:///vol-1234 (aws/vol-1234)
	// aws://us-east-1/vol-1234 (aws/us-east-1/vol-1234)
	// vol-1234 (vol-1234)
	// This code is for converting volume id to aws style volume id for the first two cases.
	sourceName := volumeID
	if strings.HasPrefix(volumeID, "aws/") {
		names := strings.Split(volumeID, "/")
		length := len(names)
		if length < 2 || length > 3 {
			return nil, fmt.Errorf("Failed to get AWS volume id from mount path %q: invalid volume name format %q", mountPath, volumeID)
		}
		volName := names[length-1]
		if !strings.HasPrefix(volName, "vol-") {
			return nil, fmt.Errorf("Invalid volume name format for AWS volume (%q) retrieved from mount path %q", volName, mountPath)
		}
		if length == 2 {
			sourceName = awsURLNamePrefix + "" + "/" + volName // empty zone label
		}
		if length == 3 {
			sourceName = awsURLNamePrefix + names[1] + "/" + volName // names[1] is the zone label
		}
		glog.V(4).Infof("Convert aws volume name from %q to %q ", volumeID, sourceName)
	}

	awsVolume := &v1.Volume{
		Name: volName,
		VolumeSource: v1.VolumeSource{
			AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
				VolumeID: sourceName,
			},
		},
	}
	return volume.NewSpecFromVolume(awsVolume), nil
}

func (plugin *awsElasticBlockStorePlugin) RequiresFSResize() bool {
	return true
}

func (plugin *awsElasticBlockStorePlugin) ExpandVolumeDevice(
	spec *volume.Spec,
	newSize resource.Quantity,
	oldSize resource.Quantity) (resource.Quantity, error) {
	var awsVolume aws.Volumes

	awsVolume, err := getCloudProvider(plugin.host.GetCloudProvider())

	if err != nil {
		return oldSize, err
	}
	// we don't expect to receive this call for non PVs
	rawVolumeName := spec.PersistentVolume.Spec.AWSElasticBlockStore.VolumeID
	volumeID := aws.KubernetesVolumeID(rawVolumeName)

	if volumeID == "" {
		return oldSize, fmt.Errorf("EBS.ExpandVolumeDevice Invalid volume id for %s", spec.Name())
	}
	return awsVolume.ResizeDisk(volumeID, oldSize, newSize)
}

var _ volume.ExpandableVolumePlugin = &awsElasticBlockStorePlugin{}
var _ volume.VolumePluginWithAttachLimits = &awsElasticBlockStorePlugin{}

// Abstract interface to PD operations.
type ebsManager interface {
	CreateVolume(provisioner *awsElasticBlockStoreProvisioner) (volumeID aws.KubernetesVolumeID, volumeSizeGB int, labels map[string]string, fstype string, err error)
	// Deletes a volume
	DeleteVolume(deleter *awsElasticBlockStoreDeleter) error
}

// awsElasticBlockStore volumes are disk resources provided by Amazon Web Services
// that are attached to the kubelet's host machine and exposed to the pod.
type awsElasticBlockStore struct {
	volName string
	podUID  types.UID
	// Unique id of the PD, used to find the disk resource in the provider.
	volumeID aws.KubernetesVolumeID
	// Specifies the partition to mount
	partition string
	// Utility interface that provides API calls to the provider to attach/detach disks.
	manager ebsManager
	// Mounter interface that provides system calls to mount the global path to the pod local path.
	mounter mount.Interface
	plugin  *awsElasticBlockStorePlugin
	volume.MetricsProvider
}

type awsElasticBlockStoreMounter struct {
	*awsElasticBlockStore
	// Filesystem type, optional.
	fsType string
	// Specifies whether the disk will be attached as read-only.
	readOnly bool
	// diskMounter provides the interface that is used to mount the actual block device.
	diskMounter *mount.SafeFormatAndMount
}

var _ volume.Mounter = &awsElasticBlockStoreMounter{}

func (b *awsElasticBlockStoreMounter) GetAttributes() volume.Attributes {
	return volume.Attributes{
		ReadOnly:        b.readOnly,
		Managed:         !b.readOnly,
		SupportsSELinux: true,
	}
}

// Checks prior to mount operations to verify that the required components (binaries, etc.)
// to mount the volume are available on the underlying node.
// If not, it returns an error
func (b *awsElasticBlockStoreMounter) CanMount() error {
	return nil
}

// SetUp attaches the disk and bind mounts to the volume path.
func (b *awsElasticBlockStoreMounter) SetUp(fsGroup *int64) error {
	return b.SetUpAt(b.GetPath(), fsGroup)
}

// SetUpAt attaches the disk and bind mounts to the volume path.
func (b *awsElasticBlockStoreMounter) SetUpAt(dir string, fsGroup *int64) error {
	// TODO: handle failed mounts here.
	notMnt, err := b.mounter.IsLikelyNotMountPoint(dir)
	glog.V(4).Infof("PersistentDisk set up: %s %v %v", dir, !notMnt, err)
	if err != nil && !os.IsNotExist(err) {
		glog.Errorf("cannot validate mount point: %s %v", dir, err)
		return err
	}
	if !notMnt {
		return nil
	}

	globalPDPath := makeGlobalPDPath(b.plugin.host, b.volumeID)

	if err := os.MkdirAll(dir, 0750); err != nil {
		return err
	}

	// Perform a bind mount to the full path to allow duplicate mounts of the same PD.
	options := []string{"bind"}
	if b.readOnly {
		options = append(options, "ro")
	}
	err = b.mounter.Mount(globalPDPath, dir, "", options)
	if err != nil {
		notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
		if mntErr != nil {
			glog.Errorf("IsLikelyNotMountPoint check failed for %s: %v", dir, mntErr)
			return err
		}
		if !notMnt {
			if mntErr = b.mounter.Unmount(dir); mntErr != nil {
				glog.Errorf("failed to unmount %s: %v", dir, mntErr)
				return err
			}
			notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
			if mntErr != nil {
				glog.Errorf("IsLikelyNotMountPoint check failed for %s: %v", dir, mntErr)
				return err
			}
			if !notMnt {
				// This is very odd, we don't expect it. We'll try again next sync loop.
				glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir)
				return err
			}
		}
		os.Remove(dir)
		glog.Errorf("Mount of disk %s failed: %v", dir, err)
		return err
	}

	if !b.readOnly {
		volume.SetVolumeOwnership(b, fsGroup)
	}

	glog.V(4).Infof("Successfully mounted %s", dir)
	return nil
}

func makeGlobalPDPath(host volume.VolumeHost, volumeID aws.KubernetesVolumeID) string {
	// Clean up the URI to be more fs-friendly
	name := string(volumeID)
	name = strings.Replace(name, "://", "/", -1)
	return filepath.Join(host.GetPluginDir(awsElasticBlockStorePluginName), mount.MountsInGlobalPDPath, name)
}

// Reverses the mapping done in makeGlobalPDPath
func getVolumeIDFromGlobalMount(host volume.VolumeHost, globalPath string) (string, error) {
	basePath := filepath.Join(host.GetPluginDir(awsElasticBlockStorePluginName), mount.MountsInGlobalPDPath)
	rel, err := filepath.Rel(basePath, globalPath)
	if err != nil {
		glog.Errorf("Failed to get volume id from global mount %s - %v", globalPath, err)
		return "", err
	}
	if strings.Contains(rel, "../") {
		glog.Errorf("Unexpected mount path: %s", globalPath)
		return "", fmt.Errorf("unexpected mount path: " + globalPath)
	}
	// Reverse the :// replacement done in makeGlobalPDPath
	volumeID := rel
	if strings.HasPrefix(volumeID, "aws/") {
		volumeID = strings.Replace(volumeID, "aws/", "aws://", 1)
	}
	glog.V(2).Info("Mapping mount dir ", globalPath, " to volumeID ", volumeID)
	return volumeID, nil
}

func (ebs *awsElasticBlockStore) GetPath() string {
	return getPath(ebs.podUID, ebs.volName, ebs.plugin.host)
}

type awsElasticBlockStoreUnmounter struct {
	*awsElasticBlockStore
}

var _ volume.Unmounter = &awsElasticBlockStoreUnmounter{}

// Unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (c *awsElasticBlockStoreUnmounter) TearDown() error {
	return c.TearDownAt(c.GetPath())
}

// Unmounts the bind mount
func (c *awsElasticBlockStoreUnmounter) TearDownAt(dir string) error {
	return util.UnmountPath(dir, c.mounter)
}

type awsElasticBlockStoreDeleter struct {
	*awsElasticBlockStore
}

var _ volume.Deleter = &awsElasticBlockStoreDeleter{}

func (d *awsElasticBlockStoreDeleter) GetPath() string {
	return getPath(d.podUID, d.volName, d.plugin.host)
}

func (d *awsElasticBlockStoreDeleter) Delete() error {
	return d.manager.DeleteVolume(d)
}

type awsElasticBlockStoreProvisioner struct {
	*awsElasticBlockStore
	options   volume.VolumeOptions
	namespace string
}

var _ volume.Provisioner = &awsElasticBlockStoreProvisioner{}

func (c *awsElasticBlockStoreProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
	if !util.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
		return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
	}

	volumeID, sizeGB, labels, fstype, err := c.manager.CreateVolume(c)
	if err != nil {
		glog.Errorf("Provision failed: %v", err)
		return nil, err
	}

	if fstype == "" {
		fstype = "ext4"
	}

	pv := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name:   c.options.PVName,
			Labels: map[string]string{},
			Annotations: map[string]string{
				util.VolumeDynamicallyCreatedByKey: "aws-ebs-dynamic-provisioner",
			},
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy,
			AccessModes:                   c.options.PVC.Spec.AccessModes,
			Capacity: v1.ResourceList{
				v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
			},
			PersistentVolumeSource: v1.PersistentVolumeSource{
				AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
					VolumeID:  string(volumeID),
					FSType:    fstype,
					Partition: 0,
					ReadOnly:  false,
				},
			},
			MountOptions: c.options.MountOptions,
		},
	}

	if len(c.options.PVC.Spec.AccessModes) == 0 {
		pv.Spec.AccessModes = c.plugin.GetAccessModes()
	}

	if len(labels) != 0 {
		if pv.Labels == nil {
			pv.Labels = make(map[string]string)
		}
		for k, v := range labels {
			pv.Labels[k] = v
		}
	}

	if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
		pv.Spec.VolumeMode = c.options.PVC.Spec.VolumeMode
	}

	return pv, nil
}
179  vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_ebs_block.go (generated, vendored)
@ -1,179 +0,0 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package aws_ebs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
|
||||
"k8s.io/kubernetes/pkg/util/mount"
|
||||
kstrings "k8s.io/kubernetes/pkg/util/strings"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
"k8s.io/kubernetes/pkg/volume/util"
|
||||
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
|
||||
)
|
||||
|
||||
var _ volume.VolumePlugin = &awsElasticBlockStorePlugin{}
|
||||
var _ volume.PersistentVolumePlugin = &awsElasticBlockStorePlugin{}
|
||||
var _ volume.BlockVolumePlugin = &awsElasticBlockStorePlugin{}
|
||||
var _ volume.DeletableVolumePlugin = &awsElasticBlockStorePlugin{}
|
||||
var _ volume.ProvisionableVolumePlugin = &awsElasticBlockStorePlugin{}
|
||||
|
||||
func (plugin *awsElasticBlockStorePlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, mapPath string) (*volume.Spec, error) {
|
||||
pluginDir := plugin.host.GetVolumeDevicePluginDir(awsElasticBlockStorePluginName)
|
||||
blkutil := volumepathhandler.NewBlockVolumePathHandler()
|
||||
globalMapPathUUID, err := blkutil.FindGlobalMapPathUUIDFromPod(pluginDir, mapPath, podUID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
glog.V(5).Infof("globalMapPathUUID: %s", globalMapPathUUID)
|
||||
|
||||
globalMapPath := filepath.Dir(globalMapPathUUID)
|
||||
if len(globalMapPath) <= 1 {
|
||||
return nil, fmt.Errorf("failed to get volume plugin information from globalMapPathUUID: %v", globalMapPathUUID)
|
||||
}
|
||||
|
||||
return getVolumeSpecFromGlobalMapPath(globalMapPath)
|
||||
}
|
||||
|
||||
func getVolumeSpecFromGlobalMapPath(globalMapPath string) (*volume.Spec, error) {
|
||||
// Get volume spec information from globalMapPath
|
||||
// globalMapPath example:
|
||||
// plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumeID}
|
||||
// plugins/kubernetes.io/aws-ebs/volumeDevices/vol-XXXXXX
|
||||
vID := filepath.Base(globalMapPath)
|
||||
if len(vID) <= 1 {
|
||||
return nil, fmt.Errorf("failed to get volumeID from global path=%s", globalMapPath)
|
||||
}
|
||||
if !strings.Contains(vID, "vol-") {
|
||||
return nil, fmt.Errorf("failed to get volumeID from global path=%s, invalid volumeID format = %s", globalMapPath, vID)
|
||||
}
|
||||
block := v1.PersistentVolumeBlock
|
||||
awsVolume := &v1.PersistentVolume{
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
|
||||
VolumeID: vID,
|
||||
},
|
||||
},
|
||||
VolumeMode: &block,
|
||||
},
|
||||
}
|
||||
|
||||
return volume.NewSpecFromPersistentVolume(awsVolume, true), nil
|
||||
}
|
||||
|
||||
// NewBlockVolumeMapper creates a new volume.BlockVolumeMapper from an API specification.
func (plugin *awsElasticBlockStorePlugin) NewBlockVolumeMapper(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.BlockVolumeMapper, error) {
    // If this is called via GenerateUnmapDeviceFunc(), pod is nil.
    // Pass an empty string as a dummy UID since the UID isn't used in that case.
    var uid types.UID
    if pod != nil {
        uid = pod.UID
    }

    return plugin.newBlockVolumeMapperInternal(spec, uid, &AWSDiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}

func (plugin *awsElasticBlockStorePlugin) newBlockVolumeMapperInternal(spec *volume.Spec, podUID types.UID, manager ebsManager, mounter mount.Interface) (volume.BlockVolumeMapper, error) {
    ebs, readOnly, err := getVolumeSource(spec)
    if err != nil {
        return nil, err
    }

    volumeID := aws.KubernetesVolumeID(ebs.VolumeID)
    partition := ""
    if ebs.Partition != 0 {
        partition = strconv.Itoa(int(ebs.Partition))
    }

    return &awsElasticBlockStoreMapper{
        awsElasticBlockStore: &awsElasticBlockStore{
            podUID:    podUID,
            volName:   spec.Name(),
            volumeID:  volumeID,
            partition: partition,
            manager:   manager,
            mounter:   mounter,
            plugin:    plugin,
        },
        readOnly: readOnly}, nil
}

func (plugin *awsElasticBlockStorePlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (volume.BlockVolumeUnmapper, error) {
    return plugin.newUnmapperInternal(volName, podUID, &AWSDiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}

func (plugin *awsElasticBlockStorePlugin) newUnmapperInternal(volName string, podUID types.UID, manager ebsManager, mounter mount.Interface) (volume.BlockVolumeUnmapper, error) {
    return &awsElasticBlockStoreUnmapper{
        awsElasticBlockStore: &awsElasticBlockStore{
            podUID:  podUID,
            volName: volName,
            manager: manager,
            mounter: mounter,
            plugin:  plugin,
        }}, nil
}

func (c *awsElasticBlockStoreUnmapper) TearDownDevice(mapPath, devicePath string) error {
    return nil
}

type awsElasticBlockStoreUnmapper struct {
    *awsElasticBlockStore
}

var _ volume.BlockVolumeUnmapper = &awsElasticBlockStoreUnmapper{}

type awsElasticBlockStoreMapper struct {
    *awsElasticBlockStore
    readOnly bool
}

var _ volume.BlockVolumeMapper = &awsElasticBlockStoreMapper{}

func (b *awsElasticBlockStoreMapper) SetUpDevice() (string, error) {
    return "", nil
}

func (b *awsElasticBlockStoreMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, volumeMapName string, podUID types.UID) error {
    return util.MapBlockVolume(devicePath, globalMapPath, volumeMapPath, volumeMapName, podUID)
}

// GetGlobalMapPath returns the global map path and an error.
// path: plugins/kubernetes.io/{PluginName}/volumeDevices/{volumeID}
//       plugins/kubernetes.io/aws-ebs/volumeDevices/vol-XXXXXX
func (ebs *awsElasticBlockStore) GetGlobalMapPath(spec *volume.Spec) (string, error) {
    volumeSource, _, err := getVolumeSource(spec)
    if err != nil {
        return "", err
    }
    return filepath.Join(ebs.plugin.host.GetVolumeDevicePluginDir(awsElasticBlockStorePluginName), string(volumeSource.VolumeID)), nil
}

// GetPodDeviceMapPath returns the pod device map path and the volume name.
// path: pods/{podUID}/volumeDevices/kubernetes.io~aws-ebs
func (ebs *awsElasticBlockStore) GetPodDeviceMapPath() (string, string) {
    name := awsElasticBlockStorePluginName
    return ebs.plugin.host.GetPodVolumeDeviceDir(ebs.podUID, kstrings.EscapeQualifiedNameForDisk(name)), ebs.volName
}
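// For illustration (paths are hypothetical and host-layout dependent): with
// pod UID "poduid" and a PV named "pv1" backed by "vol-1234", the two methods
// above resolve to roughly
//
//     GetGlobalMapPath    -> {pluginDir}/plugins/kubernetes.io/aws-ebs/volumeDevices/vol-1234
//     GetPodDeviceMapPath -> {podsDir}/poduid/volumeDevices/kubernetes.io~aws-ebs, "pv1"
//
// matching the constants asserted in the block tests below.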
145
vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_ebs_block_test.go
generated
vendored
@ -1,145 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package aws_ebs

import (
    "os"
    "path/filepath"
    "testing"

    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    utiltesting "k8s.io/client-go/util/testing"
    "k8s.io/kubernetes/pkg/volume"
    volumetest "k8s.io/kubernetes/pkg/volume/testing"
)

const (
    testVolName    = "vol-1234"
    testPVName     = "pv1"
    testGlobalPath = "plugins/kubernetes.io/aws-ebs/volumeDevices/vol-1234"
    testPodPath    = "pods/poduid/volumeDevices/kubernetes.io~aws-ebs"
)
func TestGetVolumeSpecFromGlobalMapPath(t *testing.T) {
    // Make our test path for a fake GlobalMapPath;
    // /tmp stands in for the plugin dir, e.g.
    // /tmp/testGlobalPathXXXXX/plugins/kubernetes.io/aws-ebs/volumeDevices/vol-1234
    tmpVDir, err := utiltesting.MkTmpdir("awsBlockTest")
    if err != nil {
        t.Fatalf("can't make a temp dir: %v", err)
    }
    // deferred clean up
    defer os.RemoveAll(tmpVDir)

    expectedGlobalPath := filepath.Join(tmpVDir, testGlobalPath)

    // Bad path
    badspec, err := getVolumeSpecFromGlobalMapPath("")
    if badspec != nil || err == nil {
        t.Fatalf("Expected not to get spec from GlobalMapPath but did")
    }

    // Good path
    spec, err := getVolumeSpecFromGlobalMapPath(expectedGlobalPath)
    if spec == nil || err != nil {
        t.Fatalf("Failed to get spec from GlobalMapPath: %v", err)
    }
    if spec.PersistentVolume.Spec.AWSElasticBlockStore.VolumeID != testVolName {
        t.Errorf("Invalid volumeID from GlobalMapPath spec: %s", spec.PersistentVolume.Spec.AWSElasticBlockStore.VolumeID)
    }
    block := v1.PersistentVolumeBlock
    specMode := spec.PersistentVolume.Spec.VolumeMode
    // VolumeMode is a pointer and must be set for a block PV; checking
    // &specMode could never be nil, so the nil check has to target specMode itself.
    if specMode == nil {
        t.Fatalf("Invalid volumeMode from GlobalMapPath spec: %v - %v", specMode, block)
    }
    if *specMode != block {
        t.Errorf("Invalid volumeMode from GlobalMapPath spec: %v - %v", *specMode, block)
    }
}
func getTestVolume(readOnly bool, isBlock bool) *volume.Spec {
    pv := &v1.PersistentVolume{
        ObjectMeta: metav1.ObjectMeta{
            Name: testPVName,
        },
        Spec: v1.PersistentVolumeSpec{
            PersistentVolumeSource: v1.PersistentVolumeSource{
                AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
                    VolumeID: testVolName,
                },
            },
        },
    }

    if isBlock {
        blockMode := v1.PersistentVolumeBlock
        pv.Spec.VolumeMode = &blockMode
    }
    return volume.NewSpecFromPersistentVolume(pv, readOnly)
}
func TestGetPodAndPluginMapPaths(t *testing.T) {
    tmpVDir, err := utiltesting.MkTmpdir("awsBlockTest")
    if err != nil {
        t.Fatalf("can't make a temp dir: %v", err)
    }
    // deferred clean up
    defer os.RemoveAll(tmpVDir)

    expectedGlobalPath := filepath.Join(tmpVDir, testGlobalPath)
    expectedPodPath := filepath.Join(tmpVDir, testPodPath)

    spec := getTestVolume(false, true /*isBlock*/)
    plugMgr := volume.VolumePluginMgr{}
    plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpVDir, nil, nil))
    plug, err := plugMgr.FindMapperPluginByName(awsElasticBlockStorePluginName)
    if err != nil {
        os.RemoveAll(tmpVDir)
        t.Fatalf("Can't find the plugin by name: %q", awsElasticBlockStorePluginName)
    }
    if plug.GetPluginName() != awsElasticBlockStorePluginName {
        t.Fatalf("Wrong name: %s", plug.GetPluginName())
    }
    pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
    mapper, err := plug.NewBlockVolumeMapper(spec, pod, volume.VolumeOptions{})
    if err != nil {
        t.Fatalf("Failed to make a new Mounter: %v", err)
    }
    if mapper == nil {
        t.Fatalf("Got a nil Mounter")
    }

    // GetGlobalMapPath
    gMapPath, err := mapper.GetGlobalMapPath(spec)
    if err != nil || len(gMapPath) == 0 {
        // Report the EBS volume ID here; this spec has no GCEPersistentDisk
        // source, so reading one would panic.
        t.Fatalf("Invalid path from GlobalMapPath spec: %s", spec.PersistentVolume.Spec.AWSElasticBlockStore.VolumeID)
    }
    if gMapPath != expectedGlobalPath {
        t.Fatalf("Failed to get GlobalMapPath: %s %s", gMapPath, expectedGlobalPath)
    }

    // GetPodDeviceMapPath
    gDevicePath, gVolName := mapper.GetPodDeviceMapPath()
    if gDevicePath != expectedPodPath {
        t.Errorf("Got unexpected pod path: %s, expected %s", gDevicePath, expectedPodPath)
    }
    if gVolName != testPVName {
        t.Errorf("Got unexpected volName: %s, expected %s", gVolName, testPVName)
    }
}
295
vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_ebs_test.go
generated
vendored
@ -1,295 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package aws_ebs

import (
    "fmt"
    "os"
    "path/filepath"
    "testing"

    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/client-go/kubernetes/fake"
    utiltesting "k8s.io/client-go/util/testing"
    "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
    "k8s.io/kubernetes/pkg/util/mount"
    "k8s.io/kubernetes/pkg/volume"
    volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
func TestCanSupport(t *testing.T) {
    tmpDir, err := utiltesting.MkTmpdir("awsebsTest")
    if err != nil {
        t.Fatalf("can't make a temp dir: %v", err)
    }
    defer os.RemoveAll(tmpDir)
    plugMgr := volume.VolumePluginMgr{}
    plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))

    plug, err := plugMgr.FindPluginByName("kubernetes.io/aws-ebs")
    if err != nil {
        t.Errorf("Can't find the plugin by name")
    }
    if plug.GetPluginName() != "kubernetes.io/aws-ebs" {
        t.Errorf("Wrong name: %s", plug.GetPluginName())
    }
    if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{}}}}) {
        t.Errorf("Expected true")
    }
    if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{}}}}}) {
        t.Errorf("Expected true")
    }
}

func TestGetAccessModes(t *testing.T) {
    tmpDir, err := utiltesting.MkTmpdir("awsebsTest")
    if err != nil {
        t.Fatalf("can't make a temp dir: %v", err)
    }
    defer os.RemoveAll(tmpDir)
    plugMgr := volume.VolumePluginMgr{}
    plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))

    plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/aws-ebs")
    if err != nil {
        t.Errorf("Can't find the plugin by name")
    }

    if !volumetest.ContainsAccessMode(plug.GetAccessModes(), v1.ReadWriteOnce) {
        t.Errorf("Expected to support AccessModeTypes: %s", v1.ReadWriteOnce)
    }
    if volumetest.ContainsAccessMode(plug.GetAccessModes(), v1.ReadOnlyMany) {
        t.Errorf("Expected not to support AccessModeTypes: %s", v1.ReadOnlyMany)
    }
}

type fakePDManager struct{}

// TODO(jonesdl) To fully test this, we could create a loopback device
// and mount that instead.
func (fake *fakePDManager) CreateVolume(c *awsElasticBlockStoreProvisioner) (volumeID aws.KubernetesVolumeID, volumeSizeGB int, labels map[string]string, fstype string, err error) {
    labels = make(map[string]string)
    labels["fakepdmanager"] = "yes"
    return "test-aws-volume-name", 100, labels, "", nil
}

func (fake *fakePDManager) DeleteVolume(cd *awsElasticBlockStoreDeleter) error {
    if cd.volumeID != "test-aws-volume-name" {
        return fmt.Errorf("Deleter got unexpected volume name: %s", cd.volumeID)
    }
    return nil
}

func TestPlugin(t *testing.T) {
    tmpDir, err := utiltesting.MkTmpdir("awsebsTest")
    if err != nil {
        t.Fatalf("can't make a temp dir: %v", err)
    }
    defer os.RemoveAll(tmpDir)
    plugMgr := volume.VolumePluginMgr{}
    plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))

    plug, err := plugMgr.FindPluginByName("kubernetes.io/aws-ebs")
    if err != nil {
        t.Errorf("Can't find the plugin by name")
    }
    spec := &v1.Volume{
        Name: "vol1",
        VolumeSource: v1.VolumeSource{
            AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
                VolumeID: "pd",
                FSType:   "ext4",
            },
        },
    }
    fakeManager := &fakePDManager{}
    fakeMounter := &mount.FakeMounter{}
    mounter, err := plug.(*awsElasticBlockStorePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), fakeManager, fakeMounter)
    if err != nil {
        t.Errorf("Failed to make a new Mounter: %v", err)
    }
    if mounter == nil {
        t.Errorf("Got a nil Mounter")
    }

    volPath := filepath.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~aws-ebs/vol1")
    path := mounter.GetPath()
    if path != volPath {
        t.Errorf("Got unexpected path: %s", path)
    }

    if err := mounter.SetUp(nil); err != nil {
        t.Errorf("Expected success, got: %v", err)
    }
    if _, err := os.Stat(path); err != nil {
        if os.IsNotExist(err) {
            t.Errorf("SetUp() failed, volume path not created: %s", path)
        } else {
            t.Errorf("SetUp() failed: %v", err)
        }
    }

    fakeManager = &fakePDManager{}
    unmounter, err := plug.(*awsElasticBlockStorePlugin).newUnmounterInternal("vol1", types.UID("poduid"), fakeManager, fakeMounter)
    if err != nil {
        t.Errorf("Failed to make a new Unmounter: %v", err)
    }
    if unmounter == nil {
        t.Errorf("Got a nil Unmounter")
    }

    if err := unmounter.TearDown(); err != nil {
        t.Errorf("Expected success, got: %v", err)
    }
    if _, err := os.Stat(path); err == nil {
        t.Errorf("TearDown() failed, volume path still exists: %s", path)
    } else if !os.IsNotExist(err) {
        t.Errorf("TearDown() failed: %v", err)
    }

    // Test Provisioner
    options := volume.VolumeOptions{
        PVC: volumetest.CreateTestPVC("100Mi", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}),
        PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
    }
    provisioner, err := plug.(*awsElasticBlockStorePlugin).newProvisionerInternal(options, &fakePDManager{})
    if err != nil {
        t.Errorf("Error creating new provisioner:%v", err)
    }
    persistentSpec, err := provisioner.Provision(nil, nil)
    if err != nil {
        t.Errorf("Provision() failed: %v", err)
    }

    if persistentSpec.Spec.PersistentVolumeSource.AWSElasticBlockStore.VolumeID != "test-aws-volume-name" {
        t.Errorf("Provision() returned unexpected volume ID: %s", persistentSpec.Spec.PersistentVolumeSource.AWSElasticBlockStore.VolumeID)
    }
    cap := persistentSpec.Spec.Capacity[v1.ResourceStorage]
    size := cap.Value()
    if size != 100*1024*1024*1024 {
        t.Errorf("Provision() returned unexpected volume size: %v", size)
    }

    if persistentSpec.Labels["fakepdmanager"] != "yes" {
        t.Errorf("Provision() returned unexpected labels: %v", persistentSpec.Labels)
    }

    // Test Deleter
    volSpec := &volume.Spec{
        PersistentVolume: persistentSpec,
    }
    deleter, err := plug.(*awsElasticBlockStorePlugin).newDeleterInternal(volSpec, &fakePDManager{})
    if err != nil {
        t.Errorf("Error creating new deleter:%v", err)
    }
    err = deleter.Delete()
    if err != nil {
        t.Errorf("Deleter() failed: %v", err)
    }
}
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
    pv := &v1.PersistentVolume{
        ObjectMeta: metav1.ObjectMeta{
            Name: "pvA",
        },
        Spec: v1.PersistentVolumeSpec{
            PersistentVolumeSource: v1.PersistentVolumeSource{
                AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{},
            },
            ClaimRef: &v1.ObjectReference{
                Name: "claimA",
            },
        },
    }

    claim := &v1.PersistentVolumeClaim{
        ObjectMeta: metav1.ObjectMeta{
            Name:      "claimA",
            Namespace: "nsA",
        },
        Spec: v1.PersistentVolumeClaimSpec{
            VolumeName: "pvA",
        },
        Status: v1.PersistentVolumeClaimStatus{
            Phase: v1.ClaimBound,
        },
    }

    clientset := fake.NewSimpleClientset(pv, claim)

    tmpDir, err := utiltesting.MkTmpdir("awsebsTest")
    if err != nil {
        t.Fatalf("can't make a temp dir: %v", err)
    }
    defer os.RemoveAll(tmpDir)
    plugMgr := volume.VolumePluginMgr{}
    plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, clientset, nil))
    plug, _ := plugMgr.FindPluginByName(awsElasticBlockStorePluginName)

    // readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
    spec := volume.NewSpecFromPersistentVolume(pv, true)
    pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
    mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})

    if !mounter.GetAttributes().ReadOnly {
        t.Errorf("Expected true for mounter.IsReadOnly")
    }
}

func TestMounterAndUnmounterTypeAssert(t *testing.T) {
    tmpDir, err := utiltesting.MkTmpdir("awsebsTest")
    if err != nil {
        t.Fatalf("can't make a temp dir: %v", err)
    }
    defer os.RemoveAll(tmpDir)
    plugMgr := volume.VolumePluginMgr{}
    plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))

    plug, err := plugMgr.FindPluginByName("kubernetes.io/aws-ebs")
    if err != nil {
        t.Errorf("Can't find the plugin by name")
    }
    spec := &v1.Volume{
        Name: "vol1",
        VolumeSource: v1.VolumeSource{
            AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
                VolumeID: "pd",
                FSType:   "ext4",
            },
        },
    }

    mounter, err := plug.(*awsElasticBlockStorePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &fakePDManager{}, &mount.FakeMounter{})
    if err != nil {
        t.Errorf("Error creating new mounter:%v", err)
    }
    if _, ok := mounter.(volume.Unmounter); ok {
        t.Errorf("Volume Mounter can be type-assert to Unmounter")
    }

    unmounter, err := plug.(*awsElasticBlockStorePlugin).newUnmounterInternal("vol1", types.UID("poduid"), &fakePDManager{}, &mount.FakeMounter{})
    if err != nil {
        t.Errorf("Error creating new unmounter:%v", err)
    }
    if _, ok := unmounter.(volume.Mounter); ok {
        t.Errorf("Volume Unmounter can be type-assert to Mounter")
    }
}
255
vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_util.go
generated
vendored
@ -1,255 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package aws_ebs

import (
    "fmt"
    "os"
    "path/filepath"
    "strconv"
    "strings"
    "time"

    "github.com/golang/glog"
    "k8s.io/api/core/v1"
    "k8s.io/kubernetes/pkg/cloudprovider"
    "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
    "k8s.io/kubernetes/pkg/volume"
    volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

const (
    diskPartitionSuffix = ""
    diskXVDPath         = "/dev/xvd"
    diskXVDPattern      = "/dev/xvd*"
    maxChecks           = 60
    maxRetries          = 10
    checkSleepDuration  = time.Second
    errorSleepDuration  = 5 * time.Second
)

type AWSDiskUtil struct{}
func (util *AWSDiskUtil) DeleteVolume(d *awsElasticBlockStoreDeleter) error {
    cloud, err := getCloudProvider(d.awsElasticBlockStore.plugin.host.GetCloudProvider())
    if err != nil {
        return err
    }

    deleted, err := cloud.DeleteDisk(d.volumeID)
    if err != nil {
        // The AWS cloud provider returns volume.deletedVolumeInUseError when
        // necessary; no handling is needed here.
        glog.V(2).Infof("Error deleting EBS Disk volume %s: %v", d.volumeID, err)
        return err
    }
    if deleted {
        glog.V(2).Infof("Successfully deleted EBS Disk volume %s", d.volumeID)
    } else {
        glog.V(2).Infof("Successfully deleted EBS Disk volume %s (actually already deleted)", d.volumeID)
    }
    return nil
}

// CreateVolume creates an AWS EBS volume.
// Returns: volumeID, volumeSizeGB, labels, fstype, error
func (util *AWSDiskUtil) CreateVolume(c *awsElasticBlockStoreProvisioner) (aws.KubernetesVolumeID, int, map[string]string, string, error) {
    cloud, err := getCloudProvider(c.awsElasticBlockStore.plugin.host.GetCloudProvider())
    if err != nil {
        return "", 0, nil, "", err
    }

    // AWS volumes don't have a Name field, so store the name in the Name tag.
    var tags map[string]string
    if c.options.CloudTags == nil {
        tags = make(map[string]string)
    } else {
        tags = *c.options.CloudTags
    }
    tags["Name"] = volumeutil.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 255) // AWS tags can have 255 characters

    capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
    requestBytes := capacity.Value()
    // AWS works with gigabytes; convert to GiB with rounding up.
    requestGB := int(volumeutil.RoundUpSize(requestBytes, 1024*1024*1024))
    volumeOptions := &aws.VolumeOptions{
        CapacityGB: requestGB,
        Tags:       tags,
        PVCName:    c.options.PVC.Name,
    }
    fstype := ""
    // Apply Parameters (case-insensitive). We leave validation of
    // the values to the cloud provider.
    volumeOptions.ZonePresent = false
    volumeOptions.ZonesPresent = false
    for k, v := range c.options.Parameters {
        switch strings.ToLower(k) {
        case "type":
            volumeOptions.VolumeType = v
        case "zone":
            volumeOptions.ZonePresent = true
            volumeOptions.AvailabilityZone = v
        case "zones":
            volumeOptions.ZonesPresent = true
            volumeOptions.AvailabilityZones = v
        case "iopspergb":
            volumeOptions.IOPSPerGB, err = strconv.Atoi(v)
            if err != nil {
                return "", 0, nil, "", fmt.Errorf("invalid iopsPerGB value %q, must be integer between 1 and 30: %v", v, err)
            }
        case "encrypted":
            volumeOptions.Encrypted, err = strconv.ParseBool(v)
            if err != nil {
                return "", 0, nil, "", fmt.Errorf("invalid encrypted boolean value %q, must be true or false: %v", v, err)
            }
        case "kmskeyid":
            volumeOptions.KmsKeyId = v
        case volume.VolumeParameterFSType:
            fstype = v
        default:
            return "", 0, nil, "", fmt.Errorf("invalid option %q for volume plugin %s", k, c.plugin.GetPluginName())
        }
    }

    if volumeOptions.ZonePresent && volumeOptions.ZonesPresent {
        return "", 0, nil, "", fmt.Errorf("both zone and zones StorageClass parameters must not be used at the same time")
    }

    // TODO: implement PVC.Selector parsing
    if c.options.PVC.Spec.Selector != nil {
        return "", 0, nil, "", fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on AWS")
    }

    name, err := cloud.CreateDisk(volumeOptions)
    if err != nil {
        glog.V(2).Infof("Error creating EBS Disk volume: %v", err)
        return "", 0, nil, "", err
    }
    glog.V(2).Infof("Successfully created EBS Disk volume %s", name)

    labels, err := cloud.GetVolumeLabels(name)
    if err != nil {
        // We don't really want to leak the volume here...
        glog.Errorf("error building labels for new EBS volume %q: %v", name, err)
    }

    return name, int(requestGB), labels, fstype, nil
}
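// A quick worked example of the size conversion above: a PVC requesting
// "100Mi" gives requestBytes = 104857600, and
// volumeutil.RoundUpSize(104857600, 1024*1024*1024) rounds up to 1, so a
// 1 GiB EBS volume is requested even though the claim asked for less.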
// Returns the first path that exists, or empty string if none exist.
func verifyDevicePath(devicePaths []string) (string, error) {
    for _, path := range devicePaths {
        if pathExists, err := volumeutil.PathExists(path); err != nil {
            return "", fmt.Errorf("Error checking if path exists: %v", err)
        } else if pathExists {
            return path, nil
        }
    }

    return "", nil
}

// Returns true only if every path in devicePaths has been removed.
func verifyAllPathsRemoved(devicePaths []string) (bool, error) {
    allPathsRemoved := true
    for _, path := range devicePaths {
        if exists, err := volumeutil.PathExists(path); err != nil {
            return false, fmt.Errorf("Error checking if path exists: %v", err)
        } else {
            allPathsRemoved = allPathsRemoved && !exists
        }
    }

    return allPathsRemoved, nil
}

// Returns list of all paths for given EBS mount
// This is more interesting on GCE (where we are able to identify volumes under /dev/disk-by-id)
// Here it is mostly about applying the partition path
func getDiskByIdPaths(volumeID aws.KubernetesVolumeID, partition string, devicePath string) []string {
    devicePaths := []string{}
    if devicePath != "" {
        devicePaths = append(devicePaths, devicePath)
    }

    if partition != "" {
        for i, path := range devicePaths {
            devicePaths[i] = path + diskPartitionSuffix + partition
        }
    }

    // We need to find NVME volumes, which are mounted on a "random" nvme path ("/dev/nvme0n1"),
    // and we have to get the volume id from the nvme interface
    awsVolumeID, err := volumeID.MapToAWSVolumeID()
    if err != nil {
        glog.Warningf("error mapping volume %q to AWS volume: %v", volumeID, err)
    } else {
        // This is the magic name on which AWS presents NVME devices under /dev/disk/by-id/
        // For example, vol-0fab1d5e3f72a5e23 creates a symlink at /dev/disk/by-id/nvme-Amazon_Elastic_Block_Store_vol0fab1d5e3f72a5e23
        nvmeName := "nvme-Amazon_Elastic_Block_Store_" + strings.Replace(string(awsVolumeID), "-", "", -1)
        nvmePath, err := findNvmeVolume(nvmeName)
        if err != nil {
            glog.Warningf("error looking for nvme volume %q: %v", volumeID, err)
        } else if nvmePath != "" {
            devicePaths = append(devicePaths, nvmePath)
        }
    }

    return devicePaths
}
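// Worked example with hypothetical values: for devicePath "/dev/xvdba",
// partition "1" and volume ID vol-0fab1d5e3f72a5e23, the candidate list is
//
//     /dev/xvdba1    (devicePath + diskPartitionSuffix + partition)
//     /dev/nvme2n1   (whatever the /dev/disk/by-id NVMe symlink resolves to)
//
// On NVMe-based instance types only the second entry exists, which is why the
// caller probes every candidate (see verifyDevicePath above).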
// Return cloud provider
func getCloudProvider(cloudProvider cloudprovider.Interface) (*aws.Cloud, error) {
    awsCloudProvider, ok := cloudProvider.(*aws.Cloud)
    if !ok || awsCloudProvider == nil {
        return nil, fmt.Errorf("Failed to get AWS Cloud Provider. GetCloudProvider returned %v instead", cloudProvider)
    }

    return awsCloudProvider, nil
}

// findNvmeVolume looks for the nvme volume with the specified name
// It follows the symlink (if it exists) and returns the absolute path to the device
func findNvmeVolume(findName string) (device string, err error) {
    p := filepath.Join("/dev/disk/by-id/", findName)
    stat, err := os.Lstat(p)
    if err != nil {
        if os.IsNotExist(err) {
            glog.V(6).Infof("nvme path not found %q", p)
            return "", nil
        }
        return "", fmt.Errorf("error getting stat of %q: %v", p, err)
    }

    if stat.Mode()&os.ModeSymlink != os.ModeSymlink {
        glog.Warningf("nvme file %q found, but was not a symlink", p)
        return "", nil
    }

    // Find the target, resolving to an absolute path
    // For example, /dev/disk/by-id/nvme-Amazon_Elastic_Block_Store_vol0fab1d5e3f72a5e23 -> ../../nvme2n1
    resolved, err := filepath.EvalSymlinks(p)
    if err != nil {
        return "", fmt.Errorf("error reading target of symlink %q: %v", p, err)
    }

    if !strings.HasPrefix(resolved, "/dev") {
        return "", fmt.Errorf("resolved symlink for %q was unexpected: %q", p, resolved)
    }

    return resolved, nil
}
19
vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/doc.go
generated
vendored
@ -1,19 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package aws_ebs contains the internal representation of AWS Elastic
// Block Store volumes.
package aws_ebs // import "k8s.io/kubernetes/pkg/volume/aws_ebs"
109
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/BUILD
generated
vendored
@ -1,109 +0,0 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = [
        "attacher.go",
        "azure_common.go",
        "azure_dd.go",
        "azure_dd_block.go",
        "azure_mounter.go",
        "azure_provision.go",
    ] + select({
        "@io_bazel_rules_go//go/platform:android": [
            "azure_common_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:darwin": [
            "azure_common_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:dragonfly": [
            "azure_common_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:freebsd": [
            "azure_common_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:linux": [
            "azure_common_linux.go",
        ],
        "@io_bazel_rules_go//go/platform:nacl": [
            "azure_common_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:netbsd": [
            "azure_common_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:openbsd": [
            "azure_common_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:plan9": [
            "azure_common_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:solaris": [
            "azure_common_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:windows": [
            "azure_common_windows.go",
        ],
        "//conditions:default": [],
    }),
    importpath = "k8s.io/kubernetes/pkg/volume/azure_dd",
    deps = [
        "//pkg/apis/core:go_default_library",
        "//pkg/cloudprovider:go_default_library",
        "//pkg/cloudprovider/providers/azure:go_default_library",
        "//pkg/features:go_default_library",
        "//pkg/util/keymutex:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/util/strings:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//pkg/volume/util/volumepathhandler:go_default_library",
        "//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute:go_default_library",
        "//vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)

go_test(
    name = "go_default_test",
    srcs = [
        "azure_common_test.go",
        "azure_dd_block_test.go",
        "azure_dd_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//pkg/util/mount:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/testing:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/client-go/util/testing:go_default_library",
    ],
)
18
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/OWNERS
generated
vendored
@ -1,18 +0,0 @@
approvers:
- andyzhangx
- brendandburns
- feiskyer
- karataliu
- khenidak
- rootfs
reviewers:
- andyzhangx
- brendandburns
- feiskyer
- jingxu97
- jsafrane
- msau42
- karataliu
- khenidak
- rootfs
- saad-ali
309
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/attacher.go
generated
vendored
@ -1,309 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure_dd

import (
    "context"
    "fmt"
    "os"
    "path/filepath"
    "runtime"
    "strconv"
    "time"

    "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
    "github.com/golang/glog"

    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/kubernetes/pkg/cloudprovider"
    "k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
    "k8s.io/kubernetes/pkg/util/keymutex"
    "k8s.io/kubernetes/pkg/util/mount"
    "k8s.io/kubernetes/pkg/volume"
    "k8s.io/kubernetes/pkg/volume/util"
)
type azureDiskDetacher struct {
    plugin *azureDataDiskPlugin
    cloud  *azure.Cloud
}

type azureDiskAttacher struct {
    plugin *azureDataDiskPlugin
    cloud  *azure.Cloud
}

var _ volume.Attacher = &azureDiskAttacher{}
var _ volume.Detacher = &azureDiskDetacher{}

// acquire a per-instance lock while picking a LUN number
var getLunMutex = keymutex.NewKeyMutex()

// Attach attaches a volume.Spec to an Azure VM referenced by NodeName, returning the disk's LUN
func (a *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
    volumeSource, _, err := getVolumeSource(spec)
    if err != nil {
        glog.Warningf("failed to get azure disk spec (%v)", err)
        return "", err
    }

    instanceid, err := a.cloud.InstanceID(context.TODO(), nodeName)
    if err != nil {
        glog.Warningf("failed to get azure instance id (%v)", err)
        return "", fmt.Errorf("failed to get azure instance id for node %q (%v)", nodeName, err)
    }

    diskController, err := getDiskController(a.plugin.host)
    if err != nil {
        return "", err
    }

    lun, err := diskController.GetDiskLun(volumeSource.DiskName, volumeSource.DataDiskURI, nodeName)
    if err == cloudprovider.InstanceNotFound {
        // Log error and continue with attach
        glog.Warningf(
            "Error checking if volume is already attached to current node (%q). Will continue and try attach anyway. err=%v",
            instanceid, err)
    }

    if err == nil {
        // Volume is already attached to node.
        glog.V(4).Infof("Attach operation is successful. volume %q is already attached to node %q at lun %d.", volumeSource.DiskName, instanceid, lun)
    } else {
        glog.V(4).Infof("GetDiskLun returned: %v. Initiating attaching volume %q to node %q.", err, volumeSource.DataDiskURI, nodeName)
        getLunMutex.LockKey(instanceid)
        defer getLunMutex.UnlockKey(instanceid)

        lun, err = diskController.GetNextDiskLun(nodeName)
        if err != nil {
            glog.Warningf("no LUN available for instance %q (%v)", nodeName, err)
            return "", fmt.Errorf("all LUNs are used, cannot attach volume %q to instance %q (%v)", volumeSource.DiskName, instanceid, err)
        }
        glog.V(4).Infof("Trying to attach volume %q lun %d to node %q.", volumeSource.DataDiskURI, lun, nodeName)
        isManagedDisk := (*volumeSource.Kind == v1.AzureManagedDisk)
        err = diskController.AttachDisk(isManagedDisk, volumeSource.DiskName, volumeSource.DataDiskURI, nodeName, lun, compute.CachingTypes(*volumeSource.CachingMode))
        if err == nil {
            glog.V(4).Infof("Attach operation successful: volume %q attached to node %q.", volumeSource.DataDiskURI, nodeName)
        } else {
            glog.V(2).Infof("Attach volume %q to instance %q failed with %v", volumeSource.DataDiskURI, instanceid, err)
            return "", fmt.Errorf("Attach volume %q to instance %q failed with %v", volumeSource.DiskName, instanceid, err)
        }
    }

    return strconv.Itoa(int(lun)), err
}
func (a *azureDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
    volumesAttachedCheck := make(map[*volume.Spec]bool)
    volumeSpecMap := make(map[string]*volume.Spec)
    volumeIDList := []string{}
    for _, spec := range specs {
        volumeSource, _, err := getVolumeSource(spec)
        if err != nil {
            glog.Errorf("azureDisk - Error getting volume (%q) source : %v", spec.Name(), err)
            continue
        }

        volumeIDList = append(volumeIDList, volumeSource.DiskName)
        volumesAttachedCheck[spec] = true
        volumeSpecMap[volumeSource.DiskName] = spec
    }

    diskController, err := getDiskController(a.plugin.host)
    if err != nil {
        return nil, err
    }
    attachedResult, err := diskController.DisksAreAttached(volumeIDList, nodeName)
    if err != nil {
        // Log error and continue with attach
        glog.Errorf(
            "azureDisk - Error checking if volumes (%v) are attached to current node (%q). err=%v",
            volumeIDList, nodeName, err)
        return volumesAttachedCheck, err
    }

    for volumeID, attached := range attachedResult {
        if !attached {
            spec := volumeSpecMap[volumeID]
            volumesAttachedCheck[spec] = false
            glog.V(2).Infof("azureDisk - VolumesAreAttached: check volume %q (specName: %q) is no longer attached", volumeID, spec.Name())
        }
    }
    return volumesAttachedCheck, nil
}

func (a *azureDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) {
    var err error

    volumeSource, _, err := getVolumeSource(spec)
    if err != nil {
        return "", err
    }

    diskController, err := getDiskController(a.plugin.host)
    if err != nil {
        return "", err
    }

    nodeName := types.NodeName(a.plugin.host.GetHostName())
    diskName := volumeSource.DiskName

    glog.V(5).Infof("azureDisk - WaitForAttach: begin to GetDiskLun by diskName(%s), DataDiskURI(%s), nodeName(%s), devicePath(%s)",
        diskName, volumeSource.DataDiskURI, nodeName, devicePath)
    lun, err := diskController.GetDiskLun(diskName, volumeSource.DataDiskURI, nodeName)
    if err != nil {
        return "", err
    }
    glog.V(5).Infof("azureDisk - WaitForAttach: GetDiskLun succeeded, got lun(%v)", lun)
    exec := a.plugin.host.GetExec(a.plugin.GetPluginName())

    io := &osIOHandler{}
    scsiHostRescan(io, exec)

    newDevicePath := ""

    err = wait.Poll(1*time.Second, timeout, func() (bool, error) {
        if newDevicePath, err = findDiskByLun(int(lun), io, exec); err != nil {
            return false, fmt.Errorf("azureDisk - WaitForAttach ticker failed node (%s) disk (%s) lun(%v) err(%s)", nodeName, diskName, lun, err)
        }

        // did we find it?
        if newDevicePath != "" {
            return true, nil
        }

        return false, fmt.Errorf("azureDisk - WaitForAttach failed within timeout node (%s) diskId:(%s) lun:(%v)", nodeName, diskName, lun)
    })

    return newDevicePath, err
}
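// Note on the polling above: scsiHostRescan runs once before wait.Poll starts,
// and each one-second probe then calls findDiskByLun to map the LUN back to a
// device path (e.g. a hypothetical /dev/sdc). Because wait.Poll stops as soon
// as the condition func returns a non-nil error, a probe that cannot yet find
// the device ends the wait with that error rather than retrying until the
// timeout.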
// To avoid name conflicts (similar *.vhd names) we hash the diskURI and use
// the hash as the device mount target. This is generalized for both managed
// and blob disks; we also prefix the hash with "m" or "b" based on disk kind.
func (a *azureDiskAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) {
    volumeSource, _, err := getVolumeSource(spec)
    if err != nil {
        return "", err
    }

    if volumeSource.Kind == nil { // this spec was constructed from info on the node
        pdPath := filepath.Join(a.plugin.host.GetPluginDir(azureDataDiskPluginName), mount.MountsInGlobalPDPath, volumeSource.DataDiskURI)
        return pdPath, nil
    }

    isManagedDisk := (*volumeSource.Kind == v1.AzureManagedDisk)
    return makeGlobalPDPath(a.plugin.host, volumeSource.DataDiskURI, isManagedDisk)
}
func (attacher *azureDiskAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
    mounter := attacher.plugin.host.GetMounter(azureDataDiskPluginName)
    notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)

    if err != nil {
        if os.IsNotExist(err) {
            dir := deviceMountPath
            if runtime.GOOS == "windows" {
                // in windows, as we use mklink, only need to MkdirAll for parent directory
                dir = filepath.Dir(deviceMountPath)
            }
            if err := os.MkdirAll(dir, 0750); err != nil {
                return fmt.Errorf("azureDisk - mountDevice:CreateDirectory failed with %s", err)
            }
            notMnt = true
        } else {
            return fmt.Errorf("azureDisk - mountDevice:IsLikelyNotMountPoint failed with %s", err)
        }
    }

    if !notMnt {
        // testing original mount point, make sure the mount link is valid
        if _, err := (&osIOHandler{}).ReadDir(deviceMountPath); err != nil {
            // mount link is invalid, now unmount and remount later
            glog.Warningf("azureDisk - ReadDir %s failed with %v, unmount this directory", deviceMountPath, err)
            if err := mounter.Unmount(deviceMountPath); err != nil {
                glog.Errorf("azureDisk - Unmount deviceMountPath %s failed with %v", deviceMountPath, err)
                return err
            }
            notMnt = true
        }
    }

    volumeSource, _, err := getVolumeSource(spec)
    if err != nil {
        return err
    }

    options := []string{}
    if notMnt {
        diskMounter := util.NewSafeFormatAndMountFromHost(azureDataDiskPluginName, attacher.plugin.host)
        mountOptions := util.MountOptionFromSpec(spec, options...)
        err = diskMounter.FormatAndMount(devicePath, deviceMountPath, *volumeSource.FSType, mountOptions)
        if err != nil {
            if cleanErr := os.Remove(deviceMountPath); cleanErr != nil {
                return fmt.Errorf("azureDisk - mountDevice:FormatAndMount failed with %s and clean up failed with :%v", err, cleanErr)
            }
            return fmt.Errorf("azureDisk - mountDevice:FormatAndMount failed with %s", err)
        }
    }
    return nil
}
// Detach detaches disk from Azure VM.
func (d *azureDiskDetacher) Detach(diskURI string, nodeName types.NodeName) error {
    if diskURI == "" {
        return fmt.Errorf("invalid disk to detach: %q", diskURI)
    }

    instanceid, err := d.cloud.InstanceID(context.TODO(), nodeName)
    if err != nil {
        glog.Warningf("no instance id for node %q, skip detaching (%v)", nodeName, err)
        return nil
    }

    glog.V(4).Infof("detach %v from node %q", diskURI, nodeName)

    diskController, err := getDiskController(d.plugin.host)
    if err != nil {
        return err
    }

    getLunMutex.LockKey(instanceid)
    defer getLunMutex.UnlockKey(instanceid)

    err = diskController.DetachDiskByName("", diskURI, nodeName)
    if err != nil {
        glog.Errorf("failed to detach azure disk %q, err %v", diskURI, err)
    }

    glog.V(2).Infof("azureDisk - disk:%s was detached from node:%v", diskURI, nodeName)
    return err
}

// UnmountDevice unmounts the volume on the node
func (detacher *azureDiskDetacher) UnmountDevice(deviceMountPath string) error {
    err := util.UnmountPath(deviceMountPath, detacher.plugin.host.GetMounter(detacher.plugin.GetPluginName()))
    if err == nil {
        glog.V(4).Infof("azureDisk - Device %s was unmounted", deviceMountPath)
    } else {
        glog.Infof("azureDisk - Device %s failed to unmount with error: %s", deviceMountPath, err.Error())
    }
    return err
}
201
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common.go
generated
vendored
@ -1,201 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure_dd

import (
    "fmt"
    "io/ioutil"
    "os"
    "path/filepath"
    libstrings "strings"

    "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"

    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/sets"
    api "k8s.io/kubernetes/pkg/apis/core"
    "k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
    "k8s.io/kubernetes/pkg/util/mount"
    "k8s.io/kubernetes/pkg/util/strings"
    "k8s.io/kubernetes/pkg/volume"
)
const (
    defaultStorageAccountType       = storage.StandardLRS
    defaultAzureDiskKind            = v1.AzureSharedBlobDisk
    defaultAzureDataDiskCachingMode = v1.AzureDataDiskCachingNone
)

type dataDisk struct {
    volume.MetricsProvider
    volumeName string
    diskName   string
    podUID     types.UID
    plugin     *azureDataDiskPlugin
}

var (
    supportedCachingModes = sets.NewString(
        string(api.AzureDataDiskCachingNone),
        string(api.AzureDataDiskCachingReadOnly),
        string(api.AzureDataDiskCachingReadWrite))

    supportedDiskKinds = sets.NewString(
        string(api.AzureSharedBlobDisk),
        string(api.AzureDedicatedBlobDisk),
        string(api.AzureManagedDisk))

    supportedStorageAccountTypes = sets.NewString("Premium_LRS", "Standard_LRS", "Standard_GRS", "Standard_RAGRS")
)

func getPath(uid types.UID, volName string, host volume.VolumeHost) string {
    return host.GetPodVolumeDir(uid, strings.EscapeQualifiedNameForDisk(azureDataDiskPluginName), volName)
}

// creates a unique path for disks (even if they share the same *.vhd name)
func makeGlobalPDPath(host volume.VolumeHost, diskUri string, isManaged bool) (string, error) {
    diskUri = libstrings.ToLower(diskUri) // always lower uri because users may enter it in caps.
    uniqueDiskNameTemplate := "%s%s"
    hashedDiskUri := azure.MakeCRC32(diskUri)
    prefix := "b"
    if isManaged {
        prefix = "m"
    }
    // "{m for managed, b for blob}{hashed diskUri or DiskId depending on disk kind}"
    diskName := fmt.Sprintf(uniqueDiskNameTemplate, prefix, hashedDiskUri)
    pdPath := filepath.Join(host.GetPluginDir(azureDataDiskPluginName), mount.MountsInGlobalPDPath, diskName)

    return pdPath, nil
}
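// For illustration (hypothetical values): a managed disk whose lowercased URI
// hashes to "3216874219" via azure.MakeCRC32 would mount under roughly
//
//     {pluginDir}/kubernetes.io/azure-disk/mounts/m3216874219
//
// while an otherwise identical blob disk gets the "b" prefix, so the two kinds
// can never collide even when their *.vhd names match.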
func makeDataDisk(volumeName string, podUID types.UID, diskName string, host volume.VolumeHost, plugin *azureDataDiskPlugin) *dataDisk {
    var metricProvider volume.MetricsProvider
    if podUID != "" {
        metricProvider = volume.NewMetricsStatFS(getPath(podUID, volumeName, host))
    }

    return &dataDisk{
        MetricsProvider: metricProvider,
        volumeName:      volumeName,
        diskName:        diskName,
        podUID:          podUID,
        plugin:          plugin,
    }
}

func getVolumeSource(spec *volume.Spec) (volumeSource *v1.AzureDiskVolumeSource, readOnly bool, err error) {
    if spec.Volume != nil && spec.Volume.AzureDisk != nil {
        return spec.Volume.AzureDisk, spec.Volume.AzureDisk.ReadOnly != nil && *spec.Volume.AzureDisk.ReadOnly, nil
    }

    if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AzureDisk != nil {
        return spec.PersistentVolume.Spec.AzureDisk, spec.ReadOnly, nil
    }

    return nil, false, fmt.Errorf("azureDisk - Spec does not reference an Azure disk volume type")
}

func normalizeKind(kind string) (v1.AzureDataDiskKind, error) {
    if kind == "" {
        return defaultAzureDiskKind, nil
    }

    if !supportedDiskKinds.Has(kind) {
        return "", fmt.Errorf("azureDisk - %s is not supported disk kind. Supported values are %s", kind, supportedDiskKinds.List())
    }

    return v1.AzureDataDiskKind(kind), nil
}

func normalizeStorageAccountType(storageAccountType string) (storage.SkuName, error) {
    if storageAccountType == "" {
        return defaultStorageAccountType, nil
    }

    if !supportedStorageAccountTypes.Has(storageAccountType) {
        return "", fmt.Errorf("azureDisk - %s is not supported sku/storageaccounttype. Supported values are %s", storageAccountType, supportedStorageAccountTypes.List())
    }

    return storage.SkuName(storageAccountType), nil
}

func normalizeCachingMode(cachingMode v1.AzureDataDiskCachingMode) (v1.AzureDataDiskCachingMode, error) {
    if cachingMode == "" {
        return defaultAzureDataDiskCachingMode, nil
    }

    if !supportedCachingModes.Has(string(cachingMode)) {
        return "", fmt.Errorf("azureDisk - %s is not supported cachingmode. Supported values are %s", cachingMode, supportedCachingModes.List())
    }

    return cachingMode, nil
}

type ioHandler interface {
    ReadDir(dirname string) ([]os.FileInfo, error)
    WriteFile(filename string, data []byte, perm os.FileMode) error
    Readlink(name string) (string, error)
    ReadFile(filename string) ([]byte, error)
}

// TODO: check if priming the iscsi interface is actually needed

type osIOHandler struct{}

func (handler *osIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) {
    return ioutil.ReadDir(dirname)
}

func (handler *osIOHandler) WriteFile(filename string, data []byte, perm os.FileMode) error {
    return ioutil.WriteFile(filename, data, perm)
}

func (handler *osIOHandler) Readlink(name string) (string, error) {
    return os.Readlink(name)
}

func (handler *osIOHandler) ReadFile(filename string) ([]byte, error) {
    return ioutil.ReadFile(filename)
}

func getDiskController(host volume.VolumeHost) (DiskController, error) {
    cloudProvider := host.GetCloudProvider()
    az, ok := cloudProvider.(*azure.Cloud)

    if !ok || az == nil {
        return nil, fmt.Errorf("AzureDisk - failed to get Azure Cloud Provider. GetCloudProvider returned %v instead", cloudProvider)
    }
    return az, nil
}

func getCloud(host volume.VolumeHost) (*azure.Cloud, error) {
    cloudProvider := host.GetCloudProvider()
    az, ok := cloudProvider.(*azure.Cloud)

    if !ok || az == nil {
        return nil, fmt.Errorf("AzureDisk - failed to get Azure Cloud Provider. GetCloudProvider returned %v instead", cloudProvider)
    }
    return az, nil
}

func strFirstLetterToUpper(str string) string {
    if len(str) < 2 {
        return str
    }
    return libstrings.ToUpper(string(str[0])) + str[1:]
}
180
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common_linux.go
generated
vendored
@ -1,180 +0,0 @@
// +build linux

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure_dd

import (
    "fmt"
    "path/filepath"
    "strconv"
    libstrings "strings"

    "github.com/golang/glog"
    "k8s.io/kubernetes/pkg/util/mount"
)
// exclude those used by azure as resource and OS root in /dev/disk/azure
|
||||
func listAzureDiskPath(io ioHandler) []string {
|
||||
azureDiskPath := "/dev/disk/azure/"
|
||||
var azureDiskList []string
|
||||
if dirs, err := io.ReadDir(azureDiskPath); err == nil {
|
||||
for _, f := range dirs {
|
||||
name := f.Name()
|
||||
diskPath := azureDiskPath + name
|
||||
if link, linkErr := io.Readlink(diskPath); linkErr == nil {
|
||||
sd := link[(libstrings.LastIndex(link, "/") + 1):]
|
||||
azureDiskList = append(azureDiskList, sd)
|
||||
}
|
||||
}
|
||||
}
|
||||
glog.V(12).Infof("Azure sys disks paths: %v", azureDiskList)
|
||||
return azureDiskList
|
||||
}
|
||||
|
||||
// getDiskLinkByDevName get disk link by device name from devLinkPath, e.g. /dev/disk/azure/, /dev/disk/by-id/
|
||||
func getDiskLinkByDevName(io ioHandler, devLinkPath, devName string) (string, error) {
|
||||
dirs, err := io.ReadDir(devLinkPath)
|
||||
glog.V(12).Infof("azureDisk - begin to find %s from %s", devName, devLinkPath)
|
||||
if err == nil {
|
||||
for _, f := range dirs {
|
||||
diskPath := devLinkPath + f.Name()
|
||||
glog.V(12).Infof("azureDisk - begin to Readlink: %s", diskPath)
|
||||
link, linkErr := io.Readlink(diskPath)
|
||||
if linkErr != nil {
|
||||
glog.Warningf("azureDisk - read link (%s) error: %v", diskPath, linkErr)
|
||||
continue
|
||||
}
|
||||
if libstrings.HasSuffix(link, devName) {
|
||||
return diskPath, nil
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("device name(%s) is not found under %s", devName, devLinkPath)
|
||||
}
|
||||
return "", fmt.Errorf("read %s error: %v", devLinkPath, err)
|
||||
}
|
||||
|
||||
func scsiHostRescan(io ioHandler, exec mount.Exec) {
|
||||
scsi_path := "/sys/class/scsi_host/"
|
||||
if dirs, err := io.ReadDir(scsi_path); err == nil {
|
||||
for _, f := range dirs {
|
||||
name := scsi_path + f.Name() + "/scan"
|
||||
data := []byte("- - -")
|
||||
if err = io.WriteFile(name, data, 0666); err != nil {
|
||||
glog.Warningf("failed to rescan scsi host %s", name)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
glog.Warningf("failed to read %s, err %v", scsi_path, err)
|
||||
}
|
||||
}
|
||||
|
||||
func findDiskByLun(lun int, io ioHandler, exec mount.Exec) (string, error) {
|
||||
azureDisks := listAzureDiskPath(io)
|
||||
return findDiskByLunWithConstraint(lun, io, azureDisks)
|
||||
}
|
||||
|
||||
// finds a device mounted to "current" node
|
||||
func findDiskByLunWithConstraint(lun int, io ioHandler, azureDisks []string) (string, error) {
|
||||
var err error
|
||||
sys_path := "/sys/bus/scsi/devices"
|
||||
if dirs, err := io.ReadDir(sys_path); err == nil {
|
||||
for _, f := range dirs {
|
||||
name := f.Name()
|
||||
// look for path like /sys/bus/scsi/devices/3:0:0:1
|
||||
arr := libstrings.Split(name, ":")
|
||||
if len(arr) < 4 {
|
||||
continue
|
||||
}
|
||||
if len(azureDisks) == 0 {
|
||||
glog.V(4).Infof("/dev/disk/azure is not populated, now try to parse %v directly", name)
|
||||
target, err := strconv.Atoi(arr[0])
|
||||
if err != nil {
|
||||
glog.Errorf("failed to parse target from %v (%v), err %v", arr[0], name, err)
|
||||
continue
|
||||
}
|
||||
// as observed, targets 0-3 are used by OS disks. Skip them
|
||||
if target <= 3 {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// extract LUN from the path.
|
||||
// LUN is the last index of the array, i.e. 1 in /sys/bus/scsi/devices/3:0:0:1
|
||||
l, err := strconv.Atoi(arr[3])
|
||||
if err != nil {
|
||||
// unknown path format, continue to read the next one
|
||||
glog.V(4).Infof("azure disk - failed to parse lun from %v (%v), err %v", arr[3], name, err)
|
||||
continue
|
||||
}
|
||||
if lun == l {
|
||||
// find the matching LUN
|
||||
// read vendor and model to ensure it is a VHD disk
|
||||
vendorPath := filepath.Join(sys_path, name, "vendor")
|
||||
vendorBytes, err := io.ReadFile(vendorPath)
|
||||
if err != nil {
|
||||
glog.Errorf("failed to read device vendor, err: %v", err)
|
||||
continue
|
||||
}
|
||||
vendor := libstrings.TrimSpace(string(vendorBytes))
|
||||
if libstrings.ToUpper(vendor) != "MSFT" {
|
||||
glog.V(4).Infof("vendor doesn't match VHD, got %s", vendor)
|
||||
continue
|
||||
}
|
||||
|
||||
modelPath := filepath.Join(sys_path, name, "model")
|
||||
modelBytes, err := io.ReadFile(modelPath)
|
||||
if err != nil {
|
||||
glog.Errorf("failed to read device model, err: %v", err)
|
||||
continue
|
||||
}
|
||||
model := libstrings.TrimSpace(string(modelBytes))
|
||||
if libstrings.ToUpper(model) != "VIRTUAL DISK" {
|
||||
glog.V(4).Infof("model doesn't match VHD, got %s", model)
|
||||
continue
|
||||
}
|
||||
|
||||
// find a disk, validate name
|
||||
dir := filepath.Join(sys_path, name, "block")
|
||||
if dev, err := io.ReadDir(dir); err == nil {
|
||||
found := false
|
||||
devName := dev[0].Name()
|
||||
for _, diskName := range azureDisks {
|
||||
glog.V(12).Infof("azureDisk - validating disk %q with sys disk %q", devName, diskName)
|
||||
if devName == diskName {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
devLinkPaths := []string{"/dev/disk/azure/scsi1/", "/dev/disk/by-id/"}
|
||||
for _, devLinkPath := range devLinkPaths {
|
||||
diskPath, err := getDiskLinkByDevName(io, devLinkPath, devName)
|
||||
if err == nil {
|
||||
glog.V(4).Infof("azureDisk - found %s by %s under %s", diskPath, devName, devLinkPath)
|
||||
return diskPath, nil
|
||||
}
|
||||
glog.Warningf("azureDisk - getDiskLinkByDevName by %s under %s failed, error: %v", devName, devLinkPath, err)
|
||||
}
|
||||
return "/dev/" + devName, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return "", err
|
||||
}
|
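A note on the sysfs parsing above: entries under /sys/bus/scsi/devices are named host:channel:target:lun, so the LUN is the fourth colon-separated field. A self-contained sketch of just that extraction, illustrative and not part of the vendored code:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// A SCSI address has the form host:channel:target:lun.
	addr := "3:0:0:1"
	parts := strings.Split(addr, ":")
	if len(parts) == 4 {
		if lun, err := strconv.Atoi(parts[3]); err == nil {
			fmt.Println("LUN:", lun) // prints: LUN: 1
		}
	}
}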
136
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common_test.go
generated
vendored
@ -1,136 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure_dd

import (
	"fmt"
	"os"
	"runtime"
	"strings"
	"testing"
	"time"

	"k8s.io/kubernetes/pkg/util/mount"
)

type fakeFileInfo struct {
	name string
}

func (fi *fakeFileInfo) Name() string {
	return fi.name
}

func (fi *fakeFileInfo) Size() int64 {
	return 0
}

func (fi *fakeFileInfo) Mode() os.FileMode {
	return 777
}

func (fi *fakeFileInfo) ModTime() time.Time {
	return time.Now()
}

func (fi *fakeFileInfo) IsDir() bool {
	return false
}

func (fi *fakeFileInfo) Sys() interface{} {
	return nil
}

var (
	lun       = 1
	lunStr    = "1"
	diskPath  = "4:0:0:" + lunStr
	devName   = "sdd"
	lun1      = 2
	lunStr1   = "2"
	diskPath1 = "3:0:0:" + lunStr1
	devName1  = "sde"
)

type fakeIOHandler struct{}

func (handler *fakeIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) {
	switch dirname {
	case "/sys/bus/scsi/devices":
		f1 := &fakeFileInfo{
			name: "3:0:0:1",
		}
		f2 := &fakeFileInfo{
			name: "4:0:0:0",
		}
		f3 := &fakeFileInfo{
			name: diskPath,
		}
		f4 := &fakeFileInfo{
			name: "host1",
		}
		f5 := &fakeFileInfo{
			name: "target2:0:0",
		}
		return []os.FileInfo{f1, f2, f3, f4, f5}, nil
	case "/sys/bus/scsi/devices/" + diskPath + "/block":
		n := &fakeFileInfo{
			name: devName,
		}
		return []os.FileInfo{n}, nil
	case "/sys/bus/scsi/devices/" + diskPath1 + "/block":
		n := &fakeFileInfo{
			name: devName1,
		}
		return []os.FileInfo{n}, nil
	}
	return nil, fmt.Errorf("bad dir")
}

func (handler *fakeIOHandler) WriteFile(filename string, data []byte, perm os.FileMode) error {
	return nil
}

func (handler *fakeIOHandler) Readlink(name string) (string, error) {
	return "/dev/azure/disk/sda", nil
}

func (handler *fakeIOHandler) ReadFile(filename string) ([]byte, error) {
	if strings.HasSuffix(filename, "vendor") {
		return []byte("Msft \n"), nil
	}
	if strings.HasSuffix(filename, "model") {
		return []byte("Virtual Disk \n"), nil
	}
	return nil, fmt.Errorf("unknown file")
}

func TestIoHandler(t *testing.T) {
	if runtime.GOOS != "windows" && runtime.GOOS != "linux" {
		t.Skipf("TestIoHandler not supported on GOOS=%s", runtime.GOOS)
	}
	disk, err := findDiskByLun(lun, &fakeIOHandler{}, mount.NewOsExec())
	if runtime.GOOS == "windows" {
		if err != nil {
			t.Errorf("no data disk found: disk %v err %v", disk, err)
		}
	} else {
		// if no disk matches lun, exit
		if disk != "/dev/"+devName || err != nil {
			t.Errorf("no data disk found: disk %v err %v", disk, err)
		}
	}
}
28
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common_unsupported.go
generated
vendored
@ -1,28 +0,0 @@
// +build !linux,!windows

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure_dd

import "k8s.io/kubernetes/pkg/util/mount"

func scsiHostRescan(io ioHandler, exec mount.Exec) {
}

func findDiskByLun(lun int, io ioHandler, exec mount.Exec) (string, error) {
	return "", nil
}
118
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common_windows.go
generated
vendored
@ -1,118 +0,0 @@
// +build windows

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure_dd

import (
	"encoding/json"
	"fmt"
	"strconv"
	"strings"

	"github.com/golang/glog"

	"k8s.io/kubernetes/pkg/util/mount"
)

func scsiHostRescan(io ioHandler, exec mount.Exec) {
	cmd := "Update-HostStorageCache"
	output, err := exec.Run("powershell", "/c", cmd)
	if err != nil {
		glog.Errorf("Update-HostStorageCache failed in scsiHostRescan, error: %v, output: %q", err, string(output))
	}
}

// findDiskByLun searches for the Windows disk number by LUN
func findDiskByLun(lun int, iohandler ioHandler, exec mount.Exec) (string, error) {
	cmd := `Get-Disk | select number, location | ConvertTo-Json`
	output, err := exec.Run("powershell", "/c", cmd)
	if err != nil {
		glog.Errorf("Get-Disk failed in findDiskByLun, error: %v, output: %q", err, string(output))
		return "", err
	}

	if len(output) < 10 {
		return "", fmt.Errorf("Get-Disk output is too short, output: %q", string(output))
	}

	var data []map[string]interface{}
	if err = json.Unmarshal(output, &data); err != nil {
		glog.Errorf("Get-Disk output is not a json array, output: %q", string(output))
		return "", err
	}

	for _, v := range data {
		if jsonLocation, ok := v["location"]; ok {
			if location, ok := jsonLocation.(string); ok {
				if !strings.Contains(location, " LUN ") {
					continue
				}

				arr := strings.Split(location, " ")
				arrLen := len(arr)
				if arrLen < 3 {
					glog.Warningf("unexpected json structure from Get-Disk, location: %q", jsonLocation)
					continue
				}

				glog.V(4).Infof("found a disk, location: %q, lun: %q", location, arr[arrLen-1])
				// the last element of the location field is the LUN number, e.g.
				// "location": "Integrated : Adapter 3 : Port 0 : Target 0 : LUN 1"
				l, err := strconv.Atoi(arr[arrLen-1])
				if err != nil {
					glog.Warningf("cannot parse element from data structure, location: %q, element: %q", location, arr[arrLen-1])
					continue
				}

				if l == lun {
					glog.V(4).Infof("found a disk and lun, location: %q, lun: %d", location, lun)
					if d, ok := v["number"]; ok {
						if diskNum, ok := d.(float64); ok {
							glog.V(2).Infof("azureDisk Mount: got disk number(%d) by LUN(%d)", int(diskNum), lun)
							return strconv.Itoa(int(diskNum)), nil
						}
						glog.Warningf("LUN(%d) found, but could not get disk number(%q), location: %q", lun, d, location)
					}
					return "", fmt.Errorf("LUN(%d) found, but could not get disk number, location: %q", lun, location)
				}
			}
		}
	}

	return "", nil
}

func formatIfNotFormatted(disk string, fstype string, exec mount.Exec) {
	if err := mount.ValidateDiskNumber(disk); err != nil {
		glog.Errorf("azureDisk Mount: formatIfNotFormatted failed, err: %v\n", err)
		return
	}

	if len(fstype) == 0 {
		// Use 'NTFS' as the default
		fstype = "NTFS"
	}
	cmd := fmt.Sprintf("Get-Disk -Number %s | Where partitionstyle -eq 'raw' | Initialize-Disk -PartitionStyle MBR -PassThru", disk)
	cmd += fmt.Sprintf(" | New-Partition -AssignDriveLetter -UseMaximumSize | Format-Volume -FileSystem %s -Confirm:$false", fstype)
	output, err := exec.Run("powershell", "/c", cmd)
	if err != nil {
		glog.Errorf("azureDisk Mount: Get-Disk failed, error: %v, output: %q", err, string(output))
	} else {
		glog.Infof("azureDisk Mount: Disk successfully formatted, disk: %q, fstype: %q\n", disk, fstype)
	}
}
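One subtlety in findDiskByLun above: encoding/json decodes JSON numbers into float64 when the target is interface{}, which is why the disk number is asserted as float64 before being converted. A small self-contained demonstration with a hypothetical record:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var rec map[string]interface{}
	if err := json.Unmarshal([]byte(`{"number": 2}`), &rec); err != nil {
		panic(err)
	}
	// JSON numbers land as float64 behind interface{}, hence the
	// float64 type assertion used by the code above.
	if n, ok := rec["number"].(float64); ok {
		fmt.Println(int(n)) // prints: 2
	}
}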
271
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd.go
generated
vendored
@ -1,271 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure_dd

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
	"github.com/golang/glog"
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
	"k8s.io/kubernetes/pkg/util/mount"
	"k8s.io/kubernetes/pkg/volume"
	"k8s.io/kubernetes/pkg/volume/util"
)

// DiskController is the interface exposed by the cloud provider implementing Disk functionality
type DiskController interface {
	CreateBlobDisk(dataDiskName string, storageAccountType storage.SkuName, sizeGB int) (string, error)
	DeleteBlobDisk(diskUri string) error

	CreateManagedDisk(diskName string, storageAccountType storage.SkuName, resourceGroup string, sizeGB int, tags map[string]string) (string, error)
	DeleteManagedDisk(diskURI string) error

	// Attaches the disk to the host machine.
	AttachDisk(isManagedDisk bool, diskName, diskUri string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error
	// Detaches the disk, identified by disk name or uri, from the host machine.
	DetachDiskByName(diskName, diskUri string, nodeName types.NodeName) error

	// Check if a list of volumes are attached to the node with the specified NodeName
	DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error)

	// Get the LUN number of the disk that is attached to the host
	GetDiskLun(diskName, diskUri string, nodeName types.NodeName) (int32, error)
	// Get the next available LUN number to attach a new VHD
	GetNextDiskLun(nodeName types.NodeName) (int32, error)

	// Create a VHD blob
	CreateVolume(name, storageAccount, storageAccountType, location string, requestGB int) (string, string, int, error)
	// Delete a VHD blob
	DeleteVolume(diskURI string) error

	// Expand the disk to new size
	ResizeDisk(diskURI string, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error)
}

type azureDataDiskPlugin struct {
	host volume.VolumeHost
}

var _ volume.VolumePlugin = &azureDataDiskPlugin{}
var _ volume.PersistentVolumePlugin = &azureDataDiskPlugin{}
var _ volume.DeletableVolumePlugin = &azureDataDiskPlugin{}
var _ volume.ProvisionableVolumePlugin = &azureDataDiskPlugin{}
var _ volume.AttachableVolumePlugin = &azureDataDiskPlugin{}
var _ volume.VolumePluginWithAttachLimits = &azureDataDiskPlugin{}
var _ volume.ExpandableVolumePlugin = &azureDataDiskPlugin{}

const (
	azureDataDiskPluginName = "kubernetes.io/azure-disk"
)

func ProbeVolumePlugins() []volume.VolumePlugin {
	return []volume.VolumePlugin{&azureDataDiskPlugin{}}
}

func (plugin *azureDataDiskPlugin) Init(host volume.VolumeHost) error {
	plugin.host = host
	return nil
}

func (plugin *azureDataDiskPlugin) GetPluginName() string {
	return azureDataDiskPluginName
}

func (plugin *azureDataDiskPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
	volumeSource, _, err := getVolumeSource(spec)
	if err != nil {
		return "", err
	}

	return volumeSource.DataDiskURI, nil
}

func (plugin *azureDataDiskPlugin) CanSupport(spec *volume.Spec) bool {
	return (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AzureDisk != nil) ||
		(spec.Volume != nil && spec.Volume.AzureDisk != nil)
}

func (plugin *azureDataDiskPlugin) RequiresRemount() bool {
	return false
}

func (plugin *azureDataDiskPlugin) SupportsMountOption() bool {
	return true
}

func (plugin *azureDataDiskPlugin) SupportsBulkVolumeVerification() bool {
	return false
}

func (plugin *azureDataDiskPlugin) GetVolumeLimits() (map[string]int64, error) {
	volumeLimits := map[string]int64{
		util.AzureVolumeLimitKey: 16,
	}

	cloud := plugin.host.GetCloudProvider()

	// If we can't fetch the cloudprovider we return an error, hoping an
	// external CCM or the admin can set the limit. Returning default values
	// from here would mean no one could override them.
	if cloud == nil {
		return nil, fmt.Errorf("No cloudprovider present")
	}

	if cloud.ProviderName() != azure.CloudProviderName {
		return nil, fmt.Errorf("Expected Azure cloudprovider, got %s", cloud.ProviderName())
	}

	return volumeLimits, nil
}

func (plugin *azureDataDiskPlugin) VolumeLimitKey(spec *volume.Spec) string {
	return util.AzureVolumeLimitKey
}

func (plugin *azureDataDiskPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
	return []v1.PersistentVolumeAccessMode{
		v1.ReadWriteOnce,
	}
}

// NewAttacher initializes an Attacher
func (plugin *azureDataDiskPlugin) NewAttacher() (volume.Attacher, error) {
	azure, err := getCloud(plugin.host)
	if err != nil {
		glog.Errorf("failed to get azure cloud in NewAttacher, plugin.host : %s, err:%v", plugin.host.GetHostName(), err)
		return nil, err
	}

	return &azureDiskAttacher{
		plugin: plugin,
		cloud:  azure,
	}, nil
}

func (plugin *azureDataDiskPlugin) NewDetacher() (volume.Detacher, error) {
	azure, err := getCloud(plugin.host)
	if err != nil {
		glog.V(4).Infof("failed to get azure cloud in NewDetacher, plugin.host : %s", plugin.host.GetHostName())
		return nil, err
	}

	return &azureDiskDetacher{
		plugin: plugin,
		cloud:  azure,
	}, nil
}

func (plugin *azureDataDiskPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
	volumeSource, _, err := getVolumeSource(spec)
	if err != nil {
		return nil, err
	}

	disk := makeDataDisk(spec.Name(), "", volumeSource.DiskName, plugin.host, plugin)

	return &azureDiskDeleter{
		spec:     spec,
		plugin:   plugin,
		dataDisk: disk,
	}, nil
}

func (plugin *azureDataDiskPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
	if len(options.PVC.Spec.AccessModes) == 0 {
		options.PVC.Spec.AccessModes = plugin.GetAccessModes()
	}

	return &azureDiskProvisioner{
		plugin:  plugin,
		options: options,
	}, nil
}

func (plugin *azureDataDiskPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, options volume.VolumeOptions) (volume.Mounter, error) {
	volumeSource, _, err := getVolumeSource(spec)
	if err != nil {
		return nil, err
	}
	disk := makeDataDisk(spec.Name(), pod.UID, volumeSource.DiskName, plugin.host, plugin)

	return &azureDiskMounter{
		plugin:   plugin,
		spec:     spec,
		options:  options,
		dataDisk: disk,
	}, nil
}

func (plugin *azureDataDiskPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
	disk := makeDataDisk(volName, podUID, "", plugin.host, plugin)

	return &azureDiskUnmounter{
		plugin:   plugin,
		dataDisk: disk,
	}, nil
}

func (plugin *azureDataDiskPlugin) RequiresFSResize() bool {
	return true
}

func (plugin *azureDataDiskPlugin) ExpandVolumeDevice(
	spec *volume.Spec,
	newSize resource.Quantity,
	oldSize resource.Quantity) (resource.Quantity, error) {
	if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.AzureDisk == nil {
		return oldSize, fmt.Errorf("invalid PV spec")
	}

	diskController, err := getDiskController(plugin.host)
	if err != nil {
		return oldSize, err
	}

	return diskController.ResizeDisk(spec.PersistentVolume.Spec.AzureDisk.DataDiskURI, oldSize, newSize)
}

func (plugin *azureDataDiskPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
	mounter := plugin.host.GetMounter(plugin.GetPluginName())
	pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName())
	sourceName, err := mounter.GetDeviceNameFromMount(mountPath, pluginDir)

	if err != nil {
		return nil, err
	}

	azureVolume := &v1.Volume{
		Name: volumeName,
		VolumeSource: v1.VolumeSource{
			AzureDisk: &v1.AzureDiskVolumeSource{
				DataDiskURI: sourceName,
			},
		},
	}
	return volume.NewSpecFromVolume(azureVolume), nil
}

func (plugin *azureDataDiskPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
	m := plugin.host.GetMounter(plugin.GetPluginName())
	return mount.GetMountRefs(m, deviceMountPath)
}
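The `var _ volume.X = &azureDataDiskPlugin{}` lines above are compile-time interface checks: the blank-identifier assignment costs nothing at runtime but breaks the build if the type ever stops satisfying one of the interfaces. A minimal standalone illustration of the idiom (all names here are invented for the example):

package main

type greeter interface{ greet() string }

type englishGreeter struct{}

func (englishGreeter) greet() string { return "hello" }

// Compile-time check: the build fails if englishGreeter
// stops implementing greeter.
var _ greeter = englishGreeter{}

func main() {}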
161
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd_block.go
generated
vendored
@ -1,161 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure_dd

import (
	"fmt"
	"path/filepath"

	"github.com/golang/glog"
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/util/mount"
	kstrings "k8s.io/kubernetes/pkg/util/strings"
	"k8s.io/kubernetes/pkg/volume"
	"k8s.io/kubernetes/pkg/volume/util"
	"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
)

var _ volume.VolumePlugin = &azureDataDiskPlugin{}
var _ volume.PersistentVolumePlugin = &azureDataDiskPlugin{}
var _ volume.BlockVolumePlugin = &azureDataDiskPlugin{}
var _ volume.DeletableVolumePlugin = &azureDataDiskPlugin{}
var _ volume.ProvisionableVolumePlugin = &azureDataDiskPlugin{}

func (plugin *azureDataDiskPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, mapPath string) (*volume.Spec, error) {
	pluginDir := plugin.host.GetVolumeDevicePluginDir(azureDataDiskPluginName)
	blkutil := volumepathhandler.NewBlockVolumePathHandler()
	globalMapPathUUID, err := blkutil.FindGlobalMapPathUUIDFromPod(pluginDir, mapPath, podUID)
	if err != nil {
		return nil, err
	}
	glog.V(5).Infof("constructing block volume spec from globalMapPathUUID: %s", globalMapPathUUID)

	globalMapPath := filepath.Dir(globalMapPathUUID)
	if len(globalMapPath) <= 1 {
		return nil, fmt.Errorf("failed to get volume plugin information from globalMapPathUUID: %v", globalMapPathUUID)
	}

	return getVolumeSpecFromGlobalMapPath(globalMapPath, volumeName)
}

func getVolumeSpecFromGlobalMapPath(globalMapPath, volumeName string) (*volume.Spec, error) {
	// Get volume spec information from globalMapPath
	// globalMapPath example:
	//   plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumeID}
	//   plugins/kubernetes.io/azure-disk/volumeDevices/vol-XXXXXX
	diskName := filepath.Base(globalMapPath)
	if len(diskName) <= 1 {
		return nil, fmt.Errorf("failed to get diskName from global path=%s", globalMapPath)
	}
	glog.V(5).Infof("got diskName(%s) from globalMapPath: %s", diskName, globalMapPath)
	block := v1.PersistentVolumeBlock
	pv := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name: volumeName,
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: v1.PersistentVolumeSource{
				AzureDisk: &v1.AzureDiskVolumeSource{
					DiskName: diskName,
				},
			},
			VolumeMode: &block,
		},
	}

	return volume.NewSpecFromPersistentVolume(pv, true), nil
}

// NewBlockVolumeMapper creates a new volume.BlockVolumeMapper from an API specification.
func (plugin *azureDataDiskPlugin) NewBlockVolumeMapper(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.BlockVolumeMapper, error) {
	// If this is called via GenerateUnmapDeviceFunc(), pod is nil.
	// Pass an empty string as a dummy uid since the uid isn't used in that case.
	var uid types.UID
	if pod != nil {
		uid = pod.UID
	}

	return plugin.newBlockVolumeMapperInternal(spec, uid, plugin.host.GetMounter(plugin.GetPluginName()))
}

func (plugin *azureDataDiskPlugin) newBlockVolumeMapperInternal(spec *volume.Spec, podUID types.UID, mounter mount.Interface) (volume.BlockVolumeMapper, error) {
	volumeSource, readOnly, err := getVolumeSource(spec)
	if err != nil {
		return nil, err
	}

	disk := makeDataDisk(spec.Name(), podUID, volumeSource.DiskName, plugin.host, plugin)

	return &azureDataDiskMapper{
		dataDisk: disk,
		readOnly: readOnly,
	}, nil
}

func (plugin *azureDataDiskPlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (volume.BlockVolumeUnmapper, error) {
	return plugin.newUnmapperInternal(volName, podUID, plugin.host.GetMounter(plugin.GetPluginName()))
}

func (plugin *azureDataDiskPlugin) newUnmapperInternal(volName string, podUID types.UID, mounter mount.Interface) (volume.BlockVolumeUnmapper, error) {
	disk := makeDataDisk(volName, podUID, "", plugin.host, plugin)
	return &azureDataDiskUnmapper{dataDisk: disk}, nil
}

func (c *azureDataDiskUnmapper) TearDownDevice(mapPath, devicePath string) error {
	return nil
}

type azureDataDiskUnmapper struct {
	*dataDisk
}

var _ volume.BlockVolumeUnmapper = &azureDataDiskUnmapper{}

type azureDataDiskMapper struct {
	*dataDisk
	readOnly bool
}

var _ volume.BlockVolumeMapper = &azureDataDiskMapper{}

func (b *azureDataDiskMapper) SetUpDevice() (string, error) {
	return "", nil
}

func (b *azureDataDiskMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, volumeMapName string, podUID types.UID) error {
	return util.MapBlockVolume(devicePath, globalMapPath, volumeMapPath, volumeMapName, podUID)
}

// GetGlobalMapPath returns the global map path and error.
// path: plugins/kubernetes.io/{PluginName}/volumeDevices/volumeID
//       plugins/kubernetes.io/azure-disk/volumeDevices/vol-XXXXXX
func (disk *dataDisk) GetGlobalMapPath(spec *volume.Spec) (string, error) {
	volumeSource, _, err := getVolumeSource(spec)
	if err != nil {
		return "", err
	}
	return filepath.Join(disk.plugin.host.GetVolumeDevicePluginDir(azureDataDiskPluginName), string(volumeSource.DiskName)), nil
}

// GetPodDeviceMapPath returns the pod device map path and volume name.
// path: pods/{podUid}/volumeDevices/kubernetes.io~azure-disk
func (disk *dataDisk) GetPodDeviceMapPath() (string, string) {
	name := azureDataDiskPluginName
	return disk.plugin.host.GetPodVolumeDeviceDir(disk.podUID, kstrings.EscapeQualifiedNameForDisk(name)), disk.volumeName
}
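The block-device spec reconstruction above leans entirely on path layout: the disk name is simply the last path element of the global map path. A standalone sketch of that derivation, using the sample path from the comments in the code:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Mirrors getVolumeSpecFromGlobalMapPath: filepath.Base recovers
	// the volume ID from a global map path.
	globalMapPath := "plugins/kubernetes.io/azure-disk/volumeDevices/vol-XXXXXX"
	fmt.Println(filepath.Base(globalMapPath)) // prints: vol-XXXXXX
}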
145
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd_block_test.go
generated
vendored
@ -1,145 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure_dd

import (
	"os"
	"path/filepath"
	"testing"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	utiltesting "k8s.io/client-go/util/testing"
	"k8s.io/kubernetes/pkg/volume"
	volumetest "k8s.io/kubernetes/pkg/volume/testing"
)

const (
	testDiskName   = "disk1"
	testPVName     = "pv1"
	testGlobalPath = "plugins/kubernetes.io/azure-disk/volumeDevices/disk1"
	testPodPath    = "pods/poduid/volumeDevices/kubernetes.io~azure-disk"
)

func TestGetVolumeSpecFromGlobalMapPath(t *testing.T) {
	// make our test path for a fake GlobalMapPath;
	// /tmp stands in for the pluginDir:
	// /tmp/testGlobalPathXXXXX/plugins/kubernetes.io/azure-disk/volumeDevices/disk1
	tmpVDir, err := utiltesting.MkTmpdir("azureDiskBlockTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	// deferred clean up
	defer os.RemoveAll(tmpVDir)

	expectedGlobalPath := filepath.Join(tmpVDir, testGlobalPath)

	// Bad Path
	badspec, err := getVolumeSpecFromGlobalMapPath("", "")
	if badspec != nil || err == nil {
		t.Errorf("Expected not to get spec from GlobalMapPath but did")
	}

	// Good Path
	spec, err := getVolumeSpecFromGlobalMapPath(expectedGlobalPath, "")
	if spec == nil || err != nil {
		t.Fatalf("Failed to get spec from GlobalMapPath: %v", err)
	}
	if spec.PersistentVolume.Spec.AzureDisk.DiskName != testDiskName {
		t.Errorf("Invalid pdName from GlobalMapPath spec: %s", spec.PersistentVolume.Spec.AzureDisk.DiskName)
	}
	block := v1.PersistentVolumeBlock
	specMode := spec.PersistentVolume.Spec.VolumeMode
	if specMode == nil {
		t.Fatalf("Invalid volumeMode from GlobalMapPath spec: %v expected: %v", specMode, block)
	}
	if *specMode != block {
		t.Errorf("Invalid volumeMode from GlobalMapPath spec: %v expected: %v", *specMode, block)
	}
}

func getTestVolume(readOnly bool, path string, isBlock bool) *volume.Spec {
	pv := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name: testPVName,
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: v1.PersistentVolumeSource{
				AzureDisk: &v1.AzureDiskVolumeSource{
					DiskName: testDiskName,
				},
			},
		},
	}

	if isBlock {
		blockMode := v1.PersistentVolumeBlock
		pv.Spec.VolumeMode = &blockMode
	}
	return volume.NewSpecFromPersistentVolume(pv, readOnly)
}

func TestGetPodAndPluginMapPaths(t *testing.T) {
	tmpVDir, err := utiltesting.MkTmpdir("azureDiskBlockTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	// deferred clean up
	defer os.RemoveAll(tmpVDir)

	expectedGlobalPath := filepath.Join(tmpVDir, testGlobalPath)
	expectedPodPath := filepath.Join(tmpVDir, testPodPath)

	spec := getTestVolume(false, tmpVDir, true /*isBlock*/)
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpVDir, nil, nil))
	plug, err := plugMgr.FindMapperPluginByName(azureDataDiskPluginName)
	if err != nil {
		os.RemoveAll(tmpVDir)
		t.Fatalf("Can't find the plugin by name: %q", azureDataDiskPluginName)
	}
	if plug.GetPluginName() != azureDataDiskPluginName {
		t.Fatalf("Wrong name: %s", plug.GetPluginName())
	}
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
	mapper, err := plug.NewBlockVolumeMapper(spec, pod, volume.VolumeOptions{})
	if err != nil {
		t.Fatalf("Failed to make a new Mounter: %v", err)
	}
	if mapper == nil {
		t.Fatalf("Got a nil Mounter")
	}

	// GetGlobalMapPath
	gMapPath, err := mapper.GetGlobalMapPath(spec)
	if err != nil || len(gMapPath) == 0 {
		t.Fatalf("Invalid GlobalMapPath from spec: %s, error: %v", spec.PersistentVolume.Spec.AzureDisk.DiskName, err)
	}
	if gMapPath != expectedGlobalPath {
		t.Errorf("Failed to get GlobalMapPath: %s, expected %s", gMapPath, expectedGlobalPath)
	}

	// GetPodDeviceMapPath
	gDevicePath, gVolName := mapper.GetPodDeviceMapPath()
	if gDevicePath != expectedPodPath {
		t.Errorf("Got unexpected pod path: %s, expected %s", gDevicePath, expectedPodPath)
	}
	if gVolName != testPVName {
		t.Errorf("Got unexpected volName: %s, expected %s", gVolName, testPVName)
	}
}
55
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd_test.go
generated
vendored
@ -1,55 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure_dd

import (
	"os"
	"testing"

	"k8s.io/api/core/v1"
	utiltesting "k8s.io/client-go/util/testing"
	"k8s.io/kubernetes/pkg/volume"
	volumetest "k8s.io/kubernetes/pkg/volume/testing"
)

func TestCanSupport(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("azure_dd")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))

	plug, err := plugMgr.FindPluginByName(azureDataDiskPluginName)
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	if plug.GetPluginName() != azureDataDiskPluginName {
		t.Errorf("Wrong name: %s", plug.GetPluginName())
	}
	if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{AzureDisk: &v1.AzureDiskVolumeSource{}}}}) {
		t.Errorf("Expected true")
	}

	if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{AzureDisk: &v1.AzureDiskVolumeSource{}}}}}) {
		t.Errorf("Expected true")
	}
}

// The fakeAzureProvider type was removed because all of its functions were unused.
// Testing mounting would require path calculation, which depends on the cloud provider; it is faked in the test above.
210
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_mounter.go
generated
vendored
@ -1,210 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure_dd

import (
	"fmt"
	"os"
	"runtime"

	"github.com/golang/glog"
	"k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/volume"
	"k8s.io/kubernetes/pkg/volume/util"
)

type azureDiskMounter struct {
	*dataDisk
	spec    *volume.Spec
	plugin  *azureDataDiskPlugin
	options volume.VolumeOptions
}

type azureDiskUnmounter struct {
	*dataDisk
	plugin *azureDataDiskPlugin
}

var _ volume.Unmounter = &azureDiskUnmounter{}
var _ volume.Mounter = &azureDiskMounter{}

func (m *azureDiskMounter) GetAttributes() volume.Attributes {
	readOnly := false
	volumeSource, _, err := getVolumeSource(m.spec)
	if err != nil {
		glog.Infof("azureDisk - mounter failed to get volume source for spec %s %v", m.spec.Name(), err)
	} else if volumeSource.ReadOnly != nil {
		readOnly = *volumeSource.ReadOnly
	}
	return volume.Attributes{
		ReadOnly:        readOnly,
		Managed:         !readOnly,
		SupportsSELinux: true,
	}
}

func (m *azureDiskMounter) CanMount() error {
	return nil
}

func (m *azureDiskMounter) SetUp(fsGroup *int64) error {
	return m.SetUpAt(m.GetPath(), fsGroup)
}

func (m *azureDiskMounter) GetPath() string {
	return getPath(m.dataDisk.podUID, m.dataDisk.volumeName, m.plugin.host)
}

func (m *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
	mounter := m.plugin.host.GetMounter(m.plugin.GetPluginName())
	volumeSource, _, err := getVolumeSource(m.spec)

	if err != nil {
		glog.Infof("azureDisk - mounter failed to get volume source for spec %s", m.spec.Name())
		return err
	}

	diskName := volumeSource.DiskName
	mountPoint, err := mounter.IsLikelyNotMountPoint(dir)

	if err != nil && !os.IsNotExist(err) {
		glog.Infof("azureDisk - cannot validate mount point for disk %s on %s %v", diskName, dir, err)
		return err
	}
	if !mountPoint {
		// testing the original mount point; make sure the mount link is valid
		_, err := (&osIOHandler{}).ReadDir(dir)
		if err == nil {
			glog.V(4).Infof("azureDisk - already mounted to target %s", dir)
			return nil
		}
		// the mount link is invalid, so unmount now and remount later
		glog.Warningf("azureDisk - ReadDir %s failed with %v, unmount this directory", dir, err)
		if err := mounter.Unmount(dir); err != nil {
			glog.Errorf("azureDisk - Unmount directory %s failed with %v", dir, err)
			return err
		}
		mountPoint = true
	}

	if runtime.GOOS != "windows" {
		// on Windows, mklink is used to mount and MkdirAll happens in the Mount func, so skip it here
		if err := os.MkdirAll(dir, 0750); err != nil {
			glog.Errorf("azureDisk - mkdir failed on disk %s on dir: %s (%v)", diskName, dir, err)
			return err
		}
	}

	options := []string{"bind"}

	if volumeSource.ReadOnly != nil && *volumeSource.ReadOnly {
		options = append(options, "ro")
	}

	if m.options.MountOptions != nil {
		options = util.JoinMountOptions(m.options.MountOptions, options)
	}

	glog.V(4).Infof("azureDisk - Attempting to mount %s on %s", diskName, dir)
	isManagedDisk := (*volumeSource.Kind == v1.AzureManagedDisk)
	globalPDPath, err := makeGlobalPDPath(m.plugin.host, volumeSource.DataDiskURI, isManagedDisk)

	if err != nil {
		return err
	}

	mountErr := mounter.Mount(globalPDPath, dir, *volumeSource.FSType, options)
	// Everything in the following control flow is an attempt to clean up
	// a failed SetUpAt (bind mount).
	if mountErr != nil {
		glog.Infof("azureDisk - SetupAt:Mount disk:%s at dir:%s failed during mounting with error:%v, will attempt to clean up", diskName, dir, mountErr)
		mountPoint, err := mounter.IsLikelyNotMountPoint(dir)
		if err != nil {
			return fmt.Errorf("azureDisk - SetupAt:Mount:Failure:cleanup IsLikelyNotMountPoint check failed for disk:%s on dir:%s with error %v original-mountErr:%v", diskName, dir, err, mountErr)
		}

		if !mountPoint {
			if err = mounter.Unmount(dir); err != nil {
				return fmt.Errorf("azureDisk - SetupAt:Mount:Failure:cleanup failed to unmount disk:%s on dir:%s with error:%v original-mountErr:%v", diskName, dir, err, mountErr)
			}
			mountPoint, err := mounter.IsLikelyNotMountPoint(dir)
			if err != nil {
				return fmt.Errorf("azureDisk - SetupAt:Mount:Failure:cleanup IsLikelyNotMountPoint for disk:%s on dir:%s check failed with error:%v original-mountErr:%v", diskName, dir, err, mountErr)
			}
			if !mountPoint {
				// not cool. leave it for the next sync loop.
				return fmt.Errorf("azureDisk - SetupAt:Mount:Failure:cleanup disk %s is still mounted on %s during cleanup original-mountErr:%v, despite call to unmount(). Will try again next sync loop.", diskName, dir, mountErr)
			}
		}

		if err = os.Remove(dir); err != nil {
			return fmt.Errorf("azureDisk - SetupAt:Mount:Failure error cleaning up (removing dir:%s) with error:%v original-mountErr:%v", dir, err, mountErr)
		}

		glog.V(2).Infof("azureDisk - Mount of disk:%s on dir:%s failed with mount error:%v post failure clean up was completed", diskName, dir, mountErr)
		return mountErr
	}

	if volumeSource.ReadOnly == nil || !*volumeSource.ReadOnly {
		volume.SetVolumeOwnership(m, fsGroup)
	}

	glog.V(2).Infof("azureDisk - successfully mounted disk %s on %s", diskName, dir)
	return nil
}

func (u *azureDiskUnmounter) TearDown() error {
	return u.TearDownAt(u.GetPath())
}

func (u *azureDiskUnmounter) TearDownAt(dir string) error {
	if pathExists, pathErr := util.PathExists(dir); pathErr != nil {
		return fmt.Errorf("Error checking if path exists: %v", pathErr)
	} else if !pathExists {
		glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir)
		return nil
	}

	glog.V(4).Infof("azureDisk - TearDownAt: %s", dir)
	mounter := u.plugin.host.GetMounter(u.plugin.GetPluginName())
	mountPoint, err := mounter.IsLikelyNotMountPoint(dir)
	if err != nil {
		return fmt.Errorf("azureDisk - TearDownAt: %s failed to do IsLikelyNotMountPoint %s", dir, err)
	}
	if mountPoint {
		if err := os.Remove(dir); err != nil {
			return fmt.Errorf("azureDisk - TearDownAt: %s failed to do os.Remove %s", dir, err)
		}
	}
	if err := mounter.Unmount(dir); err != nil {
		return fmt.Errorf("azureDisk - TearDownAt: %s failed to do mounter.Unmount %s", dir, err)
	}
	mountPoint, err = mounter.IsLikelyNotMountPoint(dir)
	if err != nil {
		return fmt.Errorf("azureDisk - TearDownAt: IsLikelyNotMountPoint check failed: %v", err)
	}

	if mountPoint {
		return os.Remove(dir)
	}

	return fmt.Errorf("azureDisk - failed to un-bind-mount volume dir")
}

func (u *azureDiskUnmounter) GetPath() string {
	return getPath(u.dataDisk.podUID, u.dataDisk.volumeName, u.plugin.host)
}
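The SetUpAt/TearDownAt pair above implements a bind-mount scheme: the attacher formats and mounts the device once at a global path, and each pod directory is then bind-mounted onto that global mount, read-only when requested. A schematic of the resulting chain, with illustrative paths only:

/dev/sdX
  -> /var/lib/kubelet/plugins/kubernetes.io/azure-disk/mounts/<disk>        (global device mount)
     -> /var/lib/kubelet/pods/<uid>/volumes/kubernetes.io~azure-disk/<vol>  (per-pod bind mount)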
210
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_provision.go
generated
vendored
@ -1,210 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure_dd

import (
	"errors"
	"fmt"
	"strings"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/pkg/volume"
	"k8s.io/kubernetes/pkg/volume/util"
)

type azureDiskProvisioner struct {
	plugin  *azureDataDiskPlugin
	options volume.VolumeOptions
}

type azureDiskDeleter struct {
	*dataDisk
	spec   *volume.Spec
	plugin *azureDataDiskPlugin
}

var _ volume.Provisioner = &azureDiskProvisioner{}
var _ volume.Deleter = &azureDiskDeleter{}

func (d *azureDiskDeleter) GetPath() string {
	return getPath(d.podUID, d.dataDisk.diskName, d.plugin.host)
}

func (d *azureDiskDeleter) Delete() error {
	volumeSource, _, err := getVolumeSource(d.spec)
	if err != nil {
		return err
	}

	diskController, err := getDiskController(d.plugin.host)
	if err != nil {
		return err
	}

	managed := (*volumeSource.Kind == v1.AzureManagedDisk)

	if managed {
		return diskController.DeleteManagedDisk(volumeSource.DataDiskURI)
	}

	return diskController.DeleteBlobDisk(volumeSource.DataDiskURI)
}

func (p *azureDiskProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
	if !util.AccessModesContainedInAll(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) {
		return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", p.options.PVC.Spec.AccessModes, p.plugin.GetAccessModes())
	}
	supportedModes := p.plugin.GetAccessModes()

	// perform static validation first
	if p.options.PVC.Spec.Selector != nil {
		return nil, fmt.Errorf("azureDisk - claim.Spec.Selector is not supported for dynamic provisioning on Azure disk")
	}

	if len(p.options.PVC.Spec.AccessModes) > 1 {
		return nil, fmt.Errorf("AzureDisk - multiple access modes are not supported on AzureDisk plugin")
	}

	if len(p.options.PVC.Spec.AccessModes) == 1 {
		if p.options.PVC.Spec.AccessModes[0] != supportedModes[0] {
			return nil, fmt.Errorf("AzureDisk - mode %s is not supported by the AzureDisk plugin; the supported mode is %s", p.options.PVC.Spec.AccessModes[0], supportedModes)
		}
	}

	var (
		location, account          string
		storageAccountType, fsType string
		cachingMode                v1.AzureDataDiskCachingMode
		strKind                    string
		err                        error
		resourceGroup              string
	)
	// maxLength = 79 - (4 for ".vhd") = 75
	name := util.GenerateVolumeName(p.options.ClusterName, p.options.PVName, 75)
	capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
	requestBytes := capacity.Value()
	requestGB := int(util.RoundUpSize(requestBytes, 1024*1024*1024))

	for k, v := range p.options.Parameters {
		switch strings.ToLower(k) {
		case "skuname":
			storageAccountType = v
		case "location":
			location = v
		case "storageaccount":
			account = v
		case "storageaccounttype":
			storageAccountType = v
		case "kind":
			strKind = v
		case "cachingmode":
			cachingMode = v1.AzureDataDiskCachingMode(v)
		case volume.VolumeParameterFSType:
			fsType = strings.ToLower(v)
		case "resourcegroup":
			resourceGroup = v
		default:
			return nil, fmt.Errorf("AzureDisk - invalid option %s in storage class", k)
		}
	}

	// normalize values
	skuName, err := normalizeStorageAccountType(storageAccountType)
	if err != nil {
		return nil, err
	}

	kind, err := normalizeKind(strFirstLetterToUpper(strKind))
	if err != nil {
		return nil, err
	}

	if cachingMode, err = normalizeCachingMode(cachingMode); err != nil {
		return nil, err
	}

	diskController, err := getDiskController(p.plugin.host)
	if err != nil {
		return nil, err
	}

	if resourceGroup != "" && kind != v1.AzureManagedDisk {
		return nil, errors.New("StorageClass option 'resourceGroup' can be used only for managed disks")
	}

	// create disk
	diskURI := ""
	if kind == v1.AzureManagedDisk {
		tags := make(map[string]string)
		if p.options.CloudTags != nil {
			tags = *(p.options.CloudTags)
		}
		diskURI, err = diskController.CreateManagedDisk(name, skuName, resourceGroup, requestGB, tags)
		if err != nil {
			return nil, err
		}
	} else {
		if kind == v1.AzureDedicatedBlobDisk {
			_, diskURI, _, err = diskController.CreateVolume(name, account, storageAccountType, location, requestGB)
			if err != nil {
				return nil, err
			}
		} else {
			diskURI, err = diskController.CreateBlobDisk(name, skuName, requestGB)
			if err != nil {
				return nil, err
			}
		}
	}

	pv := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name:   p.options.PVName,
			Labels: map[string]string{},
			Annotations: map[string]string{
				"volumehelper.VolumeDynamicallyCreatedByKey": "azure-disk-dynamic-provisioner",
			},
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: p.options.PersistentVolumeReclaimPolicy,
			AccessModes:                   supportedModes,
			Capacity: v1.ResourceList{
				v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", requestGB)),
			},
			PersistentVolumeSource: v1.PersistentVolumeSource{
				AzureDisk: &v1.AzureDiskVolumeSource{
					CachingMode: &cachingMode,
					DiskName:    name,
					DataDiskURI: diskURI,
					Kind:        &kind,
					FSType:      &fsType,
				},
			},
			MountOptions: p.options.MountOptions,
		},
	}

	if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
		pv.Spec.VolumeMode = p.options.PVC.Spec.VolumeMode
	}

	return pv, nil
}
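The Provision switch above defines the StorageClass parameters this plugin understands; keys are matched after lower-casing and unknown keys are rejected. A hypothetical parameter set that would pass that validation (all values are examples only, not defaults from the source):

package main

func main() {
	// Hypothetical StorageClass parameters accepted by the switch above.
	parameters := map[string]string{
		"skuname":       "Standard_LRS", // alias of "storageaccounttype"
		"kind":          "Managed",      // Shared, Dedicated, or Managed
		"cachingmode":   "ReadOnly",
		"fstype":        "ext4",
		"resourcegroup": "example-rg", // valid only with kind=Managed
	}
	_ = parameters
}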
63
vendor/k8s.io/kubernetes/pkg/volume/azure_file/BUILD
generated
vendored
63
vendor/k8s.io/kubernetes/pkg/volume/azure_file/BUILD
generated
vendored
@ -1,63 +0,0 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = [
        "azure_file.go",
        "azure_provision.go",
        "azure_util.go",
        "doc.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/volume/azure_file",
    deps = [
        "//pkg/cloudprovider:go_default_library",
        "//pkg/cloudprovider/providers/azure:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/util/strings:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["azure_file_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//pkg/cloudprovider/providers/azure:go_default_library",
        "//pkg/cloudprovider/providers/fake:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/testing:go_default_library",
        "//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
18
vendor/k8s.io/kubernetes/pkg/volume/azure_file/OWNERS
generated
vendored
@@ -1,18 +0,0 @@
approvers:
- andyzhangx
- brendandburns
- feiskyer
- karataliu
- khenidak
- rootfs
reviewers:
- andyzhangx
- brendandburns
- feiskyer
- jsafrane
- jingxu97
- karataliu
- khenidak
- msau42
- rootfs
- saad-ali
381
vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_file.go
generated
vendored
@@ -1,381 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure_file

import (
	"fmt"
	"io/ioutil"
	"os"
	"runtime"

	"github.com/golang/glog"
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/cloudprovider"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
	"k8s.io/kubernetes/pkg/util/mount"
	kstrings "k8s.io/kubernetes/pkg/util/strings"
	"k8s.io/kubernetes/pkg/volume"
	volutil "k8s.io/kubernetes/pkg/volume/util"
)

// ProbeVolumePlugins is the primary endpoint for volume plugins
func ProbeVolumePlugins() []volume.VolumePlugin {
	return []volume.VolumePlugin{&azureFilePlugin{nil}}
}

type azureFilePlugin struct {
	host volume.VolumeHost
}

var _ volume.VolumePlugin = &azureFilePlugin{}
var _ volume.PersistentVolumePlugin = &azureFilePlugin{}
var _ volume.ExpandableVolumePlugin = &azureFilePlugin{}

const (
	azureFilePluginName = "kubernetes.io/azure-file"
)

func getPath(uid types.UID, volName string, host volume.VolumeHost) string {
	return host.GetPodVolumeDir(uid, kstrings.EscapeQualifiedNameForDisk(azureFilePluginName), volName)
}

func (plugin *azureFilePlugin) Init(host volume.VolumeHost) error {
	plugin.host = host
	return nil
}

func (plugin *azureFilePlugin) GetPluginName() string {
	return azureFilePluginName
}

func (plugin *azureFilePlugin) GetVolumeName(spec *volume.Spec) (string, error) {
	share, _, err := getVolumeSource(spec)
	if err != nil {
		return "", err
	}

	return share, nil
}

func (plugin *azureFilePlugin) CanSupport(spec *volume.Spec) bool {
	//TODO: check if mount.cifs is there
	return (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AzureFile != nil) ||
		(spec.Volume != nil && spec.Volume.AzureFile != nil)
}

func (plugin *azureFilePlugin) RequiresRemount() bool {
	return false
}

func (plugin *azureFilePlugin) SupportsMountOption() bool {
	return true
}

func (plugin *azureFilePlugin) SupportsBulkVolumeVerification() bool {
	return false
}

func (plugin *azureFilePlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
	return []v1.PersistentVolumeAccessMode{
		v1.ReadWriteOnce,
		v1.ReadOnlyMany,
		v1.ReadWriteMany,
	}
}

func (plugin *azureFilePlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
	return plugin.newMounterInternal(spec, pod, &azureSvc{}, plugin.host.GetMounter(plugin.GetPluginName()))
}

func (plugin *azureFilePlugin) newMounterInternal(spec *volume.Spec, pod *v1.Pod, util azureUtil, mounter mount.Interface) (volume.Mounter, error) {
	share, readOnly, err := getVolumeSource(spec)
	if err != nil {
		return nil, err
	}
	secretName, secretNamespace, err := getSecretNameAndNamespace(spec, pod.Namespace)
	if err != nil {
		// Propagate the lookup error instead of silently ignoring it.
		return nil, err
	}
	return &azureFileMounter{
		azureFile: &azureFile{
			volName:         spec.Name(),
			mounter:         mounter,
			pod:             pod,
			plugin:          plugin,
			MetricsProvider: volume.NewMetricsStatFS(getPath(pod.UID, spec.Name(), plugin.host)),
		},
		util:            util,
		secretNamespace: secretNamespace,
		secretName:      secretName,
		shareName:       share,
		readOnly:        readOnly,
		mountOptions:    volutil.MountOptionFromSpec(spec),
	}, nil
}

func (plugin *azureFilePlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
	return plugin.newUnmounterInternal(volName, podUID, plugin.host.GetMounter(plugin.GetPluginName()))
}

func (plugin *azureFilePlugin) newUnmounterInternal(volName string, podUID types.UID, mounter mount.Interface) (volume.Unmounter, error) {
	return &azureFileUnmounter{&azureFile{
		volName:         volName,
		mounter:         mounter,
		pod:             &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: podUID}},
		plugin:          plugin,
		MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, volName, plugin.host)),
	}}, nil
}

func (plugin *azureFilePlugin) RequiresFSResize() bool {
	return false
}

func (plugin *azureFilePlugin) ExpandVolumeDevice(
	spec *volume.Spec,
	newSize resource.Quantity,
	oldSize resource.Quantity) (resource.Quantity, error) {

	if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.AzureFile == nil {
		return oldSize, fmt.Errorf("invalid PV spec")
	}
	shareName := spec.PersistentVolume.Spec.AzureFile.ShareName
	azure, err := getAzureCloudProvider(plugin.host.GetCloudProvider())
	if err != nil {
		return oldSize, err
	}

	secretName, secretNamespace, err := getSecretNameAndNamespace(spec, spec.PersistentVolume.Spec.ClaimRef.Namespace)
	if err != nil {
		return oldSize, err
	}

	accountName, accountKey, err := (&azureSvc{}).GetAzureCredentials(plugin.host, secretNamespace, secretName)
	if err != nil {
		return oldSize, err
	}

	if err := azure.ResizeFileShare(accountName, accountKey, shareName, int(volutil.RoundUpToGiB(newSize))); err != nil {
		return oldSize, err
	}

	return newSize, nil
}

func (plugin *azureFilePlugin) ConstructVolumeSpec(volName, mountPath string) (*volume.Spec, error) {
	azureVolume := &v1.Volume{
		Name: volName,
		VolumeSource: v1.VolumeSource{
			AzureFile: &v1.AzureFileVolumeSource{
				SecretName: volName,
				ShareName:  volName,
			},
		},
	}
	return volume.NewSpecFromVolume(azureVolume), nil
}

// azureFile volumes represent mount of an AzureFile share.
type azureFile struct {
	volName string
	podUID  types.UID
	pod     *v1.Pod
	mounter mount.Interface
	plugin  *azureFilePlugin
	volume.MetricsProvider
}

func (azureFileVolume *azureFile) GetPath() string {
	return getPath(azureFileVolume.pod.UID, azureFileVolume.volName, azureFileVolume.plugin.host)
}

type azureFileMounter struct {
	*azureFile
	util            azureUtil
	secretName      string
	secretNamespace string
	shareName       string
	readOnly        bool
	mountOptions    []string
}

var _ volume.Mounter = &azureFileMounter{}

func (b *azureFileMounter) GetAttributes() volume.Attributes {
	return volume.Attributes{
		ReadOnly:        b.readOnly,
		Managed:         !b.readOnly,
		SupportsSELinux: false,
	}
}

// Checks prior to mount operations to verify that the required components (binaries, etc.)
// to mount the volume are available on the underlying node.
// If not, it returns an error
func (b *azureFileMounter) CanMount() error {
	return nil
}

// SetUp attaches the disk and bind mounts to the volume path.
func (b *azureFileMounter) SetUp(fsGroup *int64) error {
	return b.SetUpAt(b.GetPath(), fsGroup)
}

func (b *azureFileMounter) SetUpAt(dir string, fsGroup *int64) error {
	notMnt, err := b.mounter.IsLikelyNotMountPoint(dir)
	glog.V(4).Infof("AzureFile mount set up: %s %v %v", dir, !notMnt, err)
	if err != nil && !os.IsNotExist(err) {
		return err
	}
	if !notMnt {
		// testing original mount point, make sure the mount link is valid
		if _, err := ioutil.ReadDir(dir); err == nil {
			glog.V(4).Infof("azureFile - already mounted to target %s", dir)
			return nil
		}
		// mount link is invalid, now unmount and remount later
		glog.Warningf("azureFile - ReadDir %s failed with %v, unmount this directory", dir, err)
		if err := b.mounter.Unmount(dir); err != nil {
			glog.Errorf("azureFile - Unmount directory %s failed with %v", dir, err)
			return err
		}
		notMnt = true
	}

	var accountKey, accountName string
	if accountName, accountKey, err = b.util.GetAzureCredentials(b.plugin.host, b.secretNamespace, b.secretName); err != nil {
		return err
	}

	mountOptions := []string{}
	source := ""
	osSeparator := string(os.PathSeparator)
	source = fmt.Sprintf("%s%s%s.file.%s%s%s", osSeparator, osSeparator, accountName, getStorageEndpointSuffix(b.plugin.host.GetCloudProvider()), osSeparator, b.shareName)
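	// On Linux this yields e.g. //accountname.file.core.windows.net/share
	// (backslashes on Windows); the suffix comes from getStorageEndpointSuffix.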

	if runtime.GOOS == "windows" {
		mountOptions = []string{fmt.Sprintf("AZURE\\%s", accountName), accountKey}
	} else {
		// Check the MkdirAll error rather than discarding it, so a failure
		// to create the mount directory surfaces here instead of at mount time.
		if err := os.MkdirAll(dir, 0700); err != nil {
			return err
		}
		// parameters suggested by https://azure.microsoft.com/en-us/documentation/articles/storage-how-to-use-files-linux/
		options := []string{fmt.Sprintf("username=%s,password=%s", accountName, accountKey)}
		if b.readOnly {
			options = append(options, "ro")
		}
		mountOptions = volutil.JoinMountOptions(b.mountOptions, options)
		mountOptions = appendDefaultMountOptions(mountOptions, fsGroup)
	}

	err = b.mounter.Mount(source, dir, "cifs", mountOptions)
	if err != nil {
		notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
		if mntErr != nil {
			glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
			return err
		}
		if !notMnt {
			if mntErr = b.mounter.Unmount(dir); mntErr != nil {
				glog.Errorf("Failed to unmount: %v", mntErr)
				return err
			}
			notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
			if mntErr != nil {
				glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
				return err
			}
			if !notMnt {
				// This is very odd, we don't expect it. We'll try again next sync loop.
				glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir)
				return err
			}
		}
		os.Remove(dir)
		return err
	}
	return nil
}

var _ volume.Unmounter = &azureFileUnmounter{}

type azureFileUnmounter struct {
	*azureFile
}

func (c *azureFileUnmounter) TearDown() error {
	return c.TearDownAt(c.GetPath())
}

func (c *azureFileUnmounter) TearDownAt(dir string) error {
	return volutil.UnmountPath(dir, c.mounter)
}

func getVolumeSource(spec *volume.Spec) (string, bool, error) {
	if spec.Volume != nil && spec.Volume.AzureFile != nil {
		share := spec.Volume.AzureFile.ShareName
		readOnly := spec.Volume.AzureFile.ReadOnly
		return share, readOnly, nil
	} else if spec.PersistentVolume != nil &&
		spec.PersistentVolume.Spec.AzureFile != nil {
		share := spec.PersistentVolume.Spec.AzureFile.ShareName
		readOnly := spec.ReadOnly
		return share, readOnly, nil
	}
	return "", false, fmt.Errorf("Spec does not reference an AzureFile volume type")
}

func getSecretNameAndNamespace(spec *volume.Spec, defaultNamespace string) (string, string, error) {
	secretName := ""
	secretNamespace := ""
	if spec.Volume != nil && spec.Volume.AzureFile != nil {
		secretName = spec.Volume.AzureFile.SecretName
		secretNamespace = defaultNamespace
	} else if spec.PersistentVolume != nil &&
		spec.PersistentVolume.Spec.AzureFile != nil {
		secretNamespace = defaultNamespace
		if spec.PersistentVolume.Spec.AzureFile.SecretNamespace != nil {
			secretNamespace = *spec.PersistentVolume.Spec.AzureFile.SecretNamespace
		}
		secretName = spec.PersistentVolume.Spec.AzureFile.SecretName
	} else {
		return "", "", fmt.Errorf("Spec does not reference an AzureFile volume type")
	}

	if len(secretNamespace) == 0 {
		return "", "", fmt.Errorf("invalid Azure volume: nil namespace")
	}
	return secretName, secretNamespace, nil
}

func getAzureCloud(cloudProvider cloudprovider.Interface) (*azure.Cloud, error) {
	azure, ok := cloudProvider.(*azure.Cloud)
	if !ok || azure == nil {
		return nil, fmt.Errorf("Failed to get Azure Cloud Provider. GetCloudProvider returned %v instead", cloudProvider)
	}

	return azure, nil
}

func getStorageEndpointSuffix(cloudprovider cloudprovider.Interface) string {
	const publicCloudStorageEndpointSuffix = "core.windows.net"
	azure, err := getAzureCloud(cloudprovider)
	if err != nil {
		glog.Warningf("No Azure cloud provider found. Using the Azure public cloud endpoint: %s", publicCloudStorageEndpointSuffix)
		return publicCloudStorageEndpointSuffix
	}
	return azure.Environment.StorageEndpointSuffix
}
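// Illustrative note (not part of the original source): on sovereign clouds
// Environment.StorageEndpointSuffix differs from the public default, e.g.
// core.chinacloudapi.cn on Azure China.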
414
vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_file_test.go
generated
vendored
@@ -1,414 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure_file

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"reflect"
	"strings"
	"testing"

	"github.com/Azure/go-autorest/autorest/to"
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
	fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
	"k8s.io/kubernetes/pkg/util/mount"
	"k8s.io/kubernetes/pkg/volume"
	volumetest "k8s.io/kubernetes/pkg/volume/testing"
)

func TestCanSupport(t *testing.T) {
	tmpDir, err := ioutil.TempDir(os.TempDir(), "azureFileTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))

	plug, err := plugMgr.FindPluginByName("kubernetes.io/azure-file")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	if plug.GetPluginName() != "kubernetes.io/azure-file" {
		t.Errorf("Wrong name: %s", plug.GetPluginName())
	}
	if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{AzureFile: &v1.AzureFileVolumeSource{}}}}) {
		t.Errorf("Expected true")
	}
	if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{AzureFile: &v1.AzureFilePersistentVolumeSource{}}}}}) {
		t.Errorf("Expected true")
	}
}

func TestGetAccessModes(t *testing.T) {
	tmpDir, err := ioutil.TempDir(os.TempDir(), "azureFileTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))

	plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/azure-file")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	if !volumetest.ContainsAccessMode(plug.GetAccessModes(), v1.ReadWriteOnce) || !volumetest.ContainsAccessMode(plug.GetAccessModes(), v1.ReadOnlyMany) || !volumetest.ContainsAccessMode(plug.GetAccessModes(), v1.ReadWriteMany) {
		t.Errorf("Expected three AccessModeTypes: %s, %s, and %s", v1.ReadWriteOnce, v1.ReadOnlyMany, v1.ReadWriteMany)
	}
}

func getAzureTestCloud(t *testing.T) *azure.Cloud {
	config := `{
		"aadClientId": "--aad-client-id--",
		"aadClientSecret": "--aad-client-secret--"
	}`
	configReader := strings.NewReader(config)
	cloud, err := azure.NewCloud(configReader)
	if err != nil {
		t.Error(err)
	}
	azureCloud, ok := cloud.(*azure.Cloud)
	if !ok {
		t.Error("NewCloud returned incorrect type")
	}
	return azureCloud
}

func getTestTempDir(t *testing.T) string {
	tmpDir, err := ioutil.TempDir(os.TempDir(), "azurefileTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	return tmpDir
}

func TestPluginAzureCloudProvider(t *testing.T) {
	tmpDir := getTestTempDir(t)
	defer os.RemoveAll(tmpDir)
	testPlugin(t, tmpDir, volumetest.NewFakeVolumeHostWithCloudProvider(tmpDir, nil, nil, getAzureTestCloud(t)))
}

func TestPluginWithoutCloudProvider(t *testing.T) {
	tmpDir := getTestTempDir(t)
	defer os.RemoveAll(tmpDir)
	testPlugin(t, tmpDir, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
}

func TestPluginWithOtherCloudProvider(t *testing.T) {
	tmpDir := getTestTempDir(t)
	defer os.RemoveAll(tmpDir)
	cloud := &fakecloud.FakeCloud{}
	testPlugin(t, tmpDir, volumetest.NewFakeVolumeHostWithCloudProvider(tmpDir, nil, nil, cloud))
}

func testPlugin(t *testing.T, tmpDir string, volumeHost volume.VolumeHost) {
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumeHost)

	plug, err := plugMgr.FindPluginByName("kubernetes.io/azure-file")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	spec := &v1.Volume{
		Name: "vol1",
		VolumeSource: v1.VolumeSource{
			AzureFile: &v1.AzureFileVolumeSource{
				SecretName: "secret",
				ShareName:  "share",
			},
		},
	}
	fake := &mount.FakeMounter{}
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
	mounter, err := plug.(*azureFilePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), pod, &fakeAzureSvc{}, fake)
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
	if mounter == nil {
		t.Errorf("Got a nil Mounter")
	}
	volPath := filepath.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~azure-file/vol1")
	path := mounter.GetPath()
	if path != volPath {
		t.Errorf("Got unexpected path: %s", path)
	}

	if err := mounter.SetUp(nil); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(path); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, volume path not created: %s", path)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}

	unmounter, err := plug.(*azureFilePlugin).newUnmounterInternal("vol1", types.UID("poduid"), &mount.FakeMounter{})
	if err != nil {
		t.Errorf("Failed to make a new Unmounter: %v", err)
	}
	if unmounter == nil {
		t.Errorf("Got a nil Unmounter")
	}

	if err := unmounter.TearDown(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(path); err == nil {
		t.Errorf("TearDown() failed, volume path still exists: %s", path)
	} else if !os.IsNotExist(err) {
		t.Errorf("TearDown() failed: %v", err)
	}
}

func TestPersistentClaimReadOnlyFlag(t *testing.T) {
	pv := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name: "pvA",
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: v1.PersistentVolumeSource{
				AzureFile: &v1.AzureFilePersistentVolumeSource{},
			},
			ClaimRef: &v1.ObjectReference{
				Name: "claimA",
			},
		},
	}

	claim := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "claimA",
			Namespace: "nsA",
		},
		Spec: v1.PersistentVolumeClaimSpec{
			VolumeName: "pvA",
		},
		Status: v1.PersistentVolumeClaimStatus{
			Phase: v1.ClaimBound,
		},
	}

	client := fake.NewSimpleClientset(pv, claim)

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost("/tmp/fake", client, nil))
	plug, _ := plugMgr.FindPluginByName(azureFilePluginName)

	// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
	spec := volume.NewSpecFromPersistentVolume(pv, true)
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
	mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
	if mounter == nil {
		t.Fatalf("Got a nil Mounter")
	}

	if !mounter.GetAttributes().ReadOnly {
		t.Errorf("Expected true for mounter.IsReadOnly")
	}
}

type fakeAzureSvc struct{}

func (s *fakeAzureSvc) GetAzureCredentials(host volume.VolumeHost, nameSpace, secretName string) (string, string, error) {
	return "name", "key", nil
}
func (s *fakeAzureSvc) SetAzureCredentials(host volume.VolumeHost, nameSpace, accountName, accountKey string) (string, error) {
	return "secret", nil
}

func TestMounterAndUnmounterTypeAssert(t *testing.T) {
	tmpDir, err := ioutil.TempDir(os.TempDir(), "azurefileTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))

	plug, err := plugMgr.FindPluginByName("kubernetes.io/azure-file")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	spec := &v1.Volume{
		Name: "vol1",
		VolumeSource: v1.VolumeSource{
			AzureFile: &v1.AzureFileVolumeSource{
				SecretName: "secret",
				ShareName:  "share",
			},
		},
	}
	fake := &mount.FakeMounter{}
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
	mounter, err := plug.(*azureFilePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), pod, &fakeAzureSvc{}, fake)
	// Check the error so it is not silently dropped (the variable was
	// previously assigned but never read).
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
	if _, ok := mounter.(volume.Unmounter); ok {
		t.Errorf("Volume Mounter can be type-assert to Unmounter")
	}

	unmounter, err := plug.(*azureFilePlugin).newUnmounterInternal("vol1", types.UID("poduid"), &mount.FakeMounter{})
	if err != nil {
		t.Errorf("Failed to make a new Unmounter: %v", err)
	}
	if _, ok := unmounter.(volume.Mounter); ok {
		t.Errorf("Volume Unmounter can be type-assert to Mounter")
	}
}

type testcase struct {
	name      string
	defaultNs string
	spec      *volume.Spec
	// Expected return of the test
	expectedName  string
	expectedNs    string
	expectedError error
}

func TestGetSecretNameAndNamespaceForPV(t *testing.T) {
	secretNs := "ns"
	tests := []testcase{
		{
			name:      "persistent volume source",
			defaultNs: "default",
			spec: &volume.Spec{
				PersistentVolume: &v1.PersistentVolume{
					Spec: v1.PersistentVolumeSpec{
						PersistentVolumeSource: v1.PersistentVolumeSource{
							AzureFile: &v1.AzureFilePersistentVolumeSource{
								ShareName:       "share",
								SecretName:      "name",
								SecretNamespace: &secretNs,
							},
						},
					},
				},
			},
			expectedName:  "name",
			expectedNs:    "ns",
			expectedError: nil,
		},
		{
			name:      "persistent volume source without namespace",
			defaultNs: "default",
			spec: &volume.Spec{
				PersistentVolume: &v1.PersistentVolume{
					Spec: v1.PersistentVolumeSpec{
						PersistentVolumeSource: v1.PersistentVolumeSource{
							AzureFile: &v1.AzureFilePersistentVolumeSource{
								ShareName:  "share",
								SecretName: "name",
							},
						},
					},
				},
			},
			expectedName:  "name",
			expectedNs:    "default",
			expectedError: nil,
		},
		{
			name:      "pod volume source",
			defaultNs: "default",
			spec: &volume.Spec{
				Volume: &v1.Volume{
					VolumeSource: v1.VolumeSource{
						AzureFile: &v1.AzureFileVolumeSource{
							ShareName:  "share",
							SecretName: "name",
						},
					},
				},
			},
			expectedName:  "name",
			expectedNs:    "default",
			expectedError: nil,
		},
	}
	for _, testcase := range tests {
		resultName, resultNs, err := getSecretNameAndNamespace(testcase.spec, testcase.defaultNs)
		if err != testcase.expectedError || resultName != testcase.expectedName || resultNs != testcase.expectedNs {
			t.Errorf("%s failed: expected err=%v ns=%q name=%q, got %v/%q/%q", testcase.name, testcase.expectedError, testcase.expectedNs, testcase.expectedName,
				err, resultNs, resultName)
		}
	}
}

func TestAppendDefaultMountOptions(t *testing.T) {
	tests := []struct {
		options  []string
		fsGroup  *int64
		expected []string
	}{
		{
			options: []string{"dir_mode=0777"},
			fsGroup: nil,
			expected: []string{"dir_mode=0777",
				fmt.Sprintf("%s=%s", fileMode, defaultFileMode),
				fmt.Sprintf("%s=%s", vers, defaultVers)},
		},
		{
			options: []string{"file_mode=0777"},
			fsGroup: to.Int64Ptr(0),
			expected: []string{"file_mode=0777",
				fmt.Sprintf("%s=%s", dirMode, defaultDirMode),
				fmt.Sprintf("%s=%s", vers, defaultVers),
				fmt.Sprintf("%s=0", gid)},
		},
		{
			options: []string{"vers=2.1"},
			fsGroup: to.Int64Ptr(1000),
			expected: []string{"vers=2.1",
				fmt.Sprintf("%s=%s", fileMode, defaultFileMode),
				fmt.Sprintf("%s=%s", dirMode, defaultDirMode),
				fmt.Sprintf("%s=1000", gid)},
		},
		{
			options: []string{""},
			expected: []string{"", fmt.Sprintf("%s=%s",
				fileMode, defaultFileMode),
				fmt.Sprintf("%s=%s", dirMode, defaultDirMode),
				fmt.Sprintf("%s=%s", vers, defaultVers)},
		},
		{
			options:  []string{"file_mode=0777", "dir_mode=0777"},
			expected: []string{"file_mode=0777", "dir_mode=0777", fmt.Sprintf("%s=%s", vers, defaultVers)},
		},
		{
			options: []string{"gid=2000"},
			fsGroup: to.Int64Ptr(1000),
			expected: []string{"gid=2000",
				fmt.Sprintf("%s=%s", fileMode, defaultFileMode),
				fmt.Sprintf("%s=%s", dirMode, defaultDirMode),
				"vers=3.0"},
		},
	}

	for _, test := range tests {
		result := appendDefaultMountOptions(test.options, test.fsGroup)
		if !reflect.DeepEqual(result, test.expected) {
			t.Errorf("input: %q, appendDefaultMountOptions result: %q, expected: %q", test.options, result, test.expected)
		}
	}
}
218
vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_provision.go
generated
vendored
@@ -1,218 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure_file

import (
	"fmt"
	"strings"

	"github.com/golang/glog"
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/cloudprovider"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
	utilstrings "k8s.io/kubernetes/pkg/util/strings"
	"k8s.io/kubernetes/pkg/volume"
	"k8s.io/kubernetes/pkg/volume/util"
)

var _ volume.DeletableVolumePlugin = &azureFilePlugin{}
var _ volume.ProvisionableVolumePlugin = &azureFilePlugin{}

// Abstract interface to file share operations.
// azure cloud provider should implement it
type azureCloudProvider interface {
	// create a file share
	CreateFileShare(shareName, accountName, accountType, location string, requestGiB int) (string, string, error)
	// delete a file share
	DeleteFileShare(accountName, accountKey, shareName string) error
	// resize a file share
	ResizeFileShare(accountName, accountKey, name string, sizeGiB int) error
}

type azureFileDeleter struct {
	*azureFile
	accountName, accountKey, shareName string
	azureProvider                      azureCloudProvider
}

func (plugin *azureFilePlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
	azure, err := getAzureCloudProvider(plugin.host.GetCloudProvider())
	if err != nil {
		glog.V(4).Infof("failed to get azure provider")
		return nil, err
	}

	return plugin.newDeleterInternal(spec, &azureSvc{}, azure)
}

func (plugin *azureFilePlugin) newDeleterInternal(spec *volume.Spec, util azureUtil, azure azureCloudProvider) (volume.Deleter, error) {
	if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AzureFile == nil {
		return nil, fmt.Errorf("invalid PV spec")
	}

	secretName, secretNamespace, err := getSecretNameAndNamespace(spec, spec.PersistentVolume.Spec.ClaimRef.Namespace)
	if err != nil {
		return nil, err
	}
	shareName := spec.PersistentVolume.Spec.AzureFile.ShareName
	if accountName, accountKey, err := util.GetAzureCredentials(plugin.host, secretNamespace, secretName); err != nil {
		return nil, err
	} else {
		return &azureFileDeleter{
			azureFile: &azureFile{
				volName: spec.Name(),
				plugin:  plugin,
			},
			shareName:     shareName,
			accountName:   accountName,
			accountKey:    accountKey,
			azureProvider: azure,
		}, nil
	}
}

func (plugin *azureFilePlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
	azure, err := getAzureCloudProvider(plugin.host.GetCloudProvider())
	if err != nil {
		glog.V(4).Infof("failed to get azure provider")
		return nil, err
	}
	if len(options.PVC.Spec.AccessModes) == 0 {
		options.PVC.Spec.AccessModes = plugin.GetAccessModes()
	}
	return plugin.newProvisionerInternal(options, azure)
}

func (plugin *azureFilePlugin) newProvisionerInternal(options volume.VolumeOptions, azure azureCloudProvider) (volume.Provisioner, error) {
	return &azureFileProvisioner{
		azureFile: &azureFile{
			plugin: plugin,
		},
		azureProvider: azure,
		util:          &azureSvc{},
		options:       options,
	}, nil
}

var _ volume.Deleter = &azureFileDeleter{}

func (f *azureFileDeleter) GetPath() string {
	name := azureFilePluginName
	return f.plugin.host.GetPodVolumeDir(f.podUID, utilstrings.EscapeQualifiedNameForDisk(name), f.volName)
}

func (f *azureFileDeleter) Delete() error {
	glog.V(4).Infof("deleting volume %s", f.shareName)
	return f.azureProvider.DeleteFileShare(f.accountName, f.accountKey, f.shareName)
}

type azureFileProvisioner struct {
	*azureFile
	azureProvider azureCloudProvider
	util          azureUtil
	options       volume.VolumeOptions
}

var _ volume.Provisioner = &azureFileProvisioner{}

func (a *azureFileProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
	if !util.AccessModesContainedInAll(a.plugin.GetAccessModes(), a.options.PVC.Spec.AccessModes) {
		return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", a.options.PVC.Spec.AccessModes, a.plugin.GetAccessModes())
	}
	if util.CheckPersistentVolumeClaimModeBlock(a.options.PVC) {
		return nil, fmt.Errorf("%s does not support block volume provisioning", a.plugin.GetPluginName())
	}

	var sku, location, account string

	// File share name has a length limit of 63, and it cannot contain two consecutive '-'s.
	name := util.GenerateVolumeName(a.options.ClusterName, a.options.PVName, 63)
	name = strings.Replace(name, "--", "-", -1)
	capacity := a.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
	requestBytes := capacity.Value()
	requestGiB := int(util.RoundUpSize(requestBytes, 1024*1024*1024))
	secretNamespace := a.options.PVC.Namespace
	// Apply ProvisionerParameters (case-insensitive). We leave validation of
	// the values to the cloud provider.
	for k, v := range a.options.Parameters {
		switch strings.ToLower(k) {
		case "skuname":
			sku = v
		case "location":
			location = v
		case "storageaccount":
			account = v
		case "secretnamespace":
			secretNamespace = v
		default:
			return nil, fmt.Errorf("invalid option %q for volume plugin %s", k, a.plugin.GetPluginName())
		}
	}
|
||||
if a.options.PVC.Spec.Selector != nil {
|
||||
return nil, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on Azure file")
|
||||
}
|
||||
|
||||
account, key, err := a.azureProvider.CreateFileShare(name, account, sku, location, requestGiB)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// create a secret for storage account and key
|
||||
secretName, err := a.util.SetAzureCredentials(a.plugin.host, secretNamespace, account, key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// create PV
|
||||
pv := &v1.PersistentVolume{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: a.options.PVName,
|
||||
Labels: map[string]string{},
|
||||
Annotations: map[string]string{
|
||||
util.VolumeDynamicallyCreatedByKey: "azure-file-dynamic-provisioner",
|
||||
},
|
||||
},
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
PersistentVolumeReclaimPolicy: a.options.PersistentVolumeReclaimPolicy,
|
||||
AccessModes: a.options.PVC.Spec.AccessModes,
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", requestGiB)),
|
||||
},
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
AzureFile: &v1.AzureFilePersistentVolumeSource{
|
||||
SecretName: secretName,
|
||||
ShareName: name,
|
||||
SecretNamespace: &secretNamespace,
|
||||
},
|
||||
},
|
||||
MountOptions: a.options.MountOptions,
|
||||
},
|
||||
}
|
||||
return pv, nil
|
||||
}
|
||||
|
||||
// Return cloud provider
|
||||
func getAzureCloudProvider(cloudProvider cloudprovider.Interface) (azureCloudProvider, error) {
|
||||
azureCloudProvider, ok := cloudProvider.(*azure.Cloud)
|
||||
if !ok || azureCloudProvider == nil {
|
||||
return nil, fmt.Errorf("Failed to get Azure Cloud Provider. GetCloudProvider returned %v instead", cloudProvider)
|
||||
}
|
||||
|
||||
return azureCloudProvider, nil
|
||||
}
138
vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_util.go
generated
vendored
@@ -1,138 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure_file

import (
	"fmt"
	"strings"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/volume"
)

const (
	fileMode        = "file_mode"
	dirMode         = "dir_mode"
	gid             = "gid"
	vers            = "vers"
	defaultFileMode = "0755"
	defaultDirMode  = "0755"
	defaultVers     = "3.0"
)

// Abstract interface to azure file operations.
type azureUtil interface {
	GetAzureCredentials(host volume.VolumeHost, nameSpace, secretName string) (string, string, error)
	SetAzureCredentials(host volume.VolumeHost, nameSpace, accountName, accountKey string) (string, error)
}

type azureSvc struct{}

func (s *azureSvc) GetAzureCredentials(host volume.VolumeHost, nameSpace, secretName string) (string, string, error) {
	var accountKey, accountName string
	kubeClient := host.GetKubeClient()
	if kubeClient == nil {
		return "", "", fmt.Errorf("Cannot get kube client")
	}

	keys, err := kubeClient.CoreV1().Secrets(nameSpace).Get(secretName, metav1.GetOptions{})
	if err != nil {
		return "", "", fmt.Errorf("Couldn't get secret %v/%v", nameSpace, secretName)
	}
	for name, data := range keys.Data {
		if name == "azurestorageaccountname" {
			accountName = string(data)
		}
		if name == "azurestorageaccountkey" {
			accountKey = string(data)
		}
	}
	if accountName == "" || accountKey == "" {
		return "", "", fmt.Errorf("Invalid %v/%v, couldn't extract azurestorageaccountname or azurestorageaccountkey", nameSpace, secretName)
	}
	return accountName, accountKey, nil
}

func (s *azureSvc) SetAzureCredentials(host volume.VolumeHost, nameSpace, accountName, accountKey string) (string, error) {
	kubeClient := host.GetKubeClient()
	if kubeClient == nil {
		return "", fmt.Errorf("Cannot get kube client")
	}
	secretName := "azure-storage-account-" + accountName + "-secret"
	secret := &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: nameSpace,
			Name:      secretName,
		},
		Data: map[string][]byte{
			"azurestorageaccountname": []byte(accountName),
			"azurestorageaccountkey":  []byte(accountKey),
		},
		Type: "Opaque",
	}
	_, err := kubeClient.CoreV1().Secrets(nameSpace).Create(secret)
	if errors.IsAlreadyExists(err) {
		err = nil
	}
	if err != nil {
		return "", fmt.Errorf("Couldn't create secret %v", err)
	}
	return secretName, err
}

// check whether mountOptions contain file_mode, dir_mode, vers, gid, if not, append default mode
func appendDefaultMountOptions(mountOptions []string, fsGroup *int64) []string {
	fileModeFlag := false
	dirModeFlag := false
	versFlag := false
	gidFlag := false

	for _, mountOption := range mountOptions {
		if strings.HasPrefix(mountOption, fileMode) {
			fileModeFlag = true
		}
		if strings.HasPrefix(mountOption, dirMode) {
			dirModeFlag = true
		}
		if strings.HasPrefix(mountOption, vers) {
			versFlag = true
		}
		if strings.HasPrefix(mountOption, gid) {
			gidFlag = true
		}
	}

	allMountOptions := mountOptions
	if !fileModeFlag {
		allMountOptions = append(allMountOptions, fmt.Sprintf("%s=%s", fileMode, defaultFileMode))
	}

	if !dirModeFlag {
		allMountOptions = append(allMountOptions, fmt.Sprintf("%s=%s", dirMode, defaultDirMode))
	}

	if !versFlag {
		allMountOptions = append(allMountOptions, fmt.Sprintf("%s=%s", vers, defaultVers))
	}

	if !gidFlag && fsGroup != nil {
		allMountOptions = append(allMountOptions, fmt.Sprintf("%s=%d", gid, *fsGroup))
	}
	return allMountOptions
}
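// Illustrative only (not part of the original source): given
// mountOptions = []string{"file_mode=0777"} and fsGroup pointing at 1000,
// appendDefaultMountOptions returns
//   []string{"file_mode=0777", "dir_mode=0755", "vers=3.0", "gid=1000"}
// since only file_mode was already set; this matches the cases exercised in
// TestAppendDefaultMountOptions above.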
19
vendor/k8s.io/kubernetes/pkg/volume/azure_file/doc.go
generated
vendored
@@ -1,19 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package azure_file contains the internal representation of
// Azure File Service Volume
package azure_file // import "k8s.io/kubernetes/pkg/volume/azure_file"
53
vendor/k8s.io/kubernetes/pkg/volume/cephfs/BUILD
generated
vendored
@@ -1,53 +0,0 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = [
        "cephfs.go",
        "doc.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/volume/cephfs",
    deps = [
        "//pkg/util/mount:go_default_library",
        "//pkg/util/strings:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["cephfs_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//pkg/util/mount:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/testing:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/client-go/util/testing:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
9
vendor/k8s.io/kubernetes/pkg/volume/cephfs/OWNERS
generated
vendored
@@ -1,9 +0,0 @@
approvers:
- rootfs
- saad-ali
reviewers:
- rootfs
- saad-ali
- jsafrane
- jingxu97
- msau42
463
vendor/k8s.io/kubernetes/pkg/volume/cephfs/cephfs.go
generated
vendored
@@ -1,463 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cephfs

import (
	"fmt"
	"os"
	"os/exec"
	"path"
	"runtime"
	"strings"

	"github.com/golang/glog"
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/util/mount"
	utilstrings "k8s.io/kubernetes/pkg/util/strings"
	"k8s.io/kubernetes/pkg/volume"
	"k8s.io/kubernetes/pkg/volume/util"
)

// This is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
	return []volume.VolumePlugin{&cephfsPlugin{nil}}
}

type cephfsPlugin struct {
	host volume.VolumeHost
}

var _ volume.VolumePlugin = &cephfsPlugin{}

const (
	cephfsPluginName = "kubernetes.io/cephfs"
)

func (plugin *cephfsPlugin) Init(host volume.VolumeHost) error {
	plugin.host = host
	return nil
}

func (plugin *cephfsPlugin) GetPluginName() string {
	return cephfsPluginName
}

func (plugin *cephfsPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
	mon, _, _, _, _, err := getVolumeSource(spec)
	if err != nil {
		return "", err
	}

	return fmt.Sprintf("%v", mon), nil
}

func (plugin *cephfsPlugin) CanSupport(spec *volume.Spec) bool {
	return (spec.Volume != nil && spec.Volume.CephFS != nil) || (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.CephFS != nil)
}

func (plugin *cephfsPlugin) RequiresRemount() bool {
	return false
}

func (plugin *cephfsPlugin) SupportsMountOption() bool {
	return true
}

func (plugin *cephfsPlugin) SupportsBulkVolumeVerification() bool {
	return false
}

func (plugin *cephfsPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
	return []v1.PersistentVolumeAccessMode{
		v1.ReadWriteOnce,
		v1.ReadOnlyMany,
		v1.ReadWriteMany,
	}
}

func (plugin *cephfsPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
	secretName, secretNs, err := getSecretNameAndNamespace(spec, pod.Namespace)
	if err != nil {
		return nil, err
	}
	secret := ""
	if len(secretName) > 0 && len(secretNs) > 0 {
		// if a secret is provided, retrieve it
		kubeClient := plugin.host.GetKubeClient()
		if kubeClient == nil {
			return nil, fmt.Errorf("Cannot get kube client")
		}
		secrets, err := kubeClient.CoreV1().Secrets(secretNs).Get(secretName, metav1.GetOptions{})
		if err != nil {
			err = fmt.Errorf("Couldn't get secret %v/%v err: %v", secretNs, secretName, err)
			return nil, err
		}
		for name, data := range secrets.Data {
			secret = string(data)
			glog.V(4).Infof("found ceph secret info: %s", name)
		}
	}
	return plugin.newMounterInternal(spec, pod.UID, plugin.host.GetMounter(plugin.GetPluginName()), secret)
}

func (plugin *cephfsPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, mounter mount.Interface, secret string) (volume.Mounter, error) {
	mon, path, id, secretFile, readOnly, err := getVolumeSource(spec)
	if err != nil {
		return nil, err
	}

	if id == "" {
		id = "admin"
	}
	if path == "" {
		path = "/"
	}
	if !strings.HasPrefix(path, "/") {
		path = "/" + path
	}

	if secretFile == "" {
		secretFile = "/etc/ceph/" + id + ".secret"
	}

	return &cephfsMounter{
		cephfs: &cephfs{
			podUID:       podUID,
			volName:      spec.Name(),
			mon:          mon,
			path:         path,
			secret:       secret,
			id:           id,
			secret_file:  secretFile,
			readonly:     readOnly,
			mounter:      mounter,
			plugin:       plugin,
			mountOptions: util.MountOptionFromSpec(spec),
		},
	}, nil
}

func (plugin *cephfsPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
	return plugin.newUnmounterInternal(volName, podUID, plugin.host.GetMounter(plugin.GetPluginName()))
}

func (plugin *cephfsPlugin) newUnmounterInternal(volName string, podUID types.UID, mounter mount.Interface) (volume.Unmounter, error) {
	return &cephfsUnmounter{
		cephfs: &cephfs{
			podUID:  podUID,
			volName: volName,
			mounter: mounter,
			plugin:  plugin},
	}, nil
}

func (plugin *cephfsPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
	cephfsVolume := &v1.Volume{
		Name: volumeName,
		VolumeSource: v1.VolumeSource{
			CephFS: &v1.CephFSVolumeSource{
				Monitors: []string{},
				Path:     mountPath,
			},
		},
	}
	return volume.NewSpecFromVolume(cephfsVolume), nil
}

// CephFS volumes represent a bare host file or directory mount of an CephFS export.
type cephfs struct {
	volName     string
	podUID      types.UID
	mon         []string
	path        string
	id          string
	secret      string
	secret_file string
	readonly    bool
	mounter     mount.Interface
	plugin      *cephfsPlugin
	volume.MetricsNil
	mountOptions []string
}

type cephfsMounter struct {
	*cephfs
}

var _ volume.Mounter = &cephfsMounter{}

func (cephfsVolume *cephfsMounter) GetAttributes() volume.Attributes {
	return volume.Attributes{
		ReadOnly:        cephfsVolume.readonly,
		Managed:         false,
		SupportsSELinux: false,
	}
}

// Checks prior to mount operations to verify that the required components (binaries, etc.)
// to mount the volume are available on the underlying node.
// If not, it returns an error
func (cephfsMounter *cephfsMounter) CanMount() error {
	return nil
}

// SetUp attaches the disk and bind mounts to the volume path.
func (cephfsVolume *cephfsMounter) SetUp(fsGroup *int64) error {
	return cephfsVolume.SetUpAt(cephfsVolume.GetPath(), fsGroup)
}

// SetUpAt attaches the disk and bind mounts to the volume path.
func (cephfsVolume *cephfsMounter) SetUpAt(dir string, fsGroup *int64) error {
	notMnt, err := cephfsVolume.mounter.IsLikelyNotMountPoint(dir)
	glog.V(4).Infof("CephFS mount set up: %s %v %v", dir, !notMnt, err)
	if err != nil && !os.IsNotExist(err) {
		return err
	}
	if !notMnt {
		return nil
	}

	if err := os.MkdirAll(dir, 0750); err != nil {
		return err
	}

	// check whether it belongs to fuse, if not, default to use kernel mount.
	if cephfsVolume.checkFuseMount() {
		glog.V(4).Info("CephFS fuse mount.")
		err = cephfsVolume.execFuseMount(dir)
		// cleanup no matter if fuse mount fail.
		keyringPath := cephfsVolume.GetKeyringPath()
		_, StatErr := os.Stat(keyringPath)
		if !os.IsNotExist(StatErr) {
			os.RemoveAll(keyringPath)
		}
		if err == nil {
			// cephfs fuse mount succeeded.
			return nil
		} else {
			// if cephfs fuse mount failed, fallback to kernel mount.
			glog.V(4).Infof("CephFS fuse mount failed: %v, fallback to kernel mount.", err)
		}
	}
	glog.V(4).Info("CephFS kernel mount.")

	err = cephfsVolume.execMount(dir)
	if err != nil {
		// cleanup upon failure.
		util.UnmountPath(dir, cephfsVolume.mounter)
		return err
	}
	return nil
}

type cephfsUnmounter struct {
	*cephfs
}

var _ volume.Unmounter = &cephfsUnmounter{}

// TearDown unmounts the bind mount
func (cephfsVolume *cephfsUnmounter) TearDown() error {
	return cephfsVolume.TearDownAt(cephfsVolume.GetPath())
}

// TearDownAt unmounts the bind mount
func (cephfsVolume *cephfsUnmounter) TearDownAt(dir string) error {
	return util.UnmountPath(dir, cephfsVolume.mounter)
}

// GetPath creates global mount path
func (cephfsVolume *cephfs) GetPath() string {
	name := cephfsPluginName
	return cephfsVolume.plugin.host.GetPodVolumeDir(cephfsVolume.podUID, utilstrings.EscapeQualifiedNameForDisk(name), cephfsVolume.volName)
}

// GetKeyringPath creates cephfuse keyring path
func (cephfsVolume *cephfs) GetKeyringPath() string {
	name := cephfsPluginName
	volumeDir := cephfsVolume.plugin.host.GetPodVolumeDir(cephfsVolume.podUID, utilstrings.EscapeQualifiedNameForDisk(name), cephfsVolume.volName)
	volumeKeyringDir := volumeDir + "~keyring"
	return volumeKeyringDir
}

func (cephfsVolume *cephfs) execMount(mountpoint string) error {
	// cephfs mount option
	ceph_opt := ""
	// override secretfile if secret is provided
	if cephfsVolume.secret != "" {
		ceph_opt = "name=" + cephfsVolume.id + ",secret=" + cephfsVolume.secret
	} else {
		ceph_opt = "name=" + cephfsVolume.id + ",secretfile=" + cephfsVolume.secret_file
	}
	// build option array
	opt := []string{}
	if cephfsVolume.readonly {
		opt = append(opt, "ro")
	}
	opt = append(opt, ceph_opt)

	// build src like mon1:6789,mon2:6789,mon3:6789:/
	hosts := cephfsVolume.mon
	l := len(hosts)
	// pass all monitors and let ceph randomize and fail over
	i := 0
	src := ""
	for i = 0; i < l-1; i++ {
		src += hosts[i] + ","
	}
	src += hosts[i] + ":" + cephfsVolume.path

	mountOptions := util.JoinMountOptions(cephfsVolume.mountOptions, opt)
	if err := cephfsVolume.mounter.Mount(src, mountpoint, "ceph", mountOptions); err != nil {
		return fmt.Errorf("CephFS: mount failed: %v", err)
	}

	return nil
}
|
||||
|
||||
func (cephfsMounter *cephfsMounter) checkFuseMount() bool {
|
||||
execute := cephfsMounter.plugin.host.GetExec(cephfsMounter.plugin.GetPluginName())
|
||||
switch runtime.GOOS {
|
||||
case "linux":
|
||||
if _, err := execute.Run("/usr/bin/test", "-x", "/sbin/mount.fuse.ceph"); err == nil {
|
||||
glog.V(4).Info("/sbin/mount.fuse.ceph exists, it should be fuse mount.")
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (cephfsVolume *cephfs) execFuseMount(mountpoint string) error {
|
||||
// cephfs keyring file
|
||||
keyring_file := ""
|
||||
// override secretfile if secret is provided
|
||||
if cephfsVolume.secret != "" {
|
||||
// TODO: cephfs fuse currently doesn't support secret option,
|
||||
// remove keyring file create once secret option is supported.
|
||||
glog.V(4).Info("cephfs mount begin using fuse.")
|
||||
|
||||
keyringPath := cephfsVolume.GetKeyringPath()
|
||||
os.MkdirAll(keyringPath, 0750)
|
||||
|
||||
payload := make(map[string]util.FileProjection, 1)
|
||||
var fileProjection util.FileProjection
|
||||
|
||||
keyring := fmt.Sprintf("[client.%s]\nkey = %s\n", cephfsVolume.id, cephfsVolume.secret)
|
||||
|
||||
fileProjection.Data = []byte(keyring)
|
||||
fileProjection.Mode = int32(0644)
|
||||
fileName := cephfsVolume.id + ".keyring"
|
||||
|
||||
payload[fileName] = fileProjection
|
||||
|
||||
writerContext := fmt.Sprintf("cephfuse:%v.keyring", cephfsVolume.id)
|
||||
writer, err := util.NewAtomicWriter(keyringPath, writerContext)
|
||||
if err != nil {
|
||||
glog.Errorf("failed to create atomic writer: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
err = writer.Write(payload)
|
||||
if err != nil {
|
||||
glog.Errorf("failed to write payload to dir: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
keyring_file = path.Join(keyringPath, fileName)
|
||||
|
||||
} else {
|
||||
keyring_file = cephfsVolume.secret_file
|
||||
}
|
||||
|
||||
// build src like mon1:6789,mon2:6789,mon3:6789:/
|
||||
hosts := cephfsVolume.mon
|
||||
l := len(hosts)
|
||||
// pass all monitors and let ceph randomize and fail over
|
||||
i := 0
|
||||
src := ""
|
||||
for i = 0; i < l-1; i++ {
|
||||
src += hosts[i] + ","
|
||||
}
|
||||
src += hosts[i]
|
||||
|
||||
mountArgs := []string{}
|
||||
mountArgs = append(mountArgs, "-k")
|
||||
mountArgs = append(mountArgs, keyring_file)
|
||||
mountArgs = append(mountArgs, "-m")
|
||||
mountArgs = append(mountArgs, src)
|
||||
mountArgs = append(mountArgs, mountpoint)
|
||||
mountArgs = append(mountArgs, "-r")
|
||||
mountArgs = append(mountArgs, cephfsVolume.path)
|
||||
mountArgs = append(mountArgs, "--id")
|
||||
mountArgs = append(mountArgs, cephfsVolume.id)
|
||||
|
||||
glog.V(4).Infof("Mounting cmd ceph-fuse with arguments (%s)", mountArgs)
|
||||
command := exec.Command("ceph-fuse", mountArgs...)
|
||||
output, err := command.CombinedOutput()
|
||||
if err != nil || !(strings.Contains(string(output), "starting fuse")) {
|
||||
return fmt.Errorf("Ceph-fuse failed: %v\narguments: %s\nOutput: %s\n", err, mountArgs, string(output))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getVolumeSource(spec *volume.Spec) ([]string, string, string, string, bool, error) {
|
||||
if spec.Volume != nil && spec.Volume.CephFS != nil {
|
||||
mon := spec.Volume.CephFS.Monitors
|
||||
path := spec.Volume.CephFS.Path
|
||||
user := spec.Volume.CephFS.User
|
||||
secretFile := spec.Volume.CephFS.SecretFile
|
||||
readOnly := spec.Volume.CephFS.ReadOnly
|
||||
return mon, path, user, secretFile, readOnly, nil
|
||||
} else if spec.PersistentVolume != nil &&
|
||||
spec.PersistentVolume.Spec.CephFS != nil {
|
||||
mon := spec.PersistentVolume.Spec.CephFS.Monitors
|
||||
path := spec.PersistentVolume.Spec.CephFS.Path
|
||||
user := spec.PersistentVolume.Spec.CephFS.User
|
||||
secretFile := spec.PersistentVolume.Spec.CephFS.SecretFile
|
||||
readOnly := spec.PersistentVolume.Spec.CephFS.ReadOnly
|
||||
return mon, path, user, secretFile, readOnly, nil
|
||||
}
|
||||
|
||||
return nil, "", "", "", false, fmt.Errorf("Spec does not reference a CephFS volume type")
|
||||
}
|
||||
|
||||
func getSecretNameAndNamespace(spec *volume.Spec, defaultNamespace string) (string, string, error) {
|
||||
if spec.Volume != nil && spec.Volume.CephFS != nil {
|
||||
localSecretRef := spec.Volume.CephFS.SecretRef
|
||||
if localSecretRef != nil {
|
||||
return localSecretRef.Name, defaultNamespace, nil
|
||||
}
|
||||
return "", "", nil
|
||||
|
||||
} else if spec.PersistentVolume != nil &&
|
||||
spec.PersistentVolume.Spec.CephFS != nil {
|
||||
secretRef := spec.PersistentVolume.Spec.CephFS.SecretRef
|
||||
secretNs := defaultNamespace
|
||||
if secretRef != nil {
|
||||
if len(secretRef.Namespace) != 0 {
|
||||
secretNs = secretRef.Namespace
|
||||
}
|
||||
return secretRef.Name, secretNs, nil
|
||||
}
|
||||
return "", "", nil
|
||||
}
|
||||
return "", "", fmt.Errorf("Spec does not reference an CephFS volume type")
|
||||
}
|
227
vendor/k8s.io/kubernetes/pkg/volume/cephfs/cephfs_test.go
generated
vendored
@@ -1,227 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cephfs

import (
	"os"
	"path"
	"testing"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	utiltesting "k8s.io/client-go/util/testing"
	"k8s.io/kubernetes/pkg/util/mount"
	"k8s.io/kubernetes/pkg/volume"
	volumetest "k8s.io/kubernetes/pkg/volume/testing"
)

func TestCanSupport(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("cephTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
	plug, err := plugMgr.FindPluginByName("kubernetes.io/cephfs")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	if plug.GetPluginName() != "kubernetes.io/cephfs" {
		t.Errorf("Wrong name: %s", plug.GetPluginName())
	}
	if plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
		t.Errorf("Expected false")
	}
	if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{CephFS: &v1.CephFSVolumeSource{}}}}) {
		t.Errorf("Expected true")
	}
}

func TestPlugin(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("cephTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
	plug, err := plugMgr.FindPluginByName("kubernetes.io/cephfs")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	spec := &v1.Volume{
		Name: "vol1",
		VolumeSource: v1.VolumeSource{
			CephFS: &v1.CephFSVolumeSource{
				Monitors:   []string{"a", "b"},
				User:       "user",
				SecretRef:  nil,
				SecretFile: "/etc/ceph/user.secret",
			},
		},
	}

	mounter, err := plug.(*cephfsPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &mount.FakeMounter{}, "secrets")
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
	if mounter == nil {
		t.Errorf("Got a nil Mounter")
	}
	volumePath := mounter.GetPath()
	volpath := path.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~cephfs/vol1")
	if volumePath != volpath {
		t.Errorf("Got unexpected path: %s", volumePath)
	}
	if err := mounter.SetUp(nil); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(volumePath); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, volume path not created: %s", volumePath)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}
	unmounter, err := plug.(*cephfsPlugin).newUnmounterInternal("vol1", types.UID("poduid"), &mount.FakeMounter{})
	if err != nil {
		t.Errorf("Failed to make a new Unmounter: %v", err)
	}
	if unmounter == nil {
		t.Errorf("Got a nil Unmounter")
	}
	if err := unmounter.TearDown(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(volumePath); err == nil {
		t.Errorf("TearDown() failed, volume path still exists: %s", volumePath)
	} else if !os.IsNotExist(err) {
		t.Errorf("TearDown() failed: %v", err)
	}
}

func TestConstructVolumeSpec(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("cephTest")
	if err != nil {
		t.Fatalf("Can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
	plug, err := plugMgr.FindPluginByName("kubernetes.io/cephfs")
	if err != nil {
		t.Errorf("can't find cephfs plugin by name")
	}

	cephfsSpec, err := plug.(*cephfsPlugin).ConstructVolumeSpec("cephfsVolume", "/cephfsVolume/")

	if cephfsSpec.Name() != "cephfsVolume" {
		t.Errorf("Got wrong cephfs spec name: %s", cephfsSpec.Name())
	}
}

type testcase struct {
	name      string
	defaultNs string
	spec      *volume.Spec
	// Expected return of the test
	expectedName  string
	expectedNs    string
	expectedError error
}

func TestGetSecretNameAndNamespaceForPV(t *testing.T) {
	tests := []testcase{
		{
			name:      "persistent volume source",
			defaultNs: "default",
			spec: &volume.Spec{
				PersistentVolume: &v1.PersistentVolume{
					Spec: v1.PersistentVolumeSpec{
						PersistentVolumeSource: v1.PersistentVolumeSource{
							CephFS: &v1.CephFSPersistentVolumeSource{
								Monitors: []string{"a", "b"},
								User:     "user",
								SecretRef: &v1.SecretReference{
									Name:      "name",
									Namespace: "ns",
								},
								SecretFile: "/etc/ceph/user.secret",
							},
						},
					},
				},
			},
			expectedName:  "name",
			expectedNs:    "ns",
			expectedError: nil,
		},
		{
			name:      "persistent volume source without namespace",
			defaultNs: "default",
			spec: &volume.Spec{
				PersistentVolume: &v1.PersistentVolume{
					Spec: v1.PersistentVolumeSpec{
						PersistentVolumeSource: v1.PersistentVolumeSource{
							CephFS: &v1.CephFSPersistentVolumeSource{
								Monitors: []string{"a", "b"},
								User:     "user",
								SecretRef: &v1.SecretReference{
									Name: "name",
								},
								SecretFile: "/etc/ceph/user.secret",
							},
						},
					},
				},
			},
			expectedName:  "name",
			expectedNs:    "default",
			expectedError: nil,
		},
		{
			name:      "pod volume source",
			defaultNs: "default",
			spec: &volume.Spec{
				Volume: &v1.Volume{
					VolumeSource: v1.VolumeSource{
						CephFS: &v1.CephFSVolumeSource{
							Monitors: []string{"a", "b"},
							User:     "user",
							SecretRef: &v1.LocalObjectReference{
								Name: "name",
							},
							SecretFile: "/etc/ceph/user.secret",
						},
					},
				},
			},
			expectedName:  "name",
			expectedNs:    "default",
			expectedError: nil,
		},
	}
	for _, testcase := range tests {
		resultName, resultNs, err := getSecretNameAndNamespace(testcase.spec, testcase.defaultNs)
		if err != testcase.expectedError || resultName != testcase.expectedName || resultNs != testcase.expectedNs {
			t.Errorf("%s failed: expected err=%v ns=%q name=%q, got %v/%q/%q", testcase.name, testcase.expectedError, testcase.expectedNs, testcase.expectedName,
				err, resultNs, resultName)
		}
	}
}
19
vendor/k8s.io/kubernetes/pkg/volume/cephfs/doc.go
generated
vendored
@@ -1,19 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package cephfs contains the internal representation of Ceph file system
// (CephFS) volumes.
package cephfs // import "k8s.io/kubernetes/pkg/volume/cephfs"
70
vendor/k8s.io/kubernetes/pkg/volume/cinder/BUILD
generated
vendored
@@ -1,70 +0,0 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = [
        "attacher.go",
        "cinder.go",
        "cinder_util.go",
        "doc.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/volume/cinder",
    deps = [
        "//pkg/cloudprovider:go_default_library",
        "//pkg/cloudprovider/providers/openstack:go_default_library",
        "//pkg/kubelet/apis:go_default_library",
        "//pkg/util/keymutex:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/util/strings:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/utils/exec:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "attacher_test.go",
        "cinder_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//pkg/cloudprovider:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/testing:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/client-go/util/testing:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
13
vendor/k8s.io/kubernetes/pkg/volume/cinder/OWNERS
generated
vendored
@@ -1,13 +0,0 @@
approvers:
- jsafrane
- anguslees
- dims
- FengyunPan2
reviewers:
- anguslees
- rootfs
- saad-ali
- jsafrane
- jingxu97
- msau42
- FengyunPan2
408
vendor/k8s.io/kubernetes/pkg/volume/cinder/attacher.go
generated
vendored
@@ -1,408 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cinder

import (
	"context"
	"fmt"
	"os"
	"path"
	"strings"
	"time"

	"github.com/golang/glog"
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/kubernetes/pkg/util/mount"
	"k8s.io/kubernetes/pkg/volume"
	volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

type cinderDiskAttacher struct {
	host           volume.VolumeHost
	cinderProvider BlockStorageProvider
}

var _ volume.Attacher = &cinderDiskAttacher{}

var _ volume.AttachableVolumePlugin = &cinderPlugin{}

const (
	probeVolumeInitDelay     = 1 * time.Second
	probeVolumeFactor        = 2.0
	operationFinishInitDelay = 1 * time.Second
	operationFinishFactor    = 1.1
	operationFinishSteps     = 10
	diskAttachInitDelay      = 1 * time.Second
	diskAttachFactor         = 1.2
	diskAttachSteps          = 15
	diskDetachInitDelay      = 1 * time.Second
	diskDetachFactor         = 1.2
	diskDetachSteps          = 13
)

func (plugin *cinderPlugin) NewAttacher() (volume.Attacher, error) {
	cinder, err := plugin.getCloudProvider()
	if err != nil {
		return nil, err
	}
	return &cinderDiskAttacher{
		host:           plugin.host,
		cinderProvider: cinder,
	}, nil
}

func (plugin *cinderPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
	mounter := plugin.host.GetMounter(plugin.GetPluginName())
	return mount.GetMountRefs(mounter, deviceMountPath)
}

func (attacher *cinderDiskAttacher) waitOperationFinished(volumeID string) error {
	backoff := wait.Backoff{
		Duration: operationFinishInitDelay,
		Factor:   operationFinishFactor,
		Steps:    operationFinishSteps,
	}

	var volumeStatus string
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		var pending bool
		var err error
		pending, volumeStatus, err = attacher.cinderProvider.OperationPending(volumeID)
		if err != nil {
			return false, err
		}
		return !pending, nil
	})

	if err == wait.ErrWaitTimeout {
		err = fmt.Errorf("Volume %q is %s, can't finish within the allotted time", volumeID, volumeStatus)
	}

	return err
}

func (attacher *cinderDiskAttacher) waitDiskAttached(instanceID, volumeID string) error {
	backoff := wait.Backoff{
		Duration: diskAttachInitDelay,
		Factor:   diskAttachFactor,
		Steps:    diskAttachSteps,
	}

	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		attached, err := attacher.cinderProvider.DiskIsAttached(instanceID, volumeID)
		if err != nil {
			return false, err
		}
		return attached, nil
	})

	if err == wait.ErrWaitTimeout {
		err = fmt.Errorf("Volume %q failed to be attached within the allotted time", volumeID)
	}

	return err
}

func (attacher *cinderDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
	volumeID, _, _, err := getVolumeInfo(spec)
	if err != nil {
		return "", err
	}

	instanceID, err := attacher.nodeInstanceID(nodeName)
	if err != nil {
		return "", err
	}

	if err := attacher.waitOperationFinished(volumeID); err != nil {
		return "", err
	}

	attached, err := attacher.cinderProvider.DiskIsAttached(instanceID, volumeID)
	if err != nil {
		// Log error and continue with attach
		glog.Warningf(
			"Error checking if volume (%q) is already attached to current instance (%q). Will continue and try attach anyway. err=%v",
			volumeID, instanceID, err)
	}

	if err == nil && attached {
		// Volume is already attached to instance.
		glog.Infof("Attach operation is successful. volume %q is already attached to instance %q.", volumeID, instanceID)
	} else {
		_, err = attacher.cinderProvider.AttachDisk(instanceID, volumeID)
		if err == nil {
			if err = attacher.waitDiskAttached(instanceID, volumeID); err != nil {
				glog.Errorf("Error waiting for volume %q to be attached from node %q: %v", volumeID, nodeName, err)
				return "", err
			}
			glog.Infof("Attach operation successful: volume %q attached to instance %q.", volumeID, instanceID)
		} else {
			glog.Infof("Attach volume %q to instance %q failed with: %v", volumeID, instanceID, err)
			return "", err
		}
	}

	devicePath, err := attacher.cinderProvider.GetAttachmentDiskPath(instanceID, volumeID)
	if err != nil {
		glog.Infof("Cannot get device path of volume %q attached to instance %q, failed with: %v", volumeID, instanceID, err)
		return "", err
	}

	return devicePath, nil
}

func (attacher *cinderDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
	volumesAttachedCheck := make(map[*volume.Spec]bool)
	volumeSpecMap := make(map[string]*volume.Spec)
	volumeIDList := []string{}
	for _, spec := range specs {
		volumeID, _, _, err := getVolumeInfo(spec)
		if err != nil {
			glog.Errorf("Error getting volume (%q) source : %v", spec.Name(), err)
			continue
		}

		volumeIDList = append(volumeIDList, volumeID)
		volumesAttachedCheck[spec] = true
		volumeSpecMap[volumeID] = spec
	}

	attachedResult, err := attacher.cinderProvider.DisksAreAttachedByName(nodeName, volumeIDList)
	if err != nil {
		// Log error and continue with attach
		glog.Errorf(
			"Error checking if Volumes (%v) are already attached to current node (%q). Will continue and try attach anyway. err=%v",
			volumeIDList, nodeName, err)
		return volumesAttachedCheck, err
	}

	for volumeID, attached := range attachedResult {
		if !attached {
			spec := volumeSpecMap[volumeID]
			volumesAttachedCheck[spec] = false
			glog.V(2).Infof("VolumesAreAttached: check volume %q (specName: %q) is no longer attached", volumeID, spec.Name())
		}
	}
	return volumesAttachedCheck, nil
}

func (attacher *cinderDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) {
	// NOTE: devicePath is the path as reported by Cinder, which may be incorrect and should not be used. See Issue #33128
	volumeID, _, _, err := getVolumeInfo(spec)
	if err != nil {
		return "", err
	}

	if devicePath == "" {
		return "", fmt.Errorf("WaitForAttach failed for Cinder disk %q: devicePath is empty", volumeID)
	}

	ticker := time.NewTicker(probeVolumeInitDelay)
	defer ticker.Stop()
	timer := time.NewTimer(timeout)
	defer timer.Stop()

	duration := probeVolumeInitDelay
	for {
		select {
		case <-ticker.C:
			glog.V(5).Infof("Checking Cinder disk %q is attached.", volumeID)
			probeAttachedVolume()
			if !attacher.cinderProvider.ShouldTrustDevicePath() {
				// Using the Cinder volume ID, find the real device path (See Issue #33128)
				devicePath = attacher.cinderProvider.GetDevicePath(volumeID)
			}
			exists, err := volumeutil.PathExists(devicePath)
			if exists && err == nil {
				glog.Infof("Successfully found attached Cinder disk %q at %v.", volumeID, devicePath)
				return devicePath, nil
			}
			// Log an error, and continue checking periodically
			glog.Errorf("Error: could not find attached Cinder disk %q (path: %q): %v", volumeID, devicePath, err)
			// Using exponential backoff instead of linear
			ticker.Stop()
			duration = time.Duration(float64(duration) * probeVolumeFactor)
			ticker = time.NewTicker(duration)
		case <-timer.C:
			return "", fmt.Errorf("could not find attached Cinder disk %q. Timeout waiting for mount paths to be created", volumeID)
		}
	}
}

func (attacher *cinderDiskAttacher) GetDeviceMountPath(
	spec *volume.Spec) (string, error) {
	volumeID, _, _, err := getVolumeInfo(spec)
	if err != nil {
		return "", err
	}

	return makeGlobalPDName(attacher.host, volumeID), nil
}

// FIXME: this method can be further pruned.
func (attacher *cinderDiskAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
	mounter := attacher.host.GetMounter(cinderVolumePluginName)
	notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
	if err != nil {
		if os.IsNotExist(err) {
			if err := os.MkdirAll(deviceMountPath, 0750); err != nil {
				return err
			}
			notMnt = true
		} else {
			return err
		}
	}

	_, volumeFSType, readOnly, err := getVolumeInfo(spec)
	if err != nil {
		return err
	}

	options := []string{}
	if readOnly {
		options = append(options, "ro")
	}
	if notMnt {
		diskMounter := volumeutil.NewSafeFormatAndMountFromHost(cinderVolumePluginName, attacher.host)
		mountOptions := volumeutil.MountOptionFromSpec(spec, options...)
		err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeFSType, mountOptions)
		if err != nil {
			os.Remove(deviceMountPath)
			return err
		}
	}
	return nil
}

type cinderDiskDetacher struct {
	mounter        mount.Interface
	cinderProvider BlockStorageProvider
}

var _ volume.Detacher = &cinderDiskDetacher{}

func (plugin *cinderPlugin) NewDetacher() (volume.Detacher, error) {
	cinder, err := plugin.getCloudProvider()
	if err != nil {
		return nil, err
	}
	return &cinderDiskDetacher{
		mounter:        plugin.host.GetMounter(plugin.GetPluginName()),
		cinderProvider: cinder,
	}, nil
}

func (detacher *cinderDiskDetacher) waitOperationFinished(volumeID string) error {
	backoff := wait.Backoff{
		Duration: operationFinishInitDelay,
		Factor:   operationFinishFactor,
		Steps:    operationFinishSteps,
	}

	var volumeStatus string
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		var pending bool
		var err error
		pending, volumeStatus, err = detacher.cinderProvider.OperationPending(volumeID)
		if err != nil {
			return false, err
		}
		return !pending, nil
	})

	if err == wait.ErrWaitTimeout {
		err = fmt.Errorf("Volume %q is %s, can't finish within the allotted time", volumeID, volumeStatus)
	}

	return err
}

func (detacher *cinderDiskDetacher) waitDiskDetached(instanceID, volumeID string) error {
	backoff := wait.Backoff{
		Duration: diskDetachInitDelay,
		Factor:   diskDetachFactor,
		Steps:    diskDetachSteps,
	}

	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		attached, err := detacher.cinderProvider.DiskIsAttached(instanceID, volumeID)
		if err != nil {
			return false, err
		}
		return !attached, nil
	})

	if err == wait.ErrWaitTimeout {
		err = fmt.Errorf("Volume %q failed to detach within the allotted time", volumeID)
	}

	return err
}

func (detacher *cinderDiskDetacher) Detach(volumeName string, nodeName types.NodeName) error {
	volumeID := path.Base(volumeName)
	if err := detacher.waitOperationFinished(volumeID); err != nil {
		return err
	}
	attached, instanceID, err := detacher.cinderProvider.DiskIsAttachedByName(nodeName, volumeID)
	if err != nil {
		// Log error and continue with detach
		glog.Errorf(
			"Error checking if volume (%q) is already attached to current node (%q). Will continue and try detach anyway. err=%v",
			volumeID, nodeName, err)
	}

	if err == nil && !attached {
		// Volume is already detached from node.
		glog.Infof("detach operation was successful. volume %q is already detached from node %q.", volumeID, nodeName)
		return nil
	}

	if err = detacher.cinderProvider.DetachDisk(instanceID, volumeID); err != nil {
		glog.Errorf("Error detaching volume %q from node %q: %v", volumeID, nodeName, err)
		return err
	}
	if err = detacher.waitDiskDetached(instanceID, volumeID); err != nil {
		glog.Errorf("Error waiting for volume %q to detach from node %q: %v", volumeID, nodeName, err)
		return err
	}
	glog.Infof("detached volume %q from node %q", volumeID, nodeName)
	return nil
}

func (detacher *cinderDiskDetacher) UnmountDevice(deviceMountPath string) error {
	return volumeutil.UnmountPath(deviceMountPath, detacher.mounter)
}

func (attacher *cinderDiskAttacher) nodeInstanceID(nodeName types.NodeName) (string, error) {
	instances, res := attacher.cinderProvider.Instances()
	if !res {
		return "", fmt.Errorf("failed to list openstack instances")
	}
	instanceID, err := instances.InstanceID(context.TODO(), nodeName)
	if err != nil {
		return "", err
	}
	if ind := strings.LastIndex(instanceID, "/"); ind >= 0 {
		instanceID = instanceID[(ind + 1):]
	}
	return instanceID, nil
}
745
vendor/k8s.io/kubernetes/pkg/volume/cinder/attacher_test.go
generated
vendored
@@ -1,745 +0,0 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cinder
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
volumetest "k8s.io/kubernetes/pkg/volume/testing"
|
||||
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
|
||||
const (
|
||||
VolumeStatusPending = "pending"
|
||||
VolumeStatusDone = "done"
|
||||
)
|
||||
|
||||
var attachStatus = "Attach"
|
||||
var detachStatus = "Detach"
|
||||
|
||||
func TestGetDeviceName_Volume(t *testing.T) {
|
||||
plugin := newPlugin()
|
||||
name := "my-cinder-volume"
|
||||
spec := createVolSpec(name, false)
|
||||
|
||||
deviceName, err := plugin.GetVolumeName(spec)
|
||||
if err != nil {
|
||||
t.Errorf("GetDeviceName error: %v", err)
|
||||
}
|
||||
if deviceName != name {
|
||||
t.Errorf("GetDeviceName error: expected %s, got %s", name, deviceName)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetDeviceName_PersistentVolume(t *testing.T) {
|
||||
plugin := newPlugin()
|
||||
name := "my-cinder-pv"
|
||||
spec := createPVSpec(name, true)
|
||||
|
||||
deviceName, err := plugin.GetVolumeName(spec)
|
||||
if err != nil {
|
||||
t.Errorf("GetDeviceName error: %v", err)
|
||||
}
|
||||
if deviceName != name {
|
||||
t.Errorf("GetDeviceName error: expected %s, got %s", name, deviceName)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetDeviceMountPath(t *testing.T) {
|
||||
name := "cinder-volume-id"
|
||||
spec := createVolSpec(name, false)
|
||||
rootDir := "/var/lib/kubelet/"
|
||||
host := volumetest.NewFakeVolumeHost(rootDir, nil, nil)
|
||||
|
||||
attacher := &cinderDiskAttacher{
|
||||
host: host,
|
||||
}
|
||||
|
||||
//test the path
|
||||
path, err := attacher.GetDeviceMountPath(spec)
|
||||
if err != nil {
|
||||
t.Errorf("Get device mount path error")
|
||||
}
|
||||
expectedPath := rootDir + "plugins/kubernetes.io/cinder/mounts/" + name
|
||||
if path != expectedPath {
|
||||
t.Errorf("Device mount path error: expected %s, got %s ", expectedPath, path)
|
||||
}
|
||||
}
|
||||
|
||||
// One testcase for TestAttachDetach table test below
|
||||
type testcase struct {
|
||||
name string
|
||||
// For fake GCE:
|
||||
attach attachCall
|
||||
detach detachCall
|
||||
operationPending operationPendingCall
|
||||
diskIsAttached diskIsAttachedCall
|
||||
disksAreAttached disksAreAttachedCall
|
||||
diskPath diskPathCall
|
||||
t *testing.T
|
||||
attachOrDetach *string
|
||||
|
||||
instanceID string
|
||||
// Actual test to run
|
||||
test func(test *testcase) (string, error)
|
||||
// Expected return of the test
|
||||
expectedResult string
|
||||
expectedError error
|
||||
}
|
||||
|
||||
func TestAttachDetach(t *testing.T) {
|
||||
volumeID := "disk"
|
||||
instanceID := "instance"
|
||||
pending := VolumeStatusPending
|
||||
done := VolumeStatusDone
|
||||
nodeName := types.NodeName("nodeName")
|
||||
readOnly := false
|
||||
spec := createVolSpec(volumeID, readOnly)
|
||||
attachError := errors.New("Fake attach error")
|
||||
detachError := errors.New("Fake detach error")
|
||||
diskCheckError := errors.New("Fake DiskIsAttached error")
|
||||
diskPathError := errors.New("Fake GetAttachmentDiskPath error")
|
||||
disksCheckError := errors.New("Fake DisksAreAttached error")
|
||||
operationFinishTimeout := errors.New("Fake waitOperationFinished error")
|
||||
tests := []testcase{
|
||||
// Successful Attach call
|
||||
{
|
||||
name: "Attach_Positive",
|
||||
instanceID: instanceID,
|
||||
operationPending: operationPendingCall{volumeID, false, done, nil},
|
||||
diskIsAttached: diskIsAttachedCall{instanceID, nodeName, volumeID, false, nil},
|
||||
attach: attachCall{instanceID, volumeID, "", nil},
|
||||
diskPath: diskPathCall{instanceID, volumeID, "/dev/sda", nil},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
attacher := newAttacher(testcase)
|
||||
return attacher.Attach(spec, nodeName)
|
||||
},
|
||||
expectedResult: "/dev/sda",
|
||||
},
|
||||
|
||||
// Disk is already attached
|
||||
{
|
||||
name: "Attach_Positive_AlreadyAttached",
|
||||
instanceID: instanceID,
|
||||
operationPending: operationPendingCall{volumeID, false, done, nil},
|
||||
diskIsAttached: diskIsAttachedCall{instanceID, nodeName, volumeID, true, nil},
|
||||
diskPath: diskPathCall{instanceID, volumeID, "/dev/sda", nil},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
attacher := newAttacher(testcase)
|
||||
return attacher.Attach(spec, nodeName)
|
||||
},
|
||||
expectedResult: "/dev/sda",
|
||||
},
|
||||
|
||||
// Disk is attaching
|
||||
{
|
||||
name: "Attach_is_attaching",
|
||||
instanceID: instanceID,
|
||||
operationPending: operationPendingCall{volumeID, true, pending, operationFinishTimeout},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
attacher := newAttacher(testcase)
|
||||
return attacher.Attach(spec, nodeName)
|
||||
},
|
||||
expectedError: operationFinishTimeout,
|
||||
},
|
||||
|
||||
// Attach call fails
|
||||
{
|
||||
name: "Attach_Negative",
|
||||
instanceID: instanceID,
|
||||
operationPending: operationPendingCall{volumeID, false, done, nil},
|
||||
diskIsAttached: diskIsAttachedCall{instanceID, nodeName, volumeID, false, diskCheckError},
|
||||
attach: attachCall{instanceID, volumeID, "/dev/sda", attachError},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
attacher := newAttacher(testcase)
|
||||
return attacher.Attach(spec, nodeName)
|
||||
},
|
||||
expectedError: attachError,
|
||||
},
|
||||
|
||||
// GetAttachmentDiskPath call fails
|
||||
{
|
||||
name: "Attach_Negative_DiskPatchFails",
|
||||
instanceID: instanceID,
|
||||
operationPending: operationPendingCall{volumeID, false, done, nil},
|
||||
diskIsAttached: diskIsAttachedCall{instanceID, nodeName, volumeID, false, nil},
|
||||
attach: attachCall{instanceID, volumeID, "", nil},
|
||||
diskPath: diskPathCall{instanceID, volumeID, "", diskPathError},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
attacher := newAttacher(testcase)
|
||||
return attacher.Attach(spec, nodeName)
|
||||
},
|
||||
expectedError: diskPathError,
|
||||
},
|
||||
|
||||
// Successful VolumesAreAttached call, attached
|
||||
{
|
||||
name: "VolumesAreAttached_Positive",
|
||||
instanceID: instanceID,
|
||||
disksAreAttached: disksAreAttachedCall{instanceID, nodeName, []string{volumeID}, map[string]bool{volumeID: true}, nil},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
attacher := newAttacher(testcase)
|
||||
attachments, err := attacher.VolumesAreAttached([]*volume.Spec{spec}, nodeName)
|
||||
return serializeAttachments(attachments), err
|
||||
},
|
||||
expectedResult: serializeAttachments(map[*volume.Spec]bool{spec: true}),
|
||||
},
|
||||
|
||||
// Successful VolumesAreAttached call, not attached
|
||||
{
|
||||
name: "VolumesAreAttached_Negative",
|
||||
instanceID: instanceID,
|
||||
disksAreAttached: disksAreAttachedCall{instanceID, nodeName, []string{volumeID}, map[string]bool{volumeID: false}, nil},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
attacher := newAttacher(testcase)
|
||||
attachments, err := attacher.VolumesAreAttached([]*volume.Spec{spec}, nodeName)
|
||||
return serializeAttachments(attachments), err
|
||||
},
|
||||
expectedResult: serializeAttachments(map[*volume.Spec]bool{spec: false}),
|
||||
},
|
||||
|
||||
// Treat as attached when DisksAreAttached call fails
|
||||
{
|
||||
name: "VolumesAreAttached_CinderFailed",
|
||||
instanceID: instanceID,
|
||||
disksAreAttached: disksAreAttachedCall{instanceID, nodeName, []string{volumeID}, nil, disksCheckError},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
attacher := newAttacher(testcase)
|
||||
attachments, err := attacher.VolumesAreAttached([]*volume.Spec{spec}, nodeName)
|
||||
return serializeAttachments(attachments), err
|
||||
},
|
||||
expectedResult: serializeAttachments(map[*volume.Spec]bool{spec: true}),
|
||||
expectedError: disksCheckError,
|
||||
},
|
||||
|
||||
// Detach succeeds
|
||||
{
|
||||
name: "Detach_Positive",
|
||||
instanceID: instanceID,
|
||||
operationPending: operationPendingCall{volumeID, false, done, nil},
|
||||
diskIsAttached: diskIsAttachedCall{instanceID, nodeName, volumeID, true, nil},
|
||||
detach: detachCall{instanceID, volumeID, nil},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
detacher := newDetacher(testcase)
|
||||
return "", detacher.Detach(volumeID, nodeName)
|
||||
},
|
||||
},
|
||||
|
||||
// Disk is already detached
|
||||
{
|
||||
name: "Detach_Positive_AlreadyDetached",
|
||||
instanceID: instanceID,
|
||||
operationPending: operationPendingCall{volumeID, false, done, nil},
|
||||
diskIsAttached: diskIsAttachedCall{instanceID, nodeName, volumeID, false, nil},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
detacher := newDetacher(testcase)
|
||||
return "", detacher.Detach(volumeID, nodeName)
|
||||
},
|
||||
},
|
||||
|
||||
// Detach succeeds when DiskIsAttached fails
|
||||
{
|
||||
name: "Detach_Positive_CheckFails",
|
||||
instanceID: instanceID,
|
||||
operationPending: operationPendingCall{volumeID, false, done, nil},
|
||||
diskIsAttached: diskIsAttachedCall{instanceID, nodeName, volumeID, false, diskCheckError},
|
||||
detach: detachCall{instanceID, volumeID, nil},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
detacher := newDetacher(testcase)
|
||||
return "", detacher.Detach(volumeID, nodeName)
|
||||
},
|
||||
},
|
||||
|
||||
// Detach fails
|
||||
{
|
||||
name: "Detach_Negative",
|
||||
instanceID: instanceID,
|
||||
operationPending: operationPendingCall{volumeID, false, done, nil},
|
||||
diskIsAttached: diskIsAttachedCall{instanceID, nodeName, volumeID, false, diskCheckError},
|
||||
detach: detachCall{instanceID, volumeID, detachError},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
detacher := newDetacher(testcase)
|
||||
return "", detacher.Detach(volumeID, nodeName)
|
||||
},
|
||||
expectedError: detachError,
|
||||
},
|
||||
|
||||
// // Disk is detaching
|
||||
{
|
||||
name: "Detach_Is_Detaching",
|
||||
instanceID: instanceID,
|
||||
operationPending: operationPendingCall{volumeID, true, pending, operationFinishTimeout},
|
||||
test: func(testcase *testcase) (string, error) {
|
||||
detacher := newDetacher(testcase)
|
||||
return "", detacher.Detach(volumeID, nodeName)
|
||||
},
|
||||
expectedError: operationFinishTimeout,
|
||||
},
|
||||
}
|
||||
|
||||
for _, testcase := range tests {
|
||||
testcase.t = t
|
||||
attachOrDetach := ""
|
||||
testcase.attachOrDetach = &attachOrDetach
|
||||
result, err := testcase.test(&testcase)
|
||||
if err != testcase.expectedError {
|
||||
t.Errorf("%s failed: expected err=%q, got %q", testcase.name, testcase.expectedError, err)
|
||||
}
|
||||
if result != testcase.expectedResult {
|
||||
t.Errorf("%s failed: expected result=%q, got %q", testcase.name, testcase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type volumeAttachmentFlag struct {
|
||||
volumeID string
|
||||
attached bool
|
||||
}
|
||||
|
||||
type volumeAttachmentFlags []volumeAttachmentFlag
|
||||
|
||||
func (va volumeAttachmentFlags) Len() int {
|
||||
return len(va)
|
||||
}
|
||||
|
||||
func (va volumeAttachmentFlags) Swap(i, j int) {
|
||||
va[i], va[j] = va[j], va[i]
|
||||
}
|
||||
|
||||
func (va volumeAttachmentFlags) Less(i, j int) bool {
|
||||
if va[i].volumeID < va[j].volumeID {
|
||||
return true
|
||||
}
|
||||
if va[i].volumeID > va[j].volumeID {
|
||||
return false
|
||||
}
|
||||
return va[j].attached
|
||||
}
|
||||
|
||||
func serializeAttachments(attachments map[*volume.Spec]bool) string {
|
||||
var attachmentFlags volumeAttachmentFlags
|
||||
for spec, attached := range attachments {
|
||||
attachmentFlags = append(attachmentFlags, volumeAttachmentFlag{spec.Name(), attached})
|
||||
}
|
||||
sort.Sort(attachmentFlags)
|
||||
return fmt.Sprint(attachmentFlags)
|
||||
}
|
||||
|
||||
// newPlugin creates a new gcePersistentDiskPlugin with fake cloud, NewAttacher
|
||||
// and NewDetacher won't work.
|
||||
func newPlugin() *cinderPlugin {
|
||||
host := volumetest.NewFakeVolumeHost("/tmp", nil, nil)
|
||||
plugins := ProbeVolumePlugins()
|
||||
plugin := plugins[0]
|
||||
plugin.Init(host)
|
||||
return plugin.(*cinderPlugin)
|
||||
}
|
||||
|
||||
func newAttacher(testcase *testcase) *cinderDiskAttacher {
|
||||
return &cinderDiskAttacher{
|
||||
host: nil,
|
||||
cinderProvider: testcase,
|
||||
}
|
||||
}
|
||||
|
||||
func newDetacher(testcase *testcase) *cinderDiskDetacher {
|
||||
return &cinderDiskDetacher{
|
||||
cinderProvider: testcase,
|
||||
}
|
||||
}
|
||||
|
||||
func createVolSpec(name string, readOnly bool) *volume.Spec {
|
||||
return &volume.Spec{
|
||||
Volume: &v1.Volume{
|
||||
Name: name,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
Cinder: &v1.CinderVolumeSource{
|
||||
VolumeID: name,
|
||||
ReadOnly: readOnly,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func createPVSpec(name string, readOnly bool) *volume.Spec {
|
||||
return &volume.Spec{
|
||||
PersistentVolume: &v1.PersistentVolume{
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
Cinder: &v1.CinderPersistentVolumeSource{
|
||||
VolumeID: name,
|
||||
ReadOnly: readOnly,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Fake GCE implementation
|
||||
|
||||
type attachCall struct {
|
||||
instanceID string
|
||||
volumeID string
|
||||
retDeviceName string
|
||||
ret error
|
||||
}
|
||||
|
||||
type detachCall struct {
|
||||
instanceID string
|
||||
devicePath string
|
||||
ret error
|
||||
}
|
||||
|
||||
type operationPendingCall struct {
|
||||
diskName string
|
||||
pending bool
|
||||
volumeStatus string
|
||||
ret error
|
||||
}
|
||||
|
||||
type diskIsAttachedCall struct {
|
||||
instanceID string
|
||||
nodeName types.NodeName
|
||||
volumeID string
|
||||
isAttached bool
|
||||
ret error
|
||||
}
|
||||
|
||||
type diskPathCall struct {
|
||||
instanceID string
|
||||
volumeID string
|
||||
retPath string
|
||||
ret error
|
||||
}
|
||||
|
||||
type disksAreAttachedCall struct {
|
||||
instanceID string
|
||||
nodeName types.NodeName
|
||||
volumeIDs []string
|
||||
areAttached map[string]bool
|
||||
ret error
|
||||
}
|
||||
|
||||
func (testcase *testcase) AttachDisk(instanceID, volumeID string) (string, error) {
|
||||
expected := &testcase.attach
|
||||
|
||||
if expected.volumeID == "" && expected.instanceID == "" {
|
||||
// testcase.attach looks uninitialized, test did not expect to call
|
||||
// AttachDisk
|
||||
testcase.t.Errorf("unexpected AttachDisk call")
|
||||
return "", errors.New("unexpected AttachDisk call")
|
||||
}
|
||||
|
||||
if expected.volumeID != volumeID {
|
||||
testcase.t.Errorf("unexpected AttachDisk call: expected volumeID %s, got %s", expected.volumeID, volumeID)
|
||||
return "", errors.New("unexpected AttachDisk call: wrong volumeID")
|
||||
}
|
||||
|
||||
if expected.instanceID != instanceID {
|
||||
testcase.t.Errorf("unexpected AttachDisk call: expected instanceID %s, got %s", expected.instanceID, instanceID)
|
||||
return "", errors.New("unexpected AttachDisk call: wrong instanceID")
|
||||
}
|
||||
|
||||
glog.V(4).Infof("AttachDisk call: %s, %s, returning %q, %v", volumeID, instanceID, expected.retDeviceName, expected.ret)
|
||||
|
||||
testcase.attachOrDetach = &attachStatus
|
||||
return expected.retDeviceName, expected.ret
|
||||
}
|
||||
|
||||
func (testcase *testcase) DetachDisk(instanceID, volumeID string) error {
|
||||
expected := &testcase.detach
|
||||
|
||||
if expected.devicePath == "" && expected.instanceID == "" {
|
||||
// testcase.detach looks uninitialized, test did not expect to call
|
||||
// DetachDisk
|
||||
testcase.t.Errorf("unexpected DetachDisk call")
|
||||
return errors.New("unexpected DetachDisk call")
|
||||
}
|
||||
|
||||
if expected.devicePath != volumeID {
|
||||
testcase.t.Errorf("unexpected DetachDisk call: expected volumeID %s, got %s", expected.devicePath, volumeID)
|
||||
return errors.New("unexpected DetachDisk call: wrong volumeID")
|
||||
}
|
||||
|
||||
if expected.instanceID != instanceID {
|
||||
testcase.t.Errorf("unexpected DetachDisk call: expected instanceID %s, got %s", expected.instanceID, instanceID)
|
||||
return errors.New("unexpected DetachDisk call: wrong instanceID")
|
||||
}
|
||||
|
||||
glog.V(4).Infof("DetachDisk call: %s, %s, returning %v", volumeID, instanceID, expected.ret)
|
||||
|
||||
testcase.attachOrDetach = &detachStatus
|
||||
return expected.ret
|
||||
}
|
||||
|
||||
func (testcase *testcase) OperationPending(diskName string) (bool, string, error) {
|
||||
expected := &testcase.operationPending
|
||||
|
||||
if expected.volumeStatus == VolumeStatusPending {
|
||||
glog.V(4).Infof("OperationPending call: %s, returning %v, %v, %v", diskName, expected.pending, expected.volumeStatus, expected.ret)
|
||||
return true, expected.volumeStatus, expected.ret
|
||||
}
|
||||
|
||||
glog.V(4).Infof("OperationPending call: %s, returning %v, %v, %v", diskName, expected.pending, expected.volumeStatus, expected.ret)
|
||||
|
||||
return false, expected.volumeStatus, expected.ret
|
||||
}
|
||||
|
||||
func (testcase *testcase) DiskIsAttached(instanceID, volumeID string) (bool, error) {
|
||||
expected := &testcase.diskIsAttached
|
||||
// If testcase call DetachDisk*, return false
|
||||
if *testcase.attachOrDetach == detachStatus {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// If testcase call AttachDisk*, return true
|
||||
if *testcase.attachOrDetach == attachStatus {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
if expected.volumeID == "" && expected.instanceID == "" {
|
||||
// testcase.diskIsAttached looks uninitialized, test did not expect to
|
||||
// call DiskIsAttached
|
||||
testcase.t.Errorf("unexpected DiskIsAttached call")
|
||||
return false, errors.New("unexpected DiskIsAttached call")
|
||||
}
|
||||
|
||||
if expected.volumeID != volumeID {
|
||||
testcase.t.Errorf("unexpected DiskIsAttached call: expected volumeID %s, got %s", expected.volumeID, volumeID)
|
||||
return false, errors.New("unexpected DiskIsAttached call: wrong volumeID")
|
||||
}
|
||||
|
||||
if expected.instanceID != instanceID {
|
||||
testcase.t.Errorf("unexpected DiskIsAttached call: expected instanceID %s, got %s", expected.instanceID, instanceID)
|
||||
return false, errors.New("unexpected DiskIsAttached call: wrong instanceID")
|
||||
}
|
||||
|
||||
glog.V(4).Infof("DiskIsAttached call: %s, %s, returning %v, %v", volumeID, instanceID, expected.isAttached, expected.ret)
|
||||
|
||||
return expected.isAttached, expected.ret
|
||||
}
|
||||
|
||||
func (testcase *testcase) GetAttachmentDiskPath(instanceID, volumeID string) (string, error) {
|
||||
expected := &testcase.diskPath
|
||||
if expected.volumeID == "" && expected.instanceID == "" {
|
||||
// testcase.diskPath looks uninitialized, test did not expect to
|
||||
// call GetAttachmentDiskPath
|
||||
testcase.t.Errorf("unexpected GetAttachmentDiskPath call")
|
||||
return "", errors.New("unexpected GetAttachmentDiskPath call")
|
||||
}
|
||||
|
||||
if expected.volumeID != volumeID {
|
||||
testcase.t.Errorf("unexpected GetAttachmentDiskPath call: expected volumeID %s, got %s", expected.volumeID, volumeID)
|
||||
return "", errors.New("unexpected GetAttachmentDiskPath call: wrong volumeID")
|
||||
}
|
||||
|
||||
if expected.instanceID != instanceID {
|
||||
testcase.t.Errorf("unexpected GetAttachmentDiskPath call: expected instanceID %s, got %s", expected.instanceID, instanceID)
|
||||
return "", errors.New("unexpected GetAttachmentDiskPath call: wrong instanceID")
|
||||
}
|
||||
|
||||
glog.V(4).Infof("GetAttachmentDiskPath call: %s, %s, returning %v, %v", volumeID, instanceID, expected.retPath, expected.ret)
|
||||
|
||||
return expected.retPath, expected.ret
|
||||
}

func (testcase *testcase) ShouldTrustDevicePath() bool {
	return true
}

func (testcase *testcase) DiskIsAttachedByName(nodeName types.NodeName, volumeID string) (bool, string, error) {
	expected := &testcase.diskIsAttached
	instanceID := expected.instanceID
	// If the testcase called DetachDisk*, report the disk as detached
	if *testcase.attachOrDetach == detachStatus {
		return false, instanceID, nil
	}

	// If the testcase called AttachDisk*, report the disk as attached
	if *testcase.attachOrDetach == attachStatus {
		return true, instanceID, nil
	}

	if expected.nodeName != nodeName {
		testcase.t.Errorf("unexpected DiskIsAttachedByName call: expected nodeName %s, got %s", expected.nodeName, nodeName)
		return false, instanceID, errors.New("unexpected DiskIsAttachedByName call: wrong nodeName")
	}

	if expected.volumeID == "" && expected.instanceID == "" {
		// testcase.diskIsAttached looks uninitialized, test did not expect to
		// call DiskIsAttachedByName
		testcase.t.Errorf("unexpected DiskIsAttachedByName call")
		return false, instanceID, errors.New("unexpected DiskIsAttachedByName call")
	}

	if expected.volumeID != volumeID {
		testcase.t.Errorf("unexpected DiskIsAttachedByName call: expected volumeID %s, got %s", expected.volumeID, volumeID)
		return false, instanceID, errors.New("unexpected DiskIsAttachedByName call: wrong volumeID")
	}

	if expected.instanceID != instanceID {
		testcase.t.Errorf("unexpected DiskIsAttachedByName call: expected instanceID %s, got %s", expected.instanceID, instanceID)
		return false, instanceID, errors.New("unexpected DiskIsAttachedByName call: wrong instanceID")
	}

	glog.V(4).Infof("DiskIsAttachedByName call: %s, %s, returning %v, %v, %v", volumeID, nodeName, expected.isAttached, expected.instanceID, expected.ret)

	return expected.isAttached, expected.instanceID, expected.ret
}
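
// The fakes above short-circuit on *testcase.attachOrDetach, so a test can
// force an "already attached" or "already detached" answer without
// populating the expected-call structs. A minimal sketch of that wiring
// (assuming attachStatus/detachStatus are string constants and
// attachOrDetach is a *string, as their usage here suggests; the testcase
// type itself is defined earlier in this file):
//
//	status := attachStatus
//	tc := &testcase{t: t}
//	tc.attachOrDetach = &status
//	attached, _, _ := tc.DiskIsAttachedByName("node", "disk") // true, instanceID, nil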

func (testcase *testcase) CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, bool, error) {
	return "", "", false, errors.New("Not implemented")
}

func (testcase *testcase) GetDevicePath(volumeID string) string {
	return ""
}

func (testcase *testcase) InstanceID() (string, error) {
	return testcase.instanceID, nil
}

func (testcase *testcase) ExpandVolume(volumeID string, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error) {
	return resource.Quantity{}, nil
}

func (testcase *testcase) DeleteVolume(volumeID string) error {
	return errors.New("Not implemented")
}

func (testcase *testcase) GetAutoLabelsForPD(name string) (map[string]string, error) {
	return map[string]string{}, errors.New("Not implemented")
}

func (testcase *testcase) Instances() (cloudprovider.Instances, bool) {
	return &instances{testcase.instanceID}, true
}

func (testcase *testcase) DisksAreAttached(instanceID string, volumeIDs []string) (map[string]bool, error) {
	expected := &testcase.disksAreAttached

	areAttached := make(map[string]bool)

	if len(expected.volumeIDs) == 0 && expected.instanceID == "" {
		// testcase.disksAreAttached looks uninitialized, test did not expect to call DisksAreAttached
		testcase.t.Errorf("unexpected DisksAreAttached call")
		return areAttached, errors.New("unexpected DisksAreAttached call")
	}

	if !reflect.DeepEqual(expected.volumeIDs, volumeIDs) {
		testcase.t.Errorf("unexpected DisksAreAttached call: expected volumeIDs %v, got %v", expected.volumeIDs, volumeIDs)
		return areAttached, errors.New("unexpected DisksAreAttached call: wrong volumeIDs")
	}

	if expected.instanceID != instanceID {
		testcase.t.Errorf("unexpected DisksAreAttached call: expected instanceID %s, got %s", expected.instanceID, instanceID)
		return areAttached, errors.New("unexpected DisksAreAttached call: wrong instanceID")
	}

	glog.V(4).Infof("DisksAreAttached call: %v, %s, returning %v, %v", volumeIDs, instanceID, expected.areAttached, expected.ret)

	return expected.areAttached, expected.ret
}

func (testcase *testcase) DisksAreAttachedByName(nodeName types.NodeName, volumeIDs []string) (map[string]bool, error) {
	expected := &testcase.disksAreAttached
	areAttached := make(map[string]bool)

	instanceID := expected.instanceID
	if expected.nodeName != nodeName {
		testcase.t.Errorf("unexpected DisksAreAttachedByName call: expected nodeName %s, got %s", expected.nodeName, nodeName)
		return areAttached, errors.New("unexpected DisksAreAttachedByName call: wrong nodeName")
	}
	if len(expected.volumeIDs) == 0 && expected.instanceID == "" {
		// testcase.disksAreAttached looks uninitialized, test did not expect to call DisksAreAttachedByName
		testcase.t.Errorf("unexpected DisksAreAttachedByName call")
		return areAttached, errors.New("unexpected DisksAreAttachedByName call")
	}

	if !reflect.DeepEqual(expected.volumeIDs, volumeIDs) {
		testcase.t.Errorf("unexpected DisksAreAttachedByName call: expected volumeIDs %v, got %v", expected.volumeIDs, volumeIDs)
		return areAttached, errors.New("unexpected DisksAreAttachedByName call: wrong volumeIDs")
	}

	if expected.instanceID != instanceID {
		testcase.t.Errorf("unexpected DisksAreAttachedByName call: expected instanceID %s, got %s", expected.instanceID, instanceID)
		return areAttached, errors.New("unexpected DisksAreAttachedByName call: wrong instanceID")
	}

	glog.V(4).Infof("DisksAreAttachedByName call: %v, %s, returning %v, %v", volumeIDs, nodeName, expected.areAttached, expected.ret)

	return expected.areAttached, expected.ret
}

// Implementation of fake cloudprovider.Instances
type instances struct {
	instanceID string
}

func (instances *instances) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.NodeAddress, error) {
	return []v1.NodeAddress{}, errors.New("Not implemented")
}

func (instances *instances) NodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error) {
	return []v1.NodeAddress{}, errors.New("Not implemented")
}

func (instances *instances) InstanceID(ctx context.Context, name types.NodeName) (string, error) {
	return instances.instanceID, nil
}

func (instances *instances) InstanceType(ctx context.Context, name types.NodeName) (string, error) {
	return "", errors.New("Not implemented")
}

func (instances *instances) InstanceTypeByProviderID(ctx context.Context, providerID string) (string, error) {
	return "", errors.New("Not implemented")
}

func (instances *instances) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) {
	return false, errors.New("Not implemented")
}

func (instances *instances) InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error) {
	return false, errors.New("Not implemented")
}

func (instances *instances) List(filter string) ([]types.NodeName, error) {
	return []types.NodeName{}, errors.New("Not implemented")
}

func (instances *instances) AddSSHKeyToAllInstances(ctx context.Context, user string, keyData []byte) error {
	return cloudprovider.NotImplemented
}

func (instances *instances) CurrentNodeName(ctx context.Context, hostname string) (types.NodeName, error) {
	return "", errors.New("Not implemented")
}
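
// A sketch of how these fakes are typically driven by the table-driven tests
// earlier in this file (hypothetical field names for the expected-call
// struct; the real definitions sit above this excerpt). The testcase value
// doubles as the fake cloud provider, so each table entry pre-loads one
// expected call and its canned result:
//
//	tc := testcase{
//		t:          t,
//		instanceID: "instance",
//		// expected DiskIsAttached("instance", "disk") -> (true, nil)
//		diskIsAttached: diskIsAttachedCall{
//			instanceID: "instance", volumeID: "disk", isAttached: true, ret: nil,
//		},
//	}
//	attached, err := tc.DiskIsAttached("instance", "disk")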

557
vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder.go
generated
vendored
@ -1,557 +0,0 @@

/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cinder

import (
	"errors"
	"fmt"
	"os"
	"path"

	"github.com/golang/glog"
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/cloudprovider"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/openstack"
	"k8s.io/kubernetes/pkg/util/keymutex"
	"k8s.io/kubernetes/pkg/util/mount"
	kstrings "k8s.io/kubernetes/pkg/util/strings"
	"k8s.io/kubernetes/pkg/volume"
	"k8s.io/kubernetes/pkg/volume/util"
)

const (
	// DefaultCloudConfigPath is the default path for cloud configuration
	DefaultCloudConfigPath = "/etc/kubernetes/cloud-config"
)

// ProbeVolumePlugins is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
	return []volume.VolumePlugin{&cinderPlugin{}}
}

// BlockStorageProvider is the interface for accessing cinder functionality.
type BlockStorageProvider interface {
	AttachDisk(instanceID, volumeID string) (string, error)
	DetachDisk(instanceID, volumeID string) error
	DeleteVolume(volumeID string) error
	CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, bool, error)
	GetDevicePath(volumeID string) string
	InstanceID() (string, error)
	GetAttachmentDiskPath(instanceID, volumeID string) (string, error)
	OperationPending(diskName string) (bool, string, error)
	DiskIsAttached(instanceID, volumeID string) (bool, error)
	DiskIsAttachedByName(nodeName types.NodeName, volumeID string) (bool, string, error)
	DisksAreAttachedByName(nodeName types.NodeName, volumeIDs []string) (map[string]bool, error)
	ShouldTrustDevicePath() bool
	Instances() (cloudprovider.Instances, bool)
	ExpandVolume(volumeID string, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error)
}
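
// The OpenStack provider is the only real implementation of this interface;
// getCloudProvider below enforces that dynamically with a type switch. A
// compile-time assertion is an equivalent static sketch:
//
//	var _ BlockStorageProvider = &openstack.OpenStack{}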

type cinderPlugin struct {
	host volume.VolumeHost
	// Guarding SetUp and TearDown operations
	volumeLocks keymutex.KeyMutex
}

var _ volume.VolumePlugin = &cinderPlugin{}
var _ volume.PersistentVolumePlugin = &cinderPlugin{}
var _ volume.DeletableVolumePlugin = &cinderPlugin{}
var _ volume.ProvisionableVolumePlugin = &cinderPlugin{}

const (
	cinderVolumePluginName = "kubernetes.io/cinder"
)

func getPath(uid types.UID, volName string, host volume.VolumeHost) string {
	return host.GetPodVolumeDir(uid, kstrings.EscapeQualifiedNameForDisk(cinderVolumePluginName), volName)
}

func (plugin *cinderPlugin) Init(host volume.VolumeHost) error {
	plugin.host = host
	plugin.volumeLocks = keymutex.NewKeyMutex()
	return nil
}

func (plugin *cinderPlugin) GetPluginName() string {
	return cinderVolumePluginName
}

func (plugin *cinderPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
	volumeID, _, _, err := getVolumeInfo(spec)
	if err != nil {
		return "", err
	}

	return volumeID, nil
}

func (plugin *cinderPlugin) CanSupport(spec *volume.Spec) bool {
	return (spec.Volume != nil && spec.Volume.Cinder != nil) || (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.Cinder != nil)
}

func (plugin *cinderPlugin) RequiresRemount() bool {
	return false
}

func (plugin *cinderPlugin) SupportsMountOption() bool {
	return true
}

func (plugin *cinderPlugin) SupportsBulkVolumeVerification() bool {
	return false
}

func (plugin *cinderPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
	return []v1.PersistentVolumeAccessMode{
		v1.ReadWriteOnce,
	}
}

func (plugin *cinderPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
	return plugin.newMounterInternal(spec, pod.UID, &DiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}

func (plugin *cinderPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager cdManager, mounter mount.Interface) (volume.Mounter, error) {
	pdName, fsType, readOnly, err := getVolumeInfo(spec)
	if err != nil {
		return nil, err
	}

	return &cinderVolumeMounter{
		cinderVolume: &cinderVolume{
			podUID:          podUID,
			volName:         spec.Name(),
			pdName:          pdName,
			mounter:         mounter,
			manager:         manager,
			plugin:          plugin,
			MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, spec.Name(), plugin.host)),
		},
		fsType:             fsType,
		readOnly:           readOnly,
		blockDeviceMounter: util.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil
}

func (plugin *cinderPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
	return plugin.newUnmounterInternal(volName, podUID, &DiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}

func (plugin *cinderPlugin) newUnmounterInternal(volName string, podUID types.UID, manager cdManager, mounter mount.Interface) (volume.Unmounter, error) {
	return &cinderVolumeUnmounter{
		&cinderVolume{
			podUID:          podUID,
			volName:         volName,
			manager:         manager,
			mounter:         mounter,
			plugin:          plugin,
			MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, volName, plugin.host)),
		}}, nil
}

func (plugin *cinderPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
	return plugin.newDeleterInternal(spec, &DiskUtil{})
}

func (plugin *cinderPlugin) newDeleterInternal(spec *volume.Spec, manager cdManager) (volume.Deleter, error) {
	if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.Cinder == nil {
		return nil, fmt.Errorf("spec.PersistentVolumeSource.Cinder is nil")
	}
	return &cinderVolumeDeleter{
		&cinderVolume{
			volName: spec.Name(),
			pdName:  spec.PersistentVolume.Spec.Cinder.VolumeID,
			manager: manager,
			plugin:  plugin,
		}}, nil
}

func (plugin *cinderPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
	return plugin.newProvisionerInternal(options, &DiskUtil{})
}

func (plugin *cinderPlugin) newProvisionerInternal(options volume.VolumeOptions, manager cdManager) (volume.Provisioner, error) {
	return &cinderVolumeProvisioner{
		cinderVolume: &cinderVolume{
			manager: manager,
			plugin:  plugin,
		},
		options: options,
	}, nil
}

func (plugin *cinderPlugin) getCloudProvider() (BlockStorageProvider, error) {
	cloud := plugin.host.GetCloudProvider()
	if cloud == nil {
		if _, err := os.Stat(DefaultCloudConfigPath); err == nil {
			var config *os.File
			config, err = os.Open(DefaultCloudConfigPath)
			if err != nil {
				return nil, fmt.Errorf("unable to load OpenStack configuration from default path: %v", err)
			}
			defer config.Close()
			cloud, err = cloudprovider.GetCloudProvider(openstack.ProviderName, config)
			if err != nil {
				return nil, fmt.Errorf("unable to create OpenStack cloud provider from default path: %v", err)
			}
		} else {
			return nil, fmt.Errorf("OpenStack cloud provider was not initialized properly: %v", err)
		}
	}

	switch cloud := cloud.(type) {
	case *openstack.OpenStack:
		return cloud, nil
	default:
		return nil, errors.New("invalid cloud provider: expected OpenStack")
	}
}

func (plugin *cinderPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
	mounter := plugin.host.GetMounter(plugin.GetPluginName())
	pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName())
	sourceName, err := mounter.GetDeviceNameFromMount(mountPath, pluginDir)
	if err != nil {
		return nil, err
	}
	glog.V(4).Infof("Found volume %s mounted to %s", sourceName, mountPath)
	cinderVolume := &v1.Volume{
		Name: volumeName,
		VolumeSource: v1.VolumeSource{
			Cinder: &v1.CinderVolumeSource{
				VolumeID: sourceName,
			},
		},
	}
	return volume.NewSpecFromVolume(cinderVolume), nil
}

var _ volume.ExpandableVolumePlugin = &cinderPlugin{}

func (plugin *cinderPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize resource.Quantity, oldSize resource.Quantity) (resource.Quantity, error) {
	volumeID, _, _, err := getVolumeInfo(spec)
	if err != nil {
		return oldSize, err
	}
	cloud, err := plugin.getCloudProvider()
	if err != nil {
		return oldSize, err
	}

	expandedSize, err := cloud.ExpandVolume(volumeID, oldSize, newSize)
	if err != nil {
		return oldSize, err
	}

	glog.V(2).Infof("volume %s expanded to new size %d successfully", volumeID, int(newSize.Value()))
	return expandedSize, nil
}

func (plugin *cinderPlugin) RequiresFSResize() bool {
	return true
}
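
// ExpandVolumeDevice only grows the backing Cinder volume; because
// RequiresFSResize returns true, kubelet is expected to follow up with a
// filesystem resize on the node. A sketch of the control-plane half
// (hypothetical caller, illustrative sizes):
//
//	oldSize := resource.MustParse("1Gi")
//	newSize := resource.MustParse("2Gi")
//	expanded, err := plugin.ExpandVolumeDevice(spec, newSize, oldSize)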

// cdManager is the abstract interface to PD operations.
type cdManager interface {
	// Attaches the disk to the kubelet's host machine.
	AttachDisk(mounter *cinderVolumeMounter, globalPDPath string) error
	// Detaches the disk from the kubelet's host machine.
	DetachDisk(unmounter *cinderVolumeUnmounter) error
	// Creates a volume
	CreateVolume(provisioner *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, labels map[string]string, fstype string, err error)
	// Deletes a volume
	DeleteVolume(deleter *cinderVolumeDeleter) error
}

var _ volume.Mounter = &cinderVolumeMounter{}

type cinderVolumeMounter struct {
	*cinderVolume
	fsType             string
	readOnly           bool
	blockDeviceMounter *mount.SafeFormatAndMount
}

// cinderPersistentDisk volumes are disk resources provided by OpenStack Cinder
// that are attached to the kubelet's host machine and exposed to the pod.
type cinderVolume struct {
	volName string
	podUID  types.UID
	// Unique identifier of the volume, used to find the disk resource in the provider.
	pdName string
	// Filesystem type, optional.
	fsType string
	// Specifies whether the disk will be attached as read-only.
	readOnly bool
	// Utility interface that provides API calls to the provider to attach/detach disks.
	manager cdManager
	// Mounter interface that provides system calls to mount the global path to the pod local path.
	mounter mount.Interface
	// diskMounter provides the interface that is used to mount the actual block device.
	blockDeviceMounter mount.Interface
	plugin             *cinderPlugin
	volume.MetricsProvider
}

func (b *cinderVolumeMounter) GetAttributes() volume.Attributes {
	return volume.Attributes{
		ReadOnly:        b.readOnly,
		Managed:         !b.readOnly,
		SupportsSELinux: true,
	}
}

// CanMount checks prior to mount operations to verify that the required
// components (binaries, etc.) to mount the volume are available on the
// underlying node. If not, it returns an error.
func (b *cinderVolumeMounter) CanMount() error {
	return nil
}

func (b *cinderVolumeMounter) SetUp(fsGroup *int64) error {
	return b.SetUpAt(b.GetPath(), fsGroup)
}

// SetUpAt bind mounts the disk's global mount to the pod volume path.
func (b *cinderVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
	glog.V(5).Infof("Cinder SetUp %s to %s", b.pdName, dir)

	b.plugin.volumeLocks.LockKey(b.pdName)
	defer b.plugin.volumeLocks.UnlockKey(b.pdName)

	notmnt, err := b.mounter.IsLikelyNotMountPoint(dir)
	if err != nil && !os.IsNotExist(err) {
		glog.Errorf("Cannot validate mount point: %s %v", dir, err)
		return err
	}
	if !notmnt {
		glog.V(4).Infof("Something is already mounted to target %s", dir)
		return nil
	}
	globalPDPath := makeGlobalPDName(b.plugin.host, b.pdName)

	options := []string{"bind"}
	if b.readOnly {
		options = append(options, "ro")
	}

	if err := os.MkdirAll(dir, 0750); err != nil {
		glog.V(4).Infof("Could not create directory %s: %v", dir, err)
		return err
	}

	// Perform a bind mount to the full path to allow duplicate mounts of the same PD.
	glog.V(4).Infof("Attempting to mount cinder volume %s to %s with options %v", b.pdName, dir, options)
	err = b.mounter.Mount(globalPDPath, dir, "", options)
	if err != nil {
		glog.V(4).Infof("Mount failed: %v", err)
		notmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
		if mntErr != nil {
			glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
			return err
		}
		if !notmnt {
			if mntErr = b.mounter.Unmount(dir); mntErr != nil {
				glog.Errorf("Failed to unmount: %v", mntErr)
				return err
			}
			notmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
			if mntErr != nil {
				glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
				return err
			}
			if !notmnt {
				// This is very odd, we don't expect it. We'll try again next sync loop.
				glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", b.GetPath())
				return err
			}
		}
		os.Remove(dir)
		glog.Errorf("Failed to mount %s: %v", dir, err)
		return err
	}

	if !b.readOnly {
		volume.SetVolumeOwnership(b, fsGroup)
	}
	glog.V(3).Infof("Cinder volume %s mounted to %s", b.pdName, dir)

	return nil
}

func makeGlobalPDName(host volume.VolumeHost, devName string) string {
	return path.Join(host.GetPluginDir(cinderVolumePluginName), mount.MountsInGlobalPDPath, devName)
}

func (cd *cinderVolume) GetPath() string {
	return getPath(cd.podUID, cd.volName, cd.plugin.host)
}
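
// For orientation, with the default kubelet root these two helpers resolve
// to paths of roughly this shape (illustrative volume and pod IDs):
//
//	makeGlobalPDName(host, "vol-123")
//	  // => /var/lib/kubelet/plugins/kubernetes.io/cinder/mounts/vol-123
//	getPath("poduid", "vol1", host)
//	  // => /var/lib/kubelet/pods/poduid/volumes/kubernetes.io~cinder/vol1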

type cinderVolumeUnmounter struct {
	*cinderVolume
}

var _ volume.Unmounter = &cinderVolumeUnmounter{}

func (c *cinderVolumeUnmounter) TearDown() error {
	return c.TearDownAt(c.GetPath())
}

// TearDownAt unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (c *cinderVolumeUnmounter) TearDownAt(dir string) error {
	if pathExists, pathErr := util.PathExists(dir); pathErr != nil {
		return fmt.Errorf("error checking if path exists: %v", pathErr)
	} else if !pathExists {
		glog.Warningf("Unmount skipped because path does not exist: %v", dir)
		return nil
	}

	glog.V(5).Infof("Cinder TearDown of %s", dir)
	notmnt, err := c.mounter.IsLikelyNotMountPoint(dir)
	if err != nil {
		glog.V(4).Infof("IsLikelyNotMountPoint check failed: %v", err)
		return err
	}
	if notmnt {
		glog.V(4).Infof("Nothing is mounted to %s, ignoring", dir)
		return os.Remove(dir)
	}

	// Find the Cinder volumeID to lock the right volume
	// TODO: refactor VolumePlugin.NewUnmounter to get a full volume.Spec just like
	// NewMounter. We could then find the volumeID there without probing MountRefs.
	refs, err := mount.GetMountRefs(c.mounter, dir)
	if err != nil {
		glog.V(4).Infof("GetMountRefs failed: %v", err)
		return err
	}
	if len(refs) == 0 {
		glog.V(4).Infof("Directory %s is not mounted", dir)
		return fmt.Errorf("directory %s is not mounted", dir)
	}
	c.pdName = path.Base(refs[0])
	glog.V(4).Infof("Found volume %s mounted to %s", c.pdName, dir)

	// lock the volume (and thus wait for any concurrent SetUpAt to finish)
	c.plugin.volumeLocks.LockKey(c.pdName)
	defer c.plugin.volumeLocks.UnlockKey(c.pdName)

	// Reload the list of references; a SetUpAt might have finished in the meantime
	refs, err = mount.GetMountRefs(c.mounter, dir)
	if err != nil {
		glog.V(4).Infof("GetMountRefs failed: %v", err)
		return err
	}
	if err := c.mounter.Unmount(dir); err != nil {
		glog.V(4).Infof("Unmount failed: %v", err)
		return err
	}
	glog.V(3).Infof("Successfully unmounted: %s\n", dir)

	notmnt, mntErr := c.mounter.IsLikelyNotMountPoint(dir)
	if mntErr != nil {
		glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
		return mntErr
	}
	if notmnt {
		if err := os.Remove(dir); err != nil {
			glog.V(4).Infof("Failed to remove directory after unmount: %v", err)
			return err
		}
	}
	return nil
}

type cinderVolumeDeleter struct {
	*cinderVolume
}

var _ volume.Deleter = &cinderVolumeDeleter{}

func (r *cinderVolumeDeleter) GetPath() string {
	return getPath(r.podUID, r.volName, r.plugin.host)
}

func (r *cinderVolumeDeleter) Delete() error {
	return r.manager.DeleteVolume(r)
}

type cinderVolumeProvisioner struct {
	*cinderVolume
	options volume.VolumeOptions
}

var _ volume.Provisioner = &cinderVolumeProvisioner{}

func (c *cinderVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
	if !util.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
		return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
	}

	if util.CheckPersistentVolumeClaimModeBlock(c.options.PVC) {
		return nil, fmt.Errorf("%s does not support block volume provisioning", c.plugin.GetPluginName())
	}

	volumeID, sizeGB, labels, fstype, err := c.manager.CreateVolume(c)
	if err != nil {
		return nil, err
	}

	pv := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name:   c.options.PVName,
			Labels: labels,
			Annotations: map[string]string{
				util.VolumeDynamicallyCreatedByKey: "cinder-dynamic-provisioner",
			},
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy,
			AccessModes:                   c.options.PVC.Spec.AccessModes,
			Capacity: v1.ResourceList{
				v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
			},
			PersistentVolumeSource: v1.PersistentVolumeSource{
				Cinder: &v1.CinderPersistentVolumeSource{
					VolumeID: volumeID,
					FSType:   fstype,
					ReadOnly: false,
				},
			},
			MountOptions: c.options.MountOptions,
		},
	}
	if len(c.options.PVC.Spec.AccessModes) == 0 {
		pv.Spec.AccessModes = c.plugin.GetAccessModes()
	}

	return pv, nil
}

func getVolumeInfo(spec *volume.Spec) (string, string, bool, error) {
	if spec.Volume != nil && spec.Volume.Cinder != nil {
		return spec.Volume.Cinder.VolumeID, spec.Volume.Cinder.FSType, spec.Volume.Cinder.ReadOnly, nil
	} else if spec.PersistentVolume != nil &&
		spec.PersistentVolume.Spec.Cinder != nil {
		return spec.PersistentVolume.Spec.Cinder.VolumeID, spec.PersistentVolume.Spec.Cinder.FSType, spec.ReadOnly, nil
	}

	return "", "", false, fmt.Errorf("Spec does not reference a Cinder volume type")
}
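
// A quick sketch of the inline-volume shape getVolumeInfo accepts
// (hypothetical values; the PersistentVolume branch is analogous):
//
//	spec := volume.NewSpecFromVolume(&v1.Volume{
//		Name: "vol1",
//		VolumeSource: v1.VolumeSource{
//			Cinder: &v1.CinderVolumeSource{VolumeID: "vol-123", FSType: "ext4"},
//		},
//	})
//	id, fs, ro, err := getVolumeInfo(spec) // "vol-123", "ext4", false, nil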

222
vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_test.go
generated
vendored
@ -1,222 +0,0 @@

/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cinder

import (
	"fmt"
	"os"
	"path"
	"testing"
	"time"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	utiltesting "k8s.io/client-go/util/testing"
	"k8s.io/kubernetes/pkg/util/mount"
	"k8s.io/kubernetes/pkg/volume"
	volumetest "k8s.io/kubernetes/pkg/volume/testing"
)

func TestCanSupport(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("cinderTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))

	plug, err := plugMgr.FindPluginByName("kubernetes.io/cinder")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	if plug.GetPluginName() != "kubernetes.io/cinder" {
		t.Errorf("Wrong name: %s", plug.GetPluginName())
	}
	if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{Cinder: &v1.CinderVolumeSource{}}}}) {
		t.Errorf("Expected true")
	}

	if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{Cinder: &v1.CinderPersistentVolumeSource{}}}}}) {
		t.Errorf("Expected true")
	}
}

type fakePDManager struct {
	// How long should AttachDisk/DetachDisk take - we need slower AttachDisk in a test.
	attachDetachDuration time.Duration
}

func getFakeDeviceName(host volume.VolumeHost, pdName string) string {
	return path.Join(host.GetPluginDir(cinderVolumePluginName), "device", pdName)
}

// Real Cinder AttachDisk attaches a cinder volume. If it is not yet mounted,
// it mounts it to globalPDPath.
// We create a dummy directory (="device") and bind-mount it to globalPDPath
func (fake *fakePDManager) AttachDisk(b *cinderVolumeMounter, globalPDPath string) error {
	globalPath := makeGlobalPDName(b.plugin.host, b.pdName)
	fakeDeviceName := getFakeDeviceName(b.plugin.host, b.pdName)
	err := os.MkdirAll(fakeDeviceName, 0750)
	if err != nil {
		return err
	}
	// Attaching a Cinder volume can be slow...
	time.Sleep(fake.attachDetachDuration)

	// The volume is "attached", bind-mount it if it's not mounted yet.
	notmnt, err := b.mounter.IsLikelyNotMountPoint(globalPath)
	if err != nil {
		if os.IsNotExist(err) {
			if err := os.MkdirAll(globalPath, 0750); err != nil {
				return err
			}
			notmnt = true
		} else {
			return err
		}
	}
	if notmnt {
		err = b.mounter.Mount(fakeDeviceName, globalPath, "", []string{"bind"})
		if err != nil {
			return err
		}
	}
	return nil
}

func (fake *fakePDManager) DetachDisk(c *cinderVolumeUnmounter) error {
	globalPath := makeGlobalPDName(c.plugin.host, c.pdName)
	fakeDeviceName := getFakeDeviceName(c.plugin.host, c.pdName)
	// unmount the bind-mount - should be fast
	err := c.mounter.Unmount(globalPath)
	if err != nil {
		return err
	}

	// "Detach" the fake "device"
	err = os.RemoveAll(fakeDeviceName)
	if err != nil {
		return err
	}
	return nil
}

func (fake *fakePDManager) CreateVolume(c *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, labels map[string]string, fstype string, err error) {
	return "test-volume-name", 1, nil, "", nil
}

func (fake *fakePDManager) DeleteVolume(cd *cinderVolumeDeleter) error {
	if cd.pdName != "test-volume-name" {
		return fmt.Errorf("Deleter got unexpected volume name: %s", cd.pdName)
	}
	return nil
}
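
// The fake manager above keeps TestPlugin hermetic: a plain directory under
// the plugin dir stands in for the block device, and a bind mount stands in
// for the formatted mount, so nothing touches OpenStack or real devices.
// After AttachDisk the fake "device" for pdName "pd" lives at (illustrative
// path under the test's temp dir):
//
//	<tmpdir>/plugins/kubernetes.io/cinder/device/pd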

func TestPlugin(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("cinderTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))

	plug, err := plugMgr.FindPluginByName("kubernetes.io/cinder")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	spec := &v1.Volume{
		Name: "vol1",
		VolumeSource: v1.VolumeSource{
			Cinder: &v1.CinderVolumeSource{
				VolumeID: "pd",
				FSType:   "ext4",
			},
		},
	}
	mounter, err := plug.(*cinderPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &fakePDManager{0}, &mount.FakeMounter{})
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
	if mounter == nil {
		t.Errorf("Got a nil Mounter")
	}
	volPath := path.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~cinder/vol1")
	path := mounter.GetPath()
	if path != volPath {
		t.Errorf("Got unexpected path: %s", path)
	}

	if err := mounter.SetUp(nil); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(path); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, volume path not created: %s", path)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}

	unmounter, err := plug.(*cinderPlugin).newUnmounterInternal("vol1", types.UID("poduid"), &fakePDManager{0}, &mount.FakeMounter{})
	if err != nil {
		t.Errorf("Failed to make a new Unmounter: %v", err)
	}
	if unmounter == nil {
		t.Errorf("Got a nil Unmounter")
	}

	if err := unmounter.TearDown(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(path); err == nil {
		t.Errorf("TearDown() failed, volume path still exists: %s", path)
	} else if !os.IsNotExist(err) {
		t.Errorf("TearDown() failed: %v", err)
	}

	// Test Provisioner
	options := volume.VolumeOptions{
		PVC: volumetest.CreateTestPVC("100Mi", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}),
		PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
	}
	provisioner, err := plug.(*cinderPlugin).newProvisionerInternal(options, &fakePDManager{0})
	if err != nil {
		t.Errorf("Failed to make a new Provisioner: %v", err)
	}
	persistentSpec, err := provisioner.Provision(nil, nil)
	if err != nil {
		t.Errorf("Provision() failed: %v", err)
	}

	if persistentSpec.Spec.PersistentVolumeSource.Cinder.VolumeID != "test-volume-name" {
		t.Errorf("Provision() returned unexpected volume ID: %s", persistentSpec.Spec.PersistentVolumeSource.Cinder.VolumeID)
	}
	cap := persistentSpec.Spec.Capacity[v1.ResourceStorage]
	size := cap.Value()
	if size != 1024*1024*1024 {
		t.Errorf("Provision() returned unexpected volume size: %v", size)
	}

	// Test Deleter
	volSpec := &volume.Spec{
		PersistentVolume: persistentSpec,
	}
	deleter, err := plug.(*cinderPlugin).newDeleterInternal(volSpec, &fakePDManager{0})
	if err != nil {
		t.Errorf("Failed to make a new Deleter: %v", err)
	}
	err = deleter.Delete()
	if err != nil {
		t.Errorf("Deleter() failed: %v", err)
	}
}

262
vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_util.go
generated
vendored
@ -1,262 +0,0 @@

/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cinder

import (
	"errors"
	"fmt"
	"io/ioutil"
	"os"
	"strings"
	"time"

	"github.com/golang/glog"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	clientset "k8s.io/client-go/kubernetes"
	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
	"k8s.io/kubernetes/pkg/volume"
	volutil "k8s.io/kubernetes/pkg/volume/util"
	"k8s.io/utils/exec"
)

// DiskUtil has utility/helper methods
type DiskUtil struct{}

// AttachDisk attaches a disk specified by a volume.CinderPersistentDisk to the current kubelet.
// Mounts the disk to its global path.
func (util *DiskUtil) AttachDisk(b *cinderVolumeMounter, globalPDPath string) error {
	options := []string{}
	if b.readOnly {
		options = append(options, "ro")
	}
	cloud, err := b.plugin.getCloudProvider()
	if err != nil {
		return err
	}
	instanceid, err := cloud.InstanceID()
	if err != nil {
		return err
	}
	diskid, err := cloud.AttachDisk(instanceid, b.pdName)
	if err != nil {
		return err
	}

	var devicePath string
	numTries := 0
	for {
		devicePath = cloud.GetDevicePath(diskid)
		probeAttachedVolume()

		_, err := os.Stat(devicePath)
		if err == nil {
			break
		}
		if err != nil && !os.IsNotExist(err) {
			return err
		}
		numTries++
		if numTries == 10 {
			return errors.New("could not attach disk: timeout after 60s")
		}
		time.Sleep(time.Second * 6)
	}
	notmnt, err := b.mounter.IsLikelyNotMountPoint(globalPDPath)
	if err != nil {
		if os.IsNotExist(err) {
			if err := os.MkdirAll(globalPDPath, 0750); err != nil {
				return err
			}
			notmnt = true
		} else {
			return err
		}
	}
	if notmnt {
		err = b.blockDeviceMounter.FormatAndMount(devicePath, globalPDPath, b.fsType, options)
		if err != nil {
			os.Remove(globalPDPath)
			return err
		}
		glog.V(2).Infof("Safe mount successful: %q\n", devicePath)
	}
	return nil
}
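
// AttachDisk's wait loop above polls at a fixed 6-second interval for up to
// 10 tries, hence the "timeout after 60s" error. The same pattern as a
// small stand-alone sketch (assumed helper, not part of this file):
//
//	func waitForDevice(devicePath string, tries int, interval time.Duration) error {
//		for i := 0; i < tries; i++ {
//			if _, err := os.Stat(devicePath); err == nil {
//				return nil
//			}
//			time.Sleep(interval)
//		}
//		return fmt.Errorf("device %s did not appear", devicePath)
//	}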

// DetachDisk unmounts the device and detaches the disk from the kubelet's host machine.
func (util *DiskUtil) DetachDisk(cd *cinderVolumeUnmounter) error {
	globalPDPath := makeGlobalPDName(cd.plugin.host, cd.pdName)
	if err := cd.mounter.Unmount(globalPDPath); err != nil {
		return err
	}
	if err := os.Remove(globalPDPath); err != nil {
		return err
	}
	glog.V(2).Infof("Successfully unmounted main device: %s\n", globalPDPath)

	cloud, err := cd.plugin.getCloudProvider()
	if err != nil {
		return err
	}
	instanceid, err := cloud.InstanceID()
	if err != nil {
		return err
	}
	if err = cloud.DetachDisk(instanceid, cd.pdName); err != nil {
		return err
	}
	glog.V(2).Infof("Successfully detached cinder volume %s", cd.pdName)
	return nil
}

// DeleteVolume uses the cloud entrypoint to delete the specified volume
func (util *DiskUtil) DeleteVolume(cd *cinderVolumeDeleter) error {
	cloud, err := cd.plugin.getCloudProvider()
	if err != nil {
		return err
	}

	if err = cloud.DeleteVolume(cd.pdName); err != nil {
		// OpenStack cloud provider returns volume.tryAgainError when necessary,
		// no handling needed here.
		glog.V(2).Infof("Error deleting cinder volume %s: %v", cd.pdName, err)
		return err
	}
	glog.V(2).Infof("Successfully deleted cinder volume %s", cd.pdName)
	return nil
}

func getZonesFromNodes(kubeClient clientset.Interface) (sets.String, error) {
	// TODO: add caching; currently this is overkill because the function is
	// called only when a dynamic PV is created
	zones := make(sets.String)
	nodes, err := kubeClient.CoreV1().Nodes().List(metav1.ListOptions{})
	if err != nil {
		glog.V(2).Infof("Error listing nodes")
		return zones, err
	}
	for _, node := range nodes.Items {
		if zone, ok := node.Labels[kubeletapis.LabelZoneFailureDomain]; ok {
			zones.Insert(zone)
		}
	}
	glog.V(4).Infof("zones found: %v", zones)
	return zones, nil
}
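
// With the zones collected above, CreateVolume below picks a deterministic
// zone per claim via volutil.ChooseZoneForVolume, so re-provisioning the
// same claim lands in the same AZ (sketch, illustrative values):
//
//	zones := sets.NewString("az1", "az2")
//	az := volutil.ChooseZoneForVolume(zones, "my-pvc") // stable for "my-pvc"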

// CreateVolume uses the cloud provider entrypoint for creating a volume
func (util *DiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, volumeLabels map[string]string, fstype string, err error) {
	cloud, err := c.plugin.getCloudProvider()
	if err != nil {
		return "", 0, nil, "", err
	}

	capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
	volSizeBytes := capacity.Value()
	// Cinder works with gigabytes, convert to GiB with rounding up
	volSizeGB := int(volutil.RoundUpSize(volSizeBytes, 1024*1024*1024))
	name := volutil.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 255) // Cinder volume name can have up to 255 characters
	vtype := ""
	availability := ""
	// Apply ProvisionerParameters (case-insensitive). We leave validation of
	// the values to the cloud provider.
	for k, v := range c.options.Parameters {
		switch strings.ToLower(k) {
		case "type":
			vtype = v
		case "availability":
			availability = v
		case volume.VolumeParameterFSType:
			fstype = v
		default:
			return "", 0, nil, "", fmt.Errorf("invalid option %q for volume plugin %s", k, c.plugin.GetPluginName())
		}
	}
	// TODO: implement PVC.Selector parsing
	if c.options.PVC.Spec.Selector != nil {
		return "", 0, nil, "", fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on Cinder")
	}

	if availability == "" {
		// No zone specified, choose one randomly in the same region
		zones, err := getZonesFromNodes(c.plugin.host.GetKubeClient())
		if err != nil {
			glog.V(2).Infof("error getting zone information: %v", err)
			return "", 0, nil, "", err
		}
		// if we did not get any zones, leave it blank and gophercloud will
		// use zone "nova" as default
		if len(zones) > 0 {
			availability = volutil.ChooseZoneForVolume(zones, c.options.PVC.Name)
		}
	}

	volumeID, volumeAZ, ignoreVolumeAZ, err := cloud.CreateVolume(name, volSizeGB, vtype, availability, c.options.CloudTags)
	if err != nil {
		glog.V(2).Infof("Error creating cinder volume: %v", err)
		return "", 0, nil, "", err
	}
	glog.V(2).Infof("Successfully created cinder volume %s", volumeID)

	// this label is needed so that pods are scheduled into the same AZ as the volume
	volumeLabels = make(map[string]string)
	if !ignoreVolumeAZ {
		volumeLabels[kubeletapis.LabelZoneFailureDomain] = volumeAZ
	}
	return volumeID, volSizeGB, volumeLabels, fstype, nil
}
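
// The GiB conversion above always rounds up, so a 100Mi claim becomes a
// 1 GiB Cinder volume. A minimal sketch of the helper's semantics
// (reimplemented here for illustration; the real one is volutil.RoundUpSize):
//
//	func roundUpSize(volumeSizeBytes, allocationUnitBytes int64) int64 {
//		return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes
//	}
//
//	// roundUpSize(104857600, 1024*1024*1024) == 1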

func probeAttachedVolume() error {
	// rescan scsi bus
	scsiHostRescan()

	executor := exec.New()

	// udevadm settle waits for udevd to process the device creation
	// events for all hardware devices, thus ensuring that any device
	// nodes have been created successfully before proceeding.
	argsSettle := []string{"settle"}
	cmdSettle := executor.Command("udevadm", argsSettle...)
	_, errSettle := cmdSettle.CombinedOutput()
	if errSettle != nil {
		glog.Errorf("error running udevadm settle %v\n", errSettle)
	}

	args := []string{"trigger"}
	cmd := executor.Command("udevadm", args...)
	_, err := cmd.CombinedOutput()
	if err != nil {
		glog.Errorf("error running udevadm trigger %v\n", err)
		return err
	}
	glog.V(4).Infof("Successfully probed all attachments")
	return nil
}

func scsiHostRescan() {
	scsiPath := "/sys/class/scsi_host/"
	if dirs, err := ioutil.ReadDir(scsiPath); err == nil {
		for _, f := range dirs {
			name := scsiPath + f.Name() + "/scan"
			// writing "- - -" asks the kernel to rescan all channels,
			// targets and LUNs on this SCSI host
			data := []byte("- - -")
			ioutil.WriteFile(name, data, 0666)
		}
	}
}

18
vendor/k8s.io/kubernetes/pkg/volume/cinder/doc.go
generated
vendored
@ -1,18 +0,0 @@

/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package cinder contains the internal representation of cinder volumes.
package cinder // import "k8s.io/kubernetes/pkg/volume/cinder"

58
vendor/k8s.io/kubernetes/pkg/volume/configmap/BUILD
generated
vendored
@ -1,58 +0,0 @@

package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = [
        "configmap.go",
        "doc.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/volume/configmap",
    deps = [
        "//pkg/util/io:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/util/strings:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["configmap_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//pkg/volume:go_default_library",
        "//pkg/volume/empty_dir:go_default_library",
        "//pkg/volume/testing:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
14
vendor/k8s.io/kubernetes/pkg/volume/configmap/OWNERS
generated
vendored
@ -1,14 +0,0 @@

approvers:
- pmorie
- saad-ali
- thockin
- matchstick
reviewers:
- ivan4th
- rata
- sjenning
- saad-ali
- jsafrane
- rootfs
- jingxu97
- msau42
332
vendor/k8s.io/kubernetes/pkg/volume/configmap/configmap.go
generated
vendored
@ -1,332 +0,0 @@

/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package configmap

import (
	"fmt"

	"github.com/golang/glog"
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	ioutil "k8s.io/kubernetes/pkg/util/io"
	"k8s.io/kubernetes/pkg/util/mount"
	"k8s.io/kubernetes/pkg/util/strings"
	"k8s.io/kubernetes/pkg/volume"
	volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

// ProbeVolumePlugins is the entry point for plugin detection in a package.
func ProbeVolumePlugins() []volume.VolumePlugin {
	return []volume.VolumePlugin{&configMapPlugin{}}
}

const (
	configMapPluginName = "kubernetes.io/configmap"
)

// configMapPlugin implements the VolumePlugin interface.
type configMapPlugin struct {
	host         volume.VolumeHost
	getConfigMap func(namespace, name string) (*v1.ConfigMap, error)
}

var _ volume.VolumePlugin = &configMapPlugin{}

func (plugin *configMapPlugin) Init(host volume.VolumeHost) error {
	plugin.host = host
	plugin.getConfigMap = host.GetConfigMapFunc()
	return nil
}

func (plugin *configMapPlugin) GetPluginName() string {
	return configMapPluginName
}

func (plugin *configMapPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
	volumeSource, _ := getVolumeSource(spec)
	if volumeSource == nil {
		return "", fmt.Errorf("Spec does not reference a ConfigMap volume type")
	}

	return fmt.Sprintf(
		"%v/%v",
		spec.Name(),
		volumeSource.Name), nil
}

func (plugin *configMapPlugin) CanSupport(spec *volume.Spec) bool {
	return spec.Volume != nil && spec.Volume.ConfigMap != nil
}

func (plugin *configMapPlugin) RequiresRemount() bool {
	return true
}

func (plugin *configMapPlugin) SupportsMountOption() bool {
	return false
}

func (plugin *configMapPlugin) SupportsBulkVolumeVerification() bool {
	return false
}

func (plugin *configMapPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
	return &configMapVolumeMounter{
		configMapVolume: &configMapVolume{
			spec.Name(),
			pod.UID,
			plugin,
			plugin.host.GetMounter(plugin.GetPluginName()),
			plugin.host.GetWriter(),
			volume.MetricsNil{},
		},
		source:       *spec.Volume.ConfigMap,
		pod:          *pod,
		opts:         &opts,
		getConfigMap: plugin.getConfigMap,
	}, nil
}

func (plugin *configMapPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
	return &configMapVolumeUnmounter{
		&configMapVolume{
			volName,
			podUID,
			plugin,
			plugin.host.GetMounter(plugin.GetPluginName()),
			plugin.host.GetWriter(),
			volume.MetricsNil{},
		},
	}, nil
}

func (plugin *configMapPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
	configMapVolume := &v1.Volume{
		Name: volumeName,
		VolumeSource: v1.VolumeSource{
			ConfigMap: &v1.ConfigMapVolumeSource{},
		},
	}
	return volume.NewSpecFromVolume(configMapVolume), nil
}

type configMapVolume struct {
	volName string
	podUID  types.UID
	plugin  *configMapPlugin
	mounter mount.Interface
	writer  ioutil.Writer
	volume.MetricsNil
}

var _ volume.Volume = &configMapVolume{}

func (sv *configMapVolume) GetPath() string {
	return sv.plugin.host.GetPodVolumeDir(sv.podUID, strings.EscapeQualifiedNameForDisk(configMapPluginName), sv.volName)
}

// configMapVolumeMounter handles retrieving the ConfigMap from the API server
// and placing it into the volume on the host.
type configMapVolumeMounter struct {
	*configMapVolume

	source       v1.ConfigMapVolumeSource
	pod          v1.Pod
	opts         *volume.VolumeOptions
	getConfigMap func(namespace, name string) (*v1.ConfigMap, error)
}

var _ volume.Mounter = &configMapVolumeMounter{}

func (sv *configMapVolume) GetAttributes() volume.Attributes {
	return volume.Attributes{
		ReadOnly:        true,
		Managed:         true,
		SupportsSELinux: true,
	}
}

func wrappedVolumeSpec() volume.Spec {
	// This is the spec for the volume that this plugin wraps.
	return volume.Spec{
		// This should be on a tmpfs instead of the local disk; the problem is
		// charging the memory for the tmpfs to the right cgroup. We should make
		// this a tmpfs when we can do the accounting correctly.
		Volume: &v1.Volume{VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}},
	}
}
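
// Every configMap volume is layered on top of a wrapped emptyDir: the
// emptyDir plugin creates and tears down the actual directory, while this
// plugin only projects the ConfigMap keys into it. SetUpAt below follows
// that order: wrapped.SetUpAt first, then the atomic payload write.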
|
||||
|
||||
// Checks prior to mount operations to verify that the required components (binaries, etc.)
|
||||
// to mount the volume are available on the underlying node.
|
||||
// If not, it returns an error
|
||||
func (b *configMapVolumeMounter) CanMount() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *configMapVolumeMounter) SetUp(fsGroup *int64) error {
|
||||
return b.SetUpAt(b.GetPath(), fsGroup)
|
||||
}
|
||||
|
||||
func (b *configMapVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
|
||||
glog.V(3).Infof("Setting up volume %v for pod %v at %v", b.volName, b.pod.UID, dir)
|
||||
|
||||
// Wrap EmptyDir, let it do the setup.
|
||||
wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec(), &b.pod, *b.opts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
optional := b.source.Optional != nil && *b.source.Optional
|
||||
configMap, err := b.getConfigMap(b.pod.Namespace, b.source.Name)
|
||||
if err != nil {
|
||||
if !(errors.IsNotFound(err) && optional) {
|
||||
glog.Errorf("Couldn't get configMap %v/%v: %v", b.pod.Namespace, b.source.Name, err)
|
||||
return err
|
||||
}
|
||||
configMap = &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: b.pod.Namespace,
|
||||
Name: b.source.Name,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := volumeutil.MakeNestedMountpoints(b.volName, dir, b.pod); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
totalBytes := totalBytes(configMap)
|
||||
glog.V(3).Infof("Received configMap %v/%v containing (%v) pieces of data, %v total bytes",
|
||||
b.pod.Namespace,
|
||||
b.source.Name,
|
||||
len(configMap.Data)+len(configMap.BinaryData),
|
||||
totalBytes)
|
||||
|
||||
payload, err := MakePayload(b.source.Items, configMap, b.source.DefaultMode, optional)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
writerContext := fmt.Sprintf("pod %v/%v volume %v", b.pod.Namespace, b.pod.Name, b.volName)
|
||||
writer, err := volumeutil.NewAtomicWriter(dir, writerContext)
|
||||
if err != nil {
|
||||
glog.Errorf("Error creating atomic writer: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
err = writer.Write(payload)
|
||||
if err != nil {
|
||||
glog.Errorf("Error writing payload to dir: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
err = volume.SetVolumeOwnership(b, fsGroup)
|
||||
if err != nil {
|
||||
glog.Errorf("Error applying volume ownership settings for group: %v", fsGroup)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
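
// A standalone sketch (not part of the vendored source) of the atomic-write
// flow SetUpAt relies on: project each ConfigMap key to a FileProjection,
// then let AtomicWriter swap the whole payload into place in one step. The
// target directory and data here are illustrative.
package main

import (
	"fmt"

	volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

func main() {
	payload := map[string]volumeutil.FileProjection{
		// one file per ConfigMap key; Mode mirrors DefaultMode (0644)
		"game.properties": {Data: []byte("lives=3"), Mode: 0644},
	}

	// NewAtomicWriter takes the target directory and a context string used in logs.
	writer, err := volumeutil.NewAtomicWriter("/tmp/example-volume", "example pod/volume")
	if err != nil {
		fmt.Println("create writer:", err)
		return
	}
	// Write creates a timestamped data directory and updates the ..data symlink,
	// so consumers never observe a partially written payload.
	if err := writer.Write(payload); err != nil {
		fmt.Println("write payload:", err)
	}
}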

// MakePayload is exported so that it can be called from the projected volume driver.
func MakePayload(mappings []v1.KeyToPath, configMap *v1.ConfigMap, defaultMode *int32, optional bool) (map[string]volumeutil.FileProjection, error) {
	if defaultMode == nil {
		return nil, fmt.Errorf("no defaultMode was provided, not even the default value for it")
	}

	payload := make(map[string]volumeutil.FileProjection, len(configMap.Data)+len(configMap.BinaryData))
	var fileProjection volumeutil.FileProjection

	if len(mappings) == 0 {
		for name, data := range configMap.Data {
			fileProjection.Data = []byte(data)
			fileProjection.Mode = *defaultMode
			payload[name] = fileProjection
		}
		for name, data := range configMap.BinaryData {
			fileProjection.Data = data
			fileProjection.Mode = *defaultMode
			payload[name] = fileProjection
		}
	} else {
		for _, ktp := range mappings {
			if stringData, ok := configMap.Data[ktp.Key]; ok {
				fileProjection.Data = []byte(stringData)
			} else if binaryData, ok := configMap.BinaryData[ktp.Key]; ok {
				fileProjection.Data = binaryData
			} else {
				if optional {
					continue
				}
				return nil, fmt.Errorf("configmap references non-existent config key: %s", ktp.Key)
			}

			if ktp.Mode != nil {
				fileProjection.Mode = *ktp.Mode
			} else {
				fileProjection.Mode = *defaultMode
			}
			payload[ktp.Path] = fileProjection
		}
	}

	return payload, nil
}
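
// A hypothetical external caller of MakePayload (the projected volume driver
// does the equivalent): map the key "foo" to a nested path and fall back to
// defaultMode for the file mode. Values here are illustrative.
package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/volume/configmap"
)

func main() {
	cm := &v1.ConfigMap{Data: map[string]string{"foo": "foo-value"}}
	mappings := []v1.KeyToPath{{Key: "foo", Path: "path/to/foo.txt"}}
	defaultMode := int32(0644)

	payload, err := configmap.MakePayload(mappings, cm, &defaultMode, false /* optional */)
	if err != nil {
		fmt.Println("make payload:", err)
		return
	}
	// Prints the projected file with its mode: path/to/foo.txt 0644 (9 bytes)
	for p, fp := range payload {
		fmt.Printf("%s %#o (%d bytes)\n", p, fp.Mode, len(fp.Data))
	}
}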

func totalBytes(configMap *v1.ConfigMap) int {
	totalSize := 0
	for _, value := range configMap.Data {
		totalSize += len(value)
	}
	for _, value := range configMap.BinaryData {
		totalSize += len(value)
	}

	return totalSize
}

// configMapVolumeUnmounter handles cleaning up configMap volumes.
type configMapVolumeUnmounter struct {
	*configMapVolume
}

var _ volume.Unmounter = &configMapVolumeUnmounter{}

func (c *configMapVolumeUnmounter) TearDown() error {
	return c.TearDownAt(c.GetPath())
}

func (c *configMapVolumeUnmounter) TearDownAt(dir string) error {
	return volumeutil.UnmountViaEmptyDir(dir, c.plugin.host, c.volName, wrappedVolumeSpec(), c.podUID)
}

func getVolumeSource(spec *volume.Spec) (*v1.ConfigMapVolumeSource, bool) {
	var readOnly bool
	var volumeSource *v1.ConfigMapVolumeSource

	if spec.Volume != nil && spec.Volume.ConfigMap != nil {
		volumeSource = spec.Volume.ConfigMap
		readOnly = spec.ReadOnly
	}

	return volumeSource, readOnly
}
665
vendor/k8s.io/kubernetes/pkg/volume/configmap/configmap_test.go
generated
vendored
@ -1,665 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package configmap

import (
	"fmt"
	"io/ioutil"
	"os"
	"path"
	"reflect"
	"strings"
	"testing"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/kubernetes/pkg/volume"
	"k8s.io/kubernetes/pkg/volume/empty_dir"
	volumetest "k8s.io/kubernetes/pkg/volume/testing"
	"k8s.io/kubernetes/pkg/volume/util"
)

func TestMakePayload(t *testing.T) {
	caseMappingMode := int32(0400)
	cases := []struct {
		name      string
		mappings  []v1.KeyToPath
		configMap *v1.ConfigMap
		mode      int32
		optional  bool
		payload   map[string]util.FileProjection
		success   bool
	}{
		{
			name: "no overrides",
			configMap: &v1.ConfigMap{
				Data: map[string]string{
					"foo": "foo",
					"bar": "bar",
				},
			},
			mode: 0644,
			payload: map[string]util.FileProjection{
				"foo": {Data: []byte("foo"), Mode: 0644},
				"bar": {Data: []byte("bar"), Mode: 0644},
			},
			success: true,
		},
		{
			name: "no overrides binary data",
			configMap: &v1.ConfigMap{
				BinaryData: map[string][]byte{
					"foo": []byte("foo"),
					"bar": []byte("bar"),
				},
			},
			mode: 0644,
			payload: map[string]util.FileProjection{
				"foo": {Data: []byte("foo"), Mode: 0644},
				"bar": {Data: []byte("bar"), Mode: 0644},
			},
			success: true,
		},
		{
			name: "no overrides mixed data",
			configMap: &v1.ConfigMap{
				BinaryData: map[string][]byte{
					"foo": []byte("foo"),
				},
				Data: map[string]string{
					"bar": "bar",
				},
			},
			mode: 0644,
			payload: map[string]util.FileProjection{
				"foo": {Data: []byte("foo"), Mode: 0644},
				"bar": {Data: []byte("bar"), Mode: 0644},
			},
			success: true,
		},
		{
			name: "basic 1",
			mappings: []v1.KeyToPath{
				{
					Key:  "foo",
					Path: "path/to/foo.txt",
				},
			},
			configMap: &v1.ConfigMap{
				Data: map[string]string{
					"foo": "foo",
					"bar": "bar",
				},
			},
			mode: 0644,
			payload: map[string]util.FileProjection{
				"path/to/foo.txt": {Data: []byte("foo"), Mode: 0644},
			},
			success: true,
		},
		{
			name: "subdirs",
			mappings: []v1.KeyToPath{
				{
					Key:  "foo",
					Path: "path/to/1/2/3/foo.txt",
				},
			},
			configMap: &v1.ConfigMap{
				Data: map[string]string{
					"foo": "foo",
					"bar": "bar",
				},
			},
			mode: 0644,
			payload: map[string]util.FileProjection{
				"path/to/1/2/3/foo.txt": {Data: []byte("foo"), Mode: 0644},
			},
			success: true,
		},
		{
			name: "subdirs 2",
			mappings: []v1.KeyToPath{
				{
					Key:  "foo",
					Path: "path/to/1/2/3/foo.txt",
				},
			},
			configMap: &v1.ConfigMap{
				Data: map[string]string{
					"foo": "foo",
					"bar": "bar",
				},
			},
			mode: 0644,
			payload: map[string]util.FileProjection{
				"path/to/1/2/3/foo.txt": {Data: []byte("foo"), Mode: 0644},
			},
			success: true,
		},
		{
			name: "subdirs 3",
			mappings: []v1.KeyToPath{
				{
					Key:  "foo",
					Path: "path/to/1/2/3/foo.txt",
				},
				{
					Key:  "bar",
					Path: "another/path/to/the/esteemed/bar.bin",
				},
			},
			configMap: &v1.ConfigMap{
				Data: map[string]string{
					"foo": "foo",
					"bar": "bar",
				},
			},
			mode: 0644,
			payload: map[string]util.FileProjection{
				"path/to/1/2/3/foo.txt":                {Data: []byte("foo"), Mode: 0644},
				"another/path/to/the/esteemed/bar.bin": {Data: []byte("bar"), Mode: 0644},
			},
			success: true,
		},
		{
			name: "non existent key",
			mappings: []v1.KeyToPath{
				{
					Key:  "zab",
					Path: "path/to/foo.txt",
				},
			},
			configMap: &v1.ConfigMap{
				Data: map[string]string{
					"foo": "foo",
					"bar": "bar",
				},
			},
			mode:    0644,
			success: false,
		},
		{
			name: "mapping with Mode",
			mappings: []v1.KeyToPath{
				{
					Key:  "foo",
					Path: "foo.txt",
					Mode: &caseMappingMode,
				},
				{
					Key:  "bar",
					Path: "bar.bin",
					Mode: &caseMappingMode,
				},
			},
			configMap: &v1.ConfigMap{
				Data: map[string]string{
					"foo": "foo",
					"bar": "bar",
				},
			},
			mode: 0644,
			payload: map[string]util.FileProjection{
				"foo.txt": {Data: []byte("foo"), Mode: caseMappingMode},
				"bar.bin": {Data: []byte("bar"), Mode: caseMappingMode},
			},
			success: true,
		},
		{
			name: "mapping with defaultMode",
			mappings: []v1.KeyToPath{
				{
					Key:  "foo",
					Path: "foo.txt",
				},
				{
					Key:  "bar",
					Path: "bar.bin",
				},
			},
			configMap: &v1.ConfigMap{
				Data: map[string]string{
					"foo": "foo",
					"bar": "bar",
				},
			},
			mode: 0644,
			payload: map[string]util.FileProjection{
				"foo.txt": {Data: []byte("foo"), Mode: 0644},
				"bar.bin": {Data: []byte("bar"), Mode: 0644},
			},
			success: true,
		},
		{
			name: "optional non existent key",
			mappings: []v1.KeyToPath{
				{
					Key:  "zab",
					Path: "path/to/foo.txt",
				},
			},
			configMap: &v1.ConfigMap{
				Data: map[string]string{
					"foo": "foo",
					"bar": "bar",
				},
			},
			mode:     0644,
			optional: true,
			payload:  map[string]util.FileProjection{},
			success:  true,
		},
	}

	for _, tc := range cases {
		actualPayload, err := MakePayload(tc.mappings, tc.configMap, &tc.mode, tc.optional)
		if err != nil && tc.success {
			t.Errorf("%v: unexpected failure making payload: %v", tc.name, err)
			continue
		}

		if err == nil && !tc.success {
			t.Errorf("%v: unexpected success making payload", tc.name)
			continue
		}

		if !tc.success {
			continue
		}

		if e, a := tc.payload, actualPayload; !reflect.DeepEqual(e, a) {
			t.Errorf("%v: expected and actual payload do not match", tc.name)
		}
	}
}

func newTestHost(t *testing.T, clientset clientset.Interface) (string, volume.VolumeHost) {
	tempDir, err := ioutil.TempDir("/tmp", "configmap_volume_test.")
	if err != nil {
		t.Fatalf("can't make a temp rootdir: %v", err)
	}

	return tempDir, volumetest.NewFakeVolumeHost(tempDir, clientset, empty_dir.ProbeVolumePlugins())
}

func TestCanSupport(t *testing.T) {
	pluginMgr := volume.VolumePluginMgr{}
	tempDir, host := newTestHost(t, nil)
	defer os.RemoveAll(tempDir)
	pluginMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host)

	plugin, err := pluginMgr.FindPluginByName(configMapPluginName)
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	if plugin.GetPluginName() != configMapPluginName {
		t.Errorf("Wrong name: %s", plugin.GetPluginName())
	}
	if !plugin.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{ConfigMap: &v1.ConfigMapVolumeSource{LocalObjectReference: v1.LocalObjectReference{Name: ""}}}}}) {
		t.Errorf("Expected true")
	}
	if plugin.CanSupport(&volume.Spec{}) {
		t.Errorf("Expected false")
	}
}

func TestPlugin(t *testing.T) {
	var (
		testPodUID     = types.UID("test_pod_uid")
		testVolumeName = "test_volume_name"
		testNamespace  = "test_configmap_namespace"
		testName       = "test_configmap_name"

		volumeSpec    = volumeSpec(testVolumeName, testName, 0644)
		configMap     = configMap(testNamespace, testName)
		client        = fake.NewSimpleClientset(&configMap)
		pluginMgr     = volume.VolumePluginMgr{}
		tempDir, host = newTestHost(t, client)
	)

	defer os.RemoveAll(tempDir)
	pluginMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host)

	plugin, err := pluginMgr.FindPluginByName(configMapPluginName)
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}

	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, UID: testPodUID}}
	mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
	if mounter == nil {
		t.Fatalf("Got a nil Mounter")
	}

	vName, err := plugin.GetVolumeName(volume.NewSpecFromVolume(volumeSpec))
	if err != nil {
		t.Errorf("Failed to GetVolumeName: %v", err)
	}
	if vName != "test_volume_name/test_configmap_name" {
		t.Errorf("Got unexpected VolumeName %v", vName)
	}

	volumePath := mounter.GetPath()
	if !strings.HasSuffix(volumePath, fmt.Sprintf("pods/test_pod_uid/volumes/kubernetes.io~configmap/test_volume_name")) {
		t.Errorf("Got unexpected path: %s", volumePath)
	}

	fsGroup := int64(1001)
	err = mounter.SetUp(&fsGroup)
	if err != nil {
		t.Errorf("Failed to setup volume: %v", err)
	}
	if _, err := os.Stat(volumePath); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, volume path not created: %s", volumePath)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}

	doTestConfigMapDataInVolume(volumePath, configMap, t)
	doTestCleanAndTeardown(plugin, testPodUID, testVolumeName, volumePath, t)
}

// Test the case where the plugin's ready file exists, but the volume dir is not a
// mountpoint, which is the state the system will be in after reboot. The dir
// should be mounted and the configMap data written to it.
func TestPluginReboot(t *testing.T) {
	var (
		testPodUID     = types.UID("test_pod_uid3")
		testVolumeName = "test_volume_name"
		testNamespace  = "test_configmap_namespace"
		testName       = "test_configmap_name"

		volumeSpec    = volumeSpec(testVolumeName, testName, 0644)
		configMap     = configMap(testNamespace, testName)
		client        = fake.NewSimpleClientset(&configMap)
		pluginMgr     = volume.VolumePluginMgr{}
		rootDir, host = newTestHost(t, client)
	)

	defer os.RemoveAll(rootDir)
	pluginMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host)

	plugin, err := pluginMgr.FindPluginByName(configMapPluginName)
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}

	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, UID: testPodUID}}
	mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
	if mounter == nil {
		t.Fatalf("Got a nil Mounter")
	}

	podMetadataDir := fmt.Sprintf("%v/pods/test_pod_uid3/plugins/kubernetes.io~configmap/test_volume_name", rootDir)
	util.SetReady(podMetadataDir)
	volumePath := mounter.GetPath()
	if !strings.HasSuffix(volumePath, fmt.Sprintf("pods/test_pod_uid3/volumes/kubernetes.io~configmap/test_volume_name")) {
		t.Errorf("Got unexpected path: %s", volumePath)
	}

	fsGroup := int64(1001)
	err = mounter.SetUp(&fsGroup)
	if err != nil {
		t.Errorf("Failed to setup volume: %v", err)
	}
	if _, err := os.Stat(volumePath); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, volume path not created: %s", volumePath)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}

	doTestConfigMapDataInVolume(volumePath, configMap, t)
	doTestCleanAndTeardown(plugin, testPodUID, testVolumeName, volumePath, t)
}

func TestPluginOptional(t *testing.T) {
	var (
		testPodUID     = types.UID("test_pod_uid")
		testVolumeName = "test_volume_name"
		testNamespace  = "test_configmap_namespace"
		testName       = "test_configmap_name"
		trueVal        = true

		volumeSpec    = volumeSpec(testVolumeName, testName, 0644)
		client        = fake.NewSimpleClientset()
		pluginMgr     = volume.VolumePluginMgr{}
		tempDir, host = newTestHost(t, client)
	)
	volumeSpec.VolumeSource.ConfigMap.Optional = &trueVal

	defer os.RemoveAll(tempDir)
	pluginMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host)

	plugin, err := pluginMgr.FindPluginByName(configMapPluginName)
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}

	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, UID: testPodUID}}
	mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
	if mounter == nil {
		t.Fatalf("Got a nil Mounter")
	}

	vName, err := plugin.GetVolumeName(volume.NewSpecFromVolume(volumeSpec))
	if err != nil {
		t.Errorf("Failed to GetVolumeName: %v", err)
	}
	if vName != "test_volume_name/test_configmap_name" {
		t.Errorf("Got unexpected VolumeName %v", vName)
	}

	volumePath := mounter.GetPath()
	if !strings.HasSuffix(volumePath, fmt.Sprintf("pods/test_pod_uid/volumes/kubernetes.io~configmap/test_volume_name")) {
		t.Errorf("Got unexpected path: %s", volumePath)
	}

	fsGroup := int64(1001)
	err = mounter.SetUp(&fsGroup)
	if err != nil {
		t.Errorf("Failed to setup volume: %v", err)
	}
	if _, err := os.Stat(volumePath); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, volume path not created: %s", volumePath)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}

	datadirSymlink := path.Join(volumePath, "..data")
	datadir, err := os.Readlink(datadirSymlink)
	if err != nil && os.IsNotExist(err) {
		t.Fatalf("couldn't find volume path's data dir, %s", datadirSymlink)
	} else if err != nil {
		t.Fatalf("couldn't read symlink, %s", datadirSymlink)
	}
	datadirPath := path.Join(volumePath, datadir)

	infos, err := ioutil.ReadDir(volumePath)
	if err != nil {
		t.Fatalf("couldn't find volume path, %s", volumePath)
	}
	if len(infos) != 0 {
		for _, fi := range infos {
			if fi.Name() != "..data" && fi.Name() != datadir {
				t.Errorf("volume directory %s contains unexpected file: %s", volumePath, fi.Name())
			}
		}
	}

	infos, err = ioutil.ReadDir(datadirPath)
	if err != nil {
		t.Fatalf("couldn't find volume data path, %s", datadirPath)
	}
	if len(infos) != 0 {
		t.Errorf("data directory %s is not empty. Contains: %s", datadirPath, infos[0].Name())
	}

	doTestCleanAndTeardown(plugin, testPodUID, testVolumeName, volumePath, t)
}

func TestPluginKeysOptional(t *testing.T) {
	var (
		testPodUID     = types.UID("test_pod_uid")
		testVolumeName = "test_volume_name"
		testNamespace  = "test_configmap_namespace"
		testName       = "test_configmap_name"
		trueVal        = true

		volumeSpec    = volumeSpec(testVolumeName, testName, 0644)
		configMap     = configMap(testNamespace, testName)
		client        = fake.NewSimpleClientset(&configMap)
		pluginMgr     = volume.VolumePluginMgr{}
		tempDir, host = newTestHost(t, client)
	)
	volumeSpec.VolumeSource.ConfigMap.Items = []v1.KeyToPath{
		{Key: "data-1", Path: "data-1"},
		{Key: "data-2", Path: "data-2"},
		{Key: "data-3", Path: "data-3"},
		{Key: "missing", Path: "missing"},
	}
	volumeSpec.VolumeSource.ConfigMap.Optional = &trueVal

	defer os.RemoveAll(tempDir)
	pluginMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host)

	plugin, err := pluginMgr.FindPluginByName(configMapPluginName)
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}

	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, UID: testPodUID}}
	mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
	if mounter == nil {
		t.Fatalf("Got a nil Mounter")
	}

	vName, err := plugin.GetVolumeName(volume.NewSpecFromVolume(volumeSpec))
	if err != nil {
		t.Errorf("Failed to GetVolumeName: %v", err)
	}
	if vName != "test_volume_name/test_configmap_name" {
		t.Errorf("Got unexpected VolumeName %v", vName)
	}

	volumePath := mounter.GetPath()
	if !strings.HasSuffix(volumePath, fmt.Sprintf("pods/test_pod_uid/volumes/kubernetes.io~configmap/test_volume_name")) {
		t.Errorf("Got unexpected path: %s", volumePath)
	}

	fsGroup := int64(1001)
	err = mounter.SetUp(&fsGroup)
	if err != nil {
		t.Errorf("Failed to setup volume: %v", err)
	}
	if _, err := os.Stat(volumePath); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, volume path not created: %s", volumePath)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}

	doTestConfigMapDataInVolume(volumePath, configMap, t)
	doTestCleanAndTeardown(plugin, testPodUID, testVolumeName, volumePath, t)
}

func volumeSpec(volumeName, configMapName string, defaultMode int32) *v1.Volume {
	return &v1.Volume{
		Name: volumeName,
		VolumeSource: v1.VolumeSource{
			ConfigMap: &v1.ConfigMapVolumeSource{
				LocalObjectReference: v1.LocalObjectReference{
					Name: configMapName,
				},
				DefaultMode: &defaultMode,
			},
		},
	}
}

func configMap(namespace, name string) v1.ConfigMap {
	return v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: namespace,
			Name:      name,
		},
		Data: map[string]string{
			"data-1": "value-1",
			"data-2": "value-2",
			"data-3": "value-3",
		},
	}
}

func doTestConfigMapDataInVolume(volumePath string, configMap v1.ConfigMap, t *testing.T) {
	for key, value := range configMap.Data {
		configMapDataHostPath := path.Join(volumePath, key)
		if _, err := os.Stat(configMapDataHostPath); err != nil {
			t.Fatalf("SetUp() failed, couldn't find configMap data on disk: %v", configMapDataHostPath)
		} else {
			actualValue, err := ioutil.ReadFile(configMapDataHostPath)
			if err != nil {
				t.Fatalf("Couldn't read configMap data from: %v", configMapDataHostPath)
			}

			if value != string(actualValue) {
				t.Errorf("Unexpected value; expected %q, got %q", value, actualValue)
			}
		}
	}
}

func doTestCleanAndTeardown(plugin volume.VolumePlugin, podUID types.UID, testVolumeName, volumePath string, t *testing.T) {
	unmounter, err := plugin.NewUnmounter(testVolumeName, podUID)
	if err != nil {
		t.Errorf("Failed to make a new Unmounter: %v", err)
	}
	if unmounter == nil {
		t.Fatalf("Got a nil Unmounter")
	}

	if err := unmounter.TearDown(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(volumePath); err == nil {
		t.Errorf("TearDown() failed, volume path still exists: %s", volumePath)
	} else if !os.IsNotExist(err) {
		t.Errorf("TearDown() failed: %v", err)
	}
}
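
// The optional-volume tests above inspect AtomicWriter's on-disk layout
// directly. A standalone sketch (illustrative path, not part of the vendored
// source) of what they expect: the volume root holds per-key files plus a
// ..data symlink pointing at the current timestamped data directory.
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path"
)

func main() {
	volumePath := "/tmp/example-volume" // illustrative

	// ..data resolves to the active data directory written by AtomicWriter.
	datadir, err := os.Readlink(path.Join(volumePath, "..data"))
	if err != nil {
		fmt.Println("readlink:", err)
		return
	}
	fmt.Println("active data dir:", datadir)

	// Everything else in the root should be a key file or the symlink itself.
	infos, err := ioutil.ReadDir(volumePath)
	if err != nil {
		fmt.Println("readdir:", err)
		return
	}
	for _, fi := range infos {
		fmt.Println("entry:", fi.Name())
	}
}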
18
vendor/k8s.io/kubernetes/pkg/volume/configmap/doc.go
generated
vendored
@ -1,18 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package configmap contains the internal representation of configMap volumes.
package configmap // import "k8s.io/kubernetes/pkg/volume/configmap"
83
vendor/k8s.io/kubernetes/pkg/volume/csi/BUILD
generated
vendored
@ -1,83 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "csi_attacher.go",
        "csi_block.go",
        "csi_client.go",
        "csi_mounter.go",
        "csi_plugin.go",
        "csi_util.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/volume/csi",
    visibility = ["//visibility:public"],
    deps = [
        "//pkg/features:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/util/strings:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/csi/labelmanager:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//vendor/github.com/container-storage-interface/spec/lib/go/csi/v0:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/google.golang.org/grpc:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/storage/v1beta1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "csi_attacher_test.go",
        "csi_block_test.go",
        "csi_client_test.go",
        "csi_mounter_test.go",
        "csi_plugin_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//pkg/volume:go_default_library",
        "//pkg/volume/csi/fake:go_default_library",
        "//pkg/volume/testing:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//vendor/github.com/container-storage-interface/spec/lib/go/csi/v0:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/storage/v1beta1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
        "//vendor/k8s.io/client-go/testing:go_default_library",
        "//vendor/k8s.io/client-go/util/testing:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//pkg/volume/csi/fake:all-srcs",
        "//pkg/volume/csi/labelmanager:all-srcs",
    ],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
4
vendor/k8s.io/kubernetes/pkg/volume/csi/OWNERS
generated
vendored
@ -1,4 +0,0 @@
approvers:
- jsafrane
- saad-ali
- vladimirvivien
603
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_attacher.go
generated
vendored
@ -1,603 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package csi

import (
	"context"
	"crypto/sha256"
	"errors"
	"fmt"
	"os"
	"path"
	"path/filepath"
	"strings"
	"time"

	"github.com/golang/glog"

	csipb "github.com/container-storage-interface/spec/lib/go/csi/v0"
	"k8s.io/api/core/v1"
	storage "k8s.io/api/storage/v1beta1"
	apierrs "k8s.io/apimachinery/pkg/api/errors"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/volume"
)

const (
	persistentVolumeInGlobalPath = "pv"
	globalMountInGlobalPath      = "globalmount"
)

type csiAttacher struct {
	plugin        *csiPlugin
	k8s           kubernetes.Interface
	waitSleepTime time.Duration

	csiClient csiClient
}

// volume.Attacher methods
var _ volume.Attacher = &csiAttacher{}

func (c *csiAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
	if spec == nil {
		glog.Error(log("attacher.Attach missing volume.Spec"))
		return "", errors.New("missing spec")
	}

	csiSource, err := getCSISourceFromSpec(spec)
	if err != nil {
		glog.Error(log("attacher.Attach failed to get CSI persistent source: %v", err))
		return "", err
	}

	node := string(nodeName)
	pvName := spec.PersistentVolume.GetName()
	attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, node)

	attachment := &storage.VolumeAttachment{
		ObjectMeta: meta.ObjectMeta{
			Name: attachID,
		},
		Spec: storage.VolumeAttachmentSpec{
			NodeName: node,
			Attacher: csiSource.Driver,
			Source: storage.VolumeAttachmentSource{
				PersistentVolumeName: &pvName,
			},
		},
		Status: storage.VolumeAttachmentStatus{Attached: false},
	}

	_, err = c.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
	alreadyExist := false
	if err != nil {
		if !apierrs.IsAlreadyExists(err) {
			glog.Error(log("attacher.Attach failed: %v", err))
			return "", err
		}
		alreadyExist = true
	}

	if alreadyExist {
		glog.V(4).Info(log("attachment [%v] for volume [%v] already exists (will not be recreated)", attachID, csiSource.VolumeHandle))
	} else {
		glog.V(4).Info(log("attachment [%v] for volume [%v] created successfully", attachID, csiSource.VolumeHandle))
	}

	if _, err := c.waitForVolumeAttachment(csiSource.VolumeHandle, attachID, csiTimeout); err != nil {
		return "", err
	}

	glog.V(4).Info(log("attacher.Attach finished OK with VolumeAttachment object [%s]", attachID))

	return attachID, nil
}

func (c *csiAttacher) WaitForAttach(spec *volume.Spec, attachID string, pod *v1.Pod, timeout time.Duration) (string, error) {
	source, err := getCSISourceFromSpec(spec)
	if err != nil {
		glog.Error(log("attacher.WaitForAttach failed to extract CSI volume source: %v", err))
		return "", err
	}

	return c.waitForVolumeAttachment(source.VolumeHandle, attachID, timeout)
}

func (c *csiAttacher) waitForVolumeAttachment(volumeHandle, attachID string, timeout time.Duration) (string, error) {
	glog.V(4).Info(log("probing for updates from CSI driver for [attachment.ID=%v]", attachID))

	timer := time.NewTimer(timeout) // TODO (vladimirvivien) investigate making this configurable
	defer timer.Stop()

	return c.waitForVolumeAttachmentInternal(volumeHandle, attachID, timer, timeout)
}

func (c *csiAttacher) waitForVolumeAttachmentInternal(volumeHandle, attachID string, timer *time.Timer, timeout time.Duration) (string, error) {
	glog.V(4).Info(log("probing VolumeAttachment [id=%v]", attachID))
	attach, err := c.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
	if err != nil {
		glog.Error(log("attacher.WaitForAttach failed for volume [%s] (will continue to try): %v", volumeHandle, err))
		return "", err
	}
	// if being deleted, fail fast
	if attach.GetDeletionTimestamp() != nil {
		glog.Error(log("VolumeAttachment [%s] has deletion timestamp, will not continue to wait for attachment", attachID))
		return "", errors.New("volume attachment is being deleted")
	}
	// attachment OK
	if attach.Status.Attached {
		return attachID, nil
	}
	// driver reports attach error
	attachErr := attach.Status.AttachError
	if attachErr != nil {
		glog.Error(log("attachment for %v failed: %v", volumeHandle, attachErr.Message))
		return "", errors.New(attachErr.Message)
	}

	watcher, err := c.k8s.StorageV1beta1().VolumeAttachments().Watch(meta.SingleObject(meta.ObjectMeta{Name: attachID, ResourceVersion: attach.ResourceVersion}))
	if err != nil {
		return "", fmt.Errorf("watch error: %v for volume %v", err, volumeHandle)
	}

	ch := watcher.ResultChan()
	defer watcher.Stop()

	for {
		select {
		case event, ok := <-ch:
			if !ok {
				glog.Errorf("[attachment.ID=%v] watch channel had been closed", attachID)
				return "", errors.New("volume attachment watch channel had been closed")
			}

			switch event.Type {
			case watch.Added, watch.Modified:
				attach, _ := event.Object.(*storage.VolumeAttachment)
				// if being deleted, fail fast
				if attach.GetDeletionTimestamp() != nil {
					glog.Error(log("VolumeAttachment [%s] has deletion timestamp, will not continue to wait for attachment", attachID))
					return "", errors.New("volume attachment is being deleted")
				}
				// attachment OK
				if attach.Status.Attached {
					return attachID, nil
				}
				// driver reports attach error
				attachErr := attach.Status.AttachError
				if attachErr != nil {
					glog.Error(log("attachment for %v failed: %v", volumeHandle, attachErr.Message))
					return "", errors.New(attachErr.Message)
				}
			case watch.Deleted:
				// if deleted, fail fast
				glog.Error(log("VolumeAttachment [%s] has been deleted, will not continue to wait for attachment", attachID))
				return "", errors.New("volume attachment has been deleted")

			case watch.Error:
				// start another cycle and propagate its result
				return c.waitForVolumeAttachmentInternal(volumeHandle, attachID, timer, timeout)
			}

		case <-timer.C:
			glog.Error(log("attacher.WaitForAttach timeout after %v [volume=%v; attachment.ID=%v]", timeout, volumeHandle, attachID))
			return "", fmt.Errorf("attachment timeout for volume %v", volumeHandle)
		}
	}
}

func (c *csiAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
	glog.V(4).Info(log("probing attachment status for %d volume(s) ", len(specs)))

	attached := make(map[*volume.Spec]bool)

	for _, spec := range specs {
		if spec == nil {
			glog.Error(log("attacher.VolumesAreAttached missing volume.Spec"))
			return nil, errors.New("missing spec")
		}
		source, err := getCSISourceFromSpec(spec)
		if err != nil {
			glog.Error(log("attacher.VolumesAreAttached failed: %v", err))
			continue
		}

		attachID := getAttachmentName(source.VolumeHandle, source.Driver, string(nodeName))
		glog.V(4).Info(log("probing attachment status for VolumeAttachment %v", attachID))
		attach, err := c.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
		if err != nil {
			glog.Error(log("attacher.VolumesAreAttached failed for attach.ID=%v: %v", attachID, err))
			continue
		}
		glog.V(4).Info(log("attacher.VolumesAreAttached attachment [%v] has status.attached=%t", attachID, attach.Status.Attached))
		attached[spec] = attach.Status.Attached
	}

	return attached, nil
}

func (c *csiAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) {
	glog.V(4).Info(log("attacher.GetDeviceMountPath(%v)", spec))
	deviceMountPath, err := makeDeviceMountPath(c.plugin, spec)
	if err != nil {
		glog.Error(log("attacher.GetDeviceMountPath failed to make device mount path: %v", err))
		return "", err
	}
	glog.V(4).Infof("attacher.GetDeviceMountPath succeeded, deviceMountPath: %s", deviceMountPath)
	return deviceMountPath, nil
}

func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) (err error) {
	glog.V(4).Infof(log("attacher.MountDevice(%s, %s)", devicePath, deviceMountPath))

	mounted, err := isDirMounted(c.plugin, deviceMountPath)
	if err != nil {
		glog.Error(log("attacher.MountDevice failed while checking mount status for dir [%s]", deviceMountPath))
		return err
	}

	if mounted {
		glog.V(4).Info(log("attacher.MountDevice skipping mount, dir already mounted [%s]", deviceMountPath))
		return nil
	}

	// Setup
	if spec == nil {
		return fmt.Errorf("attacher.MountDevice failed, spec is nil")
	}
	csiSource, err := getCSISourceFromSpec(spec)
	if err != nil {
		glog.Error(log("attacher.MountDevice failed to get CSI persistent source: %v", err))
		return err
	}

	// Store volume metadata for UnmountDevice. Keep it around even if the
	// driver does not support NodeStage, UnmountDevice still needs it.
	if err = os.MkdirAll(deviceMountPath, 0750); err != nil {
		glog.Error(log("attacher.MountDevice failed to create dir %#v: %v", deviceMountPath, err))
		return err
	}
	glog.V(4).Info(log("created target path successfully [%s]", deviceMountPath))
	dataDir := filepath.Dir(deviceMountPath)
	data := map[string]string{
		volDataKey.volHandle:  csiSource.VolumeHandle,
		volDataKey.driverName: csiSource.Driver,
	}
	if err = saveVolumeData(dataDir, volDataFileName, data); err != nil {
		glog.Error(log("failed to save volume info data: %v", err))
		if cleanerr := os.RemoveAll(dataDir); cleanerr != nil {
			glog.Error(log("failed to remove dir after error [%s]: %v", dataDir, cleanerr))
		}
		return err
	}
	defer func() {
		if err != nil {
			// clean up metadata
			glog.Errorf(log("attacher.MountDevice failed: %v", err))
			if err := removeMountDir(c.plugin, deviceMountPath); err != nil {
				glog.Error(log("attacher.MountDevice failed to remove mount dir after error [%s]: %v", deviceMountPath, err))
			}
		}
	}()

	if c.csiClient == nil {
		c.csiClient = newCsiDriverClient(csiSource.Driver)
	}
	csi := c.csiClient

	ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
	defer cancel()
	// Check whether "STAGE_UNSTAGE_VOLUME" is set
	stageUnstageSet, err := hasStageUnstageCapability(ctx, csi)
	if err != nil {
		return err
	}
	if !stageUnstageSet {
		glog.Infof(log("attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..."))
		// defer does *not* remove the metadata file and it's correct - UnmountDevice needs it there.
		return nil
	}

	// Start MountDevice
	if deviceMountPath == "" {
		err = fmt.Errorf("attacher.MountDevice failed, deviceMountPath is empty")
		return err
	}

	nodeName := string(c.plugin.host.GetNodeName())
	attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, nodeName)

	// search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
	attachment, err := c.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
	if err != nil {
		return err // This err already has enough context ("VolumeAttachment xyz not found")
	}

	if attachment == nil {
		err = errors.New("no existing VolumeAttachment found")
		return err
	}
	publishVolumeInfo := attachment.Status.AttachmentMetadata

	nodeStageSecrets := map[string]string{}
	if csiSource.NodeStageSecretRef != nil {
		nodeStageSecrets, err = getCredentialsFromSecret(c.k8s, csiSource.NodeStageSecretRef)
		if err != nil {
			err = fmt.Errorf("fetching NodeStageSecretRef %s/%s failed: %v",
				csiSource.NodeStageSecretRef.Namespace, csiSource.NodeStageSecretRef.Name, err)
			return err
		}
	}

	//TODO (vladimirvivien) implement better AccessModes mapping between k8s and CSI
	accessMode := v1.ReadWriteOnce
	if spec.PersistentVolume.Spec.AccessModes != nil {
		accessMode = spec.PersistentVolume.Spec.AccessModes[0]
	}

	fsType := csiSource.FSType
	err = csi.NodeStageVolume(ctx,
		csiSource.VolumeHandle,
		publishVolumeInfo,
		deviceMountPath,
		fsType,
		accessMode,
		nodeStageSecrets,
		csiSource.VolumeAttributes)

	if err != nil {
		return err
	}

	glog.V(4).Infof(log("attacher.MountDevice successfully requested NodeStageVolume [%s]", deviceMountPath))
	return nil
}

var _ volume.Detacher = &csiAttacher{}

func (c *csiAttacher) Detach(volumeName string, nodeName types.NodeName) error {
	// volumeName in format driverName<SEP>volumeHandle generated by plugin.GetVolumeName()
	if volumeName == "" {
		glog.Error(log("detacher.Detach missing value for parameter volumeName"))
		return errors.New("missing expected parameter volumeName")
	}
	parts := strings.Split(volumeName, volNameSep)
	if len(parts) != 2 {
		glog.Error(log("detacher.Detach insufficient info encoded in volumeName"))
		return errors.New("volumeName missing expected data")
	}

	driverName := parts[0]
	volID := parts[1]
	attachID := getAttachmentName(volID, driverName, string(nodeName))
	if err := c.k8s.StorageV1beta1().VolumeAttachments().Delete(attachID, nil); err != nil {
		if apierrs.IsNotFound(err) {
			// object deleted or never existed, done
			glog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] not found, object deleted", attachID, volID))
			return nil
		}
		glog.Error(log("detacher.Detach failed to delete VolumeAttachment [%s]: %v", attachID, err))
		return err
	}

	glog.V(4).Info(log("detacher deleted ok VolumeAttachment.ID=%s", attachID))
	return c.waitForVolumeDetachment(volID, attachID)
}
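
// Detach expects volumeName in the form driverName<SEP>volumeHandle, as
// produced by plugin.GetVolumeName(). A standalone sketch of the round trip,
// assuming the separator is "^" (volNameSep's actual value is defined
// elsewhere in this package and may differ):
package main

import (
	"fmt"
	"strings"
)

const volNameSep = "^" // assumption for illustration only

func main() {
	volumeName := "com.example.csi" + volNameSep + "vol-0123"

	parts := strings.Split(volumeName, volNameSep)
	if len(parts) != 2 {
		fmt.Println("volumeName missing expected data")
		return
	}
	fmt.Println("driver:", parts[0], "volumeHandle:", parts[1])
}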

func (c *csiAttacher) waitForVolumeDetachment(volumeHandle, attachID string) error {
	glog.V(4).Info(log("probing for updates from CSI driver for [attachment.ID=%v]", attachID))

	timeout := c.waitSleepTime * 10
	timer := time.NewTimer(timeout) // TODO (vladimirvivien) investigate making this configurable
	defer timer.Stop()

	return c.waitForVolumeDetachmentInternal(volumeHandle, attachID, timer, timeout)
}

func (c *csiAttacher) waitForVolumeDetachmentInternal(volumeHandle, attachID string, timer *time.Timer, timeout time.Duration) error {
	glog.V(4).Info(log("probing VolumeAttachment [id=%v]", attachID))
	attach, err := c.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
	if err != nil {
		if apierrs.IsNotFound(err) {
			// object deleted or never existed, done
			glog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] not found, object deleted", attachID, volumeHandle))
			return nil
		}
		glog.Error(log("detacher.WaitForDetach failed for volume [%s] (will continue to try): %v", volumeHandle, err))
		return err
	}
	// driver reports detach error
	detachErr := attach.Status.DetachError
	if detachErr != nil {
		glog.Error(log("detachment for VolumeAttachment [%v] for volume [%s] failed: %v", attachID, volumeHandle, detachErr.Message))
		return errors.New(detachErr.Message)
	}

	watcher, err := c.k8s.StorageV1beta1().VolumeAttachments().Watch(meta.SingleObject(meta.ObjectMeta{Name: attachID, ResourceVersion: attach.ResourceVersion}))
	if err != nil {
		return fmt.Errorf("watch error: %v for volume %v", err, volumeHandle)
	}
	ch := watcher.ResultChan()
	defer watcher.Stop()

	for {
		select {
		case event, ok := <-ch:
			if !ok {
				glog.Errorf("[attachment.ID=%v] watch channel had been closed", attachID)
				return errors.New("volume attachment watch channel had been closed")
			}

			switch event.Type {
			case watch.Added, watch.Modified:
				attach, _ := event.Object.(*storage.VolumeAttachment)
				// driver reports detach error
				detachErr := attach.Status.DetachError
				if detachErr != nil {
					glog.Error(log("detachment for VolumeAttachment [%v] for volume [%s] failed: %v", attachID, volumeHandle, detachErr.Message))
					return errors.New(detachErr.Message)
				}
			case watch.Deleted:
				// object deleted
				glog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] has been deleted", attachID, volumeHandle))
				return nil

			case watch.Error:
				// start another cycle and propagate its result
				return c.waitForVolumeDetachmentInternal(volumeHandle, attachID, timer, timeout)
			}

		case <-timer.C:
			glog.Error(log("detacher.WaitForDetach timeout after %v [volume=%v; attachment.ID=%v]", timeout, volumeHandle, attachID))
			return fmt.Errorf("detachment timeout for volume %v", volumeHandle)
		}
	}
}

func (c *csiAttacher) UnmountDevice(deviceMountPath string) error {
	glog.V(4).Info(log("attacher.UnmountDevice(%s)", deviceMountPath))

	// Setup
	var driverName, volID string
	dataDir := filepath.Dir(deviceMountPath)
	data, err := loadVolumeData(dataDir, volDataFileName)
	if err == nil {
		driverName = data[volDataKey.driverName]
		volID = data[volDataKey.volHandle]
	} else {
		glog.Error(log("UnmountDevice failed to load volume data file [%s]: %v", dataDir, err))

		// The volume might have been mounted by old CSI volume plugin. Fall back to the old behavior: read PV from API server
		driverName, volID, err = getDriverAndVolNameFromDeviceMountPath(c.k8s, deviceMountPath)
		if err != nil {
			glog.Errorf(log("attacher.UnmountDevice failed to get driver and volume name from device mount path: %v", err))
			return err
		}
	}

	if c.csiClient == nil {
		c.csiClient = newCsiDriverClient(driverName)
	}
	csi := c.csiClient

	ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
	defer cancel()
	// Check whether "STAGE_UNSTAGE_VOLUME" is set
	stageUnstageSet, err := hasStageUnstageCapability(ctx, csi)
	if err != nil {
		glog.Errorf(log("attacher.UnmountDevice failed to check whether STAGE_UNSTAGE_VOLUME set: %v", err))
		return err
	}
	if !stageUnstageSet {
		glog.Infof(log("attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice..."))
		// Just delete the global directory + json file
		if err := removeMountDir(c.plugin, deviceMountPath); err != nil {
			return fmt.Errorf("failed to clean up global mount %s: %s", dataDir, err)
		}

		return nil
	}

	// Start UnmountDevice
	err = csi.NodeUnstageVolume(ctx,
		volID,
		deviceMountPath)

	if err != nil {
		glog.Errorf(log("attacher.UnmountDevice failed: %v", err))
		return err
	}

	// Delete the global directory + json file
	if err := removeMountDir(c.plugin, deviceMountPath); err != nil {
		return fmt.Errorf("failed to clean up global mount %s: %s", dataDir, err)
	}

	glog.V(4).Infof(log("attacher.UnmountDevice successfully requested NodeUnstageVolume [%s]", deviceMountPath))
	return nil
}

func hasStageUnstageCapability(ctx context.Context, csi csiClient) (bool, error) {
	capabilities, err := csi.NodeGetCapabilities(ctx)
	if err != nil {
		return false, err
	}

	stageUnstageSet := false
	if capabilities == nil {
		return false, nil
	}
	for _, capability := range capabilities {
		if capability.GetRpc().GetType() == csipb.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME {
			stageUnstageSet = true
		}
	}
	return stageUnstageSet, nil
}
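
// A sketch of how the mount/unmount paths consume the capability probe above:
// both bound the gRPC call with csiTimeout and treat "no STAGE_UNSTAGE_VOLUME"
// as "nothing to do". The function name is illustrative; it is not part of the
// vendored file, and it assumes the package-level csiClient, csiTimeout, and
// hasStageUnstageCapability shown in this file.
func skipNodeStage(csi csiClient) (bool, error) {
	ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
	defer cancel()

	stageUnstageSet, err := hasStageUnstageCapability(ctx, csi)
	if err != nil {
		return false, err
	}
	// When the driver does not advertise the capability, NodeStageVolume /
	// NodeUnstageVolume are skipped entirely.
	return !stageUnstageSet, nil
}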

// getAttachmentName returns csi-<sha256(volName+csiDriverName+nodeName)>
func getAttachmentName(volName, csiDriverName, nodeName string) string {
	result := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", volName, csiDriverName, nodeName)))
	return fmt.Sprintf("csi-%x", result)
}
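
// The attachment name is deterministic, so any component can recompute it
// from (volumeHandle, driver, node) without coordination. A standalone
// illustration of the same hashing scheme with made-up inputs:
package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	volName, driver, node := "vol-0123", "com.example.csi", "node-1"
	result := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", volName, driver, node)))
	fmt.Printf("csi-%x\n", result) // same ID on every node and controller
}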

func makeDeviceMountPath(plugin *csiPlugin, spec *volume.Spec) (string, error) {
	if spec == nil {
		return "", fmt.Errorf("makeDeviceMountPath failed, spec is nil")
	}

	pvName := spec.PersistentVolume.Name
	if pvName == "" {
		return "", fmt.Errorf("makeDeviceMountPath failed, pv name empty")
	}

	return path.Join(plugin.host.GetPluginDir(plugin.GetPluginName()), persistentVolumeInGlobalPath, pvName, globalMountInGlobalPath), nil
}

func getDriverAndVolNameFromDeviceMountPath(k8s kubernetes.Interface, deviceMountPath string) (string, string, error) {
	// deviceMountPath structure: /var/lib/kubelet/plugins/kubernetes.io/csi/pv/{pvname}/globalmount
	dir := filepath.Dir(deviceMountPath)
	if file := filepath.Base(deviceMountPath); file != globalMountInGlobalPath {
		return "", "", fmt.Errorf("getDriverAndVolNameFromDeviceMountPath failed, path did not end in %s", globalMountInGlobalPath)
	}
	// dir is now /var/lib/kubelet/plugins/kubernetes.io/csi/pv/{pvname}
	pvName := filepath.Base(dir)

	// Get PV and check for errors
	pv, err := k8s.CoreV1().PersistentVolumes().Get(pvName, meta.GetOptions{})
	if err != nil {
		return "", "", err
	}
	if pv == nil || pv.Spec.CSI == nil {
		return "", "", fmt.Errorf("getDriverAndVolNameFromDeviceMountPath could not find CSI Persistent Volume Source for pv: %s", pvName)
	}

	// Get VolumeHandle and PluginName from pv
	csiSource := pv.Spec.CSI
	if csiSource.Driver == "" {
		return "", "", fmt.Errorf("getDriverAndVolNameFromDeviceMountPath failed, driver name empty")
	}
	if csiSource.VolumeHandle == "" {
		return "", "", fmt.Errorf("getDriverAndVolNameFromDeviceMountPath failed, VolumeHandle empty")
	}

	return csiSource.Driver, csiSource.VolumeHandle, nil
}
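
// getDriverAndVolNameFromDeviceMountPath relies purely on path structure:
// /var/lib/kubelet/plugins/kubernetes.io/csi/pv/{pvname}/globalmount. A quick
// standalone check of that parsing (no API server needed; PV name is made up):
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	deviceMountPath := "/var/lib/kubelet/plugins/kubernetes.io/csi/pv/my-pv/globalmount"

	if filepath.Base(deviceMountPath) != "globalmount" {
		fmt.Println("path did not end in globalmount")
		return
	}
	// Strip the trailing "globalmount"; the PV name is the last element left.
	pvName := filepath.Base(filepath.Dir(deviceMountPath))
	fmt.Println("pv name:", pvName) // my-pv
}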
775
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_attacher_test.go
generated
vendored
@ -1,775 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package csi

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"testing"
	"time"

	storage "k8s.io/api/storage/v1beta1"
	apierrs "k8s.io/apimachinery/pkg/api/errors"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/watch"
	fakeclient "k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"
	utiltesting "k8s.io/client-go/util/testing"
	"k8s.io/kubernetes/pkg/volume"
	volumetest "k8s.io/kubernetes/pkg/volume/testing"
)

func makeTestAttachment(attachID, nodeName, pvName string) *storage.VolumeAttachment {
	return &storage.VolumeAttachment{
		ObjectMeta: meta.ObjectMeta{
			Name: attachID,
		},
		Spec: storage.VolumeAttachmentSpec{
			NodeName: nodeName,
			Attacher: "mock",
			Source: storage.VolumeAttachmentSource{
				PersistentVolumeName: &pvName,
			},
		},
		Status: storage.VolumeAttachmentStatus{
			Attached:    false,
			AttachError: nil,
			DetachError: nil,
		},
	}
}
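
// makeTestAttachment pairs naturally with the fake clientset: pre-load an
// attachment, then drive the attacher against it. A hedged sketch (the helper
// name seedAttachment is illustrative, not part of the vendored tests):
func seedAttachment(t *testing.T, client *fakeclient.Clientset) {
	attachment := makeTestAttachment("csi-deadbeef", "node-1", "test-pv")
	attachment.Status.Attached = true

	if _, err := client.StorageV1beta1().VolumeAttachments().Create(attachment); err != nil {
		t.Fatalf("failed to seed attachment: %v", err)
	}
}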
|
||||
|
||||
func TestAttacherAttach(t *testing.T) {

	testCases := []struct {
		name                string
		nodeName            string
		driverName          string
		volumeName          string
		attachID            string
		injectAttacherError bool
		shouldFail          bool
	}{
		{
			name:       "test ok 1",
			nodeName:   "testnode-01",
			driverName: "testdriver-01",
			volumeName: "testvol-01",
			attachID:   getAttachmentName("testvol-01", "testdriver-01", "testnode-01"),
		},
		{
			name:       "test ok 2",
			nodeName:   "node02",
			driverName: "driver02",
			volumeName: "vol02",
			attachID:   getAttachmentName("vol02", "driver02", "node02"),
		},
		{
			name:       "mismatch vol",
			nodeName:   "node02",
			driverName: "driver02",
			volumeName: "vol01",
			attachID:   getAttachmentName("vol02", "driver02", "node02"),
			shouldFail: true,
		},
		{
			name:       "mismatch driver",
			nodeName:   "node02",
			driverName: "driver000",
			volumeName: "vol02",
			attachID:   getAttachmentName("vol02", "driver02", "node02"),
			shouldFail: true,
		},
		{
			name:       "mismatch node",
			nodeName:   "node000",
			driverName: "driver000",
			volumeName: "vol02",
			attachID:   getAttachmentName("vol02", "driver02", "node02"),
			shouldFail: true,
		},
		{
			name:                "attacher error",
			nodeName:            "node02",
			driverName:          "driver02",
			volumeName:          "vol02",
			attachID:            getAttachmentName("vol02", "driver02", "node02"),
			injectAttacherError: true,
			shouldFail:          true,
		},
	}

	// attacher loop
	for i, tc := range testCases {
		t.Logf("test case: %s", tc.name)

		plug, fakeWatcher, tmpDir, _ := newTestWatchPlugin(t)
		defer os.RemoveAll(tmpDir)

		attacher, err := plug.NewAttacher()
		if err != nil {
			t.Fatalf("failed to create new attacher: %v", err)
		}

		csiAttacher := attacher.(*csiAttacher)

		spec := volume.NewSpecFromPersistentVolume(makeTestPV(fmt.Sprintf("test-pv%d", i), 10, tc.driverName, tc.volumeName), false)

		go func(id, nodename string, fail bool) {
			attachID, err := csiAttacher.Attach(spec, types.NodeName(nodename))
			if !fail && err != nil {
				t.Errorf("expecting no failure, but got err: %v", err)
			}
			if fail && err == nil {
				t.Errorf("expecting failure, but got no err")
			}
			if attachID != id && !fail {
				t.Errorf("expecting attachID %v, got %v", id, attachID)
			}
		}(tc.attachID, tc.nodeName, tc.shouldFail)

		// update attachment to avoid long waitForAttachment
		ticker := time.NewTicker(10 * time.Millisecond)
		defer ticker.Stop()
		// wait for attachment to be saved
		var attach *storage.VolumeAttachment
		for i := 0; i < 100; i++ {
			attach, err = csiAttacher.k8s.StorageV1beta1().VolumeAttachments().Get(tc.attachID, meta.GetOptions{})
			if err != nil {
				if apierrs.IsNotFound(err) {
					<-ticker.C
					continue
				}
				t.Error(err)
			}
			if attach != nil {
				break
			}
		}

		if attach == nil {
			t.Logf("attachment not found for id:%v", tc.attachID)
		} else {
			if tc.injectAttacherError {
				attach.Status.Attached = false
				attach.Status.AttachError = &storage.VolumeError{
					Message: "attacher error",
				}
			} else {
				attach.Status.Attached = true
			}
			_, err = csiAttacher.k8s.StorageV1beta1().VolumeAttachments().Update(attach)
			if err != nil {
				t.Error(err)
			}
			fakeWatcher.Modify(attach)
		}
	}
}
func TestAttacherWaitForVolumeAttachment(t *testing.T) {
	nodeName := "test-node"
	testCases := []struct {
		name                  string
		initAttached          bool
		finalAttached         bool
		triggerWatchEventTime time.Duration
		initAttachErr         *storage.VolumeError
		finalAttachErr        *storage.VolumeError
		timeout               time.Duration
		shouldFail            bool
	}{
		{
			name:         "attach success at get",
			initAttached: true,
			timeout:      50 * time.Millisecond,
			shouldFail:   false,
		},
		{
			name:          "attachment error at get",
			initAttachErr: &storage.VolumeError{Message: "missing volume"},
			timeout:       30 * time.Millisecond,
			shouldFail:    true,
		},
		{
			name:                  "attach success at watch",
			initAttached:          false,
			finalAttached:         true,
			triggerWatchEventTime: 5 * time.Millisecond,
			timeout:               50 * time.Millisecond,
			shouldFail:            false,
		},
		{
			name:                  "attachment error at watch",
			initAttached:          false,
			finalAttached:         false,
			finalAttachErr:        &storage.VolumeError{Message: "missing volume"},
			triggerWatchEventTime: 5 * time.Millisecond,
			timeout:               30 * time.Millisecond,
			shouldFail:            true,
		},
		{
			name:                  "time ran out",
			initAttached:          false,
			finalAttached:         true,
			triggerWatchEventTime: 100 * time.Millisecond,
			timeout:               50 * time.Millisecond,
			shouldFail:            true,
		},
	}

	for i, tc := range testCases {
		plug, fakeWatcher, tmpDir, _ := newTestWatchPlugin(t)
		defer os.RemoveAll(tmpDir)

		attacher, err := plug.NewAttacher()
		if err != nil {
			t.Fatalf("failed to create new attacher: %v", err)
		}
		csiAttacher := attacher.(*csiAttacher)
		t.Logf("running test: %v", tc.name)
		pvName := fmt.Sprintf("test-pv-%d", i)
		volID := fmt.Sprintf("test-vol-%d", i)
		attachID := getAttachmentName(volID, testDriver, nodeName)
		attachment := makeTestAttachment(attachID, nodeName, pvName)
		attachment.Status.Attached = tc.initAttached
		attachment.Status.AttachError = tc.initAttachErr
		_, err = csiAttacher.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
		if err != nil {
			t.Fatalf("failed to attach: %v", err)
		}

		triggerWatchEventTime := tc.triggerWatchEventTime
		finalAttached := tc.finalAttached
		finalAttachErr := tc.finalAttachErr
		// after timeout, fakeWatcher will be closed by csiAttacher.waitForVolumeAttachment
		if tc.triggerWatchEventTime > 0 && tc.triggerWatchEventTime < tc.timeout {
			go func() {
				time.Sleep(triggerWatchEventTime)
				attachment := makeTestAttachment(attachID, nodeName, pvName)
				attachment.Status.Attached = finalAttached
				attachment.Status.AttachError = finalAttachErr
				fakeWatcher.Modify(attachment)
			}()
		}

		retID, err := csiAttacher.waitForVolumeAttachment(volID, attachID, tc.timeout)
		if tc.shouldFail && err == nil {
			t.Error("expecting failure, but err is nil")
		}
		if tc.initAttachErr != nil {
			if tc.initAttachErr.Message != err.Error() {
				t.Errorf("expecting error [%v], got [%v]", tc.initAttachErr.Message, err.Error())
			}
		}
		if err == nil && retID != attachID {
			t.Errorf("attacher.WaitForAttach not returning attachment ID")
		}
	}
}
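The wait logic these cases exercise is, in essence, a select over a watch channel and a timer. A stripped-down sketch of that pattern follows; waitAttached is a hypothetical reduction, not the tree's actual waitForVolumeAttachment:

package example

import (
	"errors"
	"time"

	storage "k8s.io/api/storage/v1beta1"
	"k8s.io/apimachinery/pkg/watch"
)

// waitAttached consumes events from a watch until the attachment reports
// Attached, an AttachError appears, or the timeout fires.
func waitAttached(w watch.Interface, timeout time.Duration) error {
	defer w.Stop()
	timer := time.NewTimer(timeout)
	defer timer.Stop()
	for {
		select {
		case ev, ok := <-w.ResultChan():
			if !ok {
				return errors.New("watch channel closed")
			}
			attach, ok := ev.Object.(*storage.VolumeAttachment)
			if !ok {
				continue
			}
			if attach.Status.AttachError != nil {
				return errors.New(attach.Status.AttachError.Message)
			}
			if attach.Status.Attached {
				return nil
			}
		case <-timer.C:
			return errors.New("attach timeout")
		}
	}
}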
func TestAttacherVolumesAreAttached(t *testing.T) {
	plug, tmpDir := newTestPlugin(t)
	defer os.RemoveAll(tmpDir)

	attacher, err := plug.NewAttacher()
	if err != nil {
		t.Fatalf("failed to create new attacher: %v", err)
	}
	csiAttacher := attacher.(*csiAttacher)
	nodeName := "test-node"

	testCases := []struct {
		name          string
		attachedStats map[string]bool
	}{
		{"attach + detach", map[string]bool{"vol-01": true, "vol-02": true, "vol-03": false, "vol-04": false, "vol-05": true}},
		{"all detached", map[string]bool{"vol-11": false, "vol-12": false, "vol-13": false, "vol-14": false, "vol-15": false}},
		{"all attached", map[string]bool{"vol-21": true, "vol-22": true, "vol-23": true, "vol-24": true, "vol-25": true}},
	}

	for _, tc := range testCases {
		var specs []*volume.Spec
		// create and save volume attachments
		for volName, stat := range tc.attachedStats {
			pv := makeTestPV("test-pv", 10, testDriver, volName)
			spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
			specs = append(specs, spec)
			attachID := getAttachmentName(volName, testDriver, nodeName)
			attachment := makeTestAttachment(attachID, nodeName, pv.GetName())
			attachment.Status.Attached = stat
			_, err := csiAttacher.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
			if err != nil {
				t.Fatalf("failed to attach: %v", err)
			}
		}

		// retrieve attached status
		stats, err := csiAttacher.VolumesAreAttached(specs, types.NodeName(nodeName))
		if err != nil {
			t.Fatal(err)
		}
		if len(tc.attachedStats) != len(stats) {
			t.Errorf("expecting %d attachment status, got %d", len(tc.attachedStats), len(stats))
		}

		// compare attachment status for each spec
		for spec, stat := range stats {
			source, err := getCSISourceFromSpec(spec)
			if err != nil {
				t.Error(err)
			}
			if stat != tc.attachedStats[source.VolumeHandle] {
				t.Errorf("expecting volume attachment %t, got %t", tc.attachedStats[source.VolumeHandle], stat)
			}
		}
	}
}

func TestAttacherDetach(t *testing.T) {

	nodeName := "test-node"
	testCases := []struct {
		name       string
		volID      string
		attachID   string
		shouldFail bool
		reactor    func(action core.Action) (handled bool, ret runtime.Object, err error)
	}{
		{name: "normal test", volID: "vol-001", attachID: getAttachmentName("vol-001", testDriver, nodeName)},
		{name: "normal test 2", volID: "vol-002", attachID: getAttachmentName("vol-002", testDriver, nodeName)},
		{name: "object not found", volID: "vol-non-existing", attachID: getAttachmentName("vol-003", testDriver, nodeName)},
		{
			name:       "API error",
			volID:      "vol-004",
			attachID:   getAttachmentName("vol-004", testDriver, nodeName),
			shouldFail: true, // All other API errors should be propagated to caller
			reactor: func(action core.Action) (handled bool, ret runtime.Object, err error) {
				// return Forbidden to all DELETE requests
				if action.Matches("delete", "volumeattachments") {
					return true, nil, apierrs.NewForbidden(action.GetResource().GroupResource(), action.GetNamespace(), fmt.Errorf("mock error"))
				}
				return false, nil, nil
			},
		},
	}

	for _, tc := range testCases {
		t.Logf("running test: %v", tc.name)
		plug, fakeWatcher, tmpDir, client := newTestWatchPlugin(t)
		defer os.RemoveAll(tmpDir)
		if tc.reactor != nil {
			client.PrependReactor("*", "*", tc.reactor)
		}

		attacher, err0 := plug.NewAttacher()
		if err0 != nil {
			t.Fatalf("failed to create new attacher: %v", err0)
		}
		csiAttacher := attacher.(*csiAttacher)

		pv := makeTestPV("test-pv", 10, testDriver, tc.volID)
		spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
		attachment := makeTestAttachment(tc.attachID, nodeName, "test-pv")
		_, err := csiAttacher.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
		if err != nil {
			t.Fatalf("failed to attach: %v", err)
		}
		volumeName, err := plug.GetVolumeName(spec)
		if err != nil {
			t.Errorf("test case %s failed: %v", tc.name, err)
		}
		go func() {
			fakeWatcher.Delete(attachment)
		}()
		err = csiAttacher.Detach(volumeName, types.NodeName(nodeName))
		if tc.shouldFail && err == nil {
			t.Fatal("expecting failure, but err = nil")
		}
		if !tc.shouldFail && err != nil {
			t.Fatalf("unexpected err: %v", err)
		}
		attach, err := csiAttacher.k8s.StorageV1beta1().VolumeAttachments().Get(tc.attachID, meta.GetOptions{})
		if err != nil {
			if !apierrs.IsNotFound(err) {
				t.Fatalf("unexpected err: %v", err)
			}
		} else {
			if attach == nil {
				t.Errorf("expecting attachment not to be nil, but it is")
			}
		}
	}
}
func TestAttacherGetDeviceMountPath(t *testing.T) {
	// Setup
	// Create a new attacher
	plug, _, tmpDir, _ := newTestWatchPlugin(t)
	defer os.RemoveAll(tmpDir)
	attacher, err0 := plug.NewAttacher()
	if err0 != nil {
		t.Fatalf("failed to create new attacher: %v", err0)
	}
	csiAttacher := attacher.(*csiAttacher)

	pluginDir := csiAttacher.plugin.host.GetPluginDir(plug.GetPluginName())

	testCases := []struct {
		testName          string
		pvName            string
		expectedMountPath string
		shouldFail        bool
	}{
		{
			testName:          "normal test",
			pvName:            "test-pv1",
			expectedMountPath: pluginDir + "/pv/test-pv1/globalmount",
		},
		{
			testName:          "no pv name",
			pvName:            "",
			expectedMountPath: pluginDir + "/pv/test-pv1/globalmount",
			shouldFail:        true,
		},
	}

	for _, tc := range testCases {
		t.Logf("Running test case: %s", tc.testName)
		var spec *volume.Spec

		// Create spec
		pv := makeTestPV(tc.pvName, 10, testDriver, "testvol")
		spec = volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)

		// Run
		mountPath, err := csiAttacher.GetDeviceMountPath(spec)

		// Verify
		if err != nil && !tc.shouldFail {
			t.Errorf("test should not fail, but error occurred: %v", err)
		} else if err == nil {
			if tc.shouldFail {
				t.Errorf("test should fail, but no error occurred")
			} else if mountPath != tc.expectedMountPath {
				t.Errorf("mountPath does not equal expectedMountPath. Got: %s. Expected: %s", mountPath, tc.expectedMountPath)
			}
		}
	}
}
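The expected paths above follow the layout {pluginDir}/pv/{pvName}/globalmount. A hedged sketch of how such a path can be assembled; makeDeviceMountPath is a hypothetical helper, while the real GetDeviceMountPath lives in csi_attacher.go and additionally validates the spec:

package example

import "path/filepath"

// makeDeviceMountPath reduces the layout these cases assert:
// <plugin dir>/pv/<pv name>/globalmount. The real implementation also
// rejects an empty PV name, which is what the "no pv name" case checks.
func makeDeviceMountPath(pluginDir, pvName string) string {
	return filepath.Join(pluginDir, "pv", pvName, "globalmount")
}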
func TestAttacherMountDevice(t *testing.T) {
	testCases := []struct {
		testName        string
		volName         string
		devicePath      string
		deviceMountPath string
		stageUnstageSet bool
		shouldFail      bool
	}{
		{
			testName:        "normal",
			volName:         "test-vol1",
			devicePath:      "path1",
			deviceMountPath: "path2",
			stageUnstageSet: true,
		},
		{
			testName:        "no vol name",
			volName:         "",
			devicePath:      "path1",
			deviceMountPath: "path2",
			stageUnstageSet: true,
			shouldFail:      true,
		},
		{
			testName:        "no device path",
			volName:         "test-vol1",
			devicePath:      "",
			deviceMountPath: "path2",
			stageUnstageSet: true,
			shouldFail:      false,
		},
		{
			testName:        "no device mount path",
			volName:         "test-vol1",
			devicePath:      "path1",
			deviceMountPath: "",
			stageUnstageSet: true,
			shouldFail:      true,
		},
		{
			testName:        "stage_unstage cap not set",
			volName:         "test-vol1",
			devicePath:      "path1",
			deviceMountPath: "path2",
			stageUnstageSet: false,
		},
	}

	for _, tc := range testCases {
		t.Logf("Running test case: %s", tc.testName)
		var spec *volume.Spec
		pvName := "test-pv"

		// Setup
		// Create a new attacher
		plug, fakeWatcher, tmpDir, _ := newTestWatchPlugin(t)
		defer os.RemoveAll(tmpDir)
		attacher, err0 := plug.NewAttacher()
		if err0 != nil {
			t.Fatalf("failed to create new attacher: %v", err0)
		}
		csiAttacher := attacher.(*csiAttacher)
		csiAttacher.csiClient = setupClient(t, tc.stageUnstageSet)

		nodeName := string(csiAttacher.plugin.host.GetNodeName())

		// Create spec
		pv := makeTestPV(pvName, 10, testDriver, tc.volName)
		spec = volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)

		attachID := getAttachmentName(tc.volName, testDriver, nodeName)

		// Set up volume attachment
		attachment := makeTestAttachment(attachID, nodeName, pvName)
		_, err := csiAttacher.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
		if err != nil {
			t.Fatalf("failed to attach: %v", err)
		}
		go func() {
			fakeWatcher.Delete(attachment)
		}()

		// Run
		err = csiAttacher.MountDevice(spec, tc.devicePath, tc.deviceMountPath)

		// Verify
		if err != nil {
			if !tc.shouldFail {
				t.Errorf("test should not fail, but error occurred: %v", err)
			}
			continue
		}
		if err == nil && tc.shouldFail {
			t.Errorf("test should fail, but no error occurred")
		}

		// Verify call goes through all the way
		numStaged := 1
		if !tc.stageUnstageSet {
			numStaged = 0
		}

		cdc := csiAttacher.csiClient.(*fakeCsiDriverClient)
		staged := cdc.nodeClient.GetNodeStagedVolumes()
		if len(staged) != numStaged {
			t.Errorf("got wrong number of staged volumes, expecting %v got: %v", numStaged, len(staged))
		}
		if tc.stageUnstageSet {
			gotPath, ok := staged[tc.volName]
			if !ok {
				t.Errorf("could not find staged volume: %s", tc.volName)
			}
			if gotPath != tc.deviceMountPath {
				t.Errorf("expected mount path: %s. got: %s", tc.deviceMountPath, gotPath)
			}
		}
	}
}
func TestAttacherUnmountDevice(t *testing.T) {
	testCases := []struct {
		testName        string
		volID           string
		deviceMountPath string
		jsonFile        string
		createPV        bool
		stageUnstageSet bool
		shouldFail      bool
	}{
		{
			testName:        "normal, json file exists",
			volID:           "project/zone/test-vol1",
			deviceMountPath: "plugins/csi/pv/test-pv-name/globalmount",
			jsonFile:        `{"driverName": "csi", "volumeHandle":"project/zone/test-vol1"}`,
			createPV:        false,
			stageUnstageSet: true,
		},
		{
			testName:        "normal, json file doesn't exist -> use PV",
			volID:           "project/zone/test-vol1",
			deviceMountPath: "plugins/csi/pv/test-pv-name/globalmount",
			jsonFile:        "",
			createPV:        true,
			stageUnstageSet: true,
		},
		{
			testName:        "invalid json -> use PV",
			volID:           "project/zone/test-vol1",
			deviceMountPath: "plugins/csi/pv/test-pv-name/globalmount",
			jsonFile:        `{"driverName"}}`,
			createPV:        true,
			stageUnstageSet: true,
		},
		{
			testName:        "no json, no PV.volID",
			volID:           "",
			deviceMountPath: "plugins/csi/pv/test-pv-name/globalmount",
			jsonFile:        "",
			createPV:        true,
			shouldFail:      true,
		},
		{
			testName:        "no json, no PV",
			volID:           "project/zone/test-vol1",
			deviceMountPath: "plugins/csi/pv/test-pv-name/globalmount",
			jsonFile:        "",
			createPV:        false,
			stageUnstageSet: true,
			shouldFail:      true,
		},
		{
			testName:        "stage_unstage not set no vars should not fail",
			stageUnstageSet: false,
		},
	}

	for _, tc := range testCases {
		t.Logf("Running test case: %s", tc.testName)
		// Setup
		// Create a new attacher
		plug, _, tmpDir, _ := newTestWatchPlugin(t)
		defer os.RemoveAll(tmpDir)
		attacher, err0 := plug.NewAttacher()
		if err0 != nil {
			t.Fatalf("failed to create new attacher: %v", err0)
		}
		csiAttacher := attacher.(*csiAttacher)
		csiAttacher.csiClient = setupClient(t, tc.stageUnstageSet)

		if tc.deviceMountPath != "" {
			tc.deviceMountPath = filepath.Join(tmpDir, tc.deviceMountPath)
		}

		// Add the volume to NodeStagedVolumes
		cdc := csiAttacher.csiClient.(*fakeCsiDriverClient)
		cdc.nodeClient.AddNodeStagedVolume(tc.volID, tc.deviceMountPath)

		// Make JSON for this object
		if tc.deviceMountPath != "" {
			if err := os.MkdirAll(tc.deviceMountPath, 0755); err != nil {
				t.Fatalf("error creating directory %s: %s", tc.deviceMountPath, err)
			}
		}
		dir := filepath.Dir(tc.deviceMountPath)
		if tc.jsonFile != "" {
			dataPath := filepath.Join(dir, volDataFileName)
			if err := ioutil.WriteFile(dataPath, []byte(tc.jsonFile), 0644); err != nil {
				t.Fatalf("error creating %s: %s", dataPath, err)
			}
		}
		if tc.createPV {
			// Make the PV for this object
			pvName := filepath.Base(dir)
			pv := makeTestPV(pvName, 5, "csi", tc.volID)
			_, err := csiAttacher.k8s.CoreV1().PersistentVolumes().Create(pv)
			if err != nil && !tc.shouldFail {
				t.Fatalf("Failed to create PV: %v", err)
			}
		}

		// Run
		err := csiAttacher.UnmountDevice(tc.deviceMountPath)
		// Verify
		if err != nil {
			if !tc.shouldFail {
				t.Errorf("test should not fail, but error occurred: %v", err)
			}
			continue
		}
		if err == nil && tc.shouldFail {
			t.Errorf("test should fail, but no error occurred")
		}

		// Verify call goes through all the way
		expectedSet := 0
		if !tc.stageUnstageSet {
			expectedSet = 1
		}
		staged := cdc.nodeClient.GetNodeStagedVolumes()
		if len(staged) != expectedSet {
			t.Errorf("got wrong number of staged volumes, expecting %v got: %v", expectedSet, len(staged))
		}

		_, ok := staged[tc.volID]
		if ok && tc.stageUnstageSet {
			t.Errorf("found unexpected staged volume: %s", tc.volID)
		} else if !ok && !tc.stageUnstageSet {
			t.Errorf("could not find expected staged volume: %s", tc.volID)
		}

		if tc.jsonFile != "" && !tc.shouldFail {
			dataPath := filepath.Join(dir, volDataFileName)
			if _, err := os.Stat(dataPath); !os.IsNotExist(err) {
				if err != nil {
					t.Errorf("error checking file %s: %s", dataPath, err)
				} else {
					t.Errorf("json file %s should not exist, but it does", dataPath)
				}
			} else {
				t.Logf("json file %s was correctly removed", dataPath)
			}
		}
	}
}
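As the jsonFile fixtures above show, UnmountDevice can recover the driver name and volume handle from a small JSON sidecar (volDataFileName) saved next to the mount, falling back to the PV when the file is absent or invalid. A sketch of reading that file, assuming only the two keys present in the fixtures:

package example

import (
	"encoding/json"
	"io/ioutil"
)

// volData mirrors the two fields exercised by the fixtures; the real file may
// carry more keys (see volDataKey in the plugin sources).
type volData struct {
	DriverName   string `json:"driverName"`
	VolumeHandle string `json:"volumeHandle"`
}

func loadVolData(path string) (*volData, error) {
	b, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	d := &volData{}
	if err := json.Unmarshal(b, d); err != nil {
		return nil, err
	}
	return d, nil
}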
// create a plugin mgr to load plugins and set up a fake client
func newTestWatchPlugin(t *testing.T) (*csiPlugin, *watch.RaceFreeFakeWatcher, string, *fakeclient.Clientset) {
	tmpDir, err := utiltesting.MkTmpdir("csi-test")
	if err != nil {
		t.Fatalf("can't create temp dir: %v", err)
	}

	fakeClient := fakeclient.NewSimpleClientset()
	fakeWatcher := watch.NewRaceFreeFake()
	fakeClient.Fake.PrependWatchReactor("*", core.DefaultWatchReactor(fakeWatcher, nil))
	fakeClient.Fake.WatchReactionChain = fakeClient.Fake.WatchReactionChain[:1]
	host := volumetest.NewFakeVolumeHost(
		tmpDir,
		fakeClient,
		nil,
	)
	plugMgr := &volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host)

	plug, err := plugMgr.FindPluginByName(csiPluginName)
	if err != nil {
		t.Fatalf("can't find plugin %v", csiPluginName)
	}

	csiPlug, ok := plug.(*csiPlugin)
	if !ok {
		t.Fatalf("cannot assert plugin to be type csiPlugin")
	}

	return csiPlug, fakeWatcher, tmpDir, fakeClient
}
283
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_block.go
generated
vendored
@ -1,283 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package csi

import (
	"context"
	"errors"
	"fmt"
	"os"
	"path/filepath"

	"github.com/golang/glog"

	"k8s.io/api/core/v1"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/volume"
)

type csiBlockMapper struct {
	k8s        kubernetes.Interface
	csiClient  csiClient
	plugin     *csiPlugin
	driverName string
	specName   string
	volumeID   string
	readOnly   bool
	spec       *volume.Spec
	podUID     types.UID
	volumeInfo map[string]string
}

var _ volume.BlockVolumeMapper = &csiBlockMapper{}

// GetGlobalMapPath returns a path (on the node) to which the devicePath will be symlinked.
// Example: plugins/kubernetes.io/csi/volumeDevices/{volumeID}
func (m *csiBlockMapper) GetGlobalMapPath(spec *volume.Spec) (string, error) {
	dir := getVolumeDevicePluginDir(spec.Name(), m.plugin.host)
	glog.V(4).Infof(log("blockMapper.GetGlobalMapPath = %s", dir))
	return dir, nil
}

// GetPodDeviceMapPath returns the pod's device map path and volume name
// path: pods/{podUid}/volumeDevices/kubernetes.io~csi/, {volumeID}
func (m *csiBlockMapper) GetPodDeviceMapPath() (string, string) {
	path, specName := m.plugin.host.GetPodVolumeDeviceDir(m.podUID, csiPluginName), m.specName
	glog.V(4).Infof(log("blockMapper.GetPodDeviceMapPath = %s", path))
	return path, specName
}
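Concretely, the two layouts the mappers describe can be reconstructed as below; the root and identifiers are illustrative values only, since the real paths come from the VolumeHost:

package example

import "path/filepath"

// Illustrative reconstruction of the global and per-pod device map layouts.
func examplePaths(kubeletRoot, specVolID, podUID string) (globalMapPath, podDeviceMapDir string) {
	globalMapPath = filepath.Join(kubeletRoot, "plugins/kubernetes.io/csi/volumeDevices", specVolID)
	podDeviceMapDir = filepath.Join(kubeletRoot, "pods", podUID, "volumeDevices/kubernetes.io~csi")
	return
}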
// SetUpDevice ensures the device is attached and returns the path where the device is located.
func (m *csiBlockMapper) SetUpDevice() (string, error) {
	if !m.plugin.blockEnabled {
		return "", errors.New("CSIBlockVolume feature not enabled")
	}

	glog.V(4).Infof(log("blockMapper.SetupDevice called"))

	if m.spec == nil {
		glog.Error(log("blockMapper.Map spec is nil"))
		return "", fmt.Errorf("spec is nil")
	}
	csiSource, err := getCSISourceFromSpec(m.spec)
	if err != nil {
		glog.Error(log("blockMapper.SetupDevice failed to get CSI persistent source: %v", err))
		return "", err
	}

	globalMapPath, err := m.GetGlobalMapPath(m.spec)
	if err != nil {
		glog.Error(log("blockMapper.SetupDevice failed to get global map path: %v", err))
		return "", err
	}

	csi := m.csiClient
	ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
	defer cancel()

	// Check whether "STAGE_UNSTAGE_VOLUME" is set
	stageUnstageSet, err := hasStageUnstageCapability(ctx, csi)
	if err != nil {
		glog.Error(log("blockMapper.SetupDevice failed to check STAGE_UNSTAGE_VOLUME capability: %v", err))
		return "", err
	}
	if !stageUnstageSet {
		glog.Infof(log("blockMapper.SetupDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..."))
		return "", nil
	}

	// Start MountDevice
	nodeName := string(m.plugin.host.GetNodeName())
	attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, nodeName)

	// search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
	attachment, err := m.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
	if err != nil {
		glog.Error(log("blockMapper.SetupDevice failed to get volume attachment [id=%v]: %v", attachID, err))
		return "", err
	}

	if attachment == nil {
		glog.Error(log("blockMapper.SetupDevice unable to find VolumeAttachment [id=%s]", attachID))
		return "", errors.New("no existing VolumeAttachment found")
	}
	publishVolumeInfo := attachment.Status.AttachmentMetadata

	nodeStageSecrets := map[string]string{}
	if csiSource.NodeStageSecretRef != nil {
		nodeStageSecrets, err = getCredentialsFromSecret(m.k8s, csiSource.NodeStageSecretRef)
		if err != nil {
			return "", fmt.Errorf("failed to get NodeStageSecretRef %s/%s: %v",
				csiSource.NodeStageSecretRef.Namespace, csiSource.NodeStageSecretRef.Name, err)
		}
	}

	// create globalMapPath before call to NodeStageVolume
	if err := os.MkdirAll(globalMapPath, 0750); err != nil {
		glog.Error(log("blockMapper.SetupDevice failed to create dir %s: %v", globalMapPath, err))
		return "", err
	}
	glog.V(4).Info(log("blockMapper.SetupDevice created global device map path successfully [%s]", globalMapPath))

	// TODO (vladimirvivien) implement better AccessModes mapping between k8s and CSI
	accessMode := v1.ReadWriteOnce
	if m.spec.PersistentVolume.Spec.AccessModes != nil {
		accessMode = m.spec.PersistentVolume.Spec.AccessModes[0]
	}

	err = csi.NodeStageVolume(ctx,
		csiSource.VolumeHandle,
		publishVolumeInfo,
		globalMapPath,
		fsTypeBlockName,
		accessMode,
		nodeStageSecrets,
		csiSource.VolumeAttributes)

	if err != nil {
		glog.Error(log("blockMapper.SetupDevice failed: %v", err))
		if err := os.RemoveAll(globalMapPath); err != nil {
			glog.Error(log("blockMapper.SetupDevice failed to remove dir after a NodeStageVolume() error [%s]: %v", globalMapPath, err))
		}
		return "", err
	}

	glog.V(4).Infof(log("blockMapper.SetupDevice successfully requested NodeStageVolume [%s]", globalMapPath))
	return globalMapPath, nil
}

func (m *csiBlockMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, volumeMapName string, podUID types.UID) error {
	if !m.plugin.blockEnabled {
		return errors.New("CSIBlockVolume feature not enabled")
	}

	glog.V(4).Infof(log("blockMapper.MapDevice mapping block device %s", devicePath))

	if m.spec == nil {
		glog.Error(log("blockMapper.MapDevice spec is nil"))
		return fmt.Errorf("spec is nil")
	}

	csiSource, err := getCSISourceFromSpec(m.spec)
	if err != nil {
		glog.Error(log("blockMapper.Map failed to get CSI persistent source: %v", err))
		return err
	}

	dir := filepath.Join(volumeMapPath, volumeMapName)
	csi := m.csiClient

	ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
	defer cancel()

	nodeName := string(m.plugin.host.GetNodeName())
	attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, nodeName)

	// search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
	attachment, err := m.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
	if err != nil {
		glog.Error(log("blockMapper.MapDevice failed to get volume attachment [id=%v]: %v", attachID, err))
		return err
	}

	if attachment == nil {
		glog.Error(log("blockMapper.MapDevice unable to find VolumeAttachment [id=%s]", attachID))
		return errors.New("no existing VolumeAttachment found")
	}
	publishVolumeInfo := attachment.Status.AttachmentMetadata

	nodePublishSecrets := map[string]string{}
	if csiSource.NodePublishSecretRef != nil {
		nodePublishSecrets, err = getCredentialsFromSecret(m.k8s, csiSource.NodePublishSecretRef)
		if err != nil {
			glog.Errorf("blockMapper.MapDevice failed to get NodePublishSecretRef %s/%s: %v",
				csiSource.NodePublishSecretRef.Namespace, csiSource.NodePublishSecretRef.Name, err)
			return err
		}
	}

	if err := os.MkdirAll(dir, 0750); err != nil {
		glog.Error(log("blockMapper.MapDevice failed to create dir %#v: %v", dir, err))
		return err
	}
	glog.V(4).Info(log("blockMapper.MapDevice created NodePublish path [%s]", dir))

	// TODO (vladimirvivien) implement better AccessModes mapping between k8s and CSI
	accessMode := v1.ReadWriteOnce
	if m.spec.PersistentVolume.Spec.AccessModes != nil {
		accessMode = m.spec.PersistentVolume.Spec.AccessModes[0]
	}

	err = csi.NodePublishVolume(
		ctx,
		m.volumeID,
		m.readOnly,
		globalMapPath,
		dir,
		accessMode,
		publishVolumeInfo,
		csiSource.VolumeAttributes,
		nodePublishSecrets,
		fsTypeBlockName,
	)

	if err != nil {
		glog.Errorf(log("blockMapper.MapDevice failed: %v", err))
		if err := os.RemoveAll(dir); err != nil {
			glog.Error(log("blockMapper.MapDevice failed to remove mount dir after a NodePublish() error [%s]: %v", dir, err))
		}
		return err
	}

	return nil
}

var _ volume.BlockVolumeUnmapper = &csiBlockMapper{}

// TearDownDevice removes traces of the SetUpDevice.
func (m *csiBlockMapper) TearDownDevice(globalMapPath, devicePath string) error {
	if !m.plugin.blockEnabled {
		return errors.New("CSIBlockVolume feature not enabled")
	}

	glog.V(4).Infof(log("unmapper.TearDownDevice(globalMapPath=%s; devicePath=%s)", globalMapPath, devicePath))

	csi := m.csiClient
	ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
	defer cancel()

	// unmap global device map path
	if err := csi.NodeUnstageVolume(ctx, m.volumeID, globalMapPath); err != nil {
		glog.Errorf(log("blockMapper.TearDownDevice failed: %v", err))
		return err
	}
	glog.V(4).Infof(log("blockMapper.TearDownDevice NodeUnstageVolume successful [%s]", globalMapPath))

	// request to remove pod volume map path also
	podVolumePath, volumeName := m.GetPodDeviceMapPath()
	podVolumeMapPath := filepath.Join(podVolumePath, volumeName)
	if err := csi.NodeUnpublishVolume(ctx, m.volumeID, podVolumeMapPath); err != nil {
		glog.Error(log("blockMapper.TearDownDevice failed: %v", err))
		return err
	}

	glog.V(4).Infof(log("blockMapper.TearDownDevice NodeUnpublishVolume successful [%s]", podVolumeMapPath))

	return nil
}
264
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_block_test.go
generated
vendored
@ -1,264 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package csi

import (
	"fmt"
	"os"
	"path"
	"path/filepath"
	"testing"

	api "k8s.io/api/core/v1"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
	fakeclient "k8s.io/client-go/kubernetes/fake"
	"k8s.io/kubernetes/pkg/volume"
	volumetest "k8s.io/kubernetes/pkg/volume/testing"
)

func TestBlockMapperGetGlobalMapPath(t *testing.T) {
	plug, tmpDir := newTestPlugin(t)
	defer os.RemoveAll(tmpDir)

	// TODO (vladimirvivien) specName with slashes will not work
	testCases := []struct {
		name           string
		specVolumeName string
		path           string
	}{
		{
			name:           "simple specName",
			specVolumeName: "spec-0",
			path:           path.Join(tmpDir, fmt.Sprintf("plugins/kubernetes.io/csi/volumeDevices/%s/%s", "spec-0", "dev")),
		},
		{
			name:           "specName with dots",
			specVolumeName: "test.spec.1",
			path:           path.Join(tmpDir, fmt.Sprintf("plugins/kubernetes.io/csi/volumeDevices/%s/%s", "test.spec.1", "dev")),
		},
	}
	for _, tc := range testCases {
		t.Logf("test case: %s", tc.name)
		pv := makeTestPV(tc.specVolumeName, 10, testDriver, testVol)
		spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
		mapper, err := plug.NewBlockVolumeMapper(
			spec,
			&api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
			volume.VolumeOptions{},
		)
		if err != nil {
			t.Fatalf("Failed to make a new Mapper: %v", err)
		}
		csiMapper := mapper.(*csiBlockMapper)

		path, err := csiMapper.GetGlobalMapPath(spec)
		if err != nil {
			t.Errorf("mapper GetGlobalMapPath failed: %v", err)
		}

		if tc.path != path {
			t.Errorf("expecting path %s, got %s", tc.path, path)
		}
	}
}

func TestBlockMapperSetupDevice(t *testing.T) {
	plug, tmpDir := newTestPlugin(t)
	defer os.RemoveAll(tmpDir)
	fakeClient := fakeclient.NewSimpleClientset()
	host := volumetest.NewFakeVolumeHostWithNodeName(
		tmpDir,
		fakeClient,
		nil,
		"fakeNode",
	)
	plug.host = host
	pv := makeTestPV("test-pv", 10, testDriver, testVol)
	pvName := pv.GetName()
	nodeName := string(plug.host.GetNodeName())
	spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)

	// MapDevice
	mapper, err := plug.NewBlockVolumeMapper(
		spec,
		&api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
		volume.VolumeOptions{},
	)
	if err != nil {
		t.Fatalf("failed to create new mapper: %v", err)
	}
	csiMapper := mapper.(*csiBlockMapper)
	csiMapper.csiClient = setupClient(t, true)

	attachID := getAttachmentName(csiMapper.volumeID, csiMapper.driverName, string(nodeName))
	attachment := makeTestAttachment(attachID, nodeName, pvName)
	attachment.Status.Attached = true
	_, err = csiMapper.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
	if err != nil {
		t.Fatalf("failed to setup VolumeAttachment: %v", err)
	}
	t.Log("created attachment ", attachID)

	devicePath, err := csiMapper.SetUpDevice()
	if err != nil {
		t.Fatalf("mapper failed to SetupDevice: %v", err)
	}

	globalMapPath, err := csiMapper.GetGlobalMapPath(spec)
	if err != nil {
		t.Fatalf("mapper failed to GetGlobalMapPath: %v", err)
	}

	if devicePath != globalMapPath {
		t.Fatalf("mapper.SetupDevice returned unexpected path %s instead of %v", devicePath, globalMapPath)
	}

	vols := csiMapper.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodeStagedVolumes()
	if vols[csiMapper.volumeID] != devicePath {
		t.Error("csi server may not have received NodeStageVolume call")
	}
}

func TestBlockMapperMapDevice(t *testing.T) {
	plug, tmpDir := newTestPlugin(t)
	defer os.RemoveAll(tmpDir)
	fakeClient := fakeclient.NewSimpleClientset()
	host := volumetest.NewFakeVolumeHostWithNodeName(
		tmpDir,
		fakeClient,
		nil,
		"fakeNode",
	)
	plug.host = host
	pv := makeTestPV("test-pv", 10, testDriver, testVol)
	pvName := pv.GetName()
	nodeName := string(plug.host.GetNodeName())
	spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)

	// MapDevice
	mapper, err := plug.NewBlockVolumeMapper(
		spec,
		&api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
		volume.VolumeOptions{},
	)
	if err != nil {
		t.Fatalf("failed to create new mapper: %v", err)
	}
	csiMapper := mapper.(*csiBlockMapper)
	csiMapper.csiClient = setupClient(t, true)

	attachID := getAttachmentName(csiMapper.volumeID, csiMapper.driverName, string(nodeName))
	attachment := makeTestAttachment(attachID, nodeName, pvName)
	attachment.Status.Attached = true
	_, err = csiMapper.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
	if err != nil {
		t.Fatalf("failed to setup VolumeAttachment: %v", err)
	}
	t.Log("created attachment ", attachID)

	devicePath, err := csiMapper.SetUpDevice()
	if err != nil {
		t.Fatalf("mapper failed to SetupDevice: %v", err)
	}
	globalMapPath, err := csiMapper.GetGlobalMapPath(csiMapper.spec)
	if err != nil {
		t.Fatalf("mapper failed to GetGlobalMapPath: %v", err)
	}

	// Map device to global and pod device map path
	volumeMapPath, volName := csiMapper.GetPodDeviceMapPath()
	err = csiMapper.MapDevice(devicePath, globalMapPath, volumeMapPath, volName, csiMapper.podUID)
	if err != nil {
		t.Fatalf("mapper failed to MapDevice: %v", err)
	}

	if _, err := os.Stat(filepath.Join(volumeMapPath, volName)); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("mapper.MapDevice failed, volume path not created: %s", volumeMapPath)
		} else {
			t.Errorf("mapper.MapDevice failed: %v", err)
		}
	}

	pubs := csiMapper.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodePublishedVolumes()
	if pubs[csiMapper.volumeID] != volumeMapPath {
		t.Error("csi server may not have received NodePublishVolume call")
	}
}

func TestBlockMapperTearDownDevice(t *testing.T) {
	plug, tmpDir := newTestPlugin(t)
	defer os.RemoveAll(tmpDir)
	fakeClient := fakeclient.NewSimpleClientset()
	host := volumetest.NewFakeVolumeHostWithNodeName(
		tmpDir,
		fakeClient,
		nil,
		"fakeNode",
	)
	plug.host = host
	pv := makeTestPV("test-pv", 10, testDriver, testVol)
	spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)

	// save volume data
	dir := getVolumeDeviceDataDir(pv.ObjectMeta.Name, plug.host)
	if err := os.MkdirAll(dir, 0755); err != nil && !os.IsNotExist(err) {
		t.Errorf("failed to create dir [%s]: %v", dir, err)
	}

	if err := saveVolumeData(
		dir,
		volDataFileName,
		map[string]string{
			volDataKey.specVolID:  pv.ObjectMeta.Name,
			volDataKey.driverName: testDriver,
			volDataKey.volHandle:  testVol,
		},
	); err != nil {
		t.Fatalf("failed to save volume data: %v", err)
	}

	unmapper, err := plug.NewBlockVolumeUnmapper(pv.ObjectMeta.Name, testPodUID)
	if err != nil {
		t.Fatalf("failed to make a new Unmapper: %v", err)
	}

	csiUnmapper := unmapper.(*csiBlockMapper)
	csiUnmapper.csiClient = setupClient(t, true)

	globalMapPath, err := csiUnmapper.GetGlobalMapPath(spec)
	if err != nil {
		t.Fatalf("unmapper failed to GetGlobalMapPath: %v", err)
	}

	err = csiUnmapper.TearDownDevice(globalMapPath, "/dev/test")
	if err != nil {
		t.Fatal(err)
	}

	// ensure csi client call and node unstaged
	vols := csiUnmapper.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodeStagedVolumes()
	if _, ok := vols[csiUnmapper.volumeID]; ok {
		t.Error("csi server may not have received NodeUnstageVolume call")
	}

	// ensure csi client call and node unpublished
	pubs := csiUnmapper.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodePublishedVolumes()
	if _, ok := pubs[csiUnmapper.volumeID]; ok {
		t.Error("csi server may not have received NodeUnpublishVolume call")
	}
}
293
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_client.go
generated
vendored
@ -1,293 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package csi

import (
	"context"
	"errors"
	"fmt"
	"net"
	"time"

	csipb "github.com/container-storage-interface/spec/lib/go/csi/v0"
	"github.com/golang/glog"
	"google.golang.org/grpc"
	api "k8s.io/api/core/v1"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/kubernetes/pkg/features"
)

type csiClient interface {
	NodePublishVolume(
		ctx context.Context,
		volumeid string,
		readOnly bool,
		stagingTargetPath string,
		targetPath string,
		accessMode api.PersistentVolumeAccessMode,
		volumeInfo map[string]string,
		volumeAttribs map[string]string,
		nodePublishSecrets map[string]string,
		fsType string,
	) error
	NodeUnpublishVolume(
		ctx context.Context,
		volID string,
		targetPath string,
	) error
	NodeStageVolume(ctx context.Context,
		volID string,
		publishVolumeInfo map[string]string,
		stagingTargetPath string,
		fsType string,
		accessMode api.PersistentVolumeAccessMode,
		nodeStageSecrets map[string]string,
		volumeAttribs map[string]string,
	) error
	NodeUnstageVolume(ctx context.Context, volID, stagingTargetPath string) error
	NodeGetCapabilities(ctx context.Context) ([]*csipb.NodeServiceCapability, error)
}

// csiDriverClient encapsulates all csi-plugin methods
type csiDriverClient struct {
	driverName string
	nodeClient csipb.NodeClient
}

var _ csiClient = &csiDriverClient{}

func newCsiDriverClient(driverName string) *csiDriverClient {
	c := &csiDriverClient{driverName: driverName}
	return c
}
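A caller drives this interface with a bounded context so a wedged driver cannot block kubelet forever. A hedged sketch of a typical staging call, assuming this file's csiClient interface is in scope; the argument values are illustrative only and the timeout is an assumption standing in for the package's csiTimeout:

// stageVolumeExample shows the calling convention the interface implies.
func stageVolumeExample(ctx context.Context, csi csiClient) error {
	ctx, cancel := context.WithTimeout(ctx, 15*time.Second)
	defer cancel()
	return csi.NodeStageVolume(ctx,
		"project/zone/test-vol1", // volID
		map[string]string{},      // publishVolumeInfo from the attach phase
		"/var/lib/kubelet/plugins/kubernetes.io/csi/pv/test-pv/globalmount", // stagingTargetPath
		"ext4",              // fsType
		api.ReadWriteOnce,   // accessMode
		map[string]string{}, // nodeStageSecrets
		map[string]string{}, // volumeAttribs
	)
}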
func (c *csiDriverClient) NodePublishVolume(
	ctx context.Context,
	volID string,
	readOnly bool,
	stagingTargetPath string,
	targetPath string,
	accessMode api.PersistentVolumeAccessMode,
	volumeInfo map[string]string,
	volumeAttribs map[string]string,
	nodePublishSecrets map[string]string,
	fsType string,
) error {
	glog.V(4).Info(log("calling NodePublishVolume rpc [volid=%s,target_path=%s]", volID, targetPath))
	if volID == "" {
		return errors.New("missing volume id")
	}
	if targetPath == "" {
		return errors.New("missing target path")
	}

	conn, err := newGrpcConn(c.driverName)
	if err != nil {
		return err
	}
	defer conn.Close()
	nodeClient := csipb.NewNodeClient(conn)

	req := &csipb.NodePublishVolumeRequest{
		VolumeId:           volID,
		TargetPath:         targetPath,
		Readonly:           readOnly,
		PublishInfo:        volumeInfo,
		VolumeAttributes:   volumeAttribs,
		NodePublishSecrets: nodePublishSecrets,
		VolumeCapability: &csipb.VolumeCapability{
			AccessMode: &csipb.VolumeCapability_AccessMode{
				Mode: asCSIAccessMode(accessMode),
			},
		},
	}
	if stagingTargetPath != "" {
		req.StagingTargetPath = stagingTargetPath
	}

	if fsType == fsTypeBlockName {
		req.VolumeCapability.AccessType = &csipb.VolumeCapability_Block{
			Block: &csipb.VolumeCapability_BlockVolume{},
		}
	} else {
		req.VolumeCapability.AccessType = &csipb.VolumeCapability_Mount{
			Mount: &csipb.VolumeCapability_MountVolume{
				FsType: fsType,
			},
		}
	}

	_, err = nodeClient.NodePublishVolume(ctx, req)
	return err
}

func (c *csiDriverClient) NodeUnpublishVolume(ctx context.Context, volID string, targetPath string) error {
	glog.V(4).Info(log("calling NodeUnpublishVolume rpc [volid=%s, target_path=%s]", volID, targetPath))
	if volID == "" {
		return errors.New("missing volume id")
	}
	if targetPath == "" {
		return errors.New("missing target path")
	}

	conn, err := newGrpcConn(c.driverName)
	if err != nil {
		return err
	}
	defer conn.Close()
	nodeClient := csipb.NewNodeClient(conn)

	req := &csipb.NodeUnpublishVolumeRequest{
		VolumeId:   volID,
		TargetPath: targetPath,
	}

	_, err = nodeClient.NodeUnpublishVolume(ctx, req)
	return err
}

func (c *csiDriverClient) NodeStageVolume(ctx context.Context,
	volID string,
	publishInfo map[string]string,
	stagingTargetPath string,
	fsType string,
	accessMode api.PersistentVolumeAccessMode,
	nodeStageSecrets map[string]string,
	volumeAttribs map[string]string,
) error {
	glog.V(4).Info(log("calling NodeStageVolume rpc [volid=%s,staging_target_path=%s]", volID, stagingTargetPath))
	if volID == "" {
		return errors.New("missing volume id")
	}
	if stagingTargetPath == "" {
		return errors.New("missing staging target path")
	}

	conn, err := newGrpcConn(c.driverName)
	if err != nil {
		return err
	}
	defer conn.Close()
	nodeClient := csipb.NewNodeClient(conn)

	req := &csipb.NodeStageVolumeRequest{
		VolumeId:          volID,
		PublishInfo:       publishInfo,
		StagingTargetPath: stagingTargetPath,
		VolumeCapability: &csipb.VolumeCapability{
			AccessMode: &csipb.VolumeCapability_AccessMode{
				Mode: asCSIAccessMode(accessMode),
			},
		},
		NodeStageSecrets: nodeStageSecrets,
		VolumeAttributes: volumeAttribs,
	}

	if fsType == fsTypeBlockName {
		req.VolumeCapability.AccessType = &csipb.VolumeCapability_Block{
			Block: &csipb.VolumeCapability_BlockVolume{},
		}
	} else {
		req.VolumeCapability.AccessType = &csipb.VolumeCapability_Mount{
			Mount: &csipb.VolumeCapability_MountVolume{
				FsType: fsType,
			},
		}
	}

	_, err = nodeClient.NodeStageVolume(ctx, req)
	return err
}

func (c *csiDriverClient) NodeUnstageVolume(ctx context.Context, volID, stagingTargetPath string) error {
	glog.V(4).Info(log("calling NodeUnstageVolume rpc [volid=%s,staging_target_path=%s]", volID, stagingTargetPath))
	if volID == "" {
		return errors.New("missing volume id")
	}
	if stagingTargetPath == "" {
		return errors.New("missing staging target path")
	}

	conn, err := newGrpcConn(c.driverName)
	if err != nil {
		return err
	}
	defer conn.Close()
	nodeClient := csipb.NewNodeClient(conn)

	req := &csipb.NodeUnstageVolumeRequest{
		VolumeId:          volID,
		StagingTargetPath: stagingTargetPath,
	}
	_, err = nodeClient.NodeUnstageVolume(ctx, req)
	return err
}

func (c *csiDriverClient) NodeGetCapabilities(ctx context.Context) ([]*csipb.NodeServiceCapability, error) {
	glog.V(4).Info(log("calling NodeGetCapabilities rpc"))

	conn, err := newGrpcConn(c.driverName)
	if err != nil {
		return nil, err
	}
	defer conn.Close()
	nodeClient := csipb.NewNodeClient(conn)

	req := &csipb.NodeGetCapabilitiesRequest{}
	resp, err := nodeClient.NodeGetCapabilities(ctx, req)
	if err != nil {
		return nil, err
	}
	return resp.GetCapabilities(), nil
}

func asCSIAccessMode(am api.PersistentVolumeAccessMode) csipb.VolumeCapability_AccessMode_Mode {
	switch am {
	case api.ReadWriteOnce:
		return csipb.VolumeCapability_AccessMode_SINGLE_NODE_WRITER
	case api.ReadOnlyMany:
		return csipb.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY
	case api.ReadWriteMany:
		return csipb.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER
	}
	return csipb.VolumeCapability_AccessMode_UNKNOWN
}
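Note the mapping is deliberately lossy in one direction: any access mode the switch does not recognize falls through to UNKNOWN rather than erroring. Illustrative usage, assuming the package's imports:

// api.ReadWriteOnce -> SINGLE_NODE_WRITER
// api.ReadOnlyMany  -> MULTI_NODE_READER_ONLY
// api.ReadWriteMany -> MULTI_NODE_MULTI_WRITER
// anything else     -> UNKNOWN
mode := asCSIAccessMode(api.ReadWriteOnce)
_ = mode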
func newGrpcConn(driverName string) (*grpc.ClientConn, error) {
	if driverName == "" {
		return nil, fmt.Errorf("driver name is empty")
	}
	addr := fmt.Sprintf(csiAddrTemplate, driverName)
	// TODO once KubeletPluginsWatcher graduates to beta, remove FeatureGate check
	if utilfeature.DefaultFeatureGate.Enabled(features.KubeletPluginsWatcher) {
		driver, ok := csiDrivers.driversMap[driverName]
		if !ok {
			return nil, fmt.Errorf("driver name %s not found in the list of registered CSI drivers", driverName)
		}
		addr = driver.driverEndpoint
	}
	network := "unix"
	glog.V(4).Infof(log("creating new gRPC connection for [%s://%s]", network, addr))

	return grpc.Dial(
		addr,
		grpc.WithInsecure(),
		grpc.WithDialer(func(target string, timeout time.Duration) (net.Conn, error) {
			return net.Dial(network, target)
		}),
	)
}
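The dialer above always speaks over a unix socket. The address either comes from csiAddrTemplate (a per-driver socket path under the kubelet plugin directory; the exact template is defined elsewhere in this package) or, with the plugin watcher gate enabled, from the registered driver endpoint. A hedged usage sketch, assuming this package's newGrpcConn and the csipb import are in scope and using a hypothetical driver name:

conn, err := newGrpcConn("csi-hostpath")
if err != nil {
	return err
}
defer conn.Close()
node := csipb.NewNodeClient(conn)
_ = node // issue Node* RPCs with a bounded context, as the methods above do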
276
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_client_test.go
generated
vendored
276
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_client_test.go
generated
vendored
@ -1,276 +0,0 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package csi
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
csipb "github.com/container-storage-interface/spec/lib/go/csi/v0"
|
||||
api "k8s.io/api/core/v1"
|
||||
"k8s.io/kubernetes/pkg/volume/csi/fake"
|
||||
)
|
||||
|
||||
type fakeCsiDriverClient struct {
|
||||
t *testing.T
|
||||
nodeClient *fake.NodeClient
|
||||
}
|
||||
|
||||
func newFakeCsiDriverClient(t *testing.T, stagingCapable bool) *fakeCsiDriverClient {
|
||||
return &fakeCsiDriverClient{
|
||||
t: t,
|
||||
nodeClient: fake.NewNodeClient(stagingCapable),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *fakeCsiDriverClient) NodePublishVolume(
|
||||
ctx context.Context,
|
||||
volID string,
|
||||
readOnly bool,
|
||||
stagingTargetPath string,
|
||||
targetPath string,
|
||||
accessMode api.PersistentVolumeAccessMode,
|
||||
volumeInfo map[string]string,
|
||||
volumeAttribs map[string]string,
|
||||
nodePublishSecrets map[string]string,
|
||||
fsType string,
|
||||
) error {
|
||||
c.t.Log("calling fake.NodePublishVolume...")
|
||||
req := &csipb.NodePublishVolumeRequest{
|
||||
VolumeId: volID,
|
||||
TargetPath: targetPath,
|
||||
Readonly: readOnly,
|
||||
PublishInfo: volumeInfo,
|
||||
VolumeAttributes: volumeAttribs,
|
||||
NodePublishSecrets: nodePublishSecrets,
|
||||
VolumeCapability: &csipb.VolumeCapability{
|
||||
AccessMode: &csipb.VolumeCapability_AccessMode{
|
||||
Mode: asCSIAccessMode(accessMode),
|
||||
},
|
||||
AccessType: &csipb.VolumeCapability_Mount{
|
||||
Mount: &csipb.VolumeCapability_MountVolume{
|
||||
FsType: fsType,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
_, err := c.nodeClient.NodePublishVolume(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *fakeCsiDriverClient) NodeUnpublishVolume(ctx context.Context, volID string, targetPath string) error {
|
||||
c.t.Log("calling fake.NodeUnpublishVolume...")
|
||||
req := &csipb.NodeUnpublishVolumeRequest{
|
||||
VolumeId: volID,
|
||||
TargetPath: targetPath,
|
||||
}
|
||||
|
||||
_, err := c.nodeClient.NodeUnpublishVolume(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *fakeCsiDriverClient) NodeStageVolume(ctx context.Context,
|
||||
volID string,
|
||||
publishInfo map[string]string,
|
||||
stagingTargetPath string,
|
||||
fsType string,
|
||||
accessMode api.PersistentVolumeAccessMode,
|
||||
nodeStageSecrets map[string]string,
|
||||
volumeAttribs map[string]string,
|
||||
) error {
|
||||
c.t.Log("calling fake.NodeStageVolume...")
|
||||
req := &csipb.NodeStageVolumeRequest{
|
||||
VolumeId: volID,
|
||||
PublishInfo: publishInfo,
|
||||
StagingTargetPath: stagingTargetPath,
|
||||
VolumeCapability: &csipb.VolumeCapability{
|
||||
AccessMode: &csipb.VolumeCapability_AccessMode{
|
||||
Mode: asCSIAccessMode(accessMode),
|
||||
},
|
||||
AccessType: &csipb.VolumeCapability_Mount{
|
||||
Mount: &csipb.VolumeCapability_MountVolume{
|
||||
FsType: fsType,
|
||||
},
|
||||
},
|
||||
},
|
||||
NodeStageSecrets: nodeStageSecrets,
|
||||
VolumeAttributes: volumeAttribs,
|
||||
}
|
||||
|
||||
_, err := c.nodeClient.NodeStageVolume(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *fakeCsiDriverClient) NodeUnstageVolume(ctx context.Context, volID, stagingTargetPath string) error {
|
||||
c.t.Log("calling fake.NodeUnstageVolume...")
|
||||
req := &csipb.NodeUnstageVolumeRequest{
|
||||
VolumeId: volID,
|
||||
StagingTargetPath: stagingTargetPath,
|
||||
}
|
||||
_, err := c.nodeClient.NodeUnstageVolume(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *fakeCsiDriverClient) NodeGetCapabilities(ctx context.Context) ([]*csipb.NodeServiceCapability, error) {
|
||||
c.t.Log("calling fake.NodeGetCapabilities...")
|
||||
req := &csipb.NodeGetCapabilitiesRequest{}
|
||||
resp, err := c.nodeClient.NodeGetCapabilities(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp.GetCapabilities(), nil
|
||||
}
|
||||
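The stagingCapable flag above controls whether this fake advertises STAGE_UNSTAGE_VOLUME. Production code probes for it roughly as in the sketch below; the hasStageUnstageCapability helper referenced from csi_mounter.go is not part of this hunk, so treat this as an approximation.

// Sketch of a STAGE_UNSTAGE_VOLUME capability probe over the
// NodeGetCapabilities result.
func hasStageUnstage(ctx context.Context, client csiClient) (bool, error) {
	caps, err := client.NodeGetCapabilities(ctx)
	if err != nil {
		return false, err
	}
	for _, c := range caps {
		if c.GetRpc().GetType() == csipb.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME {
			return true, nil
		}
	}
	return false, nil
}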
|
||||
func setupClient(t *testing.T, stageUnstageSet bool) csiClient {
|
||||
return newFakeCsiDriverClient(t, stageUnstageSet)
|
||||
}
|
||||
|
||||
func TestClientNodePublishVolume(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
volID string
|
||||
targetPath string
|
||||
fsType string
|
||||
mustFail bool
|
||||
err error
|
||||
}{
|
||||
{name: "test ok", volID: "vol-test", targetPath: "/test/path"},
|
||||
{name: "missing volID", targetPath: "/test/path", mustFail: true},
|
||||
{name: "missing target path", volID: "vol-test", mustFail: true},
|
||||
{name: "bad fs", volID: "vol-test", targetPath: "/test/path", fsType: "badfs", mustFail: true},
|
||||
{name: "grpc error", volID: "vol-test", targetPath: "/test/path", mustFail: true, err: errors.New("grpc error")},
|
||||
}
|
||||
|
||||
client := setupClient(t, false)
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Logf("test case: %s", tc.name)
|
||||
client.(*fakeCsiDriverClient).nodeClient.SetNextError(tc.err)
|
||||
err := client.NodePublishVolume(
|
||||
context.Background(),
|
||||
tc.volID,
|
||||
false,
|
||||
"",
|
||||
tc.targetPath,
|
||||
api.ReadWriteOnce,
|
||||
map[string]string{"device": "/dev/null"},
|
||||
map[string]string{"attr0": "val0"},
|
||||
map[string]string{},
|
||||
tc.fsType,
|
||||
)
|
||||
|
||||
if tc.mustFail && err == nil {
|
||||
t.Error("test must fail, but err is nil")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestClientNodeUnpublishVolume(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
volID string
|
||||
targetPath string
|
||||
mustFail bool
|
||||
err error
|
||||
}{
|
||||
{name: "test ok", volID: "vol-test", targetPath: "/test/path"},
|
||||
{name: "missing volID", targetPath: "/test/path", mustFail: true},
|
||||
{name: "missing target path", volID: "vol-test", mustFail: true},
|
||||
{name: "grpc error", volID: "vol-test", targetPath: "/test/path", mustFail: true, err: errors.New("grpc error")},
|
||||
}
|
||||
|
||||
client := setupClient(t, false)
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Logf("test case: %s", tc.name)
|
||||
client.(*fakeCsiDriverClient).nodeClient.SetNextError(tc.err)
|
||||
err := client.NodeUnpublishVolume(context.Background(), tc.volID, tc.targetPath)
|
||||
if tc.mustFail && err == nil {
|
||||
t.Error("test must fail, but err is nil")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestClientNodeStageVolume(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
volID string
|
||||
stagingTargetPath string
|
||||
fsType string
|
||||
secret map[string]string
|
||||
mustFail bool
|
||||
err error
|
||||
}{
|
||||
{name: "test ok", volID: "vol-test", stagingTargetPath: "/test/path", fsType: "ext4"},
|
||||
{name: "missing volID", stagingTargetPath: "/test/path", mustFail: true},
|
||||
{name: "missing target path", volID: "vol-test", mustFail: true},
|
||||
{name: "bad fs", volID: "vol-test", stagingTargetPath: "/test/path", fsType: "badfs", mustFail: true},
|
||||
{name: "grpc error", volID: "vol-test", stagingTargetPath: "/test/path", mustFail: true, err: errors.New("grpc error")},
|
||||
}
|
||||
|
||||
client := setupClient(t, false)
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Logf("Running test case: %s", tc.name)
|
||||
client.(*fakeCsiDriverClient).nodeClient.SetNextError(tc.err)
|
||||
err := client.NodeStageVolume(
|
||||
context.Background(),
|
||||
tc.volID,
|
||||
map[string]string{"device": "/dev/null"},
|
||||
tc.stagingTargetPath,
|
||||
tc.fsType,
|
||||
api.ReadWriteOnce,
|
||||
tc.secret,
|
||||
map[string]string{"attr0": "val0"},
|
||||
)
|
||||
|
||||
if tc.mustFail && err == nil {
|
||||
t.Error("test must fail, but err is nil")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestClientNodeUnstageVolume(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
volID string
|
||||
stagingTargetPath string
|
||||
mustFail bool
|
||||
err error
|
||||
}{
|
||||
{name: "test ok", volID: "vol-test", stagingTargetPath: "/test/path"},
|
||||
{name: "missing volID", stagingTargetPath: "/test/path", mustFail: true},
|
||||
{name: "missing target path", volID: "vol-test", mustFail: true},
|
||||
{name: "grpc error", volID: "vol-test", stagingTargetPath: "/test/path", mustFail: true, err: errors.New("grpc error")},
|
||||
}
|
||||
|
||||
client := setupClient(t, false)
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Logf("Running test case: %s", tc.name)
|
||||
client.(*fakeCsiDriverClient).nodeClient.SetNextError(tc.err)
|
||||
err := client.NodeUnstageVolume(
|
||||
context.Background(),
|
||||
tc.volID, tc.stagingTargetPath,
|
||||
)
|
||||
if tc.mustFail && err == nil {
|
||||
t.Error("test must fail, but err is nil")
|
||||
}
|
||||
}
|
||||
}
|
341
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter.go
generated
vendored
@ -1,341 +0,0 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package csi
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
api "k8s.io/api/core/v1"
|
||||
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
kstrings "k8s.io/kubernetes/pkg/util/strings"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
"k8s.io/kubernetes/pkg/volume/util"
|
||||
)
|
||||
|
||||
//TODO (vladimirvivien) move this to a central location later
|
||||
var (
|
||||
volDataKey = struct {
|
||||
specVolID,
|
||||
volHandle,
|
||||
driverName,
|
||||
nodeName,
|
||||
attachmentID string
|
||||
}{
|
||||
"specVolID",
|
||||
"volumeHandle",
|
||||
"driverName",
|
||||
"nodeName",
|
||||
"attachmentID",
|
||||
}
|
||||
)
|
||||
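These keys are the field names persisted to vol_data.json by saveVolumeData; an illustrative (entirely made-up) file for one volume would look like this:

// Illustrative vol_data.json contents (values are hypothetical examples):
//   {
//     "specVolID":    "my-csi-pv",
//     "volumeHandle": "vol-123",
//     "driverName":   "test-driver",
//     "nodeName":     "node-1",
//     "attachmentID": "csi-2dd3..."
//   }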
|
||||
type csiMountMgr struct {
|
||||
csiClient csiClient
|
||||
k8s kubernetes.Interface
|
||||
plugin *csiPlugin
|
||||
driverName string
|
||||
volumeID string
|
||||
specVolumeID string
|
||||
readOnly bool
|
||||
spec *volume.Spec
|
||||
pod *api.Pod
|
||||
podUID types.UID
|
||||
options volume.VolumeOptions
|
||||
volumeInfo map[string]string
|
||||
volume.MetricsNil
|
||||
}
|
||||
|
||||
// volume.Volume methods
|
||||
var _ volume.Volume = &csiMountMgr{}
|
||||
|
||||
func (c *csiMountMgr) GetPath() string {
|
||||
dir := path.Join(getTargetPath(c.podUID, c.specVolumeID, c.plugin.host), "/mount")
|
||||
glog.V(4).Info(log("mounter.GetPath generated [%s]", dir))
|
||||
return dir
|
||||
}
|
||||
|
||||
func getTargetPath(uid types.UID, specVolumeID string, host volume.VolumeHost) string {
|
||||
specVolID := kstrings.EscapeQualifiedNameForDisk(specVolumeID)
|
||||
return host.GetPodVolumeDir(uid, kstrings.EscapeQualifiedNameForDisk(csiPluginName), specVolID)
|
||||
}
|
||||
|
||||
// volume.Mounter methods
|
||||
var _ volume.Mounter = &csiMountMgr{}
|
||||
|
||||
func (c *csiMountMgr) CanMount() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *csiMountMgr) SetUp(fsGroup *int64) error {
|
||||
return c.SetUpAt(c.GetPath(), fsGroup)
|
||||
}
|
||||
|
||||
func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error {
|
||||
glog.V(4).Infof(log("Mounter.SetUpAt(%s)", dir))
|
||||
|
||||
mounted, err := isDirMounted(c.plugin, dir)
|
||||
if err != nil {
|
||||
glog.Error(log("mounter.SetUpAt failed while checking mount status for dir [%s]", dir))
|
||||
return err
|
||||
}
|
||||
|
||||
if mounted {
|
||||
glog.V(4).Info(log("mounter.SetUpAt skipping mount, dir already mounted [%s]", dir))
|
||||
return nil
|
||||
}
|
||||
|
||||
csiSource, err := getCSISourceFromSpec(c.spec)
|
||||
if err != nil {
|
||||
glog.Error(log("mounter.SetupAt failed to get CSI persistent source: %v", err))
|
||||
return err
|
||||
}
|
||||
|
||||
csi := c.csiClient
|
||||
nodeName := string(c.plugin.host.GetNodeName())
|
||||
attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, nodeName)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
|
||||
defer cancel()
|
||||
|
||||
// Check for STAGE_UNSTAGE_VOLUME set and populate deviceMountPath if so
|
||||
deviceMountPath := ""
|
||||
stageUnstageSet, err := hasStageUnstageCapability(ctx, csi)
|
||||
if err != nil {
|
||||
glog.Error(log("mounter.SetUpAt failed to check for STAGE_UNSTAGE_VOLUME capabilty: %v", err))
|
||||
return err
|
||||
}
|
||||
|
||||
if stageUnstageSet {
|
||||
deviceMountPath, err = makeDeviceMountPath(c.plugin, c.spec)
|
||||
if err != nil {
|
||||
glog.Error(log("mounter.SetUpAt failed to make device mount path: %v", err))
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
|
||||
if c.volumeInfo == nil {
|
||||
attachment, err := c.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
|
||||
if err != nil {
|
||||
glog.Error(log("mounter.SetupAt failed while getting volume attachment [id=%v]: %v", attachID, err))
|
||||
return err
|
||||
}
|
||||
|
||||
if attachment == nil {
|
||||
glog.Error(log("unable to find VolumeAttachment [id=%s]", attachID))
|
||||
return errors.New("no existing VolumeAttachment found")
|
||||
}
|
||||
c.volumeInfo = attachment.Status.AttachmentMetadata
|
||||
}
|
||||
|
||||
attribs := csiSource.VolumeAttributes
|
||||
|
||||
nodePublishSecrets := map[string]string{}
|
||||
if csiSource.NodePublishSecretRef != nil {
|
||||
nodePublishSecrets, err = getCredentialsFromSecret(c.k8s, csiSource.NodePublishSecretRef)
|
||||
if err != nil {
|
||||
return fmt.Errorf("fetching NodePublishSecretRef %s/%s failed: %v",
|
||||
csiSource.NodePublishSecretRef.Namespace, csiSource.NodePublishSecretRef.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
// create target_dir before call to NodePublish
|
||||
if err := os.MkdirAll(dir, 0750); err != nil {
|
||||
glog.Error(log("mouter.SetUpAt failed to create dir %#v: %v", dir, err))
|
||||
return err
|
||||
}
|
||||
glog.V(4).Info(log("created target path successfully [%s]", dir))
|
||||
|
||||
//TODO (vladimirvivien) implement better AccessModes mapping between k8s and CSI
|
||||
accessMode := api.ReadWriteOnce
|
||||
if c.spec.PersistentVolume.Spec.AccessModes != nil {
|
||||
accessMode = c.spec.PersistentVolume.Spec.AccessModes[0]
|
||||
}
|
||||
|
||||
fsType := csiSource.FSType
|
||||
err = csi.NodePublishVolume(
|
||||
ctx,
|
||||
c.volumeID,
|
||||
c.readOnly,
|
||||
deviceMountPath,
|
||||
dir,
|
||||
accessMode,
|
||||
c.volumeInfo,
|
||||
attribs,
|
||||
nodePublishSecrets,
|
||||
fsType,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
glog.Errorf(log("mounter.SetupAt failed: %v", err))
|
||||
if removeMountDirErr := removeMountDir(c.plugin, dir); removeMountDirErr != nil {
|
||||
glog.Error(log("mounter.SetupAt failed to remove mount dir after a NodePublish() error [%s]: %v", dir, removeMountDirErr))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// apply volume ownership
|
||||
if !c.readOnly && fsGroup != nil {
|
||||
err := volume.SetVolumeOwnership(c, fsGroup)
|
||||
if err != nil {
|
||||
// attempt to rollback mount.
|
||||
glog.Error(log("mounter.SetupAt failed to set fsgroup volume ownership for [%s]: %v", c.volumeID, err))
|
||||
glog.V(4).Info(log("mounter.SetupAt attempting to unpublish volume %s due to previous error", c.volumeID))
|
||||
if unpubErr := csi.NodeUnpublishVolume(ctx, c.volumeID, dir); unpubErr != nil {
|
||||
glog.Error(log(
|
||||
"mounter.SetupAt failed to unpublish volume [%s]: %v (caused by previous NodePublish error: %v)",
|
||||
c.volumeID, unpubErr, err,
|
||||
))
|
||||
return fmt.Errorf("%v (caused by %v)", unpubErr, err)
|
||||
}
|
||||
|
||||
if unmountErr := removeMountDir(c.plugin, dir); unmountErr != nil {
|
||||
glog.Error(log(
|
||||
"mounter.SetupAt failed to clean mount dir [%s]: %v (caused by previous NodePublish error: %v)",
|
||||
dir, unmountErr, err,
|
||||
))
|
||||
return fmt.Errorf("%v (caused by %v)", unmountErr, err)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
glog.V(4).Info(log("mounter.SetupAt sets fsGroup to [%d] for %s", *fsGroup, c.volumeID))
|
||||
}
|
||||
|
||||
glog.V(4).Infof(log("mounter.SetUp successfully requested NodePublish [%s]", dir))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *csiMountMgr) GetAttributes() volume.Attributes {
|
||||
mounter := c.plugin.host.GetMounter(c.plugin.GetPluginName())
|
||||
path := c.GetPath()
|
||||
supportSelinux, err := mounter.GetSELinuxSupport(path)
|
||||
if err != nil {
|
||||
glog.V(2).Info(log("error checking for SELinux support: %s", err))
|
||||
// Best guess
|
||||
supportSelinux = false
|
||||
}
|
||||
return volume.Attributes{
|
||||
ReadOnly: c.readOnly,
|
||||
Managed: !c.readOnly,
|
||||
SupportsSELinux: supportSelinux,
|
||||
}
|
||||
}
|
||||
|
||||
// volume.Unmounter methods
|
||||
var _ volume.Unmounter = &csiMountMgr{}
|
||||
|
||||
func (c *csiMountMgr) TearDown() error {
|
||||
return c.TearDownAt(c.GetPath())
|
||||
}
|
||||
func (c *csiMountMgr) TearDownAt(dir string) error {
|
||||
glog.V(4).Infof(log("Unmounter.TearDown(%s)", dir))
|
||||
|
||||
// is dir even mounted ?
|
||||
// TODO (vladimirvivien) this check may not work for an emptyDir or local storage
|
||||
// see https://github.com/kubernetes/kubernetes/pull/56836#discussion_r155834524
|
||||
mounted, err := isDirMounted(c.plugin, dir)
|
||||
if err != nil {
|
||||
glog.Error(log("unmounter.Teardown failed while checking mount status for dir [%s]: %v", dir, err))
|
||||
return err
|
||||
}
|
||||
|
||||
if !mounted {
|
||||
glog.V(4).Info(log("unmounter.Teardown skipping unmount, dir not mounted [%s]", dir))
|
||||
return nil
|
||||
}
|
||||
|
||||
volID := c.volumeID
|
||||
csi := c.csiClient
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
|
||||
defer cancel()
|
||||
|
||||
if err := csi.NodeUnpublishVolume(ctx, volID, dir); err != nil {
|
||||
glog.Errorf(log("mounter.TearDownAt failed: %v", err))
|
||||
return err
|
||||
}
|
||||
|
||||
// clean mount point dir
|
||||
if err := removeMountDir(c.plugin, dir); err != nil {
|
||||
glog.Error(log("mounter.TearDownAt failed to clean mount dir [%s]: %v", dir, err))
|
||||
return err
|
||||
}
|
||||
glog.V(4).Infof(log("mounte.TearDownAt successfully unmounted dir [%s]", dir))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// isDirMounted returns the !notMounted result from IsLikelyNotMountPoint check
|
||||
func isDirMounted(plug *csiPlugin, dir string) (bool, error) {
|
||||
mounter := plug.host.GetMounter(plug.GetPluginName())
|
||||
notMnt, err := mounter.IsLikelyNotMountPoint(dir)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
glog.Error(log("isDirMounted IsLikelyNotMountPoint test failed for dir [%v]", dir))
|
||||
return false, err
|
||||
}
|
||||
return !notMnt, nil
|
||||
}
|
||||
|
||||
// removeMountDir cleans the mount dir when dir is not mounted and removes the volume data file in dir
|
||||
func removeMountDir(plug *csiPlugin, mountPath string) error {
|
||||
glog.V(4).Info(log("removing mount path [%s]", mountPath))
|
||||
if pathExists, pathErr := util.PathExists(mountPath); pathErr != nil {
|
||||
glog.Error(log("failed while checking mount path stat [%s]", pathErr))
|
||||
return pathErr
|
||||
} else if !pathExists {
|
||||
glog.Warning(log("skipping mount dir removal, path does not exist [%v]", mountPath))
|
||||
return nil
|
||||
}
|
||||
|
||||
mounter := plug.host.GetMounter(plug.GetPluginName())
|
||||
notMnt, err := mounter.IsLikelyNotMountPoint(mountPath)
|
||||
if err != nil {
|
||||
glog.Error(log("mount dir removal failed [%s]: %v", mountPath, err))
|
||||
return err
|
||||
}
|
||||
if notMnt {
|
||||
glog.V(4).Info(log("dir not mounted, deleting it [%s]", mountPath))
|
||||
if err := os.Remove(mountPath); err != nil && !os.IsNotExist(err) {
|
||||
glog.Error(log("failed to remove dir [%s]: %v", mountPath, err))
|
||||
return err
|
||||
}
|
||||
// remove volume data file as well
|
||||
volPath := path.Dir(mountPath)
|
||||
dataFile := path.Join(volPath, volDataFileName)
|
||||
glog.V(4).Info(log("also deleting volume info data file [%s]", dataFile))
|
||||
if err := os.Remove(dataFile); err != nil && !os.IsNotExist(err) {
|
||||
glog.Error(log("failed to delete volume data file [%s]: %v", dataFile, err))
|
||||
return err
|
||||
}
|
||||
// remove volume path
|
||||
glog.V(4).Info(log("deleting volume path [%s]", volPath))
|
||||
if err := os.Remove(volPath); err != nil && !os.IsNotExist(err) {
|
||||
glog.Error(log("failed to delete volume path [%s]: %v", volPath, err))
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
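For orientation, the paths that SetUpAt creates and removeMountDir tears down follow the layout used by the tests later in this diff:

// Layout (illustrative, matching the test expectations below):
//   pods/<podUID>/volumes/kubernetes.io~csi/<specVolID>/vol_data.json
//   pods/<podUID>/volumes/kubernetes.io~csi/<specVolID>/mount/
// removeMountDir deletes mount/, then vol_data.json, then the enclosing
// <specVolID> directory, in that order.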
264
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter_test.go
generated
vendored
@ -1,264 +0,0 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package csi
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"testing"
|
||||
|
||||
api "k8s.io/api/core/v1"
|
||||
storage "k8s.io/api/storage/v1beta1"
|
||||
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
fakeclient "k8s.io/client-go/kubernetes/fake"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
volumetest "k8s.io/kubernetes/pkg/volume/testing"
|
||||
"k8s.io/kubernetes/pkg/volume/util"
|
||||
)
|
||||
|
||||
var (
|
||||
testDriver = "test-driver"
|
||||
testVol = "vol-123"
|
||||
testns = "test-ns"
|
||||
testPodUID = types.UID("test-pod")
|
||||
)
|
||||
|
||||
func TestMounterGetPath(t *testing.T) {
|
||||
plug, tmpDir := newTestPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
// TODO (vladimirvivien) specName with slashes will not work
|
||||
testCases := []struct {
|
||||
name string
|
||||
specVolumeName string
|
||||
path string
|
||||
}{
|
||||
{
|
||||
name: "simple specName",
|
||||
specVolumeName: "spec-0",
|
||||
path: path.Join(tmpDir, fmt.Sprintf("pods/%s/volumes/kubernetes.io~csi/%s/%s", testPodUID, "spec-0", "/mount")),
|
||||
},
|
||||
{
|
||||
name: "specName with dots",
|
||||
specVolumeName: "test.spec.1",
|
||||
path: path.Join(tmpDir, fmt.Sprintf("pods/%s/volumes/kubernetes.io~csi/%s/%s", testPodUID, "test.spec.1", "/mount")),
|
||||
},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
t.Logf("test case: %s", tc.name)
|
||||
pv := makeTestPV(tc.specVolumeName, 10, testDriver, testVol)
|
||||
spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
|
||||
mounter, err := plug.NewMounter(
|
||||
spec,
|
||||
&api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
|
||||
volume.VolumeOptions{},
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to make a new Mounter: %v", err)
|
||||
}
|
||||
csiMounter := mounter.(*csiMountMgr)
|
||||
|
||||
path := csiMounter.GetPath()
|
||||
|
||||
if tc.path != path {
|
||||
t.Errorf("expecting path %s, got %s", tc.path, path)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMounterSetUp(t *testing.T) {
|
||||
plug, tmpDir := newTestPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
fakeClient := fakeclient.NewSimpleClientset()
|
||||
host := volumetest.NewFakeVolumeHostWithNodeName(
|
||||
tmpDir,
|
||||
fakeClient,
|
||||
nil,
|
||||
"fakeNode",
|
||||
)
|
||||
plug.host = host
|
||||
pv := makeTestPV("test-pv", 10, testDriver, testVol)
|
||||
pvName := pv.GetName()
|
||||
|
||||
mounter, err := plug.NewMounter(
|
||||
volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly),
|
||||
&api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
|
||||
volume.VolumeOptions{},
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to make a new Mounter: %v", err)
|
||||
}
|
||||
|
||||
if mounter == nil {
|
||||
t.Fatal("failed to create CSI mounter")
|
||||
}
|
||||
|
||||
csiMounter := mounter.(*csiMountMgr)
|
||||
csiMounter.csiClient = setupClient(t, true)
|
||||
|
||||
attachID := getAttachmentName(csiMounter.volumeID, csiMounter.driverName, string(plug.host.GetNodeName()))
|
||||
|
||||
attachment := &storage.VolumeAttachment{
|
||||
ObjectMeta: meta.ObjectMeta{
|
||||
Name: attachID,
|
||||
},
|
||||
Spec: storage.VolumeAttachmentSpec{
|
||||
NodeName: "test-node",
|
||||
Attacher: csiPluginName,
|
||||
Source: storage.VolumeAttachmentSource{
|
||||
PersistentVolumeName: &pvName,
|
||||
},
|
||||
},
|
||||
Status: storage.VolumeAttachmentStatus{
|
||||
Attached: false,
|
||||
AttachError: nil,
|
||||
DetachError: nil,
|
||||
},
|
||||
}
|
||||
_, err = csiMounter.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to setup VolumeAttachment: %v", err)
|
||||
}
|
||||
|
||||
// Mounter.SetUp()
|
||||
fsGroup := int64(2000)
|
||||
if err := csiMounter.SetUp(&fsGroup); err != nil {
|
||||
t.Fatalf("mounter.Setup failed: %v", err)
|
||||
}
|
||||
|
||||
//Test that the default file system type is not overridden
|
||||
if len(csiMounter.spec.PersistentVolume.Spec.CSI.FSType) != 0 {
|
||||
t.Errorf("default value of file system type was overridden by type %s", csiMounter.spec.PersistentVolume.Spec.CSI.FSType)
|
||||
}
|
||||
|
||||
path := csiMounter.GetPath()
|
||||
if _, err := os.Stat(path); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
t.Errorf("SetUp() failed, volume path not created: %s", path)
|
||||
} else {
|
||||
t.Errorf("SetUp() failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// ensure call went all the way
|
||||
pubs := csiMounter.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodePublishedVolumes()
|
||||
if pubs[csiMounter.volumeID] != csiMounter.GetPath() {
|
||||
t.Error("csi server may not have received NodePublishVolume call")
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnmounterTeardown(t *testing.T) {
|
||||
plug, tmpDir := newTestPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
pv := makeTestPV("test-pv", 10, testDriver, testVol)
|
||||
|
||||
// save the data file prior to unmount
|
||||
dir := path.Join(getTargetPath(testPodUID, pv.ObjectMeta.Name, plug.host), "/mount")
|
||||
if err := os.MkdirAll(dir, 0755); err != nil && !os.IsNotExist(err) {
|
||||
t.Errorf("failed to create dir [%s]: %v", dir, err)
|
||||
}
|
||||
|
||||
// do a fake local mount
|
||||
diskMounter := util.NewSafeFormatAndMountFromHost(plug.GetPluginName(), plug.host)
|
||||
if err := diskMounter.FormatAndMount("/fake/device", dir, "testfs", nil); err != nil {
|
||||
t.Errorf("failed to mount dir [%s]: %v", dir, err)
|
||||
}
|
||||
|
||||
if err := saveVolumeData(
|
||||
path.Dir(dir),
|
||||
volDataFileName,
|
||||
map[string]string{
|
||||
volDataKey.specVolID: pv.ObjectMeta.Name,
|
||||
volDataKey.driverName: testDriver,
|
||||
volDataKey.volHandle: testVol,
|
||||
},
|
||||
); err != nil {
|
||||
t.Fatalf("failed to save volume data: %v", err)
|
||||
}
|
||||
|
||||
unmounter, err := plug.NewUnmounter(pv.ObjectMeta.Name, testPodUID)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to make a new Unmounter: %v", err)
|
||||
}
|
||||
|
||||
csiUnmounter := unmounter.(*csiMountMgr)
|
||||
csiUnmounter.csiClient = setupClient(t, true)
|
||||
err = csiUnmounter.TearDownAt(dir)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// ensure csi client call
|
||||
pubs := csiUnmounter.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodePublishedVolumes()
|
||||
if _, ok := pubs[csiUnmounter.volumeID]; ok {
|
||||
t.Error("csi server may not have received NodeUnpublishVolume call")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestSaveVolumeData(t *testing.T) {
|
||||
plug, tmpDir := newTestPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
testCases := []struct {
|
||||
name string
|
||||
data map[string]string
|
||||
shouldFail bool
|
||||
}{
|
||||
{name: "test with data ok", data: map[string]string{"key0": "val0", "_key1": "val1", "key2": "val2"}},
|
||||
{name: "test with data ok 2 ", data: map[string]string{"_key0_": "val0", "&key1": "val1", "key2": "val2"}},
|
||||
}
|
||||
|
||||
for i, tc := range testCases {
|
||||
t.Logf("test case: %s", tc.name)
|
||||
specVolID := fmt.Sprintf("spec-volid-%d", i)
|
||||
mountDir := path.Join(getTargetPath(testPodUID, specVolID, plug.host), "/mount")
|
||||
if err := os.MkdirAll(mountDir, 0755); err != nil && !os.IsNotExist(err) {
|
||||
t.Errorf("failed to create dir [%s]: %v", mountDir, err)
|
||||
}
|
||||
|
||||
err := saveVolumeData(path.Dir(mountDir), volDataFileName, tc.data)
|
||||
|
||||
if !tc.shouldFail && err != nil {
|
||||
t.Errorf("unexpected failure: %v", err)
|
||||
}
|
||||
// did file get created
|
||||
dataDir := getTargetPath(testPodUID, specVolID, plug.host)
|
||||
file := path.Join(dataDir, volDataFileName)
|
||||
if _, err := os.Stat(file); err != nil {
|
||||
t.Errorf("failed to create data dir: %v", err)
|
||||
}
|
||||
|
||||
// validate content
|
||||
data, err := ioutil.ReadFile(file)
|
||||
if !tc.shouldFail && err != nil {
|
||||
t.Errorf("failed to read data file: %v", err)
|
||||
}
|
||||
|
||||
jsonData := new(bytes.Buffer)
|
||||
if err := json.NewEncoder(jsonData).Encode(tc.data); err != nil {
|
||||
t.Errorf("failed to encode json: %v", err)
|
||||
}
|
||||
if string(data) != jsonData.String() {
|
||||
t.Errorf("expecting encoded data %v, got %v", string(data), jsonData)
|
||||
}
|
||||
}
|
||||
}
|
443
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_plugin.go
generated
vendored
@ -1,443 +0,0 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package csi
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
api "k8s.io/api/core/v1"
|
||||
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/kubernetes/pkg/util/mount"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
"k8s.io/kubernetes/pkg/volume/csi/labelmanager"
|
||||
)
|
||||
|
||||
const (
|
||||
csiPluginName = "kubernetes.io/csi"
|
||||
|
||||
// TODO (vladimirvivien) implement a more dynamic way to discover
|
||||
// the unix domain socket path for each installed csi driver.
|
||||
// TODO (vladimirvivien) would be nice to name socket with a .sock extension
|
||||
// for consistency.
|
||||
csiAddrTemplate = "/var/lib/kubelet/plugins/%v/csi.sock"
|
||||
csiTimeout = 15 * time.Second
|
||||
volNameSep = "^"
|
||||
volDataFileName = "vol_data.json"
|
||||
fsTypeBlockName = "block"
|
||||
)
|
||||
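As a concrete example of the address template above, with a made-up driver name:

// Sketch: where a driver's default socket lands when the plugin watcher
// feature gate is off ("my-csi-driver" is a hypothetical name).
func exampleSocketPath() string {
	return fmt.Sprintf(csiAddrTemplate, "my-csi-driver")
	// returns "/var/lib/kubelet/plugins/my-csi-driver/csi.sock"
}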
|
||||
type csiPlugin struct {
|
||||
host volume.VolumeHost
|
||||
blockEnabled bool
|
||||
}
|
||||
|
||||
// ProbeVolumePlugins returns implemented plugins
|
||||
func ProbeVolumePlugins() []volume.VolumePlugin {
|
||||
p := &csiPlugin{
|
||||
host: nil,
|
||||
blockEnabled: utilfeature.DefaultFeatureGate.Enabled(features.CSIBlockVolume),
|
||||
}
|
||||
return []volume.VolumePlugin{p}
|
||||
}
|
||||
|
||||
// volume.VolumePlugin methods
|
||||
var _ volume.VolumePlugin = &csiPlugin{}
|
||||
|
||||
type csiDriver struct {
|
||||
driverName string
|
||||
driverEndpoint string
|
||||
}
|
||||
|
||||
type csiDriversStore struct {
|
||||
driversMap map[string]csiDriver
|
||||
sync.RWMutex
|
||||
}
|
||||
|
||||
// csiDrivers keeps track of all registered CSI drivers on the node and their
|
||||
// corresponding sockets
|
||||
var csiDrivers csiDriversStore
|
||||
|
||||
var lm labelmanager.Interface
|
||||
|
||||
// RegistrationCallback is called by kubelet's plugin watcher upon detection
|
||||
// of a new registration socket opened by the CSI driver registrar sidecar.
|
||||
func RegistrationCallback(pluginName string, endpoint string, versions []string, socketPath string) (error, chan bool) {
|
||||
|
||||
glog.Infof(log("Callback from kubelet with plugin name: %s endpoint: %s versions: %s socket path: %s",
|
||||
pluginName, endpoint, strings.Join(versions, ","), socketPath))
|
||||
|
||||
if endpoint == "" {
|
||||
endpoint = socketPath
|
||||
}
|
||||
// Calling nodeLabelManager to update label for newly registered CSI driver
|
||||
err := lm.AddLabels(pluginName)
|
||||
if err != nil {
|
||||
return err, nil
|
||||
}
|
||||
// Storing endpoint of newly registered CSI driver into the map, where CSI driver name will be the key
|
||||
// all other CSI components will be able to get the actual socket of CSI drivers by its name.
|
||||
csiDrivers.Lock()
|
||||
defer csiDrivers.Unlock()
|
||||
csiDrivers.driversMap[pluginName] = csiDriver{driverName: pluginName, driverEndpoint: endpoint}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
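A hedged sketch of how kubelet's plugin watcher would drive this callback for a newly discovered registration socket; the names and paths are illustrative and the watcher internals are not part of this diff.

// Hypothetical call site inside kubelet's plugin watcher:
if err, _ := RegistrationCallback(
	"csi-hostpath", // pluginName reported by the driver registrar
	"/var/lib/kubelet/plugins/csi-hostpath/csi.sock", // endpoint
	[]string{"v0.3"}, // supported versions
	"/var/lib/kubelet/plugins/csi-hostpath-reg.sock", // registration socket
); err != nil {
	glog.Errorf("CSI driver registration failed: %v", err)
}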
|
||||
func (p *csiPlugin) Init(host volume.VolumeHost) error {
|
||||
glog.Info(log("plugin initializing..."))
|
||||
p.host = host
|
||||
|
||||
// Initializing csiDrivers map and label management channels
|
||||
csiDrivers = csiDriversStore{driversMap: map[string]csiDriver{}}
|
||||
lm = labelmanager.NewLabelManager(host.GetNodeName(), host.GetKubeClient())
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *csiPlugin) GetPluginName() string {
|
||||
return csiPluginName
|
||||
}
|
||||
|
||||
// GetVolumeName returns a concatenated string of CSIVolumeSource.Driver<volNameSep>CSIVolumeSource.VolumeHandle
|
||||
// That string value is used in Detach() to extract driver name and volumeName.
|
||||
func (p *csiPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
|
||||
csi, err := getCSISourceFromSpec(spec)
|
||||
if err != nil {
|
||||
glog.Error(log("plugin.GetVolumeName failed to extract volume source from spec: %v", err))
|
||||
return "", err
|
||||
}
|
||||
|
||||
// return driverName<separator>volumeHandle
|
||||
return fmt.Sprintf("%s%s%s", csi.Driver, volNameSep, csi.VolumeHandle), nil
|
||||
}
|
||||
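Given the "^" separator defined in the const block, a hypothetical driver/handle pair yields a name like this:

// Sketch: the name GetVolumeName would produce for a made-up driver and
// volume handle, using the volNameSep separator defined above.
func exampleVolumeName() string {
	return fmt.Sprintf("%s%s%s", "test-driver", volNameSep, "vol-123")
	// returns "test-driver^vol-123"
}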
|
||||
func (p *csiPlugin) CanSupport(spec *volume.Spec) bool {
|
||||
// TODO (vladimirvivien) CanSupport should also take into account
|
||||
// the availability/registration of specified Driver in the volume source
|
||||
return spec.PersistentVolume != nil && spec.PersistentVolume.Spec.CSI != nil
|
||||
}
|
||||
|
||||
func (p *csiPlugin) RequiresRemount() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (p *csiPlugin) NewMounter(
|
||||
spec *volume.Spec,
|
||||
pod *api.Pod,
|
||||
_ volume.VolumeOptions) (volume.Mounter, error) {
|
||||
pvSource, err := getCSISourceFromSpec(spec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
readOnly, err := getReadOnlyFromSpec(spec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
k8s := p.host.GetKubeClient()
|
||||
if k8s == nil {
|
||||
glog.Error(log("failed to get a kubernetes client"))
|
||||
return nil, errors.New("failed to get a Kubernetes client")
|
||||
}
|
||||
|
||||
csi := newCsiDriverClient(pvSource.Driver)
|
||||
|
||||
mounter := &csiMountMgr{
|
||||
plugin: p,
|
||||
k8s: k8s,
|
||||
spec: spec,
|
||||
pod: pod,
|
||||
podUID: pod.UID,
|
||||
driverName: pvSource.Driver,
|
||||
volumeID: pvSource.VolumeHandle,
|
||||
specVolumeID: spec.Name(),
|
||||
csiClient: csi,
|
||||
readOnly: readOnly,
|
||||
}
|
||||
|
||||
// Save volume info in pod dir
|
||||
dir := mounter.GetPath()
|
||||
dataDir := path.Dir(dir) // drop off the /mount suffix
|
||||
|
||||
if err := os.MkdirAll(dataDir, 0750); err != nil {
|
||||
glog.Error(log("failed to create dir %#v: %v", dataDir, err))
|
||||
return nil, err
|
||||
}
|
||||
glog.V(4).Info(log("created path successfully [%s]", dataDir))
|
||||
|
||||
// persist volume info data for teardown
|
||||
node := string(p.host.GetNodeName())
|
||||
attachID := getAttachmentName(pvSource.VolumeHandle, pvSource.Driver, node)
|
||||
volData := map[string]string{
|
||||
volDataKey.specVolID: spec.Name(),
|
||||
volDataKey.volHandle: pvSource.VolumeHandle,
|
||||
volDataKey.driverName: pvSource.Driver,
|
||||
volDataKey.nodeName: node,
|
||||
volDataKey.attachmentID: attachID,
|
||||
}
|
||||
|
||||
if err := saveVolumeData(dataDir, volDataFileName, volData); err != nil {
|
||||
glog.Error(log("failed to save volume info data: %v", err))
|
||||
if err := os.RemoveAll(dataDir); err != nil {
|
||||
glog.Error(log("failed to remove dir after error [%s]: %v", dataDir, err))
|
||||
return nil, err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
glog.V(4).Info(log("mounter created successfully"))
|
||||
|
||||
return mounter, nil
|
||||
}
|
||||
|
||||
func (p *csiPlugin) NewUnmounter(specName string, podUID types.UID) (volume.Unmounter, error) {
|
||||
glog.V(4).Infof(log("setting up unmounter for [name=%v, podUID=%v]", specName, podUID))
|
||||
|
||||
unmounter := &csiMountMgr{
|
||||
plugin: p,
|
||||
podUID: podUID,
|
||||
specVolumeID: specName,
|
||||
}
|
||||
|
||||
// load volume info from file
|
||||
dir := unmounter.GetPath()
|
||||
dataDir := path.Dir(dir) // drop off the /mount suffix
|
||||
data, err := loadVolumeData(dataDir, volDataFileName)
|
||||
if err != nil {
|
||||
glog.Error(log("unmounter failed to load volume data file [%s]: %v", dir, err))
|
||||
return nil, err
|
||||
}
|
||||
unmounter.driverName = data[volDataKey.driverName]
|
||||
unmounter.volumeID = data[volDataKey.volHandle]
|
||||
unmounter.csiClient = newCsiDriverClient(unmounter.driverName)
|
||||
|
||||
return unmounter, nil
|
||||
}
|
||||
|
||||
func (p *csiPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
|
||||
glog.V(4).Info(log("plugin.ConstructVolumeSpec [pv.Name=%v, path=%v]", volumeName, mountPath))
|
||||
|
||||
volData, err := loadVolumeData(mountPath, volDataFileName)
|
||||
if err != nil {
|
||||
glog.Error(log("plugin.ConstructVolumeSpec failed loading volume data using [%s]: %v", mountPath, err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
glog.V(4).Info(log("plugin.ConstructVolumeSpec extracted [%#v]", volData))
|
||||
|
||||
pv := &api.PersistentVolume{
|
||||
ObjectMeta: meta.ObjectMeta{
|
||||
Name: volData[volDataKey.specVolID],
|
||||
},
|
||||
Spec: api.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: api.PersistentVolumeSource{
|
||||
CSI: &api.CSIPersistentVolumeSource{
|
||||
Driver: volData[volDataKey.driverName],
|
||||
VolumeHandle: volData[volDataKey.volHandle],
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return volume.NewSpecFromPersistentVolume(pv, false), nil
|
||||
}
|
||||
|
||||
func (p *csiPlugin) SupportsMountOption() bool {
|
||||
// TODO (vladimirvivien) use CSI VolumeCapability.MountVolume.mount_flags
|
||||
// to probe for the result of this method
|
||||
return false
|
||||
}
|
||||
|
||||
func (p *csiPlugin) SupportsBulkVolumeVerification() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// volume.AttachableVolumePlugin methods
|
||||
var _ volume.AttachableVolumePlugin = &csiPlugin{}
|
||||
|
||||
func (p *csiPlugin) NewAttacher() (volume.Attacher, error) {
|
||||
k8s := p.host.GetKubeClient()
|
||||
if k8s == nil {
|
||||
glog.Error(log("unable to get kubernetes client from host"))
|
||||
return nil, errors.New("unable to get Kubernetes client")
|
||||
}
|
||||
|
||||
return &csiAttacher{
|
||||
plugin: p,
|
||||
k8s: k8s,
|
||||
waitSleepTime: 1 * time.Second,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *csiPlugin) NewDetacher() (volume.Detacher, error) {
|
||||
k8s := p.host.GetKubeClient()
|
||||
if k8s == nil {
|
||||
glog.Error(log("unable to get kubernetes client from host"))
|
||||
return nil, errors.New("unable to get Kubernetes client")
|
||||
}
|
||||
|
||||
return &csiAttacher{
|
||||
plugin: p,
|
||||
k8s: k8s,
|
||||
waitSleepTime: 1 * time.Second,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *csiPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
|
||||
m := p.host.GetMounter(p.GetPluginName())
|
||||
return mount.GetMountRefs(m, deviceMountPath)
|
||||
}
|
||||
|
||||
// BlockVolumePlugin methods
|
||||
var _ volume.BlockVolumePlugin = &csiPlugin{}
|
||||
|
||||
func (p *csiPlugin) NewBlockVolumeMapper(spec *volume.Spec, podRef *api.Pod, opts volume.VolumeOptions) (volume.BlockVolumeMapper, error) {
|
||||
if !p.blockEnabled {
|
||||
return nil, errors.New("CSIBlockVolume feature not enabled")
|
||||
}
|
||||
|
||||
pvSource, err := getCSISourceFromSpec(spec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
readOnly, err := getReadOnlyFromSpec(spec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
glog.V(4).Info(log("setting up block mapper for [volume=%v,driver=%v]", pvSource.VolumeHandle, pvSource.Driver))
|
||||
client := newCsiDriverClient(pvSource.Driver)
|
||||
|
||||
k8s := p.host.GetKubeClient()
|
||||
if k8s == nil {
|
||||
glog.Error(log("failed to get a kubernetes client"))
|
||||
return nil, errors.New("failed to get a Kubernetes client")
|
||||
}
|
||||
|
||||
mapper := &csiBlockMapper{
|
||||
csiClient: client,
|
||||
k8s: k8s,
|
||||
plugin: p,
|
||||
volumeID: pvSource.VolumeHandle,
|
||||
driverName: pvSource.Driver,
|
||||
readOnly: readOnly,
|
||||
spec: spec,
|
||||
podUID: podRef.UID,
|
||||
}
|
||||
|
||||
// Save volume info in pod dir
|
||||
dataDir := getVolumeDeviceDataDir(spec.Name(), p.host)
|
||||
|
||||
if err := os.MkdirAll(dataDir, 0750); err != nil {
|
||||
glog.Error(log("failed to create data dir %s: %v", dataDir, err))
|
||||
return nil, err
|
||||
}
|
||||
glog.V(4).Info(log("created path successfully [%s]", dataDir))
|
||||
|
||||
// persist volume info data for teardown
|
||||
node := string(p.host.GetNodeName())
|
||||
attachID := getAttachmentName(pvSource.VolumeHandle, pvSource.Driver, node)
|
||||
volData := map[string]string{
|
||||
volDataKey.specVolID: spec.Name(),
|
||||
volDataKey.volHandle: pvSource.VolumeHandle,
|
||||
volDataKey.driverName: pvSource.Driver,
|
||||
volDataKey.nodeName: node,
|
||||
volDataKey.attachmentID: attachID,
|
||||
}
|
||||
|
||||
if err := saveVolumeData(dataDir, volDataFileName, volData); err != nil {
|
||||
glog.Error(log("failed to save volume info data: %v", err))
|
||||
if err := os.RemoveAll(dataDir); err != nil {
|
||||
glog.Error(log("failed to remove dir after error [%s]: %v", dataDir, err))
|
||||
return nil, err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return mapper, nil
|
||||
}
|
||||
|
||||
func (p *csiPlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (volume.BlockVolumeUnmapper, error) {
|
||||
if !p.blockEnabled {
|
||||
return nil, errors.New("CSIBlockVolume feature not enabled")
|
||||
}
|
||||
|
||||
glog.V(4).Infof(log("setting up block unmapper for [Spec=%v, podUID=%v]", volName, podUID))
|
||||
unmapper := &csiBlockMapper{
|
||||
plugin: p,
|
||||
podUID: podUID,
|
||||
specName: volName,
|
||||
}
|
||||
|
||||
// load volume info from file
|
||||
dataDir := getVolumeDeviceDataDir(unmapper.specName, p.host)
|
||||
data, err := loadVolumeData(dataDir, volDataFileName)
|
||||
if err != nil {
|
||||
glog.Error(log("unmapper failed to load volume data file [%s]: %v", dataDir, err))
|
||||
return nil, err
|
||||
}
|
||||
unmapper.driverName = data[volDataKey.driverName]
|
||||
unmapper.volumeID = data[volDataKey.volHandle]
|
||||
unmapper.csiClient = newCsiDriverClient(unmapper.driverName)
|
||||
|
||||
return unmapper, nil
|
||||
}
|
||||
|
||||
func (p *csiPlugin) ConstructBlockVolumeSpec(podUID types.UID, specVolName, mapPath string) (*volume.Spec, error) {
|
||||
if !p.blockEnabled {
|
||||
return nil, errors.New("CSIBlockVolume feature not enabled")
|
||||
}
|
||||
|
||||
glog.V(4).Infof("plugin.ConstructBlockVolumeSpec [podUID=%s, specVolName=%s, path=%s]", string(podUID), specVolName, mapPath)
|
||||
|
||||
dataDir := getVolumeDeviceDataDir(specVolName, p.host)
|
||||
volData, err := loadVolumeData(dataDir, volDataFileName)
|
||||
if err != nil {
|
||||
glog.Error(log("plugin.ConstructBlockVolumeSpec failed loading volume data using [%s]: %v", mapPath, err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
glog.V(4).Info(log("plugin.ConstructBlockVolumeSpec extracted [%#v]", volData))
|
||||
|
||||
blockMode := api.PersistentVolumeBlock
|
||||
pv := &api.PersistentVolume{
|
||||
ObjectMeta: meta.ObjectMeta{
|
||||
Name: volData[volDataKey.specVolID],
|
||||
},
|
||||
Spec: api.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: api.PersistentVolumeSource{
|
||||
CSI: &api.CSIPersistentVolumeSource{
|
||||
Driver: volData[volDataKey.driverName],
|
||||
VolumeHandle: volData[volDataKey.volHandle],
|
||||
},
|
||||
},
|
||||
VolumeMode: &blockMode,
|
||||
},
|
||||
}
|
||||
|
||||
return volume.NewSpecFromPersistentVolume(pv, false), nil
|
||||
}
|
475
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_plugin_test.go
generated
vendored
@ -1,475 +0,0 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package csi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
api "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
fakeclient "k8s.io/client-go/kubernetes/fake"
|
||||
utiltesting "k8s.io/client-go/util/testing"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
volumetest "k8s.io/kubernetes/pkg/volume/testing"
|
||||
)
|
||||
|
||||
// create a plugin mgr to load plugins and setup a fake client
|
||||
func newTestPlugin(t *testing.T) (*csiPlugin, string) {
|
||||
err := utilfeature.DefaultFeatureGate.Set("CSIBlockVolume=true")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to enable feature gate for CSIBlockVolume: %v", err)
|
||||
}
|
||||
|
||||
tmpDir, err := utiltesting.MkTmpdir("csi-test")
|
||||
if err != nil {
|
||||
t.Fatalf("can't create temp dir: %v", err)
|
||||
}
|
||||
|
||||
fakeClient := fakeclient.NewSimpleClientset()
|
||||
host := volumetest.NewFakeVolumeHost(
|
||||
tmpDir,
|
||||
fakeClient,
|
||||
nil,
|
||||
)
|
||||
plugMgr := &volume.VolumePluginMgr{}
|
||||
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host)
|
||||
|
||||
plug, err := plugMgr.FindPluginByName(csiPluginName)
|
||||
if err != nil {
|
||||
t.Fatalf("can't find plugin %v", csiPluginName)
|
||||
}
|
||||
|
||||
csiPlug, ok := plug.(*csiPlugin)
|
||||
if !ok {
|
||||
t.Fatalf("cannot assert plugin to be type csiPlugin")
|
||||
}
|
||||
|
||||
return csiPlug, tmpDir
|
||||
}
|
||||
|
||||
func makeTestPV(name string, sizeGig int, driverName, volID string) *api.PersistentVolume {
|
||||
return &api.PersistentVolume{
|
||||
ObjectMeta: meta.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: api.PersistentVolumeSpec{
|
||||
AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
|
||||
Capacity: api.ResourceList{
|
||||
api.ResourceName(api.ResourceStorage): resource.MustParse(
|
||||
fmt.Sprintf("%dGi", sizeGig),
|
||||
),
|
||||
},
|
||||
PersistentVolumeSource: api.PersistentVolumeSource{
|
||||
CSI: &api.CSIPersistentVolumeSource{
|
||||
Driver: driverName,
|
||||
VolumeHandle: volID,
|
||||
ReadOnly: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestPluginGetPluginName(t *testing.T) {
|
||||
plug, tmpDir := newTestPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
if plug.GetPluginName() != "kubernetes.io/csi" {
|
||||
t.Errorf("unexpected plugin name %v", plug.GetPluginName())
|
||||
}
|
||||
}
|
||||
|
||||
func TestPluginGetVolumeName(t *testing.T) {
|
||||
plug, tmpDir := newTestPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
testCases := []struct {
|
||||
name string
|
||||
driverName string
|
||||
volName string
|
||||
shouldFail bool
|
||||
}{
|
||||
{"alphanum names", "testdr", "testvol", false},
|
||||
{"mixchar driver", "test.dr.cc", "testvol", false},
|
||||
{"mixchar volume", "testdr", "test-vol-name", false},
|
||||
{"mixchars all", "test-driver", "test.vol.name", false},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Logf("testing: %s", tc.name)
|
||||
pv := makeTestPV("test-pv", 10, tc.driverName, tc.volName)
|
||||
spec := volume.NewSpecFromPersistentVolume(pv, false)
|
||||
name, err := plug.GetVolumeName(spec)
|
||||
if tc.shouldFail && err == nil {
|
||||
t.Fatal("GetVolumeName should fail, but got err=nil")
|
||||
}
|
||||
if name != fmt.Sprintf("%s%s%s", tc.driverName, volNameSep, tc.volName) {
|
||||
t.Errorf("unexpected volume name %s", name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPluginCanSupport(t *testing.T) {
|
||||
plug, tmpDir := newTestPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
pv := makeTestPV("test-pv", 10, testDriver, testVol)
|
||||
spec := volume.NewSpecFromPersistentVolume(pv, false)
|
||||
|
||||
if !plug.CanSupport(spec) {
|
||||
t.Errorf("should support CSI spec")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPluginConstructVolumeSpec(t *testing.T) {
|
||||
plug, tmpDir := newTestPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
specVolID string
|
||||
data map[string]string
|
||||
shouldFail bool
|
||||
}{
|
||||
{
|
||||
name: "valid spec name",
|
||||
specVolID: "test.vol.id",
|
||||
data: map[string]string{volDataKey.specVolID: "test.vol.id", volDataKey.volHandle: "test-vol0", volDataKey.driverName: "test-driver0"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Logf("test case: %s", tc.name)
|
||||
dir := getTargetPath(testPodUID, tc.specVolID, plug.host)
|
||||
|
||||
// create the data file
|
||||
if tc.data != nil {
|
||||
mountDir := path.Join(getTargetPath(testPodUID, tc.specVolID, plug.host), "/mount")
|
||||
if err := os.MkdirAll(mountDir, 0755); err != nil && !os.IsNotExist(err) {
|
||||
t.Errorf("failed to create dir [%s]: %v", mountDir, err)
|
||||
}
|
||||
if err := saveVolumeData(path.Dir(mountDir), volDataFileName, tc.data); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// rebuild spec
|
||||
spec, err := plug.ConstructVolumeSpec("test-pv", dir)
|
||||
if tc.shouldFail {
|
||||
if err == nil {
|
||||
t.Fatal("expecting ConstructVolumeSpec to fail, but got nil error")
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
volHandle := spec.PersistentVolume.Spec.CSI.VolumeHandle
|
||||
if volHandle != tc.data[volDataKey.volHandle] {
|
||||
t.Errorf("expected volID %s, got volID %s", tc.data[volDataKey.volHandle], volHandle)
|
||||
}
|
||||
|
||||
if spec.Name() != tc.specVolID {
|
||||
t.Errorf("Unexpected spec name %s", spec.Name())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPluginNewMounter(t *testing.T) {
|
||||
plug, tmpDir := newTestPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
pv := makeTestPV("test-pv", 10, testDriver, testVol)
|
||||
mounter, err := plug.NewMounter(
|
||||
volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly),
|
||||
&api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
|
||||
volume.VolumeOptions{},
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to make a new Mounter: %v", err)
|
||||
}
|
||||
|
||||
if mounter == nil {
|
||||
t.Fatal("failed to create CSI mounter")
|
||||
}
|
||||
csiMounter := mounter.(*csiMountMgr)
|
||||
|
||||
// validate mounter fields
|
||||
if csiMounter.driverName != testDriver {
|
||||
t.Error("mounter driver name not set")
|
||||
}
|
||||
if csiMounter.volumeID != testVol {
|
||||
t.Error("mounter volume id not set")
|
||||
}
|
||||
if csiMounter.pod == nil {
|
||||
t.Error("mounter pod not set")
|
||||
}
|
||||
if csiMounter.podUID == types.UID("") {
|
||||
t.Error("mounter podUID not set")
|
||||
}
|
||||
if csiMounter.csiClient == nil {
|
||||
t.Error("mounter csiClient is nil")
|
||||
}
|
||||
|
||||
// ensure data file is created
|
||||
dataDir := path.Dir(mounter.GetPath())
|
||||
dataFile := filepath.Join(dataDir, volDataFileName)
|
||||
if _, err := os.Stat(dataFile); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
t.Errorf("data file not created %s", dataFile)
|
||||
} else {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPluginNewUnmounter(t *testing.T) {
|
||||
plug, tmpDir := newTestPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
pv := makeTestPV("test-pv", 10, testDriver, testVol)
|
||||
|
||||
// save the data file to re-create client
|
||||
dir := path.Join(getTargetPath(testPodUID, pv.ObjectMeta.Name, plug.host), "/mount")
|
||||
if err := os.MkdirAll(dir, 0755); err != nil && !os.IsNotExist(err) {
|
||||
t.Errorf("failed to create dir [%s]: %v", dir, err)
|
||||
}
|
||||
|
||||
if err := saveVolumeData(
|
||||
path.Dir(dir),
|
||||
volDataFileName,
|
||||
map[string]string{
|
||||
volDataKey.specVolID: pv.ObjectMeta.Name,
|
||||
volDataKey.driverName: testDriver,
|
||||
volDataKey.volHandle: testVol,
|
||||
},
|
||||
); err != nil {
|
||||
t.Fatalf("failed to save volume data: %v", err)
|
||||
}
|
||||
|
||||
// test unmounter
|
||||
unmounter, err := plug.NewUnmounter(pv.ObjectMeta.Name, testPodUID)
|
||||
csiUnmounter := unmounter.(*csiMountMgr)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to make a new Unmounter: %v", err)
|
||||
}
|
||||
|
||||
if csiUnmounter == nil {
|
||||
t.Fatal("failed to create CSI Unmounter")
|
||||
}
|
||||
|
||||
if csiUnmounter.podUID != testPodUID {
|
||||
t.Error("podUID not set")
|
||||
}
|
||||
|
||||
if csiUnmounter.csiClient == nil {
|
||||
t.Error("unmounter csiClient is nil")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPluginNewAttacher(t *testing.T) {
|
||||
plug, tmpDir := newTestPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
attacher, err := plug.NewAttacher()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create new attacher: %v", err)
|
||||
}
|
||||
|
||||
csiAttacher := attacher.(*csiAttacher)
|
||||
if csiAttacher.plugin == nil {
|
||||
t.Error("plugin not set for attacher")
|
||||
}
|
||||
if csiAttacher.k8s == nil {
|
||||
t.Error("Kubernetes client not set for attacher")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPluginNewDetacher(t *testing.T) {
|
||||
plug, tmpDir := newTestPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
detacher, err := plug.NewDetacher()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create new detacher: %v", err)
|
||||
}
|
||||
|
||||
csiDetacher := detacher.(*csiAttacher)
|
||||
if csiDetacher.plugin == nil {
|
||||
t.Error("plugin not set for detacher")
|
||||
}
|
||||
if csiDetacher.k8s == nil {
|
||||
t.Error("Kubernetes client not set for detacher")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPluginNewBlockMapper(t *testing.T) {
|
||||
plug, tmpDir := newTestPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
pv := makeTestPV("test-block-pv", 10, testDriver, testVol)
|
||||
mounter, err := plug.NewBlockVolumeMapper(
|
||||
volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly),
|
||||
&api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
|
||||
volume.VolumeOptions{},
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to make a new BlockMapper: %v", err)
|
||||
}
|
||||
|
||||
if mounter == nil {
|
||||
t.Fatal("failed to create CSI BlockMapper, mapper is nill")
|
||||
}
|
||||
csiMapper := mounter.(*csiBlockMapper)
|
||||
|
||||
// validate mounter fields
|
||||
if csiMapper.driverName != testDriver {
|
||||
t.Error("CSI block mapper missing driver name")
|
||||
}
|
||||
if csiMapper.volumeID != testVol {
|
||||
t.Error("CSI block mapper missing volumeID")
|
||||
}
|
||||
|
||||
if csiMapper.podUID == types.UID("") {
|
||||
t.Error("CSI block mapper missing pod.UID")
|
||||
}
|
||||
if csiMapper.csiClient == nil {
|
||||
t.Error("mapper csiClient is nil")
|
||||
}
|
||||
|
||||
// ensure data file is created
|
||||
dataFile := getVolumeDeviceDataDir(csiMapper.spec.Name(), plug.host)
|
||||
if _, err := os.Stat(dataFile); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
t.Errorf("data file not created %s", dataFile)
|
||||
} else {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPluginNewUnmapper(t *testing.T) {
|
||||
plug, tmpDir := newTestPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
pv := makeTestPV("test-pv", 10, testDriver, testVol)
|
||||
|
||||
// save the data file to re-create client
|
||||
dir := getVolumeDeviceDataDir(pv.ObjectMeta.Name, plug.host)
|
||||
if err := os.MkdirAll(dir, 0755); err != nil && !os.IsNotExist(err) {
|
||||
t.Errorf("failed to create dir [%s]: %v", dir, err)
|
||||
}
|
||||
|
||||
if err := saveVolumeData(
|
||||
dir,
|
||||
volDataFileName,
|
||||
map[string]string{
|
||||
volDataKey.specVolID: pv.ObjectMeta.Name,
|
||||
volDataKey.driverName: testDriver,
|
||||
volDataKey.volHandle: testVol,
|
||||
},
|
||||
); err != nil {
|
||||
t.Fatalf("failed to save volume data: %v", err)
|
||||
}
|
||||
|
||||
// test unmounter
|
||||
unmapper, err := plug.NewBlockVolumeUnmapper(pv.ObjectMeta.Name, testPodUID)
|
||||
csiUnmapper := unmapper.(*csiBlockMapper)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to make a new Unmounter: %v", err)
|
||||
}
|
||||
|
||||
if csiUnmapper == nil {
|
||||
t.Fatal("failed to create CSI Unmounter")
|
||||
}
|
||||
|
||||
if csiUnmapper.podUID != testPodUID {
|
||||
t.Error("podUID not set")
|
||||
}
|
||||
|
||||
if csiUnmapper.specName != pv.ObjectMeta.Name {
|
||||
t.Error("specName not set")
|
||||
}
|
||||
|
||||
if csiUnmapper.csiClient == nil {
|
||||
t.Error("unmapper csiClient is nil")
|
||||
}
|
||||
|
||||
// test loaded vol data
|
||||
if csiUnmapper.driverName != testDriver {
|
||||
t.Error("unmapper driverName not set")
|
||||
}
|
||||
if csiUnmapper.volumeID != testVol {
|
||||
t.Error("unmapper volumeHandle not set")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPluginConstructBlockVolumeSpec(t *testing.T) {
|
||||
plug, tmpDir := newTestPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
specVolID string
|
||||
data map[string]string
|
||||
shouldFail bool
|
||||
}{
|
||||
{
|
||||
name: "valid spec name",
|
||||
specVolID: "test.vol.id",
|
||||
data: map[string]string{volDataKey.specVolID: "test.vol.id", volDataKey.volHandle: "test-vol0", volDataKey.driverName: "test-driver0"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Logf("test case: %s", tc.name)
|
||||
deviceDataDir := getVolumeDeviceDataDir(tc.specVolID, plug.host)
|
||||
|
||||
// create data file in csi plugin dir
|
||||
if tc.data != nil {
|
||||
if err := os.MkdirAll(deviceDataDir, 0755); err != nil && !os.IsNotExist(err) {
|
||||
t.Errorf("failed to create dir [%s]: %v", deviceDataDir, err)
|
||||
}
|
||||
if err := saveVolumeData(deviceDataDir, volDataFileName, tc.data); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// rebuild spec
|
||||
spec, err := plug.ConstructBlockVolumeSpec("test-podUID", tc.specVolID, getVolumeDevicePluginDir(tc.specVolID, plug.host))
|
||||
if tc.shouldFail {
|
||||
if err == nil {
|
||||
t.Fatal("expecting ConstructVolumeSpec to fail, but got nil error")
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
volHandle := spec.PersistentVolume.Spec.CSI.VolumeHandle
|
||||
if volHandle != tc.data[volDataKey.volHandle] {
|
||||
t.Errorf("expected volID %s, got volID %s", tc.data[volDataKey.volHandle], volHandle)
|
||||
}
|
||||
|
||||
if spec.Name() != tc.specVolID {
|
||||
t.Errorf("Unexpected spec name %s", spec.Name())
|
||||
}
|
||||
}
|
||||
}
|
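The ConstructBlockVolumeSpec test above relies on the saved vol_data.json map carrying enough state to rebuild a PersistentVolume after a kubelet restart. A rough standalone sketch of that rebuild step (not part of the deleted file; the helper name and literal key strings are illustrative, the real keys live in volDataKey):

package main

import (
	"fmt"

	api "k8s.io/api/core/v1"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// specFromVolData shows the gist of ConstructBlockVolumeSpec: the spec
// volume ID, driver name, and volume handle saved to disk are enough to
// reconstitute a CSI PersistentVolume object.
func specFromVolData(volData map[string]string) *api.PersistentVolume {
	return &api.PersistentVolume{
		ObjectMeta: meta.ObjectMeta{Name: volData["specVolID"]},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeSource: api.PersistentVolumeSource{
				CSI: &api.CSIPersistentVolumeSource{
					Driver:       volData["driverName"],
					VolumeHandle: volData["volumeHandle"],
				},
			},
		},
	}
}

func main() {
	pv := specFromVolData(map[string]string{
		"specVolID": "test.vol.id", "driverName": "test-driver0", "volumeHandle": "test-vol0",
	})
	fmt.Println(pv.Name, pv.Spec.CSI.Driver, pv.Spec.CSI.VolumeHandle)
}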
123
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_util.go
generated
vendored
@ -1,123 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package csi

import (
	"encoding/json"
	"fmt"
	"os"
	"path"

	"github.com/golang/glog"
	api "k8s.io/api/core/v1"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	kstrings "k8s.io/kubernetes/pkg/util/strings"
	"k8s.io/kubernetes/pkg/volume"
)

func getCredentialsFromSecret(k8s kubernetes.Interface, secretRef *api.SecretReference) (map[string]string, error) {
	credentials := map[string]string{}
	secret, err := k8s.CoreV1().Secrets(secretRef.Namespace).Get(secretRef.Name, meta.GetOptions{})
	if err != nil {
		glog.Errorf("failed to find the secret %s in the namespace %s with error: %v\n", secretRef.Name, secretRef.Namespace, err)
		return credentials, err
	}
	for key, value := range secret.Data {
		credentials[key] = string(value)
	}

	return credentials, nil
}

// saveVolumeData persists parameter data as json file at the provided location
func saveVolumeData(dir string, fileName string, data map[string]string) error {
	dataFilePath := path.Join(dir, fileName)
	glog.V(4).Info(log("saving volume data file [%s]", dataFilePath))
	file, err := os.Create(dataFilePath)
	if err != nil {
		glog.Error(log("failed to save volume data file %s: %v", dataFilePath, err))
		return err
	}
	defer file.Close()
	if err := json.NewEncoder(file).Encode(data); err != nil {
		glog.Error(log("failed to save volume data file %s: %v", dataFilePath, err))
		return err
	}
	glog.V(4).Info(log("volume data file saved successfully [%s]", dataFilePath))
	return nil
}

// loadVolumeData loads volume info from specified json file/location
func loadVolumeData(dir string, fileName string) (map[string]string, error) {
	// remove /mount at the end
	dataFileName := path.Join(dir, fileName)
	glog.V(4).Info(log("loading volume data file [%s]", dataFileName))

	file, err := os.Open(dataFileName)
	if err != nil {
		glog.Error(log("failed to open volume data file [%s]: %v", dataFileName, err))
		return nil, err
	}
	defer file.Close()
	data := map[string]string{}
	if err := json.NewDecoder(file).Decode(&data); err != nil {
		glog.Error(log("failed to parse volume data file [%s]: %v", dataFileName, err))
		return nil, err
	}

	return data, nil
}

func getCSISourceFromSpec(spec *volume.Spec) (*api.CSIPersistentVolumeSource, error) {
	if spec.PersistentVolume != nil &&
		spec.PersistentVolume.Spec.CSI != nil {
		return spec.PersistentVolume.Spec.CSI, nil
	}

	return nil, fmt.Errorf("CSIPersistentVolumeSource not defined in spec")
}

func getReadOnlyFromSpec(spec *volume.Spec) (bool, error) {
	if spec.PersistentVolume != nil &&
		spec.PersistentVolume.Spec.CSI != nil {
		return spec.ReadOnly, nil
	}

	return false, fmt.Errorf("CSIPersistentVolumeSource not defined in spec")
}

// log prepends log string with `kubernetes.io/csi`
func log(msg string, parts ...interface{}) string {
	return fmt.Sprintf(fmt.Sprintf("%s: %s", csiPluginName, msg), parts...)
}

// getVolumeDevicePluginDir returns the path where the CSI plugin keeps the
// symlink for a block device associated with a given specVolumeID.
// path: plugins/kubernetes.io/csi/volumeDevices/{specVolumeID}/dev
func getVolumeDevicePluginDir(specVolID string, host volume.VolumeHost) string {
	sanitizedSpecVolID := kstrings.EscapeQualifiedNameForDisk(specVolID)
	return path.Join(host.GetVolumeDevicePluginDir(csiPluginName), sanitizedSpecVolID, "dev")
}

// getVolumeDeviceDataDir returns the path where the CSI plugin keeps the
// volume data for a block device associated with a given specVolumeID.
// path: plugins/kubernetes.io/csi/volumeDevices/{specVolumeID}/data
func getVolumeDeviceDataDir(specVolID string, host volume.VolumeHost) string {
	sanitizedSpecVolID := kstrings.EscapeQualifiedNameForDisk(specVolID)
	return path.Join(host.GetVolumeDevicePluginDir(csiPluginName), sanitizedSpecVolID, "data")
}
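For orientation, saveVolumeData and loadVolumeData above amount to a plain JSON round trip over a small string map. A minimal standalone sketch of the same idea, using only the standard library and assuming Go 1.16+ for os.MkdirTemp (function and key names here are illustrative, not part of the deleted file):

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
)

// writeVolData encodes data as JSON into dir/file, mirroring saveVolumeData.
func writeVolData(dir, file string, data map[string]string) error {
	f, err := os.Create(filepath.Join(dir, file))
	if err != nil {
		return err
	}
	defer f.Close()
	return json.NewEncoder(f).Encode(data)
}

// readVolData decodes dir/file back into a map, mirroring loadVolumeData.
func readVolData(dir, file string) (map[string]string, error) {
	f, err := os.Open(filepath.Join(dir, file))
	if err != nil {
		return nil, err
	}
	defer f.Close()
	out := map[string]string{}
	return out, json.NewDecoder(f).Decode(&out)
}

func main() {
	dir, _ := os.MkdirTemp("", "voldata")
	defer os.RemoveAll(dir)
	_ = writeVolData(dir, "vol_data.json", map[string]string{"specVolID": "pv-1", "driverName": "test-driver"})
	data, _ := readVolData(dir, "vol_data.json")
	fmt.Println(data["specVolID"]) // prints: pv-1
}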
26
vendor/k8s.io/kubernetes/pkg/volume/csi/fake/BUILD
generated
vendored
@ -1,26 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["fake_client.go"],
    importpath = "k8s.io/kubernetes/pkg/volume/csi/fake",
    visibility = ["//visibility:public"],
    deps = [
        "//vendor/github.com/container-storage-interface/spec/lib/go/csi/v0:go_default_library",
        "//vendor/google.golang.org/grpc:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
277
vendor/k8s.io/kubernetes/pkg/volume/csi/fake/fake_client.go
generated
vendored
@ -1,277 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package fake

import (
	"context"
	"errors"
	"strings"

	"google.golang.org/grpc"

	csipb "github.com/container-storage-interface/spec/lib/go/csi/v0"
)

// IdentityClient is a CSI identity client used for testing
type IdentityClient struct {
	nextErr error
}

// NewIdentityClient returns a new IdentityClient
func NewIdentityClient() *IdentityClient {
	return &IdentityClient{}
}

// SetNextError injects expected error
func (f *IdentityClient) SetNextError(err error) {
	f.nextErr = err
}

// GetPluginInfo returns plugin info
func (f *IdentityClient) GetPluginInfo(ctx context.Context, in *csipb.GetPluginInfoRequest, opts ...grpc.CallOption) (*csipb.GetPluginInfoResponse, error) {
	return nil, nil
}

// GetPluginCapabilities implements csi method
func (f *IdentityClient) GetPluginCapabilities(ctx context.Context, in *csipb.GetPluginCapabilitiesRequest, opts ...grpc.CallOption) (*csipb.GetPluginCapabilitiesResponse, error) {
	return nil, nil
}

// Probe implements csi method
func (f *IdentityClient) Probe(ctx context.Context, in *csipb.ProbeRequest, opts ...grpc.CallOption) (*csipb.ProbeResponse, error) {
	return nil, nil
}

// NodeClient is a fake CSI node client
type NodeClient struct {
	nodePublishedVolumes map[string]string
	nodeStagedVolumes    map[string]string
	stageUnstageSet      bool
	nextErr              error
}

// NewNodeClient returns fake node client
func NewNodeClient(stageUnstageSet bool) *NodeClient {
	return &NodeClient{
		nodePublishedVolumes: make(map[string]string),
		nodeStagedVolumes:    make(map[string]string),
		stageUnstageSet:      stageUnstageSet,
	}
}

// SetNextError injects next expected error
func (f *NodeClient) SetNextError(err error) {
	f.nextErr = err
}

// GetNodePublishedVolumes returns node published volumes
func (f *NodeClient) GetNodePublishedVolumes() map[string]string {
	return f.nodePublishedVolumes
}

// GetNodeStagedVolumes returns node staged volumes
func (f *NodeClient) GetNodeStagedVolumes() map[string]string {
	return f.nodeStagedVolumes
}

// AddNodeStagedVolume registers a staged volume for the given volume ID
func (f *NodeClient) AddNodeStagedVolume(volID, deviceMountPath string) {
	f.nodeStagedVolumes[volID] = deviceMountPath
}

// NodePublishVolume implements CSI NodePublishVolume
func (f *NodeClient) NodePublishVolume(ctx context.Context, req *csipb.NodePublishVolumeRequest, opts ...grpc.CallOption) (*csipb.NodePublishVolumeResponse, error) {
	if f.nextErr != nil {
		return nil, f.nextErr
	}

	if req.GetVolumeId() == "" {
		return nil, errors.New("missing volume id")
	}
	if req.GetTargetPath() == "" {
		return nil, errors.New("missing target path")
	}
	fsTypes := "block|ext4|xfs|zfs"
	fsType := req.GetVolumeCapability().GetMount().GetFsType()
	if !strings.Contains(fsTypes, fsType) {
		return nil, errors.New("invalid fstype")
	}
	f.nodePublishedVolumes[req.GetVolumeId()] = req.GetTargetPath()
	return &csipb.NodePublishVolumeResponse{}, nil
}

// NodeUnpublishVolume implements csi method
func (f *NodeClient) NodeUnpublishVolume(ctx context.Context, req *csipb.NodeUnpublishVolumeRequest, opts ...grpc.CallOption) (*csipb.NodeUnpublishVolumeResponse, error) {
	if f.nextErr != nil {
		return nil, f.nextErr
	}

	if req.GetVolumeId() == "" {
		return nil, errors.New("missing volume id")
	}
	if req.GetTargetPath() == "" {
		return nil, errors.New("missing target path")
	}
	delete(f.nodePublishedVolumes, req.GetVolumeId())
	return &csipb.NodeUnpublishVolumeResponse{}, nil
}

// NodeStageVolume implements csi method
func (f *NodeClient) NodeStageVolume(ctx context.Context, req *csipb.NodeStageVolumeRequest, opts ...grpc.CallOption) (*csipb.NodeStageVolumeResponse, error) {
	if f.nextErr != nil {
		return nil, f.nextErr
	}

	if req.GetVolumeId() == "" {
		return nil, errors.New("missing volume id")
	}
	if req.GetStagingTargetPath() == "" {
		return nil, errors.New("missing staging target path")
	}

	fsType := ""
	fsTypes := "block|ext4|xfs|zfs"
	mounted := req.GetVolumeCapability().GetMount()
	if mounted != nil {
		fsType = mounted.GetFsType()
	}
	if !strings.Contains(fsTypes, fsType) {
		return nil, errors.New("invalid fstype")
	}

	f.nodeStagedVolumes[req.GetVolumeId()] = req.GetStagingTargetPath()
	return &csipb.NodeStageVolumeResponse{}, nil
}

// NodeUnstageVolume implements csi method
func (f *NodeClient) NodeUnstageVolume(ctx context.Context, req *csipb.NodeUnstageVolumeRequest, opts ...grpc.CallOption) (*csipb.NodeUnstageVolumeResponse, error) {
	if f.nextErr != nil {
		return nil, f.nextErr
	}

	if req.GetVolumeId() == "" {
		return nil, errors.New("missing volume id")
	}
	if req.GetStagingTargetPath() == "" {
		return nil, errors.New("missing staging target path")
	}

	delete(f.nodeStagedVolumes, req.GetVolumeId())
	return &csipb.NodeUnstageVolumeResponse{}, nil
}

// NodeGetId implements csi method
func (f *NodeClient) NodeGetId(ctx context.Context, in *csipb.NodeGetIdRequest, opts ...grpc.CallOption) (*csipb.NodeGetIdResponse, error) {
	return nil, nil
}

// NodeGetCapabilities implements csi method
func (f *NodeClient) NodeGetCapabilities(ctx context.Context, in *csipb.NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*csipb.NodeGetCapabilitiesResponse, error) {
	resp := &csipb.NodeGetCapabilitiesResponse{
		Capabilities: []*csipb.NodeServiceCapability{
			{
				Type: &csipb.NodeServiceCapability_Rpc{
					Rpc: &csipb.NodeServiceCapability_RPC{
						Type: csipb.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME,
					},
				},
			},
		},
	}
	if f.stageUnstageSet {
		return resp, nil
	}
	return nil, nil
}

// ControllerClient represents a CSI Controller client
type ControllerClient struct {
	nextCapabilities []*csipb.ControllerServiceCapability
	nextErr          error
}

// NewControllerClient returns a ControllerClient
func NewControllerClient() *ControllerClient {
	return &ControllerClient{}
}

// SetNextError injects next expected error
func (f *ControllerClient) SetNextError(err error) {
	f.nextErr = err
}

// SetNextCapabilities injects next expected capabilities
func (f *ControllerClient) SetNextCapabilities(caps []*csipb.ControllerServiceCapability) {
	f.nextCapabilities = caps
}

// ControllerGetCapabilities implements csi method
func (f *ControllerClient) ControllerGetCapabilities(ctx context.Context, in *csipb.ControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*csipb.ControllerGetCapabilitiesResponse, error) {
	if f.nextErr != nil {
		return nil, f.nextErr
	}

	if f.nextCapabilities == nil {
		f.nextCapabilities = []*csipb.ControllerServiceCapability{
			{
				Type: &csipb.ControllerServiceCapability_Rpc{
					Rpc: &csipb.ControllerServiceCapability_RPC{
						Type: csipb.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME,
					},
				},
			},
		}
	}
	return &csipb.ControllerGetCapabilitiesResponse{
		Capabilities: f.nextCapabilities,
	}, nil
}

// CreateVolume implements csi method
func (f *ControllerClient) CreateVolume(ctx context.Context, in *csipb.CreateVolumeRequest, opts ...grpc.CallOption) (*csipb.CreateVolumeResponse, error) {
	return nil, nil
}

// DeleteVolume implements csi method
func (f *ControllerClient) DeleteVolume(ctx context.Context, in *csipb.DeleteVolumeRequest, opts ...grpc.CallOption) (*csipb.DeleteVolumeResponse, error) {
	return nil, nil
}

// ControllerPublishVolume implements csi method
func (f *ControllerClient) ControllerPublishVolume(ctx context.Context, in *csipb.ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*csipb.ControllerPublishVolumeResponse, error) {
	return nil, nil
}

// ControllerUnpublishVolume implements csi method
func (f *ControllerClient) ControllerUnpublishVolume(ctx context.Context, in *csipb.ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) (*csipb.ControllerUnpublishVolumeResponse, error) {
	return nil, nil
}

// ValidateVolumeCapabilities implements csi method
func (f *ControllerClient) ValidateVolumeCapabilities(ctx context.Context, in *csipb.ValidateVolumeCapabilitiesRequest, opts ...grpc.CallOption) (*csipb.ValidateVolumeCapabilitiesResponse, error) {
	return nil, nil
}

// ListVolumes implements csi method
func (f *ControllerClient) ListVolumes(ctx context.Context, in *csipb.ListVolumesRequest, opts ...grpc.CallOption) (*csipb.ListVolumesResponse, error) {
	return nil, nil
}

// GetCapacity implements csi method
func (f *ControllerClient) GetCapacity(ctx context.Context, in *csipb.GetCapacityRequest, opts ...grpc.CallOption) (*csipb.GetCapacityResponse, error) {
	return nil, nil
}
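A minimal sketch of how this fake is meant to be driven from a test, assuming the fake package above and the CSI v0 Go bindings it vendors; SetNextError makes exactly the next RPC fail, which is how the CSI client tests exercise error paths. The test name is made up for the sketch:

package fake_test

import (
	"context"
	"testing"

	csipb "github.com/container-storage-interface/spec/lib/go/csi/v0"
	"k8s.io/kubernetes/pkg/volume/csi/fake"
)

func TestFakeNodeClientSketch(t *testing.T) {
	client := fake.NewNodeClient(false /* stageUnstageSet */)

	// publish a volume through the fake and check the recorded state
	req := &csipb.NodePublishVolumeRequest{
		VolumeId:   "vol-0",
		TargetPath: "/target/path",
		VolumeCapability: &csipb.VolumeCapability{
			AccessType: &csipb.VolumeCapability_Mount{
				Mount: &csipb.VolumeCapability_MountVolume{FsType: "ext4"},
			},
		},
	}
	if _, err := client.NodePublishVolume(context.Background(), req); err != nil {
		t.Fatalf("publish failed: %v", err)
	}
	if got := client.GetNodePublishedVolumes()["vol-0"]; got != "/target/path" {
		t.Errorf("unexpected target path %q", got)
	}
}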
30
vendor/k8s.io/kubernetes/pkg/volume/csi/labelmanager/BUILD
generated
vendored
@ -1,30 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["labelmanager.go"],
    importpath = "k8s.io/kubernetes/pkg/volume/csi/labelmanager",
    visibility = ["//visibility:public"],
    deps = [
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/util/retry:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
251
vendor/k8s.io/kubernetes/pkg/volume/csi/labelmanager/labelmanager.go
generated
vendored
@ -1,251 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package labelmanager includes internal functions used to add/delete labels to
// kubernetes nodes for corresponding CSI drivers
package labelmanager // import "k8s.io/kubernetes/pkg/volume/csi/labelmanager"

import (
	"encoding/json"
	"fmt"

	"github.com/golang/glog"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
	corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/util/retry"
)

const (
	// Name of node annotation that contains JSON map of driver names to node
	// names
	annotationKey = "csi.volume.kubernetes.io/nodeid"
	csiPluginName = "kubernetes.io/csi"
)

// labelManagerStruct holds the node name and the Kubernetes client used by
// the driver registration code to manage the node's labels
type labelManagerStruct struct {
	nodeName types.NodeName
	k8s      kubernetes.Interface
}

// Interface implements an interface for managing labels of a node
type Interface interface {
	AddLabels(driverName string) error
}

// NewLabelManager initializes labelManagerStruct and returns available interfaces
func NewLabelManager(nodeName types.NodeName, kubeClient kubernetes.Interface) Interface {
	return labelManagerStruct{
		nodeName: nodeName,
		k8s:      kubeClient,
	}
}

// AddLabels annotates the node with the given driver's name and node ID,
// as requested by the driver's registration process.
func (lm labelManagerStruct) AddLabels(driverName string) error {
	err := verifyAndAddNodeId(string(lm.nodeName), lm.k8s.CoreV1().Nodes(), driverName, string(lm.nodeName))
	if err != nil {
		return fmt.Errorf("failed to update node %s's annotation with error: %+v", lm.nodeName, err)
	}
	return nil
}

// Clones the given map and returns a new map with the given key and value added.
// Returns the given map, if annotationKey is empty.
func cloneAndAddAnnotation(
	annotations map[string]string,
	annotationKey,
	annotationValue string) map[string]string {
	if annotationKey == "" {
		// Don't need to add an annotation.
		return annotations
	}
	// Clone.
	newAnnotations := map[string]string{}
	for key, value := range annotations {
		newAnnotations[key] = value
	}
	newAnnotations[annotationKey] = annotationValue
	return newAnnotations
}

func verifyAndAddNodeId(
	k8sNodeName string,
	k8sNodesClient corev1.NodeInterface,
	csiDriverName string,
	csiDriverNodeId string) error {
	// Add or update annotation on Node object
	retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
		// Retrieve the latest version of Node before attempting update, so that
		// existing changes are not overwritten. RetryOnConflict uses
		// exponential backoff to avoid exhausting the apiserver.
		result, getErr := k8sNodesClient.Get(k8sNodeName, metav1.GetOptions{})
		if getErr != nil {
			glog.Errorf("Failed to get latest version of Node: %v", getErr)
			return getErr // do not wrap error
		}

		var previousAnnotationValue string
		if result.ObjectMeta.Annotations != nil {
			previousAnnotationValue =
				result.ObjectMeta.Annotations[annotationKey]
			glog.V(3).Infof(
				"previousAnnotationValue=%q", previousAnnotationValue)
		}

		existingDriverMap := map[string]string{}
		if previousAnnotationValue != "" {
			// Parse previousAnnotationValue as JSON
			if err := json.Unmarshal([]byte(previousAnnotationValue), &existingDriverMap); err != nil {
				return fmt.Errorf(
					"failed to parse node's %q annotation value (%q) err=%v",
					annotationKey,
					previousAnnotationValue,
					err)
			}
		}

		if val, ok := existingDriverMap[csiDriverName]; ok {
			if val == csiDriverNodeId {
				// Value already exists in node annotation, nothing more to do
				glog.V(1).Infof(
					"The key value {%q: %q} already exists in node %q annotation, no need to update: %v",
					csiDriverName,
					csiDriverNodeId,
					annotationKey,
					previousAnnotationValue)
				return nil
			}
		}

		// Add/update annotation value
		existingDriverMap[csiDriverName] = csiDriverNodeId
		jsonObj, err := json.Marshal(existingDriverMap)
		if err != nil {
			return fmt.Errorf(
				"failed while trying to add key value {%q: %q} to node %q annotation. Existing value: %v",
				csiDriverName,
				csiDriverNodeId,
				annotationKey,
				previousAnnotationValue)
		}

		result.ObjectMeta.Annotations = cloneAndAddAnnotation(
			result.ObjectMeta.Annotations,
			annotationKey,
			string(jsonObj))
		_, updateErr := k8sNodesClient.Update(result)
		if updateErr == nil {
			fmt.Printf(
				"Updated node %q successfully for CSI driver %q and CSI node name %q",
				k8sNodeName,
				csiDriverName,
				csiDriverNodeId)
		}
		return updateErr // do not wrap error
	})
	if retryErr != nil {
		return fmt.Errorf("node update failed: %v", retryErr)
	}
	return nil
}

// Fetches Kubernetes node API object corresponding to k8sNodeName.
// If the csiDriverName is present in the node annotation, it is removed.
func verifyAndDeleteNodeId(
	k8sNodeName string,
	k8sNodesClient corev1.NodeInterface,
	csiDriverName string) error {
	retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
		// Retrieve the latest version of Node before attempting update, so that
		// existing changes are not overwritten. RetryOnConflict uses
		// exponential backoff to avoid exhausting the apiserver.
		result, getErr := k8sNodesClient.Get(k8sNodeName, metav1.GetOptions{})
		if getErr != nil {
			glog.Errorf("failed to get latest version of Node: %v", getErr)
			return getErr // do not wrap error
		}

		var previousAnnotationValue string
		if result.ObjectMeta.Annotations != nil {
			previousAnnotationValue =
				result.ObjectMeta.Annotations[annotationKey]
			glog.V(3).Infof(
				"previousAnnotationValue=%q", previousAnnotationValue)
		}

		existingDriverMap := map[string]string{}
		if previousAnnotationValue == "" {
			// Annotation is not set, nothing to clean up
			glog.V(1).Infof(
				"The key %q does not exist in node %q annotation, no need to cleanup.",
				csiDriverName,
				annotationKey)
			return nil
		}

		// Parse previousAnnotationValue as JSON
		if err := json.Unmarshal([]byte(previousAnnotationValue), &existingDriverMap); err != nil {
			return fmt.Errorf(
				"failed to parse node's %q annotation value (%q) err=%v",
				annotationKey,
				previousAnnotationValue,
				err)
		}

		if _, ok := existingDriverMap[csiDriverName]; !ok {
			// Driver is not in the annotation, nothing to clean up
			glog.V(1).Infof(
				"The key %q does not exist in node %q annotation, no need to cleanup: %v",
				csiDriverName,
				annotationKey,
				previousAnnotationValue)
			return nil
		}

		// Remove the driver from the annotation value
		delete(existingDriverMap, csiDriverName)
		jsonObj, err := json.Marshal(existingDriverMap)
		if err != nil {
			return fmt.Errorf(
				"failed while trying to remove key %q from node %q annotation. Existing data: %v",
				csiDriverName,
				annotationKey,
				previousAnnotationValue)
		}

		result.ObjectMeta.Annotations = cloneAndAddAnnotation(
			result.ObjectMeta.Annotations,
			annotationKey,
			string(jsonObj))
		_, updateErr := k8sNodesClient.Update(result)
		if updateErr == nil {
			fmt.Printf(
				"Updated node %q annotation to remove CSI driver %q.",
				k8sNodeName,
				csiDriverName)
		}
		return updateErr // do not wrap error
	})
	if retryErr != nil {
		return fmt.Errorf("node update failed: %v", retryErr)
	}
	return nil
}
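The update loop above is the standard client-go read-modify-write pattern: re-fetch the Node on every attempt so a conflicting writer's changes are never clobbered, and let retry.RetryOnConflict back off and retry on 409s. A compact sketch of just that pattern, assuming the pre-context client-go signatures vendored here (the function name and annotation value are illustrative):

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/util/retry"
)

// setDriverAnnotation applies the same loop as verifyAndAddNodeId: each
// attempt re-reads the Node, mutates a fresh copy of the annotation, and
// retries automatically if the Update hits a resource-version conflict.
func setDriverAnnotation(nodes corev1.NodeInterface, nodeName, value string) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		node, err := nodes.Get(nodeName, metav1.GetOptions{})
		if err != nil {
			return err
		}
		if node.ObjectMeta.Annotations == nil {
			node.ObjectMeta.Annotations = map[string]string{}
		}
		node.ObjectMeta.Annotations["csi.volume.kubernetes.io/nodeid"] = value
		_, err = nodes.Update(node)
		return err // a Conflict error here triggers another attempt
	})
}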
19
vendor/k8s.io/kubernetes/pkg/volume/doc.go
generated
vendored
@ -1,19 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package volume includes internal representations of external volume types
// as well as utility methods required to mount/unmount volumes to kubelets.
package volume // import "k8s.io/kubernetes/pkg/volume"
55
vendor/k8s.io/kubernetes/pkg/volume/downwardapi/BUILD
generated
vendored
@ -1,55 +0,0 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = ["downwardapi.go"],
    importpath = "k8s.io/kubernetes/pkg/volume/downwardapi",
    deps = [
        "//pkg/api/v1/resource:go_default_library",
        "//pkg/fieldpath:go_default_library",
        "//pkg/util/strings:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["downwardapi_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//pkg/fieldpath:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/empty_dir:go_default_library",
        "//pkg/volume/testing:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
        "//vendor/k8s.io/client-go/util/testing:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
14
vendor/k8s.io/kubernetes/pkg/volume/downwardapi/OWNERS
generated
vendored
@ -1,14 +0,0 @@
approvers:
- pmorie
- saad-ali
- thockin
- matchstick
reviewers:
- ivan4th
- rata
- sjenning
- saad-ali
- jsafrane
- rootfs
- jingxu97
- msau42
309
vendor/k8s.io/kubernetes/pkg/volume/downwardapi/downwardapi.go
generated
vendored
@ -1,309 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package downwardapi

import (
	"fmt"
	"path"
	"path/filepath"
	"sort"
	"strings"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	utilerrors "k8s.io/apimachinery/pkg/util/errors"
	"k8s.io/kubernetes/pkg/api/v1/resource"
	"k8s.io/kubernetes/pkg/fieldpath"
	utilstrings "k8s.io/kubernetes/pkg/util/strings"
	"k8s.io/kubernetes/pkg/volume"
	volumeutil "k8s.io/kubernetes/pkg/volume/util"

	"github.com/golang/glog"
)

// ProbeVolumePlugins is the entry point for plugin detection in a package.
func ProbeVolumePlugins() []volume.VolumePlugin {
	return []volume.VolumePlugin{&downwardAPIPlugin{}}
}

const (
	downwardAPIPluginName = "kubernetes.io/downward-api"
)

// downwardAPIPlugin implements the VolumePlugin interface.
type downwardAPIPlugin struct {
	host volume.VolumeHost
}

var _ volume.VolumePlugin = &downwardAPIPlugin{}

func wrappedVolumeSpec() volume.Spec {
	return volume.Spec{
		Volume: &v1.Volume{VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumMemory}}},
	}
}

func (plugin *downwardAPIPlugin) Init(host volume.VolumeHost) error {
	plugin.host = host
	return nil
}

func (plugin *downwardAPIPlugin) GetPluginName() string {
	return downwardAPIPluginName
}

func (plugin *downwardAPIPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
	volumeSource, _ := getVolumeSource(spec)
	if volumeSource == nil {
		return "", fmt.Errorf("Spec does not reference a DownwardAPI volume type")
	}

	// Return user defined volume name, since this is an ephemeral volume type
	return spec.Name(), nil
}

func (plugin *downwardAPIPlugin) CanSupport(spec *volume.Spec) bool {
	return spec.Volume != nil && spec.Volume.DownwardAPI != nil
}

func (plugin *downwardAPIPlugin) RequiresRemount() bool {
	return true
}

func (plugin *downwardAPIPlugin) SupportsMountOption() bool {
	return false
}

func (plugin *downwardAPIPlugin) SupportsBulkVolumeVerification() bool {
	return false
}

func (plugin *downwardAPIPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
	v := &downwardAPIVolume{
		volName: spec.Name(),
		items:   spec.Volume.DownwardAPI.Items,
		pod:     pod,
		podUID:  pod.UID,
		plugin:  plugin,
	}
	return &downwardAPIVolumeMounter{
		downwardAPIVolume: v,
		source:            *spec.Volume.DownwardAPI,
		opts:              &opts,
	}, nil
}

func (plugin *downwardAPIPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
	return &downwardAPIVolumeUnmounter{
		&downwardAPIVolume{
			volName: volName,
			podUID:  podUID,
			plugin:  plugin,
		},
	}, nil
}

func (plugin *downwardAPIPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
	downwardAPIVolume := &v1.Volume{
		Name: volumeName,
		VolumeSource: v1.VolumeSource{
			DownwardAPI: &v1.DownwardAPIVolumeSource{},
		},
	}
	return volume.NewSpecFromVolume(downwardAPIVolume), nil
}

// downwardAPIVolume retrieves downward API data and places it into the volume on the host.
type downwardAPIVolume struct {
	volName string
	items   []v1.DownwardAPIVolumeFile
	pod     *v1.Pod
	podUID  types.UID // TODO: remove this redundancy as soon NewUnmounter func will have *v1.POD and not only types.UID
	plugin  *downwardAPIPlugin
	volume.MetricsNil
}

// downwardAPIVolumeMounter fetches info from downward API from the pod
// and dumps it in files
type downwardAPIVolumeMounter struct {
	*downwardAPIVolume
	source v1.DownwardAPIVolumeSource
	opts   *volume.VolumeOptions
}

// downwardAPIVolumeMounter implements volume.Mounter interface
var _ volume.Mounter = &downwardAPIVolumeMounter{}

// downward API volumes are always ReadOnlyManaged
func (d *downwardAPIVolume) GetAttributes() volume.Attributes {
	return volume.Attributes{
		ReadOnly:        true,
		Managed:         true,
		SupportsSELinux: true,
	}
}

// Checks prior to mount operations to verify that the required components (binaries, etc.)
// to mount the volume are available on the underlying node.
// If not, it returns an error
func (b *downwardAPIVolumeMounter) CanMount() error {
	return nil
}

// SetUp puts in place the volume plugin.
// This function is not idempotent by design. We want the data to be refreshed periodically.
// The internal sync interval of kubelet will drive the refresh of data.
// TODO: Add volume specific ticker and refresh loop
func (b *downwardAPIVolumeMounter) SetUp(fsGroup *int64) error {
	return b.SetUpAt(b.GetPath(), fsGroup)
}

func (b *downwardAPIVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
	glog.V(3).Infof("Setting up a downwardAPI volume %v for pod %v/%v at %v", b.volName, b.pod.Namespace, b.pod.Name, dir)
	// Wrap EmptyDir. Here we rely on the idempotency of the wrapped plugin to avoid repeatedly mounting
	wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec(), b.pod, *b.opts)
	if err != nil {
		glog.Errorf("Couldn't setup downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error())
		return err
	}

	data, err := CollectData(b.source.Items, b.pod, b.plugin.host, b.source.DefaultMode)
	if err != nil {
		glog.Errorf("Error preparing data for downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error())
		return err
	}

	if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
		glog.Errorf("Unable to setup downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error())
		return err
	}

	if err := volumeutil.MakeNestedMountpoints(b.volName, dir, *b.pod); err != nil {
		return err
	}

	writerContext := fmt.Sprintf("pod %v/%v volume %v", b.pod.Namespace, b.pod.Name, b.volName)
	writer, err := volumeutil.NewAtomicWriter(dir, writerContext)
	if err != nil {
		glog.Errorf("Error creating atomic writer: %v", err)
		return err
	}

	err = writer.Write(data)
	if err != nil {
		glog.Errorf("Error writing payload to dir: %v", err)
		return err
	}

	err = volume.SetVolumeOwnership(b, fsGroup)
	if err != nil {
		glog.Errorf("Error applying volume ownership settings for group: %v", fsGroup)
		return err
	}

	return nil
}

// CollectData collects requested downwardAPI in data map.
// Map's key is the requested name of file to dump
// Map's value is the (sorted) content of the field to be dumped in the file.
//
// Note: this function is exported so that it can be called from the projection volume driver
func CollectData(items []v1.DownwardAPIVolumeFile, pod *v1.Pod, host volume.VolumeHost, defaultMode *int32) (map[string]volumeutil.FileProjection, error) {
	if defaultMode == nil {
		return nil, fmt.Errorf("No defaultMode used, not even the default value for it")
	}

	errlist := []error{}
	data := make(map[string]volumeutil.FileProjection)
	for _, fileInfo := range items {
		var fileProjection volumeutil.FileProjection
		fPath := filepath.Clean(fileInfo.Path)
		if fileInfo.Mode != nil {
			fileProjection.Mode = *fileInfo.Mode
		} else {
			fileProjection.Mode = *defaultMode
		}
		if fileInfo.FieldRef != nil {
			// TODO: unify with Kubelet.podFieldSelectorRuntimeValue
			if values, err := fieldpath.ExtractFieldPathAsString(pod, fileInfo.FieldRef.FieldPath); err != nil {
				glog.Errorf("Unable to extract field %s: %s", fileInfo.FieldRef.FieldPath, err.Error())
				errlist = append(errlist, err)
			} else {
				fileProjection.Data = []byte(sortLines(values))
			}
		} else if fileInfo.ResourceFieldRef != nil {
			containerName := fileInfo.ResourceFieldRef.ContainerName
			nodeAllocatable, err := host.GetNodeAllocatable()
			if err != nil {
				errlist = append(errlist, err)
			} else if values, err := resource.ExtractResourceValueByContainerNameAndNodeAllocatable(fileInfo.ResourceFieldRef, pod, containerName, nodeAllocatable); err != nil {
				glog.Errorf("Unable to extract field %s: %s", fileInfo.ResourceFieldRef.Resource, err.Error())
				errlist = append(errlist, err)
			} else {
				fileProjection.Data = []byte(sortLines(values))
			}
		}

		data[fPath] = fileProjection
	}
	return data, utilerrors.NewAggregate(errlist)
}

// sortLines sorts the strings generated from map based data
// (annotations and labels)
func sortLines(values string) string {
	splitted := strings.Split(values, "\n")
	sort.Strings(splitted)
	return strings.Join(splitted, "\n")
}

func (d *downwardAPIVolume) GetPath() string {
	return d.plugin.host.GetPodVolumeDir(d.podUID, utilstrings.EscapeQualifiedNameForDisk(downwardAPIPluginName), d.volName)
}

// downwardAPIVolumeUnmounter handles cleaning up downwardAPI volumes
type downwardAPIVolumeUnmounter struct {
	*downwardAPIVolume
}

// downwardAPIVolumeUnmounter implements volume.Unmounter interface
var _ volume.Unmounter = &downwardAPIVolumeUnmounter{}

func (c *downwardAPIVolumeUnmounter) TearDown() error {
	return c.TearDownAt(c.GetPath())
}

func (c *downwardAPIVolumeUnmounter) TearDownAt(dir string) error {
	return volumeutil.UnmountViaEmptyDir(dir, c.plugin.host, c.volName, wrappedVolumeSpec(), c.podUID)
}

func (b *downwardAPIVolumeMounter) getMetaDir() string {
	return path.Join(b.plugin.host.GetPodPluginDir(b.podUID, utilstrings.EscapeQualifiedNameForDisk(downwardAPIPluginName)), b.volName)
}

func getVolumeSource(spec *volume.Spec) (*v1.DownwardAPIVolumeSource, bool) {
	var readOnly bool
	var volumeSource *v1.DownwardAPIVolumeSource

	if spec.Volume != nil && spec.Volume.DownwardAPI != nil {
		volumeSource = spec.Volume.DownwardAPI
		readOnly = spec.ReadOnly
	}

	return volumeSource, readOnly
}
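CollectData above renders map-valued fields (labels, annotations) to files with one key="value" pair per line, sorted by sortLines so repeated writes are byte-identical when nothing changed. The real rendering lives in pkg/fieldpath's FormatMap, which is not part of this diff; this standalone approximation only shows the sorted key="value" shape:

package main

import (
	"fmt"
	"sort"
	"strings"
)

// formatMap approximates how downward API labels end up on disk:
// one key="value" pair per line, in sorted order.
func formatMap(m map[string]string) string {
	lines := make([]string, 0, len(m))
	for k, v := range m {
		lines = append(lines, fmt.Sprintf("%s=%q", k, v))
	}
	sort.Strings(lines)
	return strings.Join(lines, "\n")
}

func main() {
	fmt.Println(formatMap(map[string]string{"key2": "value2", "key1": "value1"}))
	// key1="value1"
	// key2="value2"
}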
399
vendor/k8s.io/kubernetes/pkg/volume/downwardapi/downwardapi_test.go
generated
vendored
@ -1,399 +0,0 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package downwardapi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"testing"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
utiltesting "k8s.io/client-go/util/testing"
|
||||
"k8s.io/kubernetes/pkg/fieldpath"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
"k8s.io/kubernetes/pkg/volume/empty_dir"
|
||||
volumetest "k8s.io/kubernetes/pkg/volume/testing"
|
||||
)
|
||||
|
||||
const (
|
||||
downwardAPIDir = "..data"
|
||||
testPodUID = types.UID("test_pod_uid")
|
||||
testNamespace = "test_metadata_namespace"
|
||||
testName = "test_metadata_name"
|
||||
)
|
||||
|
||||
func newTestHost(t *testing.T, clientset clientset.Interface) (string, volume.VolumeHost) {
|
||||
tempDir, err := utiltesting.MkTmpdir("downwardApi_volume_test.")
|
||||
if err != nil {
|
||||
t.Fatalf("can't make a temp rootdir: %v", err)
|
||||
}
|
||||
return tempDir, volumetest.NewFakeVolumeHost(tempDir, clientset, empty_dir.ProbeVolumePlugins())
|
||||
}
|
||||
|
||||
func TestCanSupport(t *testing.T) {
|
||||
pluginMgr := volume.VolumePluginMgr{}
|
||||
tmpDir, host := newTestHost(t, nil)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
pluginMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host)
|
||||
|
||||
plugin, err := pluginMgr.FindPluginByName(downwardAPIPluginName)
|
||||
if err != nil {
|
||||
t.Errorf("Can't find the plugin by name")
|
||||
}
|
||||
if plugin.GetPluginName() != downwardAPIPluginName {
|
||||
t.Errorf("Wrong name: %s", plugin.GetPluginName())
|
||||
}
|
||||
if !plugin.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{DownwardAPI: &v1.DownwardAPIVolumeSource{}}}}) {
|
||||
t.Errorf("Expected true")
|
||||
}
|
||||
if plugin.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
|
||||
t.Errorf("Expected false")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDownwardAPI(t *testing.T) {
|
||||
labels1 := map[string]string{
|
||||
"key1": "value1",
|
||||
"key2": "value2",
|
||||
}
|
||||
labels2 := map[string]string{
|
||||
"key1": "value1",
|
||||
"key2": "value2",
|
||||
"key3": "value3",
|
||||
}
|
||||
annotations := map[string]string{
|
||||
"a1": "value1",
|
||||
"a2": "value2",
|
||||
}
|
||||
testCases := []struct {
|
||||
name string
|
||||
files map[string]string
|
||||
modes map[string]int32
|
||||
podLabels map[string]string
|
||||
podAnnotations map[string]string
|
||||
steps []testStep
|
||||
}{
|
||||
{
|
||||
name: "test_labels",
|
||||
files: map[string]string{"labels": "metadata.labels"},
|
||||
podLabels: labels1,
|
||||
steps: []testStep{
|
||||
// for steps that involve files, stepName is also
|
||||
// used as the name of the file to verify
|
||||
verifyMapInFile{stepName{"labels"}, labels1},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "test_annotations",
|
||||
files: map[string]string{"annotations": "metadata.annotations"},
|
||||
podAnnotations: annotations,
|
||||
steps: []testStep{
|
||||
verifyMapInFile{stepName{"annotations"}, annotations},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "test_name",
|
||||
files: map[string]string{"name_file_name": "metadata.name"},
|
||||
steps: []testStep{
|
||||
verifyLinesInFile{stepName{"name_file_name"}, testName},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "test_namespace",
|
||||
files: map[string]string{"namespace_file_name": "metadata.namespace"},
|
||||
steps: []testStep{
|
||||
verifyLinesInFile{stepName{"namespace_file_name"}, testNamespace},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "test_write_twice_no_update",
|
||||
files: map[string]string{"labels": "metadata.labels"},
|
||||
podLabels: labels1,
|
||||
steps: []testStep{
|
||||
reSetUp{stepName{"resetup"}, false, nil},
|
||||
verifyMapInFile{stepName{"labels"}, labels1},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "test_write_twice_with_update",
|
||||
files: map[string]string{"labels": "metadata.labels"},
|
||||
podLabels: labels1,
|
||||
steps: []testStep{
|
||||
reSetUp{stepName{"resetup"}, true, labels2},
|
||||
verifyMapInFile{stepName{"labels"}, labels2},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "test_write_with_unix_path",
|
||||
files: map[string]string{
|
||||
"these/are/my/labels": "metadata.labels",
|
||||
"these/are/your/annotations": "metadata.annotations",
|
||||
},
|
||||
podLabels: labels1,
|
||||
podAnnotations: annotations,
|
||||
steps: []testStep{
|
||||
verifyMapInFile{stepName{"these/are/my/labels"}, labels1},
|
||||
verifyMapInFile{stepName{"these/are/your/annotations"}, annotations},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "test_write_with_two_consecutive_slashes_in_the_path",
|
||||
files: map[string]string{"this//labels": "metadata.labels"},
|
||||
podLabels: labels1,
|
||||
steps: []testStep{
|
||||
verifyMapInFile{stepName{"this/labels"}, labels1},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "test_default_mode",
|
||||
files: map[string]string{"name_file_name": "metadata.name"},
|
||||
steps: []testStep{
|
||||
verifyMode{stepName{"name_file_name"}, 0644},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "test_item_mode",
|
||||
files: map[string]string{"name_file_name": "metadata.name"},
|
||||
modes: map[string]int32{"name_file_name": 0400},
|
||||
steps: []testStep{
|
||||
verifyMode{stepName{"name_file_name"}, 0400},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, testCase := range testCases {
|
||||
test := newDownwardAPITest(t, testCase.name, testCase.files, testCase.podLabels, testCase.podAnnotations, testCase.modes)
|
||||
for _, step := range testCase.steps {
|
||||
test.t.Logf("Test case: %q Step: %q", testCase.name, step.getName())
|
||||
step.run(test)
|
||||
}
|
||||
test.tearDown()
|
||||
}
|
||||
}
|
||||
|
type downwardAPITest struct {
	t          *testing.T
	name       string
	plugin     volume.VolumePlugin
	pod        *v1.Pod
	mounter    volume.Mounter
	volumePath string
	rootDir    string
}

func newDownwardAPITest(t *testing.T, name string, volumeFiles, podLabels, podAnnotations map[string]string, modes map[string]int32) *downwardAPITest {
	defaultMode := int32(0644)
	var files []v1.DownwardAPIVolumeFile
	for path, fieldPath := range volumeFiles {
		file := v1.DownwardAPIVolumeFile{
			Path: path,
			FieldRef: &v1.ObjectFieldSelector{
				FieldPath: fieldPath,
			},
		}
		if mode, found := modes[path]; found {
			file.Mode = &mode
		}
		files = append(files, file)
	}
	podMeta := metav1.ObjectMeta{
		Name:        testName,
		Namespace:   testNamespace,
		Labels:      podLabels,
		Annotations: podAnnotations,
	}
	clientset := fake.NewSimpleClientset(&v1.Pod{ObjectMeta: podMeta})

	pluginMgr := volume.VolumePluginMgr{}
	rootDir, host := newTestHost(t, clientset)
	pluginMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host)
	plugin, err := pluginMgr.FindPluginByName(downwardAPIPluginName)
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}

	volumeSpec := &v1.Volume{
		Name: name,
		VolumeSource: v1.VolumeSource{
			DownwardAPI: &v1.DownwardAPIVolumeSource{
				DefaultMode: &defaultMode,
				Items:       files,
			},
		},
	}
	podMeta.UID = testPodUID
	pod := &v1.Pod{ObjectMeta: podMeta}
	mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
	if mounter == nil {
		t.Fatalf("Got a nil Mounter")
	}

	volumePath := mounter.GetPath()

	err = mounter.SetUp(nil)
	if err != nil {
		t.Errorf("Failed to setup volume: %v", err)
	}

	// downwardAPI volume should create its own empty wrapper path
	podWrapperMetadataDir := fmt.Sprintf("%v/pods/%v/plugins/kubernetes.io~empty-dir/wrapped_%v", rootDir, testPodUID, name)

	if _, err := os.Stat(podWrapperMetadataDir); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, empty-dir wrapper path was not created: %s", podWrapperMetadataDir)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}

	return &downwardAPITest{
		t:          t,
		name:       name,
		plugin:     plugin,
		pod:        pod,
		mounter:    mounter,
		volumePath: volumePath,
		rootDir:    rootDir,
	}
}

func (test *downwardAPITest) tearDown() {
	unmounter, err := test.plugin.NewUnmounter(test.name, testPodUID)
	if err != nil {
		test.t.Errorf("Failed to make a new Unmounter: %v", err)
	}
	if unmounter == nil {
		test.t.Fatalf("Got a nil Unmounter")
	}

	if err := unmounter.TearDown(); err != nil {
		test.t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(test.volumePath); err == nil {
		test.t.Errorf("TearDown() failed, volume path still exists: %s", test.volumePath)
	} else if !os.IsNotExist(err) {
		test.t.Errorf("TearDown() failed: %v", err)
	}
	os.RemoveAll(test.rootDir)
}

// testStep represents a named step of downwardAPITest.
// For steps that deal with files, the step name also serves
// as the name of the file used by the step.
type testStep interface {
	getName() string
	run(*downwardAPITest)
}

type stepName struct {
	name string
}

func (step stepName) getName() string { return step.name }

func doVerifyLinesInFile(t *testing.T, volumePath, filename string, expected string) {
	data, err := ioutil.ReadFile(path.Join(volumePath, filename))
	if err != nil {
		t.Errorf(err.Error())
		return
	}
	actualStr := sortLines(string(data))
	expectedStr := sortLines(expected)
	if actualStr != expectedStr {
		t.Errorf("Found `%s`, expected `%s`", actualStr, expectedStr)
	}
}

type verifyLinesInFile struct {
	stepName
	expected string
}

func (step verifyLinesInFile) run(test *downwardAPITest) {
	doVerifyLinesInFile(test.t, test.volumePath, step.name, step.expected)
}

type verifyMapInFile struct {
	stepName
	expected map[string]string
}

func (step verifyMapInFile) run(test *downwardAPITest) {
	doVerifyLinesInFile(test.t, test.volumePath, step.name, fieldpath.FormatMap(step.expected))
}

type verifyMode struct {
	stepName
	expectedMode int32
}

func (step verifyMode) run(test *downwardAPITest) {
	fileInfo, err := os.Stat(path.Join(test.volumePath, step.name))
	if err != nil {
		test.t.Errorf(err.Error())
		return
	}

	actualMode := fileInfo.Mode()
	expectedMode := os.FileMode(step.expectedMode)
	if actualMode != expectedMode {
		test.t.Errorf("Found mode `%v` expected %v", actualMode, expectedMode)
	}
}

type reSetUp struct {
	stepName
	linkShouldChange bool
	newLabels        map[string]string
}

func (step reSetUp) run(test *downwardAPITest) {
	if step.newLabels != nil {
		test.pod.ObjectMeta.Labels = step.newLabels
	}

	currentTarget, err := os.Readlink(path.Join(test.volumePath, downwardAPIDir))
	if err != nil {
		test.t.Errorf("labels file should be a link... %s\n", err.Error())
	}

	// now re-run SetUp
	if err = test.mounter.SetUp(nil); err != nil {
		test.t.Errorf("Failed to re-setup volume: %v", err)
	}

	// get the target of the link
	currentTarget2, err := os.Readlink(path.Join(test.volumePath, downwardAPIDir))
	if err != nil {
		test.t.Errorf(".current should be a link... %s\n", err.Error())
	}

	switch {
	case step.linkShouldChange && currentTarget2 == currentTarget:
		test.t.Errorf("Got an update between the two SetUps... target link should NOT be the same\n")
	case !step.linkShouldChange && currentTarget2 != currentTarget:
		test.t.Errorf("No update between the two SetUps... target link should be the same %s %s\n",
			currentTarget, currentTarget2)
	}
}
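The step types above amount to a small table-driven harness: a fixture is built once with newDownwardAPITest and a list of named steps is then run against it. A minimal sketch of a driver, assuming it lives in the same test package so the types above are in scope; the file names, labels, and expectations below are invented for illustration:

// Hypothetical driver for the step types above; values are illustrative only.
func runDownwardAPISteps(t *testing.T) {
	labels := map[string]string{"key1": "value1"}
	test := newDownwardAPITest(t, "test_labels",
		map[string]string{"labels": "metadata.labels"}, // volume file -> fieldRef
		labels, nil /* annotations */, nil /* per-file modes */)
	defer test.tearDown()

	steps := []testStep{
		verifyMapInFile{stepName{"labels"}, labels},
		reSetUp{stepName{"resetup"}, false /* linkShouldChange */, nil /* newLabels */},
	}
	for _, step := range steps {
		step.run(test)
	}
}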
106 vendor/k8s.io/kubernetes/pkg/volume/empty_dir/BUILD generated vendored
@@ -1,106 +0,0 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = [
        "doc.go",
        "empty_dir.go",
    ] + select({
        "@io_bazel_rules_go//go/platform:android": [
            "empty_dir_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:darwin": [
            "empty_dir_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:dragonfly": [
            "empty_dir_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:freebsd": [
            "empty_dir_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:linux": [
            "empty_dir_linux.go",
        ],
        "@io_bazel_rules_go//go/platform:nacl": [
            "empty_dir_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:netbsd": [
            "empty_dir_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:openbsd": [
            "empty_dir_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:plan9": [
            "empty_dir_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:solaris": [
            "empty_dir_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:windows": [
            "empty_dir_unsupported.go",
        ],
        "//conditions:default": [],
    }),
    importpath = "k8s.io/kubernetes/pkg/volume/empty_dir",
    deps = [
        "//pkg/apis/core/v1/helper:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/util/strings:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
    ] + select({
        "@io_bazel_rules_go//go/platform:linux": [
            "//vendor/golang.org/x/sys/unix:go_default_library",
        ],
        "//conditions:default": [],
    }),
)

go_test(
    name = "go_default_test",
    srcs = select({
        "@io_bazel_rules_go//go/platform:linux": [
            "empty_dir_test.go",
        ],
        "//conditions:default": [],
    }),
    embed = [":go_default_library"],
    deps = select({
        "@io_bazel_rules_go//go/platform:linux": [
            "//pkg/util/mount:go_default_library",
            "//pkg/volume:go_default_library",
            "//pkg/volume/testing:go_default_library",
            "//pkg/volume/util:go_default_library",
            "//vendor/k8s.io/api/core/v1:go_default_library",
            "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
            "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
            "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
            "//vendor/k8s.io/client-go/util/testing:go_default_library",
        ],
        "//conditions:default": [],
    }),
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
14 vendor/k8s.io/kubernetes/pkg/volume/empty_dir/OWNERS generated vendored
@@ -1,14 +0,0 @@
approvers:
- pmorie
- saad-ali
- thockin
- matchstick
reviewers:
- ivan4th
- rata
- sjenning
- saad-ali
- jsafrane
- rootfs
- jingxu97
- msau42
19 vendor/k8s.io/kubernetes/pkg/volume/empty_dir/doc.go generated vendored
@@ -1,19 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package empty_dir contains the internal representation of emptyDir
// volumes.
package empty_dir // import "k8s.io/kubernetes/pkg/volume/empty_dir"
432 vendor/k8s.io/kubernetes/pkg/volume/empty_dir/empty_dir.go generated vendored
@@ -1,432 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package empty_dir

import (
	"fmt"
	"os"
	"path"

	"github.com/golang/glog"
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
	"k8s.io/kubernetes/pkg/util/mount"
	stringsutil "k8s.io/kubernetes/pkg/util/strings"
	"k8s.io/kubernetes/pkg/volume"
	volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

// TODO: in the near future, this will be changed to be more restrictive
// and the group will be set to allow containers to use emptyDir volumes
// from the group attribute.
//
// http://issue.k8s.io/2630
const perm os.FileMode = 0777

// This is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
	return []volume.VolumePlugin{
		&emptyDirPlugin{nil},
	}
}

type emptyDirPlugin struct {
	host volume.VolumeHost
}

var _ volume.VolumePlugin = &emptyDirPlugin{}

const (
	emptyDirPluginName           = "kubernetes.io/empty-dir"
	hugePagesPageSizeMountOption = "pagesize"
)

func getPath(uid types.UID, volName string, host volume.VolumeHost) string {
	return host.GetPodVolumeDir(uid, stringsutil.EscapeQualifiedNameForDisk(emptyDirPluginName), volName)
}

func (plugin *emptyDirPlugin) Init(host volume.VolumeHost) error {
	plugin.host = host

	return nil
}

func (plugin *emptyDirPlugin) GetPluginName() string {
	return emptyDirPluginName
}

func (plugin *emptyDirPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
	volumeSource, _ := getVolumeSource(spec)
	if volumeSource == nil {
		return "", fmt.Errorf("Spec does not reference an EmptyDir volume type")
	}

	// Return the user-defined volume name, since this is an ephemeral volume type
	return spec.Name(), nil
}

func (plugin *emptyDirPlugin) CanSupport(spec *volume.Spec) bool {
	if spec.Volume != nil && spec.Volume.EmptyDir != nil {
		return true
	}
	return false
}

func (plugin *emptyDirPlugin) RequiresRemount() bool {
	return false
}

func (plugin *emptyDirPlugin) SupportsMountOption() bool {
	return false
}

func (plugin *emptyDirPlugin) SupportsBulkVolumeVerification() bool {
	return false
}

func (plugin *emptyDirPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
	return plugin.newMounterInternal(spec, pod, plugin.host.GetMounter(plugin.GetPluginName()), &realMountDetector{plugin.host.GetMounter(plugin.GetPluginName())}, opts)
}

func (plugin *emptyDirPlugin) newMounterInternal(spec *volume.Spec, pod *v1.Pod, mounter mount.Interface, mountDetector mountDetector, opts volume.VolumeOptions) (volume.Mounter, error) {
	medium := v1.StorageMediumDefault

	if spec.Volume.EmptyDir != nil { // Support a non-specified source as EmptyDir.
		medium = spec.Volume.EmptyDir.Medium
	}

	return &emptyDir{
		pod:             pod,
		volName:         spec.Name(),
		medium:          medium,
		mounter:         mounter,
		mountDetector:   mountDetector,
		plugin:          plugin,
		MetricsProvider: volume.NewMetricsDu(getPath(pod.UID, spec.Name(), plugin.host)),
	}, nil
}

func (plugin *emptyDirPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
	// Inject real implementations here, test through the internal function.
	return plugin.newUnmounterInternal(volName, podUID, plugin.host.GetMounter(plugin.GetPluginName()), &realMountDetector{plugin.host.GetMounter(plugin.GetPluginName())})
}

func (plugin *emptyDirPlugin) newUnmounterInternal(volName string, podUID types.UID, mounter mount.Interface, mountDetector mountDetector) (volume.Unmounter, error) {
	ed := &emptyDir{
		pod:             &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: podUID}},
		volName:         volName,
		medium:          v1.StorageMediumDefault, // might be changed later
		mounter:         mounter,
		mountDetector:   mountDetector,
		plugin:          plugin,
		MetricsProvider: volume.NewMetricsDu(getPath(podUID, volName, plugin.host)),
	}
	return ed, nil
}

func (plugin *emptyDirPlugin) ConstructVolumeSpec(volName, mountPath string) (*volume.Spec, error) {
	emptyDirVolume := &v1.Volume{
		Name: volName,
		VolumeSource: v1.VolumeSource{
			EmptyDir: &v1.EmptyDirVolumeSource{},
		},
	}
	return volume.NewSpecFromVolume(emptyDirVolume), nil
}

// mountDetector abstracts how to find what kind of mount a path is backed by.
type mountDetector interface {
	// GetMountMedium determines what type of medium a given path is backed
	// by and whether that path is a mount point. For example, if this
	// returns (v1.StorageMediumMemory, false, nil), the caller knows that the path is
	// on a memory FS (tmpfs on Linux) but is not the root mountpoint of
	// that tmpfs.
	GetMountMedium(path string) (v1.StorageMedium, bool, error)
}

// EmptyDir volumes are temporary directories exposed to the pod.
// These do not persist beyond the lifetime of a pod.
type emptyDir struct {
	pod           *v1.Pod
	volName       string
	medium        v1.StorageMedium
	mounter       mount.Interface
	mountDetector mountDetector
	plugin        *emptyDirPlugin
	volume.MetricsProvider
}

func (ed *emptyDir) GetAttributes() volume.Attributes {
	return volume.Attributes{
		ReadOnly:        false,
		Managed:         true,
		SupportsSELinux: true,
	}
}

// CanMount checks, prior to mount operations, that the required components
// (binaries, etc.) to mount the volume are available on the underlying node.
// If not, it returns an error.
func (b *emptyDir) CanMount() error {
	return nil
}

// SetUp creates a new directory.
func (ed *emptyDir) SetUp(fsGroup *int64) error {
	return ed.SetUpAt(ed.GetPath(), fsGroup)
}

// SetUpAt creates a new directory.
func (ed *emptyDir) SetUpAt(dir string, fsGroup *int64) error {
	notMnt, err := ed.mounter.IsLikelyNotMountPoint(dir)
	// Getting an os.IsNotExist err here is a contingency; the directory
	// may not exist yet, in which case, setup should run.
	if err != nil && !os.IsNotExist(err) {
		return err
	}

	// If the plugin readiness file is present for this volume, and the
	// storage medium is the default, then the volume is ready. If the
	// medium is memory, and a mountpoint is present, then the volume is
	// ready.
	if volumeutil.IsReady(ed.getMetaDir()) {
		if ed.medium == v1.StorageMediumMemory && !notMnt {
			return nil
		} else if ed.medium == v1.StorageMediumDefault {
			return nil
		}
	}

	switch ed.medium {
	case v1.StorageMediumDefault:
		err = ed.setupDir(dir)
	case v1.StorageMediumMemory:
		err = ed.setupTmpfs(dir)
	case v1.StorageMediumHugePages:
		err = ed.setupHugepages(dir)
	default:
		err = fmt.Errorf("unknown storage medium %q", ed.medium)
	}

	volume.SetVolumeOwnership(ed, fsGroup)

	if err == nil {
		volumeutil.SetReady(ed.getMetaDir())
	}

	return err
}

// setupTmpfs creates a tmpfs mount at the specified directory.
func (ed *emptyDir) setupTmpfs(dir string) error {
	if ed.mounter == nil {
		return fmt.Errorf("memory storage requested, but mounter is nil")
	}
	if err := ed.setupDir(dir); err != nil {
		return err
	}
	// Make SetUp idempotent.
	medium, isMnt, err := ed.mountDetector.GetMountMedium(dir)
	if err != nil {
		return err
	}
	// If the directory is a mountpoint with medium memory, there is no
	// work to do since we are already in the desired state.
	if isMnt && medium == v1.StorageMediumMemory {
		return nil
	}

	glog.V(3).Infof("pod %v: mounting tmpfs for volume %v", ed.pod.UID, ed.volName)
	return ed.mounter.Mount("tmpfs", dir, "tmpfs", nil /* options */)
}

// setupHugepages creates a hugepage mount at the specified directory.
func (ed *emptyDir) setupHugepages(dir string) error {
	if ed.mounter == nil {
		return fmt.Errorf("memory storage requested, but mounter is nil")
	}
	if err := ed.setupDir(dir); err != nil {
		return err
	}
	// Make SetUp idempotent.
	medium, isMnt, err := ed.mountDetector.GetMountMedium(dir)
	if err != nil {
		return err
	}
	// If the directory is a mountpoint with medium hugepages, there is no
	// work to do since we are already in the desired state.
	if isMnt && medium == v1.StorageMediumHugePages {
		return nil
	}

	pageSizeMountOption, err := getPageSizeMountOptionFromPod(ed.pod)
	if err != nil {
		return err
	}

	glog.V(3).Infof("pod %v: mounting hugepages for volume %v", ed.pod.UID, ed.volName)
	return ed.mounter.Mount("nodev", dir, "hugetlbfs", []string{pageSizeMountOption})
}

// getPageSizeMountOptionFromPod retrieves the pageSize mount option from the Pod's
// resources and validates that the pageSize is consistent across all containers
// of the given Pod.
func getPageSizeMountOptionFromPod(pod *v1.Pod) (string, error) {
	pageSizeFound := false
	pageSize := resource.Quantity{}
	// In some rare cases init containers can also consume huge pages.
	containers := append(pod.Spec.Containers, pod.Spec.InitContainers...)
	for _, container := range containers {
		// We can take the request because limits and requests must match.
		for requestName := range container.Resources.Requests {
			if v1helper.IsHugePageResourceName(requestName) {
				currentPageSize, err := v1helper.HugePageSizeFromResourceName(requestName)
				if err != nil {
					return "", err
				}
				// All huge-page requests in a pod must use the same page size;
				// any size that differs from the first one discovered is an error.
				if pageSizeFound && pageSize.Cmp(currentPageSize) != 0 {
					return "", fmt.Errorf("multiple pageSizes for huge pages in a single PodSpec")
				}
				pageSize = currentPageSize
				pageSizeFound = true
			}
		}
	}

	if !pageSizeFound {
		return "", fmt.Errorf("hugePages storage requested, but there is no resource request for huge pages.")
	}

	return fmt.Sprintf("%s=%s", hugePagesPageSizeMountOption, pageSize.String()), nil
}

// setupDir creates the directory with the default permissions specified by the perm constant.
func (ed *emptyDir) setupDir(dir string) error {
	// Create the directory if it doesn't already exist.
	if err := os.MkdirAll(dir, perm); err != nil {
		return err
	}

	// stat the directory to read permission bits
	fileinfo, err := os.Lstat(dir)
	if err != nil {
		return err
	}

	if fileinfo.Mode().Perm() != perm.Perm() {
		// If the permissions on the created directory are wrong, the
		// kubelet is probably running with a umask set. In order to
		// avoid clearing the umask for the entire process or locking
		// the thread, clearing the umask, creating the dir, restoring
		// the umask, and unlocking the thread, we do a chmod to set
		// the specific bits we need.
		err := os.Chmod(dir, perm)
		if err != nil {
			return err
		}

		fileinfo, err = os.Lstat(dir)
		if err != nil {
			return err
		}

		if fileinfo.Mode().Perm() != perm.Perm() {
			glog.Errorf("Expected directory %q permissions to be: %s; got: %s", dir, perm.Perm(), fileinfo.Mode().Perm())
		}
	}

	return nil
}

func (ed *emptyDir) GetPath() string {
	return getPath(ed.pod.UID, ed.volName, ed.plugin.host)
}

// TearDown simply discards everything in the directory.
func (ed *emptyDir) TearDown() error {
	return ed.TearDownAt(ed.GetPath())
}

// TearDownAt simply discards everything in the directory.
func (ed *emptyDir) TearDownAt(dir string) error {
	if pathExists, pathErr := volumeutil.PathExists(dir); pathErr != nil {
		return fmt.Errorf("Error checking if path exists: %v", pathErr)
	} else if !pathExists {
		glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir)
		return nil
	}

	// Figure out the medium.
	medium, isMnt, err := ed.mountDetector.GetMountMedium(dir)
	if err != nil {
		return err
	}
	if isMnt {
		if medium == v1.StorageMediumMemory {
			ed.medium = v1.StorageMediumMemory
			return ed.teardownTmpfsOrHugetlbfs(dir)
		} else if medium == v1.StorageMediumHugePages {
			ed.medium = v1.StorageMediumHugePages
			return ed.teardownTmpfsOrHugetlbfs(dir)
		}
	}
	// assume StorageMediumDefault
	return ed.teardownDefault(dir)
}

func (ed *emptyDir) teardownDefault(dir string) error {
	// Renaming the directory is not required anymore because the operation executor
	// now handles duplicate operations on the same volume.
	err := os.RemoveAll(dir)
	if err != nil {
		return err
	}
	return nil
}

func (ed *emptyDir) teardownTmpfsOrHugetlbfs(dir string) error {
	if ed.mounter == nil {
		return fmt.Errorf("memory storage requested, but mounter is nil")
	}
	if err := ed.mounter.Unmount(dir); err != nil {
		return err
	}
	if err := os.RemoveAll(dir); err != nil {
		return err
	}
	return nil
}

func (ed *emptyDir) getMetaDir() string {
	return path.Join(ed.plugin.host.GetPodPluginDir(ed.pod.UID, stringsutil.EscapeQualifiedNameForDisk(emptyDirPluginName)), ed.volName)
}

func getVolumeSource(spec *volume.Spec) (*v1.EmptyDirVolumeSource, bool) {
	var readOnly bool
	var volumeSource *v1.EmptyDirVolumeSource

	if spec.Volume != nil && spec.Volume.EmptyDir != nil {
		volumeSource = spec.Volume.EmptyDir
		readOnly = spec.ReadOnly
	}

	return volumeSource, readOnly
}
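A worked example of the page-size derivation above: for a pod whose only huge-page request is hugepages-2Mi, getPageSizeMountOptionFromPod returns "pagesize=2Mi", which setupHugepages passes straight to the hugetlbfs mount. A minimal sketch, assuming it sits in the same package so the imports above are in scope; the quantities are invented for illustration:

// Sketch: derive the hugetlbfs mount option for a pod requesting 2Mi pages.
func examplePageSizeOption() {
	pod := &v1.Pod{
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						v1.ResourceName("hugepages-2Mi"): resource.MustParse("100Mi"),
					},
				},
			}},
		},
	}
	opt, err := getPageSizeMountOptionFromPod(pod)
	if err != nil {
		glog.Errorf("unexpected error: %v", err)
		return
	}
	glog.Infof("mount option: %s", opt) // logs "mount option: pagesize=2Mi"
}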
60 vendor/k8s.io/kubernetes/pkg/volume/empty_dir/empty_dir_linux.go generated vendored
@@ -1,60 +0,0 @@
// +build linux

/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package empty_dir

import (
	"fmt"

	"github.com/golang/glog"
	"golang.org/x/sys/unix"

	"k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/util/mount"
)

// Defined by Linux: the filesystem type magic numbers for tmpfs and hugetlbfs mounts.
const (
	linuxTmpfsMagic     = 0x01021994
	linuxHugetlbfsMagic = 0x958458f6
)

// realMountDetector implements mountDetector in terms of syscalls.
type realMountDetector struct {
	mounter mount.Interface
}

func (m *realMountDetector) GetMountMedium(path string) (v1.StorageMedium, bool, error) {
	glog.V(5).Infof("Determining mount medium of %v", path)
	notMnt, err := m.mounter.IsLikelyNotMountPoint(path)
	if err != nil {
		return v1.StorageMediumDefault, false, fmt.Errorf("IsLikelyNotMountPoint(%q): %v", path, err)
	}
	buf := unix.Statfs_t{}
	if err := unix.Statfs(path, &buf); err != nil {
		return v1.StorageMediumDefault, false, fmt.Errorf("statfs(%q): %v", path, err)
	}

	glog.V(5).Infof("Statfs_t of %v: %+v", path, buf)
	if buf.Type == linuxTmpfsMagic {
		return v1.StorageMediumMemory, !notMnt, nil
	} else if int64(buf.Type) == linuxHugetlbfsMagic {
		return v1.StorageMediumHugePages, !notMnt, nil
	}
	return v1.StorageMediumDefault, !notMnt, nil
}
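Medium detection on Linux reduces to comparing the statfs(2) filesystem magic number. A standalone sketch of the same classification, assuming the same package so the constants and imports above apply; the error fallback to the default medium is a simplification for illustration:

// Sketch: classify a path by filesystem magic, mirroring GetMountMedium.
func exampleClassifyPath(p string) v1.StorageMedium {
	buf := unix.Statfs_t{}
	if err := unix.Statfs(p, &buf); err != nil {
		return v1.StorageMediumDefault // simplification: swallow the error
	}
	switch {
	case buf.Type == linuxTmpfsMagic:
		return v1.StorageMediumMemory // tmpfs-backed
	case int64(buf.Type) == linuxHugetlbfsMagic:
		return v1.StorageMediumHugePages // hugetlbfs-backed
	default:
		return v1.StorageMediumDefault
	}
}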
441 vendor/k8s.io/kubernetes/pkg/volume/empty_dir/empty_dir_test.go generated vendored
@@ -1,441 +0,0 @@
// +build linux

/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package empty_dir

import (
	"os"
	"path"
	"testing"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	utiltesting "k8s.io/client-go/util/testing"
	"k8s.io/kubernetes/pkg/util/mount"
	"k8s.io/kubernetes/pkg/volume"
	volumetest "k8s.io/kubernetes/pkg/volume/testing"
	"k8s.io/kubernetes/pkg/volume/util"
)

// Construct an instance of a plugin, by name.
func makePluginUnderTest(t *testing.T, plugName, basePath string) volume.VolumePlugin {
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(basePath, nil, nil))

	plug, err := plugMgr.FindPluginByName(plugName)
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	return plug
}

func TestCanSupport(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("emptydirTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	plug := makePluginUnderTest(t, "kubernetes.io/empty-dir", tmpDir)

	if plug.GetPluginName() != "kubernetes.io/empty-dir" {
		t.Errorf("Wrong name: %s", plug.GetPluginName())
	}
	if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}}}) {
		t.Errorf("Expected true")
	}
	if plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
		t.Errorf("Expected false")
	}
}

type fakeMountDetector struct {
	medium  v1.StorageMedium
	isMount bool
}

func (fake *fakeMountDetector) GetMountMedium(path string) (v1.StorageMedium, bool, error) {
	return fake.medium, fake.isMount, nil
}

func TestPluginEmptyRootContext(t *testing.T) {
	doTestPlugin(t, pluginTestConfig{
		medium:                 v1.StorageMediumDefault,
		expectedSetupMounts:    0,
		expectedTeardownMounts: 0})
}

func TestPluginHugetlbfs(t *testing.T) {
	doTestPlugin(t, pluginTestConfig{
		medium:                        v1.StorageMediumHugePages,
		expectedSetupMounts:           1,
		expectedTeardownMounts:        0,
		shouldBeMountedBeforeTeardown: true,
	})
}

type pluginTestConfig struct {
	medium                        v1.StorageMedium
	idempotent                    bool
	expectedSetupMounts           int
	shouldBeMountedBeforeTeardown bool
	expectedTeardownMounts        int
}

// doTestPlugin sets up a volume and tears it back down.
func doTestPlugin(t *testing.T, config pluginTestConfig) {
	basePath, err := utiltesting.MkTmpdir("emptydir_volume_test")
	if err != nil {
		t.Fatalf("can't make a temp rootdir: %v", err)
	}
	defer os.RemoveAll(basePath)

	var (
		volumePath  = path.Join(basePath, "pods/poduid/volumes/kubernetes.io~empty-dir/test-volume")
		metadataDir = path.Join(basePath, "pods/poduid/plugins/kubernetes.io~empty-dir/test-volume")

		plug       = makePluginUnderTest(t, "kubernetes.io/empty-dir", basePath)
		volumeName = "test-volume"
		spec       = &v1.Volume{
			Name:         volumeName,
			VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{Medium: config.medium}},
		}

		physicalMounter = mount.FakeMounter{}
		mountDetector   = fakeMountDetector{}
		pod             = &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				UID: types.UID("poduid"),
			},
			Spec: v1.PodSpec{
				Containers: []v1.Container{
					{
						Resources: v1.ResourceRequirements{
							Requests: v1.ResourceList{
								v1.ResourceName("hugepages-2Mi"): resource.MustParse("100Mi"),
							},
						},
					},
				},
			},
		}
	)

	if config.idempotent {
		physicalMounter.MountPoints = []mount.MountPoint{
			{
				Path: volumePath,
			},
		}
		util.SetReady(metadataDir)
	}

	mounter, err := plug.(*emptyDirPlugin).newMounterInternal(volume.NewSpecFromVolume(spec),
		pod,
		&physicalMounter,
		&mountDetector,
		volume.VolumeOptions{})
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
	if mounter == nil {
		t.Errorf("Got a nil Mounter")
	}

	volPath := mounter.GetPath()
	if volPath != volumePath {
		t.Errorf("Got unexpected path: %s", volPath)
	}

	if err := mounter.SetUp(nil); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}

	// Stat the directory and check the permission bits
	fileinfo, err := os.Stat(volPath)
	if !config.idempotent {
		if err != nil {
			if os.IsNotExist(err) {
				t.Errorf("SetUp() failed, volume path not created: %s", volPath)
			} else {
				t.Errorf("SetUp() failed: %v", err)
			}
		}
		if e, a := perm, fileinfo.Mode().Perm(); e != a {
			t.Errorf("Unexpected file mode for %v: expected: %v, got: %v", volPath, e, a)
		}
	} else if err == nil {
		// If this test is for idempotency and we were able
		// to stat the volume path, it's an error.
		t.Errorf("Volume directory was created unexpectedly")
	}

	// Check the number of mounts performed during setup
	if e, a := config.expectedSetupMounts, len(physicalMounter.Log); e != a {
		t.Errorf("Expected %v physicalMounter calls during setup, got %v", e, a)
	} else if config.expectedSetupMounts == 1 &&
		(physicalMounter.Log[0].Action != mount.FakeActionMount || (physicalMounter.Log[0].FSType != "tmpfs" && physicalMounter.Log[0].FSType != "hugetlbfs")) {
		t.Errorf("Unexpected physicalMounter action during setup: %#v", physicalMounter.Log[0])
	}
	physicalMounter.ResetLog()

	// Make an unmounter for the volume
	teardownMedium := v1.StorageMediumDefault
	if config.medium == v1.StorageMediumMemory {
		teardownMedium = v1.StorageMediumMemory
	}
	unmounterMountDetector := &fakeMountDetector{medium: teardownMedium, isMount: config.shouldBeMountedBeforeTeardown}
	unmounter, err := plug.(*emptyDirPlugin).newUnmounterInternal(volumeName, types.UID("poduid"), &physicalMounter, unmounterMountDetector)
	if err != nil {
		t.Errorf("Failed to make a new Unmounter: %v", err)
	}
	if unmounter == nil {
		t.Errorf("Got a nil Unmounter")
	}

	// Tear down the volume
	if err := unmounter.TearDown(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(volPath); err == nil {
		t.Errorf("TearDown() failed, volume path still exists: %s", volPath)
	} else if !os.IsNotExist(err) {
		t.Errorf("TearDown() failed: %v", err)
	}

	// Check the number of physicalMounter calls during teardown
	if e, a := config.expectedTeardownMounts, len(physicalMounter.Log); e != a {
		t.Errorf("Expected %v physicalMounter calls during teardown, got %v", e, a)
	} else if config.expectedTeardownMounts == 1 && physicalMounter.Log[0].Action != mount.FakeActionUnmount {
		t.Errorf("Unexpected physicalMounter action during teardown: %#v", physicalMounter.Log[0])
	}
	physicalMounter.ResetLog()
}

func TestPluginBackCompat(t *testing.T) {
	basePath, err := utiltesting.MkTmpdir("emptydirTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(basePath)

	plug := makePluginUnderTest(t, "kubernetes.io/empty-dir", basePath)

	spec := &v1.Volume{
		Name: "vol1",
	}
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
	mounter, err := plug.NewMounter(volume.NewSpecFromVolume(spec), pod, volume.VolumeOptions{})
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
	if mounter == nil {
		t.Fatalf("Got a nil Mounter")
	}

	volPath := mounter.GetPath()
	if volPath != path.Join(basePath, "pods/poduid/volumes/kubernetes.io~empty-dir/vol1") {
		t.Errorf("Got unexpected path: %s", volPath)
	}
}

// TestMetrics tests that MetricProvider methods return sane values.
func TestMetrics(t *testing.T) {
	// Create an empty temp directory for the volume
	tmpDir, err := utiltesting.MkTmpdir("empty_dir_test")
	if err != nil {
		t.Fatalf("Can't make a tmp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	plug := makePluginUnderTest(t, "kubernetes.io/empty-dir", tmpDir)

	spec := &v1.Volume{
		Name: "vol1",
	}
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
	mounter, err := plug.NewMounter(volume.NewSpecFromVolume(spec), pod, volume.VolumeOptions{})
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
	if mounter == nil {
		t.Fatalf("Got a nil Mounter")
	}

	// Need to create the subdirectory
	os.MkdirAll(mounter.GetPath(), 0755)

	expectedEmptyDirUsage, err := volumetest.FindEmptyDirectoryUsageOnTmpfs()
	if err != nil {
		t.Errorf("Unexpected error finding expected empty directory usage on tmpfs: %v", err)
	}

	// TODO(pwittroc): Move this into a reusable testing utility
	metrics, err := mounter.GetMetrics()
	if err != nil {
		t.Errorf("Unexpected error when calling GetMetrics %v", err)
	}
	if e, a := expectedEmptyDirUsage.Value(), metrics.Used.Value(); e != a {
		t.Errorf("Unexpected value for empty directory; expected %v, got %v", e, a)
	}
	if metrics.Capacity.Value() <= 0 {
		t.Errorf("Expected Capacity to be greater than 0")
	}
	if metrics.Available.Value() <= 0 {
		t.Errorf("Expected Available to be greater than 0")
	}
}

func TestGetHugePagesMountOptions(t *testing.T) {
	testCases := map[string]struct {
		pod            *v1.Pod
		shouldFail     bool
		expectedResult string
	}{
		"testWithProperValues": {
			pod: &v1.Pod{
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Resources: v1.ResourceRequirements{
								Requests: v1.ResourceList{
									v1.ResourceName("hugepages-2Mi"): resource.MustParse("100Mi"),
								},
							},
						},
					},
				},
			},
			shouldFail:     false,
			expectedResult: "pagesize=2Mi",
		},
		"testWithProperValuesAndDifferentPageSize": {
			pod: &v1.Pod{
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Resources: v1.ResourceRequirements{
								Requests: v1.ResourceList{
									v1.ResourceName("hugepages-1Gi"): resource.MustParse("2Gi"),
								},
							},
						},
						{
							Resources: v1.ResourceRequirements{
								Requests: v1.ResourceList{
									v1.ResourceName("hugepages-1Gi"): resource.MustParse("4Gi"),
								},
							},
						},
					},
				},
			},
			shouldFail:     false,
			expectedResult: "pagesize=1Gi",
		},
		"InitContainerAndContainerHasProperValues": {
			pod: &v1.Pod{
				Spec: v1.PodSpec{
					InitContainers: []v1.Container{
						{
							Resources: v1.ResourceRequirements{
								Requests: v1.ResourceList{
									v1.ResourceName("hugepages-1Gi"): resource.MustParse("2Gi"),
								},
							},
						},
						{
							Resources: v1.ResourceRequirements{
								Requests: v1.ResourceList{
									v1.ResourceName("hugepages-1Gi"): resource.MustParse("4Gi"),
								},
							},
						},
					},
				},
			},
			shouldFail:     false,
			expectedResult: "pagesize=1Gi",
		},
		"InitContainerAndContainerHasDifferentPageSizes": {
			pod: &v1.Pod{
				Spec: v1.PodSpec{
					InitContainers: []v1.Container{
						{
							Resources: v1.ResourceRequirements{
								Requests: v1.ResourceList{
									v1.ResourceName("hugepages-2Mi"): resource.MustParse("2Gi"),
								},
							},
						},
						{
							Resources: v1.ResourceRequirements{
								Requests: v1.ResourceList{
									v1.ResourceName("hugepages-1Gi"): resource.MustParse("4Gi"),
								},
							},
						},
					},
				},
			},
			shouldFail:     true,
			expectedResult: "",
		},
		"ContainersWithMultiplePageSizes": {
			pod: &v1.Pod{
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Resources: v1.ResourceRequirements{
								Requests: v1.ResourceList{
									v1.ResourceName("hugepages-1Gi"): resource.MustParse("2Gi"),
								},
							},
						},
						{
							Resources: v1.ResourceRequirements{
								Requests: v1.ResourceList{
									v1.ResourceName("hugepages-2Mi"): resource.MustParse("100Mi"),
								},
							},
						},
					},
				},
			},
			shouldFail:     true,
			expectedResult: "",
		},
		"PodWithNoHugePagesRequest": {
			pod:            &v1.Pod{},
			shouldFail:     true,
			expectedResult: "",
		},
	}

	for testCaseName, testCase := range testCases {
		value, err := getPageSizeMountOptionFromPod(testCase.pod)
		if testCase.shouldFail && err == nil {
			t.Errorf("Expected an error in %v", testCaseName)
		} else if !testCase.shouldFail && err != nil {
			t.Errorf("Unexpected error in %v, got %v", testCaseName, err)
		} else if testCase.expectedResult != value {
			t.Errorf("Unexpected mountOptions for Pod. Expected %v, got %v", testCase.expectedResult, value)
		}
	}
}
33 vendor/k8s.io/kubernetes/pkg/volume/empty_dir/empty_dir_unsupported.go generated vendored
@@ -1,33 +0,0 @@
// +build !linux

/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package empty_dir

import (
	"k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/util/mount"
)

// realMountDetector pretends to implement mountDetector.
type realMountDetector struct {
	mounter mount.Interface
}

func (m *realMountDetector) GetMountMedium(path string) (v1.StorageMedium, bool, error) {
	return v1.StorageMediumDefault, false, nil
}
65 vendor/k8s.io/kubernetes/pkg/volume/fc/BUILD generated vendored
@@ -1,65 +0,0 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = [
        "attacher.go",
        "disk_manager.go",
        "doc.go",
        "fc.go",
        "fc_util.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/volume/fc",
    deps = [
        "//pkg/features:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/util/strings:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//pkg/volume/util/volumepathhandler:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "fc_test.go",
        "fc_util_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//pkg/util/mount:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/testing:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
        "//vendor/k8s.io/client-go/util/testing:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
10 vendor/k8s.io/kubernetes/pkg/volume/fc/OWNERS generated vendored
@@ -1,10 +0,0 @@
approvers:
- rootfs
- saad-ali
reviewers:
- saad-ali
- jsafrane
- rootfs
- jingxu97
- msau42
- mtanino
222 vendor/k8s.io/kubernetes/pkg/volume/fc/attacher.go generated vendored
@@ -1,222 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package fc

import (
	"fmt"
	"os"
	"strconv"
	"strings"
	"time"

	"github.com/golang/glog"
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/pkg/util/mount"
	"k8s.io/kubernetes/pkg/volume"
	volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

type fcAttacher struct {
	host    volume.VolumeHost
	manager diskManager
}

var _ volume.Attacher = &fcAttacher{}

var _ volume.AttachableVolumePlugin = &fcPlugin{}

func (plugin *fcPlugin) NewAttacher() (volume.Attacher, error) {
	return &fcAttacher{
		host:    plugin.host,
		manager: &FCUtil{},
	}, nil
}

func (plugin *fcPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
	mounter := plugin.host.GetMounter(plugin.GetPluginName())
	return mount.GetMountRefs(mounter, deviceMountPath)
}

func (attacher *fcAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
	return "", nil
}

func (attacher *fcAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
	volumesAttachedCheck := make(map[*volume.Spec]bool)
	for _, spec := range specs {
		volumesAttachedCheck[spec] = true
	}

	return volumesAttachedCheck, nil
}

func (attacher *fcAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) {
	mounter, err := volumeSpecToMounter(spec, attacher.host)
	if err != nil {
		glog.Warningf("failed to get fc mounter: %v", err)
		return "", err
	}
	return attacher.manager.AttachDisk(*mounter)
}

func (attacher *fcAttacher) GetDeviceMountPath(
	spec *volume.Spec) (string, error) {
	mounter, err := volumeSpecToMounter(spec, attacher.host)
	if err != nil {
		glog.Warningf("failed to get fc mounter: %v", err)
		return "", err
	}

	return attacher.manager.MakeGlobalPDName(*mounter.fcDisk), nil
}

func (attacher *fcAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
	mounter := attacher.host.GetMounter(fcPluginName)
	notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
	if err != nil {
		if os.IsNotExist(err) {
			if err := os.MkdirAll(deviceMountPath, 0750); err != nil {
				return err
			}
			notMnt = true
		} else {
			return err
		}
	}

	volumeSource, readOnly, err := getVolumeSource(spec)
	if err != nil {
		return err
	}

	options := []string{}
	if readOnly {
		options = append(options, "ro")
	}
	if notMnt {
		diskMounter := &mount.SafeFormatAndMount{Interface: mounter, Exec: attacher.host.GetExec(fcPluginName)}
		mountOptions := volumeutil.MountOptionFromSpec(spec, options...)
		err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions)
		if err != nil {
			os.Remove(deviceMountPath)
			return err
		}
	}
	return nil
}

type fcDetacher struct {
	mounter mount.Interface
	manager diskManager
}

var _ volume.Detacher = &fcDetacher{}

func (plugin *fcPlugin) NewDetacher() (volume.Detacher, error) {
	return &fcDetacher{
		mounter: plugin.host.GetMounter(plugin.GetPluginName()),
		manager: &FCUtil{},
	}, nil
}

func (detacher *fcDetacher) Detach(volumeName string, nodeName types.NodeName) error {
	return nil
}

func (detacher *fcDetacher) UnmountDevice(deviceMountPath string) error {
	// Specify device name for DetachDisk later
	devName, _, err := mount.GetDeviceNameFromMount(detacher.mounter, deviceMountPath)
	if err != nil {
		glog.Errorf("fc: failed to get device from mnt: %s\nError: %v", deviceMountPath, err)
		return err
	}
	// Unmount for deviceMountPath(=globalPDPath)
	err = volumeutil.UnmountPath(deviceMountPath, detacher.mounter)
	if err != nil {
		return fmt.Errorf("fc: failed to unmount: %s\nError: %v", deviceMountPath, err)
	}
	unMounter := volumeSpecToUnmounter(detacher.mounter)
	err = detacher.manager.DetachDisk(*unMounter, devName)
	if err != nil {
		return fmt.Errorf("fc: failed to detach disk: %s\nError: %v", devName, err)
	}
	glog.V(4).Infof("fc: successfully detached disk: %s", devName)
	return nil
}

func volumeSpecToMounter(spec *volume.Spec, host volume.VolumeHost) (*fcDiskMounter, error) {
	fc, readOnly, err := getVolumeSource(spec)
	if err != nil {
		return nil, err
	}
	var lun string
	var wwids []string
	if fc.Lun != nil && len(fc.TargetWWNs) != 0 {
		lun = strconv.Itoa(int(*fc.Lun))
	} else if len(fc.WWIDs) != 0 {
		for _, wwid := range fc.WWIDs {
			wwids = append(wwids, strings.Replace(wwid, " ", "_", -1))
		}
	} else {
		return nil, fmt.Errorf("fc: no fc disk information found. failed to make a new mounter")
	}
	fcDisk := &fcDisk{
		plugin: &fcPlugin{
			host: host,
		},
		wwns:  fc.TargetWWNs,
		lun:   lun,
		wwids: wwids,
		io:    &osIOHandler{},
	}
	// TODO: remove feature gate check after no longer needed
	if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
		volumeMode, err := volumeutil.GetVolumeMode(spec)
		if err != nil {
			return nil, err
		}
		glog.V(5).Infof("fc: volumeSpecToMounter volumeMode %s", volumeMode)
		return &fcDiskMounter{
			fcDisk:     fcDisk,
			fsType:     fc.FSType,
			volumeMode: volumeMode,
			readOnly:   readOnly,
			mounter:    volumeutil.NewSafeFormatAndMountFromHost(fcPluginName, host),
			deviceUtil: volumeutil.NewDeviceHandler(volumeutil.NewIOHandler()),
		}, nil
	}
	return &fcDiskMounter{
		fcDisk:     fcDisk,
		fsType:     fc.FSType,
		readOnly:   readOnly,
		mounter:    volumeutil.NewSafeFormatAndMountFromHost(fcPluginName, host),
		deviceUtil: volumeutil.NewDeviceHandler(volumeutil.NewIOHandler()),
	}, nil
}

func volumeSpecToUnmounter(mounter mount.Interface) *fcDiskUnmounter {
	return &fcDiskUnmounter{
		fcDisk: &fcDisk{
			io: &osIOHandler{},
		},
		mounter:    mounter,
		deviceUtil: volumeutil.NewDeviceHandler(volumeutil.NewIOHandler()),
	}
}
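Note how volumeSpecToMounter normalizes WWIDs by replacing embedded spaces with underscores before building the fcDisk. The same transformation in isolation; the WWID values below are made up for illustration:

// Sketch of the WWID normalization applied in volumeSpecToMounter.
func exampleNormalizeWWIDs() []string {
	wwids := []string{"3600508b4 00105e21", "3600508b400105e22"} // illustrative values
	var normalized []string
	for _, wwid := range wwids {
		normalized = append(normalized, strings.Replace(wwid, " ", "_", -1))
	}
	return normalized // ["3600508b4_00105e21", "3600508b400105e22"]
}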
94 vendor/k8s.io/kubernetes/pkg/volume/fc/disk_manager.go generated vendored
@@ -1,94 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package fc

import (
	"os"

	"github.com/golang/glog"
	"k8s.io/kubernetes/pkg/util/mount"
	"k8s.io/kubernetes/pkg/volume"
)

// diskManager is an abstract interface to disk operations.
type diskManager interface {
	MakeGlobalPDName(disk fcDisk) string
	MakeGlobalVDPDName(disk fcDisk) string
	// AttachDisk attaches the disk to the kubelet's host machine.
	AttachDisk(b fcDiskMounter) (string, error)
	// DetachDisk detaches the disk from the kubelet's host machine.
	DetachDisk(disk fcDiskUnmounter, devicePath string) error
	// DetachBlockFCDisk detaches the block disk from the kubelet's host machine.
	DetachBlockFCDisk(disk fcDiskUnmapper, mntPath, devicePath string) error
}

// diskSetUp is a utility to mount a disk-based filesystem.
func diskSetUp(manager diskManager, b fcDiskMounter, volPath string, mounter mount.Interface, fsGroup *int64) error {
	globalPDPath := manager.MakeGlobalPDName(*b.fcDisk)
	noMnt, err := mounter.IsLikelyNotMountPoint(volPath)

	if err != nil && !os.IsNotExist(err) {
		glog.Errorf("cannot validate mountpoint: %s", volPath)
		return err
	}
	if !noMnt {
		return nil
	}
	if err := os.MkdirAll(volPath, 0750); err != nil {
		glog.Errorf("failed to mkdir: %s", volPath)
		return err
	}
	// Perform a bind mount to the full path to allow duplicate mounts of the same disk.
	options := []string{"bind"}
	if b.readOnly {
		options = append(options, "ro")
	}
	err = mounter.Mount(globalPDPath, volPath, "", options)
	if err != nil {
		glog.Errorf("Failed to bind mount: source:%s, target:%s, err:%v", globalPDPath, volPath, err)
		noMnt, mntErr := b.mounter.IsLikelyNotMountPoint(volPath)
		if mntErr != nil {
			glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
			return err
		}
		if !noMnt {
			if mntErr = b.mounter.Unmount(volPath); mntErr != nil {
				glog.Errorf("Failed to unmount: %v", mntErr)
				return err
			}
			noMnt, mntErr = b.mounter.IsLikelyNotMountPoint(volPath)
			if mntErr != nil {
				glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
				return err
			}
			if !noMnt {
				// will most likely retry on next sync loop.
				glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", volPath)
				return err
			}
		}
		os.Remove(volPath)

		return err
	}

	if !b.readOnly {
		volume.SetVolumeOwnership(&b, fsGroup)
	}

	return nil
}
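The diskManager interface above is what keeps the attacher and fc.go ignorant of the actual SCSI plumbing; FCUtil is the real implementation, and tests can substitute a stub. A hypothetical minimal fake, for illustration only (the type name, field, and paths are invented, not from the original test suite):

// fakeDiskManager is a hypothetical stub satisfying diskManager.
type fakeDiskManager struct {
	attachPath string // device path to report from AttachDisk
}

func (f *fakeDiskManager) MakeGlobalPDName(disk fcDisk) string   { return "/fake/global/" + disk.lun }
func (f *fakeDiskManager) MakeGlobalVDPDName(disk fcDisk) string { return "/fake/globalvd/" + disk.lun }
func (f *fakeDiskManager) AttachDisk(b fcDiskMounter) (string, error) {
	return f.attachPath, nil
}
func (f *fakeDiskManager) DetachDisk(disk fcDiskUnmounter, devicePath string) error { return nil }
func (f *fakeDiskManager) DetachBlockFCDisk(disk fcDiskUnmapper, mntPath, devicePath string) error {
	return nil
}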
19 vendor/k8s.io/kubernetes/pkg/volume/fc/doc.go generated vendored
@@ -1,19 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package fc contains the internal representation of
// Fibre Channel (fc) volumes.
package fc // import "k8s.io/kubernetes/pkg/volume/fc"
526
vendor/k8s.io/kubernetes/pkg/volume/fc/fc.go
generated
vendored
@ -1,526 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package fc

import (
	"fmt"
	"os"
	"strconv"
	"strings"

	"github.com/golang/glog"
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/pkg/util/mount"
	utilstrings "k8s.io/kubernetes/pkg/util/strings"
	"k8s.io/kubernetes/pkg/volume"
	"k8s.io/kubernetes/pkg/volume/util"
	"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
)

// ProbeVolumePlugins is the primary entrypoint for the FC volume plugin.
func ProbeVolumePlugins() []volume.VolumePlugin {
	return []volume.VolumePlugin{&fcPlugin{nil}}
}
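
// Note (added for clarity): kubelet registers in-tree plugins by passing each
// package's ProbeVolumePlugins result to its VolumePluginMgr; the tests below
// exercise the same path via plugMgr.InitPlugins(ProbeVolumePlugins(), ...).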

type fcPlugin struct {
	host volume.VolumeHost
}

var _ volume.VolumePlugin = &fcPlugin{}
var _ volume.PersistentVolumePlugin = &fcPlugin{}
var _ volume.BlockVolumePlugin = &fcPlugin{}

const (
	fcPluginName = "kubernetes.io/fc"
)

func (plugin *fcPlugin) Init(host volume.VolumeHost) error {
	plugin.host = host
	return nil
}

func (plugin *fcPlugin) GetPluginName() string {
	return fcPluginName
}

func (plugin *fcPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
	volumeSource, _, err := getVolumeSource(spec)
	if err != nil {
		return "", err
	}

	// The API server validates these parameters beforehand, but the attach/detach
	// controller creates volume specs without validation. They may be nil or zero
	// length, so check again to avoid unexpected conditions.
	if len(volumeSource.TargetWWNs) != 0 && volumeSource.Lun != nil {
		// TargetWWNs are the FibreChannel target worldwide names.
		return fmt.Sprintf("%v:%v", volumeSource.TargetWWNs, *volumeSource.Lun), nil
	} else if len(volumeSource.WWIDs) != 0 {
		// WWIDs are the FibreChannel World Wide Identifiers.
		return fmt.Sprintf("%v", volumeSource.WWIDs), nil
	}

	return "", err
}

func (plugin *fcPlugin) CanSupport(spec *volume.Spec) bool {
	return (spec.Volume != nil && spec.Volume.FC != nil) || (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.FC != nil)
}

func (plugin *fcPlugin) RequiresRemount() bool {
	return false
}

func (plugin *fcPlugin) SupportsMountOption() bool {
	return false
}

func (plugin *fcPlugin) SupportsBulkVolumeVerification() bool {
	return false
}

func (plugin *fcPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
	return []v1.PersistentVolumeAccessMode{
		v1.ReadWriteOnce,
		v1.ReadOnlyMany,
	}
}

func (plugin *fcPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
	// Inject real implementations here, test through the internal function.
	return plugin.newMounterInternal(spec, pod.UID, &FCUtil{}, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName()))
}

func (plugin *fcPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface, exec mount.Exec) (volume.Mounter, error) {
	// FC volumes used directly in a pod have a ReadOnly flag set by the pod author.
	// FC volumes used as a PersistentVolume get the ReadOnly flag indirectly through
	// the persistent-claim volume used to mount the PV.
	fc, readOnly, err := getVolumeSource(spec)
	if err != nil {
		return nil, err
	}

	wwns, lun, wwids, err := getWwnsLunWwids(fc)
	if err != nil {
		return nil, fmt.Errorf("fc: no fc disk information found. failed to make a new mounter")
	}
	fcDisk := &fcDisk{
		podUID:  podUID,
		volName: spec.Name(),
		wwns:    wwns,
		lun:     lun,
		wwids:   wwids,
		manager: manager,
		io:      &osIOHandler{},
		plugin:  plugin,
	}
	// TODO: remove feature gate check after no longer needed
	if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
		volumeMode, err := util.GetVolumeMode(spec)
		if err != nil {
			return nil, err
		}
		glog.V(5).Infof("fc: newMounterInternal volumeMode %s", volumeMode)
		return &fcDiskMounter{
			fcDisk:     fcDisk,
			fsType:     fc.FSType,
			volumeMode: volumeMode,
			readOnly:   readOnly,
			mounter:    &mount.SafeFormatAndMount{Interface: mounter, Exec: exec},
			deviceUtil: util.NewDeviceHandler(util.NewIOHandler()),
		}, nil
	}
	return &fcDiskMounter{
		fcDisk:     fcDisk,
		fsType:     fc.FSType,
		readOnly:   readOnly,
		mounter:    &mount.SafeFormatAndMount{Interface: mounter, Exec: exec},
		deviceUtil: util.NewDeviceHandler(util.NewIOHandler()),
	}, nil
}
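
// Note (added for clarity): the mount.Interface is wrapped in
// mount.SafeFormatAndMount above so that FCUtil.AttachDisk (fc_util.go) can
// call FormatAndMount, which formats the device before mounting only when it
// carries no existing filesystem.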

func (plugin *fcPlugin) NewBlockVolumeMapper(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.BlockVolumeMapper, error) {
	// If this is called via GenerateUnmapDeviceFunc(), pod is nil.
	// Pass an empty string as a dummy UID, since the UID isn't used in that case.
	var uid types.UID
	if pod != nil {
		uid = pod.UID
	}
	return plugin.newBlockVolumeMapperInternal(spec, uid, &FCUtil{}, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName()))
}

func (plugin *fcPlugin) newBlockVolumeMapperInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface, exec mount.Exec) (volume.BlockVolumeMapper, error) {
	fc, readOnly, err := getVolumeSource(spec)
	if err != nil {
		return nil, err
	}

	wwns, lun, wwids, err := getWwnsLunWwids(fc)
	if err != nil {
		return nil, fmt.Errorf("fc: no fc disk information found. failed to make a new mapper")
	}

	return &fcDiskMapper{
		fcDisk: &fcDisk{
			podUID:  podUID,
			volName: spec.Name(),
			wwns:    wwns,
			lun:     lun,
			wwids:   wwids,
			manager: manager,
			io:      &osIOHandler{},
			plugin:  plugin},
		readOnly:   readOnly,
		mounter:    &mount.SafeFormatAndMount{Interface: mounter, Exec: exec},
		deviceUtil: util.NewDeviceHandler(util.NewIOHandler()),
	}, nil
}

func (plugin *fcPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
	// Inject real implementations here, test through the internal function.
	return plugin.newUnmounterInternal(volName, podUID, &FCUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}

func (plugin *fcPlugin) newUnmounterInternal(volName string, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Unmounter, error) {
	return &fcDiskUnmounter{
		fcDisk: &fcDisk{
			podUID:  podUID,
			volName: volName,
			manager: manager,
			plugin:  plugin,
			io:      &osIOHandler{},
		},
		mounter:    mounter,
		deviceUtil: util.NewDeviceHandler(util.NewIOHandler()),
	}, nil
}

func (plugin *fcPlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (volume.BlockVolumeUnmapper, error) {
	return plugin.newUnmapperInternal(volName, podUID, &FCUtil{})
}

func (plugin *fcPlugin) newUnmapperInternal(volName string, podUID types.UID, manager diskManager) (volume.BlockVolumeUnmapper, error) {
	return &fcDiskUnmapper{
		fcDisk: &fcDisk{
			podUID:  podUID,
			volName: volName,
			manager: manager,
			plugin:  plugin,
			io:      &osIOHandler{},
		},
		deviceUtil: util.NewDeviceHandler(util.NewIOHandler()),
	}, nil
}

func (plugin *fcPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
	// Find globalPDPath from the pod volume directory (mountPath).
	// Examples:
	//   mountPath:    pods/{podUid}/volumes/kubernetes.io~fc/{volumeName}
	//   globalPDPath: plugins/kubernetes.io/fc/50060e801049cfd1-lun-0
	var globalPDPath string
	mounter := plugin.host.GetMounter(plugin.GetPluginName())
	paths, err := mount.GetMountRefs(mounter, mountPath)
	if err != nil {
		return nil, err
	}
	for _, path := range paths {
		if strings.Contains(path, plugin.host.GetPluginDir(fcPluginName)) {
			globalPDPath = path
			break
		}
	}
	// Couldn't fetch globalPDPath
	if len(globalPDPath) == 0 {
		return nil, fmt.Errorf("couldn't fetch globalPDPath. failed to obtain volume spec")
	}
	arr := strings.Split(globalPDPath, "/")
	if len(arr) < 1 {
		return nil, fmt.Errorf("failed to retrieve volume plugin information from globalPDPath: %v", globalPDPath)
	}
	volumeInfo := arr[len(arr)-1]
	// Create the volume from wwn+lun or wwid.
	var fcVolume *v1.Volume
	if strings.Contains(volumeInfo, "-lun-") {
		wwnLun := strings.Split(volumeInfo, "-lun-")
		if len(wwnLun) < 2 {
			return nil, fmt.Errorf("failed to retrieve TargetWWN and Lun. volumeInfo is invalid: %v", volumeInfo)
		}
		lun, err := strconv.Atoi(wwnLun[1])
		if err != nil {
			return nil, err
		}
		lun32 := int32(lun)
		fcVolume = &v1.Volume{
			Name: volumeName,
			VolumeSource: v1.VolumeSource{
				FC: &v1.FCVolumeSource{TargetWWNs: []string{wwnLun[0]}, Lun: &lun32},
			},
		}
		glog.V(5).Infof("ConstructVolumeSpec: TargetWWNs: %v, Lun: %v",
			fcVolume.VolumeSource.FC.TargetWWNs, *fcVolume.VolumeSource.FC.Lun)
	} else {
		fcVolume = &v1.Volume{
			Name: volumeName,
			VolumeSource: v1.VolumeSource{
				FC: &v1.FCVolumeSource{WWIDs: []string{volumeInfo}},
			},
		}
		glog.V(5).Infof("ConstructVolumeSpec: WWIDs: %v", fcVolume.VolumeSource.FC.WWIDs)
	}
	return volume.NewSpecFromVolume(fcVolume), nil
}

// ConstructBlockVolumeSpec creates a new volume.Spec with the following steps:
//   - Search for a file whose name is {pod uuid} under the volume plugin directory.
//   - If a file is found, retrieve volumePluginDependentPath from globalMapPathUUID.
//   - Once volumePluginDependentPath is obtained, store the volume information in a VolumeSource.
// Examples:
//   mapPath:           pods/{podUid}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName}
//   globalMapPathUUID: plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{pod uuid}
func (plugin *fcPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, mapPath string) (*volume.Spec, error) {
	pluginDir := plugin.host.GetVolumeDevicePluginDir(fcPluginName)
	blkutil := volumepathhandler.NewBlockVolumePathHandler()
	globalMapPathUUID, err := blkutil.FindGlobalMapPathUUIDFromPod(pluginDir, mapPath, podUID)
	if err != nil {
		return nil, err
	}
	glog.V(5).Infof("globalMapPathUUID: %v, err: %v", globalMapPathUUID, err)

	// Retrieve volumePluginDependentPath from globalMapPathUUID.
	// globalMapPathUUID examples:
	//   wwn+lun: plugins/kubernetes.io/fc/volumeDevices/50060e801049cfd1-lun-0/{pod uuid}
	//   wwid:    plugins/kubernetes.io/fc/volumeDevices/3600508b400105e210000900000490000/{pod uuid}
	arr := strings.Split(globalMapPathUUID, "/")
	if len(arr) < 2 {
		return nil, fmt.Errorf("failed to retrieve volume plugin information from globalMapPathUUID: %v", globalMapPathUUID)
	}
	l := len(arr) - 2
	volumeInfo := arr[l]

	// Create the volume from wwn+lun or wwid.
	var fcPV *v1.PersistentVolume
	if strings.Contains(volumeInfo, "-lun-") {
		wwnLun := strings.Split(volumeInfo, "-lun-")
		lun, err := strconv.Atoi(wwnLun[1])
		if err != nil {
			return nil, err
		}
		lun32 := int32(lun)
		fcPV = createPersistentVolumeFromFCVolumeSource(volumeName,
			v1.FCVolumeSource{TargetWWNs: []string{wwnLun[0]}, Lun: &lun32})
		glog.V(5).Infof("ConstructBlockVolumeSpec: TargetWWNs: %v, Lun: %v",
			fcPV.Spec.PersistentVolumeSource.FC.TargetWWNs,
			*fcPV.Spec.PersistentVolumeSource.FC.Lun)
	} else {
		fcPV = createPersistentVolumeFromFCVolumeSource(volumeName,
			v1.FCVolumeSource{WWIDs: []string{volumeInfo}})
		glog.V(5).Infof("ConstructBlockVolumeSpec: WWIDs: %v", fcPV.Spec.PersistentVolumeSource.FC.WWIDs)
	}
	return volume.NewSpecFromPersistentVolume(fcPV, false), nil
}

type fcDisk struct {
	volName string
	podUID  types.UID
	portal  string
	wwns    []string
	lun     string
	wwids   []string
	plugin  *fcPlugin
	// Utility interface that provides API calls to the provider to attach/detach disks.
	manager diskManager
	// io handler interface
	io ioHandler
	volume.MetricsNil
}

func (fc *fcDisk) GetPath() string {
	name := fcPluginName
	// safe to use PodVolumeDir now: volume teardown occurs before pod is cleaned up
	return fc.plugin.host.GetPodVolumeDir(fc.podUID, utilstrings.EscapeQualifiedNameForDisk(name), fc.volName)
}

func (fc *fcDisk) fcGlobalMapPath(spec *volume.Spec) (string, error) {
	mounter, err := volumeSpecToMounter(spec, fc.plugin.host)
	if err != nil {
		glog.Warningf("failed to get fc mounter: %v", err)
		return "", err
	}
	return fc.manager.MakeGlobalVDPDName(*mounter.fcDisk), nil
}

func (fc *fcDisk) fcPodDeviceMapPath() (string, string) {
	name := fcPluginName
	return fc.plugin.host.GetPodVolumeDeviceDir(fc.podUID, utilstrings.EscapeQualifiedNameForDisk(name)), fc.volName
}

type fcDiskMounter struct {
	*fcDisk
	readOnly   bool
	fsType     string
	volumeMode v1.PersistentVolumeMode
	mounter    *mount.SafeFormatAndMount
	deviceUtil util.DeviceUtil
}

var _ volume.Mounter = &fcDiskMounter{}

func (b *fcDiskMounter) GetAttributes() volume.Attributes {
	return volume.Attributes{
		ReadOnly:        b.readOnly,
		Managed:         !b.readOnly,
		SupportsSELinux: true,
	}
}

// CanMount checks, prior to mount operations, that the required components
// (binaries, etc.) to mount the volume are available on the underlying node.
// If they are not, it returns an error.
func (b *fcDiskMounter) CanMount() error {
	return nil
}

func (b *fcDiskMounter) SetUp(fsGroup *int64) error {
	return b.SetUpAt(b.GetPath(), fsGroup)
}

func (b *fcDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
	// diskSetUp checks mountpoints and prevents repeated calls.
	err := diskSetUp(b.manager, *b, dir, b.mounter, fsGroup)
	if err != nil {
		glog.Errorf("fc: failed to setup")
	}
	return err
}

type fcDiskUnmounter struct {
	*fcDisk
	mounter    mount.Interface
	deviceUtil util.DeviceUtil
}

var _ volume.Unmounter = &fcDiskUnmounter{}

// TearDown unmounts the bind mount, and detaches the disk only if the disk
// resource was the last reference to that disk on the kubelet.
func (c *fcDiskUnmounter) TearDown() error {
	return c.TearDownAt(c.GetPath())
}

func (c *fcDiskUnmounter) TearDownAt(dir string) error {
	return util.UnmountPath(dir, c.mounter)
}

// Block Volumes Support
type fcDiskMapper struct {
	*fcDisk
	readOnly   bool
	mounter    mount.Interface
	deviceUtil util.DeviceUtil
}

var _ volume.BlockVolumeMapper = &fcDiskMapper{}
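
// Note (added for clarity): SetUpDevice is effectively a no-op for FC; by the
// time the mapper runs, FCUtil.AttachDisk has already discovered the device,
// so only MapDevice below is needed to expose it to the pod. This is a reading
// of the flow in this file, not a documented contract.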
func (b *fcDiskMapper) SetUpDevice() (string, error) {
	return "", nil
}

func (b *fcDiskMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, volumeMapName string, podUID types.UID) error {
	return util.MapBlockVolume(devicePath, globalMapPath, volumeMapPath, volumeMapName, podUID)
}

type fcDiskUnmapper struct {
	*fcDisk
	deviceUtil util.DeviceUtil
}

var _ volume.BlockVolumeUnmapper = &fcDiskUnmapper{}

func (c *fcDiskUnmapper) TearDownDevice(mapPath, devicePath string) error {
	err := c.manager.DetachBlockFCDisk(*c, mapPath, devicePath)
	if err != nil {
		return fmt.Errorf("fc: failed to detach disk: %s\nError: %v", mapPath, err)
	}
	glog.V(4).Infof("fc: %q is unmounted, deleting the directory", mapPath)
	err = os.RemoveAll(mapPath)
	if err != nil {
		return fmt.Errorf("fc: failed to delete the directory: %s\nError: %v", mapPath, err)
	}
	glog.V(4).Infof("fc: successfully detached disk: %s", mapPath)
	return nil
}

// GetGlobalMapPath returns the global map path and error.
// path: plugins/kubernetes.io/{PluginName}/volumeDevices/{WWID}/{podUid}
func (fc *fcDisk) GetGlobalMapPath(spec *volume.Spec) (string, error) {
	return fc.fcGlobalMapPath(spec)
}

// GetPodDeviceMapPath returns the pod device map path and volume name.
// path: pods/{podUid}/volumeDevices/kubernetes.io~fc
// volumeName: pv0001
func (fc *fcDisk) GetPodDeviceMapPath() (string, string) {
	return fc.fcPodDeviceMapPath()
}

func getVolumeSource(spec *volume.Spec) (*v1.FCVolumeSource, bool, error) {
	// FC volumes used directly in a pod have a ReadOnly flag set by the pod author.
	// FC volumes used as a PersistentVolume get the ReadOnly flag indirectly through
	// the persistent-claim volume used to mount the PV.
	if spec.Volume != nil && spec.Volume.FC != nil {
		return spec.Volume.FC, spec.Volume.FC.ReadOnly, nil
	} else if spec.PersistentVolume != nil &&
		spec.PersistentVolume.Spec.FC != nil {
		return spec.PersistentVolume.Spec.FC, spec.ReadOnly, nil
	}

	return nil, false, fmt.Errorf("Spec does not reference a FibreChannel volume type")
}

func createPersistentVolumeFromFCVolumeSource(volumeName string, fc v1.FCVolumeSource) *v1.PersistentVolume {
	block := v1.PersistentVolumeBlock
	return &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name: volumeName,
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: v1.PersistentVolumeSource{
				FC: &fc,
			},
			VolumeMode: &block,
		},
	}
}

func getWwnsLunWwids(fc *v1.FCVolumeSource) ([]string, string, []string, error) {
	var lun string
	var wwids []string
	if fc.Lun != nil && len(fc.TargetWWNs) != 0 {
		lun = strconv.Itoa(int(*fc.Lun))
		return fc.TargetWWNs, lun, wwids, nil
	}
	if len(fc.WWIDs) != 0 {
		for _, wwid := range fc.WWIDs {
			wwids = append(wwids, strings.Replace(wwid, " ", "_", -1))
		}
		return fc.TargetWWNs, lun, wwids, nil
	}
	return nil, "", nil, fmt.Errorf("fc: no fc disk information found")
}
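
// Illustrative examples (not part of the original source): TargetWWNs
// ["500a0981891b8dc5"] with Lun 0 yield wwns ["500a0981891b8dc5"] and lun "0";
// a WWID containing a space, such as "3600508b400105e21 000900000490000", is
// normalized to "3600508b400105e21_000900000490000" to match the underscore
// form udev exposes under /dev/disk/by-id.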
503
vendor/k8s.io/kubernetes/pkg/volume/fc/fc_test.go
generated
vendored
@ -1,503 +0,0 @@
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package fc
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
utiltesting "k8s.io/client-go/util/testing"
|
||||
"k8s.io/kubernetes/pkg/util/mount"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
volumetest "k8s.io/kubernetes/pkg/volume/testing"
|
||||
)
|
||||

func TestCanSupport(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("fc_test")
	if err != nil {
		t.Fatalf("error creating temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))

	plug, err := plugMgr.FindPluginByName("kubernetes.io/fc")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	if plug.GetPluginName() != "kubernetes.io/fc" {
		t.Errorf("Wrong name: %s", plug.GetPluginName())
	}
	if plug.CanSupport(&volume.Spec{}) {
		t.Errorf("Expected false")
	}
	if plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
		t.Errorf("Expected false")
	}
	if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{FC: &v1.FCVolumeSource{}}}}) {
		t.Errorf("Expected true")
	}
	if plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{}}}) {
		t.Errorf("Expected false")
	}
	if plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{}}}}) {
		t.Errorf("Expected false")
	}
	if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{FC: &v1.FCVolumeSource{}}}}}) {
		t.Errorf("Expected true")
	}
}

func TestGetAccessModes(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("fc_test")
	if err != nil {
		t.Fatalf("error creating temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))

	plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/fc")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	if !volumetest.ContainsAccessMode(plug.GetAccessModes(), v1.ReadWriteOnce) || !volumetest.ContainsAccessMode(plug.GetAccessModes(), v1.ReadOnlyMany) {
		t.Errorf("Expected two AccessModeTypes: %s and %s", v1.ReadWriteOnce, v1.ReadOnlyMany)
	}
}

type fakeDiskManager struct {
	tmpDir       string
	attachCalled bool
	detachCalled bool
}

func NewFakeDiskManager() *fakeDiskManager {
	return &fakeDiskManager{
		tmpDir: utiltesting.MkTmpdirOrDie("fc_test"),
	}
}

func (fake *fakeDiskManager) Cleanup() {
	os.RemoveAll(fake.tmpDir)
}

func (fake *fakeDiskManager) MakeGlobalPDName(disk fcDisk) string {
	return fake.tmpDir
}

func (fake *fakeDiskManager) MakeGlobalVDPDName(disk fcDisk) string {
	return fake.tmpDir
}

func (fake *fakeDiskManager) AttachDisk(b fcDiskMounter) (string, error) {
	globalPath := b.manager.MakeGlobalPDName(*b.fcDisk)
	err := os.MkdirAll(globalPath, 0750)
	if err != nil {
		return "", err
	}
	// Simulate the global mount so that the fakeMounter returns the
	// expected number of mounts for the attached disk.
	b.mounter.Mount(globalPath, globalPath, b.fsType, nil)

	fake.attachCalled = true
	return "", nil
}

func (fake *fakeDiskManager) DetachDisk(c fcDiskUnmounter, mntPath string) error {
	globalPath := c.manager.MakeGlobalPDName(*c.fcDisk)
	err := os.RemoveAll(globalPath)
	if err != nil {
		return err
	}
	fake.detachCalled = true
	return nil
}

func (fake *fakeDiskManager) DetachBlockFCDisk(c fcDiskUnmapper, mapPath, devicePath string) error {
	err := os.RemoveAll(mapPath)
	if err != nil {
		return err
	}
	fake.detachCalled = true
	return nil
}

func doTestPlugin(t *testing.T, spec *volume.Spec) {
	tmpDir, err := utiltesting.MkTmpdir("fc_test")
	if err != nil {
		t.Fatalf("error creating temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))

	plug, err := plugMgr.FindPluginByName("kubernetes.io/fc")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	fakeManager := NewFakeDiskManager()
	defer fakeManager.Cleanup()
	fakeMounter := &mount.FakeMounter{}
	fakeExec := mount.NewFakeExec(nil)
	mounter, err := plug.(*fcPlugin).newMounterInternal(spec, types.UID("poduid"), fakeManager, fakeMounter, fakeExec)
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
	if mounter == nil {
		t.Errorf("Got a nil Mounter: %v", err)
	}

	path := mounter.GetPath()
	expectedPath := fmt.Sprintf("%s/pods/poduid/volumes/kubernetes.io~fc/vol1", tmpDir)
	if path != expectedPath {
		t.Errorf("Unexpected path, expected %q, got: %q", expectedPath, path)
	}

	if err := mounter.SetUp(nil); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(path); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, volume path not created: %s", path)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}

	fakeManager2 := NewFakeDiskManager()
	defer fakeManager2.Cleanup()
	unmounter, err := plug.(*fcPlugin).newUnmounterInternal("vol1", types.UID("poduid"), fakeManager2, fakeMounter)
	if err != nil {
		t.Errorf("Failed to make a new Unmounter: %v", err)
	}
	if unmounter == nil {
		t.Errorf("Got a nil Unmounter: %v", err)
	}

	if err := unmounter.TearDown(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(path); err == nil {
		t.Errorf("TearDown() failed, volume path still exists: %s", path)
	} else if !os.IsNotExist(err) {
		t.Errorf("TearDown() failed: %v", err)
	}
}

func doTestPluginNilMounter(t *testing.T, spec *volume.Spec) {
	tmpDir, err := utiltesting.MkTmpdir("fc_test")
	if err != nil {
		t.Fatalf("error creating temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))

	plug, err := plugMgr.FindPluginByName("kubernetes.io/fc")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	fakeManager := NewFakeDiskManager()
	defer fakeManager.Cleanup()
	fakeMounter := &mount.FakeMounter{}
	fakeExec := mount.NewFakeExec(nil)
	mounter, err := plug.(*fcPlugin).newMounterInternal(spec, types.UID("poduid"), fakeManager, fakeMounter, fakeExec)
	if err == nil {
		t.Errorf("Expected an error when making a new Mounter, got nil")
	}
	if mounter != nil {
		t.Errorf("Expected a nil Mounter, got: %v", mounter)
	}
}

func TestPluginVolume(t *testing.T) {
	lun := int32(0)
	vol := &v1.Volume{
		Name: "vol1",
		VolumeSource: v1.VolumeSource{
			FC: &v1.FCVolumeSource{
				TargetWWNs: []string{"500a0981891b8dc5"},
				FSType:     "ext4",
				Lun:        &lun,
			},
		},
	}
	doTestPlugin(t, volume.NewSpecFromVolume(vol))
}

func TestPluginPersistentVolume(t *testing.T) {
	lun := int32(0)
	vol := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name: "vol1",
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: v1.PersistentVolumeSource{
				FC: &v1.FCVolumeSource{
					TargetWWNs: []string{"500a0981891b8dc5"},
					FSType:     "ext4",
					Lun:        &lun,
				},
			},
		},
	}
	doTestPlugin(t, volume.NewSpecFromPersistentVolume(vol, false))
}

func TestPluginVolumeWWIDs(t *testing.T) {
	vol := &v1.Volume{
		Name: "vol1",
		VolumeSource: v1.VolumeSource{
			FC: &v1.FCVolumeSource{
				WWIDs:  []string{"3600508b400105e210000900000490000"},
				FSType: "ext4",
			},
		},
	}
	doTestPlugin(t, volume.NewSpecFromVolume(vol))
}

func TestPluginPersistentVolumeWWIDs(t *testing.T) {
	vol := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name: "vol1",
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: v1.PersistentVolumeSource{
				FC: &v1.FCVolumeSource{
					WWIDs:  []string{"3600508b400105e21 000900000490000"},
					FSType: "ext4",
				},
			},
		},
	}
	doTestPlugin(t, volume.NewSpecFromPersistentVolume(vol, false))
}

func TestPluginVolumeNoDiskInfo(t *testing.T) {
	vol := &v1.Volume{
		Name: "vol1",
		VolumeSource: v1.VolumeSource{
			FC: &v1.FCVolumeSource{
				FSType: "ext4",
			},
		},
	}
	doTestPluginNilMounter(t, volume.NewSpecFromVolume(vol))
}

func TestPluginPersistentVolumeNoDiskInfo(t *testing.T) {
	vol := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name: "vol1",
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: v1.PersistentVolumeSource{
				FC: &v1.FCVolumeSource{
					FSType: "ext4",
				},
			},
		},
	}
	doTestPluginNilMounter(t, volume.NewSpecFromPersistentVolume(vol, false))
}

func TestPersistentClaimReadOnlyFlag(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("fc_test")
	if err != nil {
		t.Fatalf("error creating temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	lun := int32(0)
	pv := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name: "pvA",
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: v1.PersistentVolumeSource{
				FC: &v1.FCVolumeSource{
					TargetWWNs: []string{"some_wwn"},
					FSType:     "ext4",
					Lun:        &lun,
				},
			},
			ClaimRef: &v1.ObjectReference{
				Name: "claimA",
			},
		},
	}

	claim := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "claimA",
			Namespace: "nsA",
		},
		Spec: v1.PersistentVolumeClaimSpec{
			VolumeName: "pvA",
		},
		Status: v1.PersistentVolumeClaimStatus{
			Phase: v1.ClaimBound,
		},
	}

	client := fake.NewSimpleClientset(pv, claim)

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, client, nil))
	plug, _ := plugMgr.FindPluginByName(fcPluginName)

	// The readOnly bool is supplied by the persistent-claim volume source when its mounter creates other volumes.
	spec := volume.NewSpecFromPersistentVolume(pv, true)
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
	mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
	if mounter == nil {
		t.Fatalf("Got a nil Mounter")
	}

	if !mounter.GetAttributes().ReadOnly {
		t.Errorf("Expected true for mounter.IsReadOnly")
	}
}

func Test_getWwnsLun(t *testing.T) {
	num := int32(0)
	fc := &v1.FCVolumeSource{
		TargetWWNs: []string{"500a0981891b8dc5"},
		FSType:     "ext4",
		Lun:        &num,
	}
	wwn, lun, _, err := getWwnsLunWwids(fc)
	// If no wwn and lun are found, fail.
	if (len(wwn) == 0 && lun != "0") || err != nil {
		t.Errorf("no fc disk found")
	}
}

func Test_getWwids(t *testing.T) {
	fc := &v1.FCVolumeSource{
		FSType: "ext4",
		WWIDs:  []string{"3600508b400105e210000900000490000"},
	}
	_, _, wwid, err := getWwnsLunWwids(fc)
	// If no wwid is found, fail.
	if len(wwid) == 0 || err != nil {
		t.Errorf("no fc disk found")
	}
}

func Test_getWwnsLunWwidsError(t *testing.T) {
	fc := &v1.FCVolumeSource{
		FSType: "ext4",
	}
	wwn, lun, wwid, err := getWwnsLunWwids(fc)
	// Expect no wwn, lun, or wwid, and a non-nil error.
	if (len(wwn) != 0 && lun != "" && len(wwid) != 0) || err == nil {
		t.Errorf("unexpected fc disk found")
	}
}

func Test_ConstructVolumeSpec(t *testing.T) {
	if runtime.GOOS == "darwin" {
		t.Skipf("Test_ConstructVolumeSpec is not supported on GOOS=%s", runtime.GOOS)
	}
	fm := &mount.FakeMounter{
		MountPoints: []mount.MountPoint{
			{Device: "/dev/sdb", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod1"},
			{Device: "/dev/sdb", Path: "/var/lib/kubelet/plugins/kubernetes.io/fc/50060e801049cfd1-lun-0"},
			{Device: "/dev/sdc", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod2"},
			{Device: "/dev/sdc", Path: "/var/lib/kubelet/plugins/kubernetes.io/fc/volumeDevices/3600508b400105e210000900000490000"},
		},
	}
	mountPaths := []string{
		"/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod1",
		"/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod2",
	}
	for _, path := range mountPaths {
		refs, err := mount.GetMountRefs(fm, path)
		if err != nil {
			t.Errorf("couldn't get mountrefs. err: %v", err)
		}
		var globalPDPath string
		for _, ref := range refs {
			if strings.Contains(ref, "kubernetes.io/fc") {
				globalPDPath = ref
				break
			}
		}
		if len(globalPDPath) == 0 {
			t.Errorf("couldn't fetch mountrefs")
		}
		arr := strings.Split(globalPDPath, "/")
		if len(arr) < 1 {
			t.Errorf("failed to retrieve volume plugin information from globalPDPath: %v", globalPDPath)
		}
		volumeInfo := arr[len(arr)-1]
		if strings.Contains(volumeInfo, "-lun-") {
			wwnLun := strings.Split(volumeInfo, "-lun-")
			if len(wwnLun) < 2 {
				t.Errorf("failed to retrieve TargetWWN and Lun. volumeInfo is invalid: %v", volumeInfo)
			}
			lun, _ := strconv.Atoi(wwnLun[1])
			lun32 := int32(lun)
			if wwnLun[0] != "50060e801049cfd1" || lun32 != 0 {
				t.Errorf("failed to retrieve TargetWWN and Lun")
			}
		} else {
			if volumeInfo != "3600508b400105e210000900000490000" {
				t.Errorf("failed to retrieve WWIDs")
			}
		}
	}
}

func Test_ConstructVolumeSpecNoRefs(t *testing.T) {
	fm := &mount.FakeMounter{
		MountPoints: []mount.MountPoint{
			{Device: "/dev/sdd", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod1"},
		},
	}
	mountPaths := []string{
		"/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod1",
	}
	for _, path := range mountPaths {
		refs, _ := mount.GetMountRefs(fm, path)
		var globalPDPath string
		for _, ref := range refs {
			if strings.Contains(ref, "kubernetes.io/fc") {
				globalPDPath = ref
				break
			}
		}
		if len(globalPDPath) != 0 {
			t.Errorf("invalid globalPDPath")
		}
	}
}
409
vendor/k8s.io/kubernetes/pkg/volume/fc/fc_util.go
generated
vendored
@ -1,409 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package fc

import (
	"fmt"
	"io/ioutil"
	"os"
	"path"
	"path/filepath"
	"strings"

	"github.com/golang/glog"
	"k8s.io/api/core/v1"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/pkg/volume"
	volumeutil "k8s.io/kubernetes/pkg/volume/util"
	"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
)

type ioHandler interface {
	ReadDir(dirname string) ([]os.FileInfo, error)
	Lstat(name string) (os.FileInfo, error)
	EvalSymlinks(path string) (string, error)
	WriteFile(filename string, data []byte, perm os.FileMode) error
}

type osIOHandler struct{}

const (
	byPath = "/dev/disk/by-path/"
	byID   = "/dev/disk/by-id/"
)

func (handler *osIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) {
	return ioutil.ReadDir(dirname)
}
func (handler *osIOHandler) Lstat(name string) (os.FileInfo, error) {
	return os.Lstat(name)
}
func (handler *osIOHandler) EvalSymlinks(path string) (string, error) {
	return filepath.EvalSymlinks(path)
}
func (handler *osIOHandler) WriteFile(filename string, data []byte, perm os.FileMode) error {
	return ioutil.WriteFile(filename, data, perm)
}

// findDisk, given a wwn and lun, finds the device and its associated devicemapper parent.
func findDisk(wwn, lun string, io ioHandler, deviceUtil volumeutil.DeviceUtil) (string, string) {
	fcPath := "-fc-0x" + wwn + "-lun-" + lun
	devPath := byPath
	if dirs, err := io.ReadDir(devPath); err == nil {
		for _, f := range dirs {
			name := f.Name()
			if strings.Contains(name, fcPath) {
				if disk, err1 := io.EvalSymlinks(devPath + name); err1 == nil {
					dm := deviceUtil.FindMultipathDeviceForDevice(disk)
					glog.Infof("fc: find disk: %v, dm: %v", disk, dm)
					return disk, dm
				}
			}
		}
	}
	return "", ""
}
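
// Example (mirroring the fixture in fc_util_test.go): a by-path entry named
// "pci-0000:41:00.0-fc-0x500a0981891b8dc5-lun-0" matches wwn "500a0981891b8dc5"
// with lun "0" and resolves through EvalSymlinks to a /dev/sdX device.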

// findDiskWWIDs, given a wwid, finds the device and its associated devicemapper parent.
func findDiskWWIDs(wwid string, io ioHandler, deviceUtil volumeutil.DeviceUtil) (string, string) {
	// Example wwid format:
	//   3600508b400105e210000900000490000
	//   <VENDOR NAME> <IDENTIFIER NUMBER>
	// Example of a symlink under by-id:
	//   /dev/disk/by-id/scsi-3600508b400105e210000900000490000
	//   /dev/disk/by-id/scsi-<VENDOR NAME>_<IDENTIFIER NUMBER>
	// The wwid may contain whitespace, which is replaced with an underscore
	// when the wwid is exposed under /dev/disk/by-id.

	fcPath := "scsi-" + wwid
	devID := byID
	if dirs, err := io.ReadDir(devID); err == nil {
		for _, f := range dirs {
			name := f.Name()
			if name == fcPath {
				disk, err := io.EvalSymlinks(devID + name)
				if err != nil {
					glog.V(2).Infof("fc: failed to find a corresponding disk from symlink[%s], error %v", devID+name, err)
					return "", ""
				}
				dm := deviceUtil.FindMultipathDeviceForDevice(disk)
				glog.Infof("fc: find disk: %v, dm: %v", disk, dm)
				return disk, dm
			}
		}
	}
	glog.V(2).Infof("fc: failed to find a disk [%s]", devID+fcPath)
	return "", ""
}

// removeFromScsiSubsystem removes a scsi device based upon its /dev/sdX name.
func removeFromScsiSubsystem(deviceName string, io ioHandler) {
	fileName := "/sys/block/" + deviceName + "/device/delete"
	glog.V(4).Infof("fc: remove device from scsi-subsystem: path: %s", fileName)
	data := []byte("1")
	io.WriteFile(fileName, data, 0666)
}
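
// Note (added for clarity): writing "- - -" to /sys/class/scsi_host/<host>/scan
// is the kernel's wildcard rescan request, with channel, target, and LUN all
// wildcarded, so each adapter re-enumerates every device it can reach.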
// scsiHostRescan rescans the scsi bus on every host adapter.
func scsiHostRescan(io ioHandler) {
	scsiPath := "/sys/class/scsi_host/"
	if dirs, err := io.ReadDir(scsiPath); err == nil {
		for _, f := range dirs {
			name := scsiPath + f.Name() + "/scan"
			data := []byte("- - -")
			io.WriteFile(name, data, 0666)
		}
	}
}

// makePDNameInternal makes a directory like /var/lib/kubelet/plugins/kubernetes.io/fc/target-lun-0.
func makePDNameInternal(host volume.VolumeHost, wwns []string, lun string, wwids []string) string {
	if len(wwns) != 0 {
		return path.Join(host.GetPluginDir(fcPluginName), wwns[0]+"-lun-"+lun)
	}
	return path.Join(host.GetPluginDir(fcPluginName), wwids[0])
}

// makeVDPDNameInternal makes a directory like /var/lib/kubelet/plugins/kubernetes.io/fc/volumeDevices/target-lun-0.
func makeVDPDNameInternal(host volume.VolumeHost, wwns []string, lun string, wwids []string) string {
	if len(wwns) != 0 {
		return path.Join(host.GetVolumeDevicePluginDir(fcPluginName), wwns[0]+"-lun-"+lun)
	}
	return path.Join(host.GetVolumeDevicePluginDir(fcPluginName), wwids[0])
}

type FCUtil struct{}

func (util *FCUtil) MakeGlobalPDName(fc fcDisk) string {
	return makePDNameInternal(fc.plugin.host, fc.wwns, fc.lun, fc.wwids)
}

// MakeGlobalVDPDName returns the global volume device plugin dir.
func (util *FCUtil) MakeGlobalVDPDName(fc fcDisk) string {
	return makeVDPDNameInternal(fc.plugin.host, fc.wwns, fc.lun, fc.wwids)
}

func searchDisk(b fcDiskMounter) (string, error) {
	var diskIDs []string
	var disk string
	var dm string
	io := b.io
	wwids := b.wwids
	wwns := b.wwns
	lun := b.lun

	if len(wwns) != 0 {
		diskIDs = wwns
	} else {
		diskIDs = wwids
	}

	rescanned := false
	// Two-phase search:
	// first phase: search the existing device paths; if a multipath dm is found, exit the loop.
	// second phase: rescan the scsi bus, search again, and return with whatever was found.
	for {
		for _, diskID := range diskIDs {
			if len(wwns) != 0 {
				disk, dm = findDisk(diskID, lun, io, b.deviceUtil)
			} else {
				disk, dm = findDiskWWIDs(diskID, io, b.deviceUtil)
			}
			// if a multipath device is found, break
			if dm != "" {
				break
			}
		}
		// if a dm is found, or we have already rescanned, exit the loop
		if rescanned || dm != "" {
			break
		}
		// rescan the scsi bus and search again
		scsiHostRescan(io)
		rescanned = true
	}
	// if no disk matches the input wwn and lun, exit
	if disk == "" && dm == "" {
		return "", fmt.Errorf("no fc disk found")
	}

	// if a multipath devicemapper device is found, use it; otherwise use the raw disk
	if dm != "" {
		return dm, nil
	}
	return disk, nil
}

func (util *FCUtil) AttachDisk(b fcDiskMounter) (string, error) {
	devicePath, err := searchDisk(b)
	if err != nil {
		return "", err
	}
	// TODO: remove feature gate check after no longer needed
	if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
		// If the volumeMode is 'Block', the plugin doesn't have to format the volume.
		// The globalPDPath will be created by the operationexecutor, so just return devicePath here.
		glog.V(5).Infof("fc: AttachDisk volumeMode: %s, devicePath: %s", b.volumeMode, devicePath)
		if b.volumeMode == v1.PersistentVolumeBlock {
			return devicePath, nil
		}
	}

	// mount it
	globalPDPath := util.MakeGlobalPDName(*b.fcDisk)
	if err := os.MkdirAll(globalPDPath, 0750); err != nil {
		return devicePath, fmt.Errorf("fc: failed to mkdir %s, error: %v", globalPDPath, err)
	}

	noMnt, err := b.mounter.IsLikelyNotMountPoint(globalPDPath)
	if err != nil {
		return devicePath, fmt.Errorf("heuristic determination of mount point failed: %v", err)
	}
	if !noMnt {
		glog.Infof("fc: %s already mounted", globalPDPath)
		return devicePath, nil
	}

	err = b.mounter.FormatAndMount(devicePath, globalPDPath, b.fsType, nil)
	if err != nil {
		return devicePath, fmt.Errorf("fc: failed to mount fc volume %s [%s] to %s, error %v", devicePath, b.fsType, globalPDPath, err)
	}

	return devicePath, err
}

// DetachDisk removes a scsi device file such as /dev/sdX from the node.
func (util *FCUtil) DetachDisk(c fcDiskUnmounter, devicePath string) error {
	var devices []string
	// devicePath might be like /dev/mapper/mpathX. Find the destination.
	dstPath, err := c.io.EvalSymlinks(devicePath)
	if err != nil {
		return err
	}
	// Find the slave devices
	if strings.HasPrefix(dstPath, "/dev/dm-") {
		devices = c.deviceUtil.FindSlaveDevicesOnMultipath(dstPath)
	} else {
		// Add the single device path to devices
		devices = append(devices, dstPath)
	}
	glog.V(4).Infof("fc: DetachDisk devicePath: %v, dstPath: %v, devices: %v", devicePath, dstPath, devices)
	var lastErr error
	for _, device := range devices {
		err := util.detachFCDisk(c.io, device)
		if err != nil {
			glog.Errorf("fc: detachFCDisk failed. device: %v err: %v", device, err)
			lastErr = fmt.Errorf("fc: detachFCDisk failed. device: %v err: %v", device, err)
		}
	}
	if lastErr != nil {
		glog.Errorf("fc: last error occurred during detach disk:\n%v", lastErr)
		return lastErr
	}
	return nil
}

// detachFCDisk removes a scsi device file such as /dev/sdX from the node.
func (util *FCUtil) detachFCDisk(io ioHandler, devicePath string) error {
	// Remove the scsi device from the node.
	if !strings.HasPrefix(devicePath, "/dev/") {
		return fmt.Errorf("fc detach disk: invalid device name: %s", devicePath)
	}
	arr := strings.Split(devicePath, "/")
	dev := arr[len(arr)-1]
	removeFromScsiSubsystem(dev, io)
	return nil
}

// DetachBlockFCDisk detaches a volume from the kubelet node, removes the scsi
// device file such as /dev/sdX from the node, and then removes the loopback
// for the scsi device.
func (util *FCUtil) DetachBlockFCDisk(c fcDiskUnmapper, mapPath, devicePath string) error {
	// Check if devicePath is valid
	if len(devicePath) != 0 {
		if pathExists, pathErr := checkPathExists(devicePath); !pathExists || pathErr != nil {
			return pathErr
		}
	} else {
		// TODO: the FC plugin can't obtain the devicePath from kubelet because devicePath
		// in the volume object isn't updated when the volume is attached to the kubelet node.
		glog.Infof("fc: devicePath is empty. Try to retrieve FC configuration from global map path: %v", mapPath)
	}

	// Check if the global map path is valid.
	// Global map path examples:
	//   wwn+lun: plugins/kubernetes.io/fc/volumeDevices/50060e801049cfd1-lun-0/
	//   wwid:    plugins/kubernetes.io/fc/volumeDevices/3600508b400105e210000900000490000/
	if pathExists, pathErr := checkPathExists(mapPath); !pathExists || pathErr != nil {
		return pathErr
	}

	// Retrieve the volume plugin dependent path like '50060e801049cfd1-lun-0' from the global map path.
	arr := strings.Split(mapPath, "/")
	if len(arr) < 1 {
		return fmt.Errorf("failed to retrieve volume plugin information from global map path: %v", mapPath)
	}
	volumeInfo := arr[len(arr)-1]

	// Search for a symbolic link matching volumeInfo under /dev/disk/by-path or /dev/disk/by-id,
	// then find the destination device path from the link.
	searchPath := byID
	if strings.Contains(volumeInfo, "-lun-") {
		searchPath = byPath
	}
	fis, err := ioutil.ReadDir(searchPath)
	if err != nil {
		return err
	}
	for _, fi := range fis {
		if strings.Contains(fi.Name(), volumeInfo) {
			devicePath = path.Join(searchPath, fi.Name())
			glog.V(5).Infof("fc: updated devicePath: %s", devicePath)
			break
		}
	}
	if len(devicePath) == 0 {
		return fmt.Errorf("fc: failed to find corresponding device from searchPath: %v", searchPath)
	}
	dstPath, err := c.io.EvalSymlinks(devicePath)
	if err != nil {
		return err
	}
	glog.V(4).Infof("fc: find destination device path from symlink: %v", dstPath)

	// Get the loopback device, which takes an fd lock for the device, before detaching the volume from the node.
	// TODO: This is a workaround for issue #54108
	// Currently local attach plugins such as FC, iSCSI, RBD can't obtain devicePath during
	// GenerateUnmapDeviceFunc() in operation_generator. As a result, these plugins fail to get
	// and remove the loopback device, which would then be left behind on the kubelet node. To
	// avoid the problem, local attach plugins need to remove the loopback device during TearDownDevice().
	var devices []string
	blkUtil := volumepathhandler.NewBlockVolumePathHandler()
	dm := c.deviceUtil.FindMultipathDeviceForDevice(dstPath)
	if len(dm) != 0 {
		dstPath = dm
	}
	loop, err := volumepathhandler.BlockVolumePathHandler.GetLoopDevice(blkUtil, dstPath)
	if err != nil {
		if err.Error() != volumepathhandler.ErrDeviceNotFound {
			return fmt.Errorf("fc: failed to get loopback for destination path: %v, err: %v", dstPath, err)
		}
		glog.Warningf("fc: loopback for destination path: %s not found", dstPath)
	}

	// Detach the volume from the kubelet node.
	if len(dm) != 0 {
		// Find all devices which are managed by multipath.
		devices = c.deviceUtil.FindSlaveDevicesOnMultipath(dm)
	} else {
		// Add the single device path to devices.
		devices = append(devices, dstPath)
	}
	var lastErr error
	for _, device := range devices {
		err = util.detachFCDisk(c.io, device)
		if err != nil {
			glog.Errorf("fc: detachFCDisk failed. device: %v err: %v", device, err)
			lastErr = fmt.Errorf("fc: detachFCDisk failed. device: %v err: %v", device, err)
		}
	}
	if lastErr != nil {
		glog.Errorf("fc: last error occurred during detach disk:\n%v", lastErr)
		return lastErr
	}
	if len(loop) != 0 {
		// The volume was successfully detached from the node. We can safely remove the loopback.
		err = volumepathhandler.BlockVolumePathHandler.RemoveLoopDevice(blkUtil, loop)
		if err != nil {
			return fmt.Errorf("fc: failed to remove loopback: %v, err: %v", loop, err)
		}
	}
	return nil
}

func checkPathExists(path string) (bool, error) {
	if pathExists, pathErr := volumeutil.PathExists(path); pathErr != nil {
		return pathExists, fmt.Errorf("Error checking if path exists: %v", pathErr)
	} else if !pathExists {
		glog.Warningf("Warning: Unmap skipped because path does not exist: %v", path)
		return pathExists, nil
	}
	return true, nil
}
118
vendor/k8s.io/kubernetes/pkg/volume/fc/fc_util_test.go
generated
vendored
@ -1,118 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package fc

import (
	"os"
	"testing"
	"time"

	"k8s.io/kubernetes/pkg/volume/util"
)

type fakeFileInfo struct {
	name string
}

func (fi *fakeFileInfo) Name() string {
	return fi.name
}

func (fi *fakeFileInfo) Size() int64 {
	return 0
}

func (fi *fakeFileInfo) Mode() os.FileMode {
	return 777
}

func (fi *fakeFileInfo) ModTime() time.Time {
	return time.Now()
}
func (fi *fakeFileInfo) IsDir() bool {
	return false
}

func (fi *fakeFileInfo) Sys() interface{} {
	return nil
}

type fakeIOHandler struct{}

func (handler *fakeIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) {
	switch dirname {
	case "/dev/disk/by-path/":
		f := &fakeFileInfo{
			name: "pci-0000:41:00.0-fc-0x500a0981891b8dc5-lun-0",
		}
		return []os.FileInfo{f}, nil
	case "/sys/block/":
		f := &fakeFileInfo{
			name: "dm-1",
		}
		return []os.FileInfo{f}, nil
	case "/dev/disk/by-id/":
		f := &fakeFileInfo{
			name: "scsi-3600508b400105e210000900000490000",
		}
		return []os.FileInfo{f}, nil
	}
	return nil, nil
}

func (handler *fakeIOHandler) Lstat(name string) (os.FileInfo, error) {
	return nil, nil
}

func (handler *fakeIOHandler) EvalSymlinks(path string) (string, error) {
	return "/dev/sda", nil
}

func (handler *fakeIOHandler) WriteFile(filename string, data []byte, perm os.FileMode) error {
	return nil
}
|
||||
func TestSearchDisk(t *testing.T) {
|
||||
fakeMounter := fcDiskMounter{
|
||||
fcDisk: &fcDisk{
|
||||
wwns: []string{"500a0981891b8dc5"},
|
||||
lun: "0",
|
||||
io: &fakeIOHandler{},
|
||||
},
|
||||
deviceUtil: util.NewDeviceHandler(util.NewIOHandler()),
|
||||
}
|
||||
devicePath, error := searchDisk(fakeMounter)
|
||||
// if no disk matches input wwn and lun, exit
|
||||
if devicePath == "" || error != nil {
|
||||
t.Errorf("no fc disk found")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSearchDiskWWID(t *testing.T) {
|
||||
fakeMounter := fcDiskMounter{
|
||||
fcDisk: &fcDisk{
|
||||
wwids: []string{"3600508b400105e210000900000490000"},
|
||||
io: &fakeIOHandler{},
|
||||
},
|
||||
deviceUtil: util.NewDeviceHandler(util.NewIOHandler()),
|
||||
}
|
||||
devicePath, error := searchDisk(fakeMounter)
|
||||
// if no disk matches input wwid, exit
|
||||
if devicePath == "" || error != nil {
|
||||
t.Errorf("no fc disk found")
|
||||
}
|
||||
}
|
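Aside (not part of the diff): the fake ReadDir above works because the FC search scans /dev/disk/by-path/ for entries that embed the target WWN and LUN. A standalone sketch of that naming shape, under the assumption that the udev by-path convention is pci-<addr>-fc-0x<wwn>-lun-<lun>; byPathName is a hypothetical helper, not part of the plugin:

package main

import "fmt"

// byPathName mirrors the entry shape the fake above serves to searchDisk.
func byPathName(pciAddr, wwn, lun string) string {
    return fmt.Sprintf("pci-%s-fc-0x%s-lun-%s", pciAddr, wwn, lun)
}

func main() {
    // Prints the exact name TestSearchDisk's fake returns:
    // pci-0000:41:00.0-fc-0x500a0981891b8dc5-lun-0
    fmt.Println(byPathName("0000:41:00.0", "500a0981891b8dc5", "0"))
}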
85
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/BUILD
generated
vendored
@ -1,85 +0,0 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = [
        "attacher.go",
        "attacher-defaults.go",
        "detacher.go",
        "detacher-defaults.go",
        "driver-call.go",
        "fake_watcher.go",
        "mounter.go",
        "mounter-defaults.go",
        "plugin.go",
        "plugin-defaults.go",
        "probe.go",
        "unmounter.go",
        "unmounter-defaults.go",
        "util.go",
        "volume.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/volume/flexvolume",
    deps = [
        "//pkg/util/filesystem:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/util/strings:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//vendor/github.com/fsnotify/fsnotify:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
        "//vendor/k8s.io/utils/exec:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "attacher_test.go",
        "common_test.go",
        "detacher_test.go",
        "driver-call_test.go",
        "flexvolume_test.go",
        "mounter_test.go",
        "plugin_test.go",
        "probe_test.go",
        "unmounter_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//pkg/util/filesystem:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/testing:go_default_library",
        "//vendor/github.com/fsnotify/fsnotify:go_default_library",
        "//vendor/github.com/stretchr/testify/assert:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/client-go/util/testing:go_default_library",
        "//vendor/k8s.io/utils/exec:go_default_library",
        "//vendor/k8s.io/utils/exec/testing:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
15
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/OWNERS
generated
vendored
@ -1,15 +0,0 @@
approvers:
- chakri-nelluri
- saad-ali
- MikaelCluseau
reviewers:
- ivan4th
- rata
- sjenning
- saad-ali
- jsafrane
- rootfs
- jingxu97
- msau42
- chakri-nelluri
- MikaelCluseau
73
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/attacher-defaults.go
generated
vendored
@ -1,73 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package flexvolume

import (
    "time"

    "github.com/golang/glog"

    "k8s.io/apimachinery/pkg/types"
    "k8s.io/kubernetes/pkg/util/mount"
    "k8s.io/kubernetes/pkg/volume"
)

type attacherDefaults flexVolumeAttacher

// Attach is part of the volume.Attacher interface
func (a *attacherDefaults) Attach(spec *volume.Spec, hostName types.NodeName) (string, error) {
    glog.Warning(logPrefix(a.plugin.flexVolumePlugin), "using default Attach for volume ", spec.Name(), ", host ", hostName)
    return "", nil
}

// WaitForAttach is part of the volume.Attacher interface
func (a *attacherDefaults) WaitForAttach(spec *volume.Spec, devicePath string, timeout time.Duration) (string, error) {
    glog.Warning(logPrefix(a.plugin.flexVolumePlugin), "using default WaitForAttach for volume ", spec.Name(), ", device ", devicePath)
    return devicePath, nil
}

// GetDeviceMountPath is part of the volume.Attacher interface
func (a *attacherDefaults) GetDeviceMountPath(spec *volume.Spec, mountsDir string) (string, error) {
    return a.plugin.getDeviceMountPath(spec)
}

// MountDevice is part of the volume.Attacher interface
func (a *attacherDefaults) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string, mounter mount.Interface) error {
    glog.Warning(logPrefix(a.plugin.flexVolumePlugin), "using default MountDevice for volume ", spec.Name(), ", device ", devicePath, ", deviceMountPath ", deviceMountPath)

    volSourceFSType, err := getFSType(spec)
    if err != nil {
        return err
    }

    readOnly, err := getReadOnly(spec)
    if err != nil {
        return err
    }

    options := make([]string, 0)

    if readOnly {
        options = append(options, "ro")
    } else {
        options = append(options, "rw")
    }

    diskMounter := &mount.SafeFormatAndMount{Interface: mounter, Exec: a.plugin.host.GetExec(a.plugin.GetPluginName())}

    return diskMounter.FormatAndMount(devicePath, deviceMountPath, volSourceFSType, options)
}
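Aside (not part of the diff): type attacherDefaults flexVolumeAttacher works because a defined type keeps its underlying type's layout, so a *flexVolumeAttacher converts directly to *attacherDefaults and picks up the fallback method set without copying. A self-contained sketch of the idiom with hypothetical names (widget and fallback are illustrations, not identifiers from this package):

package main

import "fmt"

type widget struct{ name string }

// fallback shares widget's underlying type, so *widget converts to *fallback.
type fallback widget

func (w *widget) Greet() string   { return "driver: " + w.name }
func (f *fallback) Greet() string { return "default: " + f.name }

func main() {
    w := &widget{name: "vol1"}
    fmt.Println(w.Greet())              // driver: vol1
    fmt.Println((*fallback)(w).Greet()) // default: vol1
}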
121
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/attacher.go
generated
vendored
@ -1,121 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package flexvolume

import (
    "time"

    "github.com/golang/glog"
    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/kubernetes/pkg/volume"
)

type flexVolumeAttacher struct {
    plugin *flexVolumeAttachablePlugin
}

var _ volume.Attacher = &flexVolumeAttacher{}

// Attach is part of the volume.Attacher interface
func (a *flexVolumeAttacher) Attach(spec *volume.Spec, hostName types.NodeName) (string, error) {

    call := a.plugin.NewDriverCall(attachCmd)
    call.AppendSpec(spec, a.plugin.host, nil)
    call.Append(string(hostName))

    status, err := call.Run()
    if isCmdNotSupportedErr(err) {
        return (*attacherDefaults)(a).Attach(spec, hostName)
    } else if err != nil {
        return "", err
    }
    return status.DevicePath, err
}

// WaitForAttach is part of the volume.Attacher interface
func (a *flexVolumeAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) {
    call := a.plugin.NewDriverCallWithTimeout(waitForAttachCmd, timeout)
    call.Append(devicePath)
    call.AppendSpec(spec, a.plugin.host, nil)

    status, err := call.Run()
    if isCmdNotSupportedErr(err) {
        return (*attacherDefaults)(a).WaitForAttach(spec, devicePath, timeout)
    } else if err != nil {
        return "", err
    }
    return status.DevicePath, nil
}

// GetDeviceMountPath is part of the volume.Attacher interface
func (a *flexVolumeAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) {
    return a.plugin.getDeviceMountPath(spec)
}

// MountDevice is part of the volume.Attacher interface
func (a *flexVolumeAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
    // Mount only once.
    alreadyMounted, err := prepareForMount(a.plugin.host.GetMounter(a.plugin.GetPluginName()), deviceMountPath)
    if err != nil {
        return err
    }
    if alreadyMounted {
        return nil
    }

    call := a.plugin.NewDriverCall(mountDeviceCmd)
    call.Append(deviceMountPath)
    call.Append(devicePath)
    call.AppendSpec(spec, a.plugin.host, nil)

    _, err = call.Run()
    if isCmdNotSupportedErr(err) {
        // Devicepath is empty if the plugin does not support attach calls. Ignore mountDevice calls if the
        // plugin does not implement attach interface.
        if devicePath != "" {
            return (*attacherDefaults)(a).MountDevice(spec, devicePath, deviceMountPath, a.plugin.host.GetMounter(a.plugin.GetPluginName()))
        } else {
            return nil
        }
    }
    return err
}

func (a *flexVolumeAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
    volumesAttachedCheck := make(map[*volume.Spec]bool)
    for _, spec := range specs {
        volumesAttachedCheck[spec] = true

        call := a.plugin.NewDriverCall(isAttached)
        call.AppendSpec(spec, a.plugin.host, nil)
        call.Append(string(nodeName))

        status, err := call.Run()
        if isCmdNotSupportedErr(err) {
            return nil, nil
        } else if err == nil {
            if !status.Attached {
                volumesAttachedCheck[spec] = false
                glog.V(2).Infof("VolumesAreAttached: check volume (%q) is no longer attached", spec.Name())
            }
        } else {
            return nil, err
        }
    }
    return volumesAttachedCheck, nil
}
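Aside (not part of the diff): read together with DriverCall in driver-call.go below, each method here shells out to the driver binary with a subcommand, positional arguments, and a JSON blob of options, and falls back to the in-tree defaults when the driver answers "Not supported". The resulting argument shapes, as exercised by the tests that follow (the /plugin/test executable path comes from the test fixture, not from production code):

    attach        <json-options> <node-name>
    waitforattach <device-path> <json-options>
    mountdevice   <device-mount-path> <device-path> <json-options>
    isattached    <json-options> <node-name>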
76
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/attacher_test.go
generated
vendored
@ -1,76 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package flexvolume

import (
    "testing"
    "time"

    "k8s.io/api/core/v1"
    "k8s.io/kubernetes/pkg/volume"
)

func TestAttach(t *testing.T) {
    spec := fakeVolumeSpec()

    plugin, _ := testPlugin()
    plugin.runner = fakeRunner(
        assertDriverCall(t, notSupportedOutput(), attachCmd,
            specJson(plugin, spec, nil), "localhost"),
    )

    a, _ := plugin.NewAttacher()
    a.Attach(spec, "localhost")
}

func TestWaitForAttach(t *testing.T) {
    spec := fakeVolumeSpec()
    var pod *v1.Pod
    plugin, _ := testPlugin()
    plugin.runner = fakeRunner(
        assertDriverCall(t, notSupportedOutput(), waitForAttachCmd, "/dev/sdx",
            specJson(plugin, spec, nil)),
    )

    a, _ := plugin.NewAttacher()
    a.WaitForAttach(spec, "/dev/sdx", pod, 1*time.Second)
}

func TestMountDevice(t *testing.T) {
    spec := fakeVolumeSpec()

    plugin, rootDir := testPlugin()
    plugin.runner = fakeRunner(
        assertDriverCall(t, notSupportedOutput(), mountDeviceCmd, rootDir+"/mount-dir", "/dev/sdx",
            specJson(plugin, spec, nil)),
    )

    a, _ := plugin.NewAttacher()
    a.MountDevice(spec, "/dev/sdx", rootDir+"/mount-dir")
}

func TestIsVolumeAttached(t *testing.T) {
    spec := fakeVolumeSpec()

    plugin, _ := testPlugin()
    plugin.runner = fakeRunner(
        assertDriverCall(t, notSupportedOutput(), isAttached, specJson(plugin, spec, nil), "localhost"),
    )
    a, _ := plugin.NewAttacher()
    specs := []*volume.Spec{spec}
    a.VolumesAreAttached(specs, "localhost")
}
142
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/common_test.go
generated
vendored
@ -1,142 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package flexvolume

import (
    "encoding/json"
    "testing"

    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    utiltesting "k8s.io/client-go/util/testing"
    "k8s.io/kubernetes/pkg/volume"
    volumetesting "k8s.io/kubernetes/pkg/volume/testing"
    "k8s.io/utils/exec"
    fakeexec "k8s.io/utils/exec/testing"
)

func testPlugin() (*flexVolumeAttachablePlugin, string) {
    rootDir, err := utiltesting.MkTmpdir("flexvolume_test")
    if err != nil {
        panic("error creating temp dir: " + err.Error())
    }
    return &flexVolumeAttachablePlugin{
        flexVolumePlugin: &flexVolumePlugin{
            driverName:          "test",
            execPath:            "/plugin",
            host:                volumetesting.NewFakeVolumeHost(rootDir, nil, nil),
            unsupportedCommands: []string{},
        },
    }, rootDir
}

func assertDriverCall(t *testing.T, output fakeexec.FakeCombinedOutputAction, expectedCommand string, expectedArgs ...string) fakeexec.FakeCommandAction {
    return func(cmd string, args ...string) exec.Cmd {
        if cmd != "/plugin/test" {
            t.Errorf("Wrong executable called: got %v, expected %v", cmd, "/plugin/test")
        }
        if args[0] != expectedCommand {
            t.Errorf("Wrong command called: got %v, expected %v", args[0], expectedCommand)
        }
        cmdArgs := args[1:]
        if !sameArgs(cmdArgs, expectedArgs) {
            t.Errorf("Wrong args for %s: got %v, expected %v", args[0], cmdArgs, expectedArgs)
        }
        return &fakeexec.FakeCmd{
            Argv:                 args,
            CombinedOutputScript: []fakeexec.FakeCombinedOutputAction{output},
        }
    }
}

func fakeRunner(fakeCommands ...fakeexec.FakeCommandAction) exec.Interface {
    return &fakeexec.FakeExec{
        CommandScript: fakeCommands,
    }
}

func fakeResultOutput(result interface{}) fakeexec.FakeCombinedOutputAction {
    return func() ([]byte, error) {
        bytes, err := json.Marshal(result)
        if err != nil {
            panic("Unable to marshal result: " + err.Error())
        }
        return bytes, nil
    }
}

func successOutput() fakeexec.FakeCombinedOutputAction {
    return fakeResultOutput(&DriverStatus{StatusSuccess, "", "", "", true, nil})
}

func notSupportedOutput() fakeexec.FakeCombinedOutputAction {
    return fakeResultOutput(&DriverStatus{StatusNotSupported, "", "", "", false, nil})
}

func sameArgs(args, expectedArgs []string) bool {
    if len(args) != len(expectedArgs) {
        return false
    }
    for i, v := range args {
        if v != expectedArgs[i] {
            return false
        }
    }
    return true
}

func fakeVolumeSpec() *volume.Spec {
    vol := &v1.Volume{
        Name: "vol1",
        VolumeSource: v1.VolumeSource{
            FlexVolume: &v1.FlexVolumeSource{
                Driver:   "kubernetes.io/fakeAttacher",
                ReadOnly: false,
            },
        },
    }
    return volume.NewSpecFromVolume(vol)
}

func fakePersistentVolumeSpec() *volume.Spec {
    vol := &v1.PersistentVolume{
        ObjectMeta: metav1.ObjectMeta{
            Name: "vol1",
        },
        Spec: v1.PersistentVolumeSpec{
            PersistentVolumeSource: v1.PersistentVolumeSource{
                FlexVolume: &v1.FlexPersistentVolumeSource{
                    Driver:   "kubernetes.io/fakeAttacher",
                    ReadOnly: false,
                },
            },
        },
    }
    return volume.NewSpecFromPersistentVolume(vol, false)
}

func specJson(plugin *flexVolumeAttachablePlugin, spec *volume.Spec, extraOptions map[string]string) string {
    o, err := NewOptionsForDriver(spec, plugin.host, extraOptions)
    if err != nil {
        panic("Failed to convert spec: " + err.Error())
    }
    bytes, err := json.Marshal(o)
    if err != nil {
        panic("Unable to marshal result: " + err.Error())
    }
    return string(bytes)
}
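Aside (not part of the diff): the harness above scripts exactly one FakeCommandAction per driver call a test expects, in order, and the /plugin/test executable it checks for is execPath joined with driverName from testPlugin. An illustrative in-package fragment for a test that expects two consecutive driver calls, built only from the helpers above (t, plugin, and spec come from the enclosing test):

    plugin.runner = fakeRunner(
        assertDriverCall(t, successOutput(), attachCmd, specJson(plugin, spec, nil), "localhost"),
        assertDriverCall(t, successOutput(), isAttached, specJson(plugin, spec, nil), "localhost"),
    )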
45
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/detacher-defaults.go
generated
vendored
@ -1,45 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package flexvolume

import (
    "time"

    "github.com/golang/glog"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/kubernetes/pkg/volume/util"
)

type detacherDefaults flexVolumeDetacher

// Detach is part of the volume.Detacher interface.
func (d *detacherDefaults) Detach(volumeName string, hostName types.NodeName) error {
    glog.Warning(logPrefix(d.plugin.flexVolumePlugin), "using default Detach for volume ", volumeName, ", host ", hostName)
    return nil
}

// WaitForDetach is part of the volume.Detacher interface.
func (d *detacherDefaults) WaitForDetach(devicePath string, timeout time.Duration) error {
    glog.Warning(logPrefix(d.plugin.flexVolumePlugin), "using default WaitForDetach for device ", devicePath)
    return nil
}

// UnmountDevice is part of the volume.Detacher interface.
func (d *detacherDefaults) UnmountDevice(deviceMountPath string) error {
    glog.Warning(logPrefix(d.plugin.flexVolumePlugin), "using default UnmountDevice for device mount path ", deviceMountPath)
    return util.UnmountPath(deviceMountPath, d.plugin.host.GetMounter(d.plugin.GetPluginName()))
}
85
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/detacher.go
generated
vendored
@ -1,85 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package flexvolume

import (
    "fmt"
    "os"

    "github.com/golang/glog"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/kubernetes/pkg/volume"
    "k8s.io/kubernetes/pkg/volume/util"
)

type flexVolumeDetacher struct {
    plugin *flexVolumeAttachablePlugin
}

var _ volume.Detacher = &flexVolumeDetacher{}

// Detach is part of the volume.Detacher interface.
func (d *flexVolumeDetacher) Detach(volumeName string, hostName types.NodeName) error {

    call := d.plugin.NewDriverCall(detachCmd)
    call.Append(volumeName)
    call.Append(string(hostName))

    _, err := call.Run()
    if isCmdNotSupportedErr(err) {
        return (*detacherDefaults)(d).Detach(volumeName, hostName)
    }
    return err
}

// UnmountDevice is part of the volume.Detacher interface.
func (d *flexVolumeDetacher) UnmountDevice(deviceMountPath string) error {

    if pathExists, pathErr := util.PathExists(deviceMountPath); pathErr != nil {
        return fmt.Errorf("Error checking if path exists: %v", pathErr)
    } else if !pathExists {
        glog.Warningf("Warning: Unmount skipped because path does not exist: %v", deviceMountPath)
        return nil
    }

    notmnt, err := isNotMounted(d.plugin.host.GetMounter(d.plugin.GetPluginName()), deviceMountPath)
    if err != nil {
        return err
    }
    if notmnt {
        glog.Warningf("Warning: Path: %v already unmounted", deviceMountPath)
    } else {
        call := d.plugin.NewDriverCall(unmountDeviceCmd)
        call.Append(deviceMountPath)

        _, err := call.Run()
        if isCmdNotSupportedErr(err) {
            err = (*detacherDefaults)(d).UnmountDevice(deviceMountPath)
        }
        if err != nil {
            return err
        }
    }

    // Flexvolume driver may remove the directory. Ignore if it does.
    if pathExists, pathErr := util.PathExists(deviceMountPath); pathErr != nil {
        return fmt.Errorf("Error checking if path exists: %v", pathErr)
    } else if !pathExists {
        return nil
    }
    return os.Remove(deviceMountPath)
}
43
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/detacher_test.go
generated
vendored
@ -1,43 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package flexvolume

import (
    "testing"
)

func TestDetach(t *testing.T) {
    plugin, _ := testPlugin()
    plugin.runner = fakeRunner(
        assertDriverCall(t, notSupportedOutput(), detachCmd,
            "sdx", "localhost"),
    )

    d, _ := plugin.NewDetacher()
    d.Detach("sdx", "localhost")
}

func TestUnmountDevice(t *testing.T) {
    plugin, rootDir := testPlugin()
    plugin.runner = fakeRunner(
        assertDriverCall(t, notSupportedOutput(), unmountDeviceCmd,
            rootDir+"/mount-dir"),
    )

    d, _ := plugin.NewDetacher()
    d.UnmountDevice(rootDir + "/mount-dir")
}
263
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/driver-call.go
generated
vendored
@ -1,263 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package flexvolume

import (
    "encoding/json"
    "errors"
    "fmt"
    "time"

    "github.com/golang/glog"

    "k8s.io/kubernetes/pkg/volume"
)

const (
    // Driver calls
    initCmd          = "init"
    getVolumeNameCmd = "getvolumename"

    isAttached = "isattached"

    attachCmd        = "attach"
    waitForAttachCmd = "waitforattach"
    mountDeviceCmd   = "mountdevice"

    detachCmd        = "detach"
    unmountDeviceCmd = "unmountdevice"

    mountCmd   = "mount"
    unmountCmd = "unmount"

    // Option keys
    optionFSType         = "kubernetes.io/fsType"
    optionReadWrite      = "kubernetes.io/readwrite"
    optionKeySecret      = "kubernetes.io/secret"
    optionFSGroup        = "kubernetes.io/fsGroup"
    optionMountsDir      = "kubernetes.io/mountsDir"
    optionPVorVolumeName = "kubernetes.io/pvOrVolumeName"

    optionKeyPodName      = "kubernetes.io/pod.name"
    optionKeyPodNamespace = "kubernetes.io/pod.namespace"
    optionKeyPodUID       = "kubernetes.io/pod.uid"

    optionKeyServiceAccountName = "kubernetes.io/serviceAccount.name"
)

const (
    // StatusSuccess represents the successful completion of command.
    StatusSuccess = "Success"
    // StatusNotSupported represents that the command is not supported.
    StatusNotSupported = "Not supported"
)

var (
    TimeoutError = fmt.Errorf("Timeout")
)

// DriverCall implements the basic contract between FlexVolume and its driver.
// The caller is responsible for providing the required args.
type DriverCall struct {
    Command string
    Timeout time.Duration
    plugin  *flexVolumePlugin
    args    []string
}

func (plugin *flexVolumePlugin) NewDriverCall(command string) *DriverCall {
    return plugin.NewDriverCallWithTimeout(command, 0)
}

func (plugin *flexVolumePlugin) NewDriverCallWithTimeout(command string, timeout time.Duration) *DriverCall {
    return &DriverCall{
        Command: command,
        Timeout: timeout,
        plugin:  plugin,
        args:    []string{command},
    }
}

func (dc *DriverCall) Append(arg string) {
    dc.args = append(dc.args, arg)
}

func (dc *DriverCall) AppendSpec(spec *volume.Spec, host volume.VolumeHost, extraOptions map[string]string) error {
    optionsForDriver, err := NewOptionsForDriver(spec, host, extraOptions)
    if err != nil {
        return err
    }

    jsonBytes, err := json.Marshal(optionsForDriver)
    if err != nil {
        return fmt.Errorf("Failed to marshal spec, error: %s", err.Error())
    }

    dc.Append(string(jsonBytes))
    return nil
}

func (dc *DriverCall) Run() (*DriverStatus, error) {
    if dc.plugin.isUnsupported(dc.Command) {
        return nil, errors.New(StatusNotSupported)
    }
    execPath := dc.plugin.getExecutable()

    cmd := dc.plugin.runner.Command(execPath, dc.args...)

    timeout := false
    if dc.Timeout > 0 {
        timer := time.AfterFunc(dc.Timeout, func() {
            timeout = true
            cmd.Stop()
        })
        defer timer.Stop()
    }

    output, execErr := cmd.CombinedOutput()
    if execErr != nil {
        if timeout {
            return nil, TimeoutError
        }
        _, err := handleCmdResponse(dc.Command, output)
        if err == nil {
            glog.Errorf("FlexVolume: driver bug: %s: exec error (%s) but no error in response.", execPath, execErr)
            return nil, execErr
        }
        if isCmdNotSupportedErr(err) {
            dc.plugin.unsupported(dc.Command)
        } else {
            glog.Warningf("FlexVolume: driver call failed: executable: %s, args: %s, error: %s, output: %q", execPath, dc.args, execErr.Error(), output)
        }
        return nil, err
    }

    status, err := handleCmdResponse(dc.Command, output)
    if err != nil {
        if isCmdNotSupportedErr(err) {
            dc.plugin.unsupported(dc.Command)
        }
        return nil, err
    }

    return status, nil
}

// OptionsForDriver represents the spec given to the driver.
type OptionsForDriver map[string]string

func NewOptionsForDriver(spec *volume.Spec, host volume.VolumeHost, extraOptions map[string]string) (OptionsForDriver, error) {

    volSourceFSType, err := getFSType(spec)
    if err != nil {
        return nil, err
    }

    readOnly, err := getReadOnly(spec)
    if err != nil {
        return nil, err
    }

    volSourceOptions, err := getOptions(spec)
    if err != nil {
        return nil, err
    }

    options := map[string]string{}

    options[optionFSType] = volSourceFSType

    if readOnly {
        options[optionReadWrite] = "ro"
    } else {
        options[optionReadWrite] = "rw"
    }

    options[optionPVorVolumeName] = spec.Name()

    for key, value := range extraOptions {
        options[key] = value
    }

    for key, value := range volSourceOptions {
        options[key] = value
    }

    return OptionsForDriver(options), nil
}

// DriverStatus represents the return value of the driver callout.
type DriverStatus struct {
    // Status of the callout. One of "Success", "Failure" or "Not supported".
    Status string `json:"status"`
    // Reason for success/failure.
    Message string `json:"message,omitempty"`
    // Path to the device attached. This field is valid only for attach calls.
    // ie: /dev/sdx
    DevicePath string `json:"device,omitempty"`
    // Cluster wide unique name of the volume.
    VolumeName string `json:"volumeName,omitempty"`
    // Represents volume is attached on the node
    Attached bool `json:"attached,omitempty"`
    // Returns capabilities of the driver.
    // By default we assume all the capabilities are supported.
    // If the plugin does not support a capability, it can return false for that capability.
    Capabilities *DriverCapabilities `json:",omitempty"`
}

type DriverCapabilities struct {
    Attach         bool `json:"attach"`
    SELinuxRelabel bool `json:"selinuxRelabel"`
}

func defaultCapabilities() *DriverCapabilities {
    return &DriverCapabilities{
        Attach:         true,
        SELinuxRelabel: true,
    }
}

// isCmdNotSupportedErr checks if the error corresponds to command not supported by
// driver.
func isCmdNotSupportedErr(err error) bool {
    if err != nil && err.Error() == StatusNotSupported {
        return true
    }

    return false
}

// handleCmdResponse processes the command output and returns the appropriate
// error code or message.
func handleCmdResponse(cmd string, output []byte) (*DriverStatus, error) {
    status := DriverStatus{
        Capabilities: defaultCapabilities(),
    }
    if err := json.Unmarshal(output, &status); err != nil {
        glog.Errorf("Failed to unmarshal output for command: %s, output: %q, error: %s", cmd, string(output), err.Error())
        return nil, err
    } else if status.Status == StatusNotSupported {
        glog.V(5).Infof("%s command is not supported by the driver", cmd)
        return nil, errors.New(status.Status)
    } else if status.Status != StatusSuccess {
        errMsg := fmt.Sprintf("%s command failed, status: %s, reason: %s", cmd, status.Status, status.Message)
        glog.Errorf(errMsg)
        return nil, fmt.Errorf("%s", errMsg)
    }

    return &status, nil
}
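Aside (not part of the diff): the DriverStatus contract is easiest to see from a concrete payload. An illustrative in-package fragment (it would need a fmt import alongside the file's existing ones; the JSON values are made up): a driver that prints {"status": "Success", "device": "/dev/sdx", "attached": true} on stdout yields a *DriverStatus whose Capabilities stay at defaultCapabilities(), since the payload omits them, while a "Not supported" status instead surfaces as an error equal to StatusNotSupported, which isCmdNotSupportedErr recognizes.

    status, err := handleCmdResponse("waitforattach",
        []byte(`{"status": "Success", "device": "/dev/sdx", "attached": true}`))
    if err == nil {
        fmt.Println(status.DevicePath) // /dev/sdx
    }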
32
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/driver-call_test.go
generated
vendored
@ -1,32 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package flexvolume

import (
    "testing"
)

func TestHandleResponseDefaults(t *testing.T) {
    ds, err := handleCmdResponse("test", []byte(`{"status": "Success"}`))
    if err != nil {
        t.Error("error: ", err)
    }

    if *ds.Capabilities != *defaultCapabilities() {
        t.Error("wrong default capabilities: ", *ds.Capabilities)
    }
}