Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-13 02:33:34 +00:00)
vendor updates
66 vendor/k8s.io/kubernetes/pkg/volume/BUILD (generated, vendored)
@@ -1,10 +1,4 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
@@ -16,32 +10,58 @@ go_library(
        "metrics_nil.go",
        "metrics_statfs.go",
        "plugins.go",
        "util.go",
        "volume.go",
        "volume_unsupported.go",
    ] + select({
        "@io_bazel_rules_go//go/platform:linux_amd64": [
        "@io_bazel_rules_go//go/platform:android": [
            "volume_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:darwin": [
            "volume_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:dragonfly": [
            "volume_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:freebsd": [
            "volume_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:linux": [
            "volume_linux.go",
        ],
        "@io_bazel_rules_go//go/platform:nacl": [
            "volume_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:netbsd": [
            "volume_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:openbsd": [
            "volume_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:plan9": [
            "volume_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:solaris": [
            "volume_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:windows": [
            "volume_unsupported.go",
        ],
        "//conditions:default": [],
    }),
    importpath = "k8s.io/kubernetes/pkg/volume",
    visibility = ["//visibility:public"],
    deps = [
        "//pkg/cloudprovider:go_default_library",
        "//pkg/util/io:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//pkg/volume/util/fs:go_default_library",
        "//pkg/volume/util/recyclerclient:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
    ],
)
@@ -51,20 +71,12 @@ go_test(
    srcs = [
        "metrics_nil_test.go",
        "plugins_test.go",
        "util_test.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/volume",
    library = ":go_default_library",
    embed = [":go_default_library"],
    deps = [
        "//pkg/apis/core:go_default_library",
        "//pkg/util/slice:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
    ],
)

@@ -73,18 +85,17 @@ go_test(
    srcs = [
        "metrics_statfs_test.go",
    ] + select({
        "@io_bazel_rules_go//go/platform:linux_amd64": [
        "@io_bazel_rules_go//go/platform:linux": [
            "metrics_du_test.go",
        ],
        "//conditions:default": [],
    }),
    importpath = "k8s.io/kubernetes/pkg/volume_test",
    deps = [
        ":go_default_library",
        "//pkg/volume/testing:go_default_library",
        "//vendor/k8s.io/client-go/util/testing:go_default_library",
    ] + select({
        "@io_bazel_rules_go//go/platform:linux_amd64": [
        "@io_bazel_rules_go//go/platform:linux": [
            "//vendor/golang.org/x/sys/unix:go_default_library",
        ],
        "//conditions:default": [],
@@ -135,4 +146,5 @@ filegroup(
        "//pkg/volume/vsphere_volume:all-srcs",
    ],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
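The churn in this BUILD file is mechanical, from a newer rules_go/Gazelle: per-arch platform labels such as linux_amd64 give way to per-OS ones (android, darwin, ..., windows), library = ":go_default_library" becomes embed = [":go_default_library"] in test rules, and test targets gain an explicit importpath. The select() branches simply mirror the Go build constraints in the sources. A minimal sketch of the two build-constrained files that would produce exactly this kind of select (illustrative file contents, not part of the commit):

// volume_linux.go (sketch): the "+build linux" constraint is what makes
// Gazelle list this file only under go/platform:linux in the select above.

// +build linux

package volume

// volume_unsupported.go (sketch): "+build !linux" places it under every
// other platform branch, matching the select entries above.

// +build !linux

package volume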
7 vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/BUILD (generated, vendored)
@@ -11,6 +11,7 @@ go_library(
    srcs = [
        "attacher.go",
        "aws_ebs.go",
        "aws_ebs_block.go",
        "aws_util.go",
        "doc.go",
    ],
@@ -22,7 +23,7 @@ go_library(
        "//pkg/util/strings:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//pkg/volume/util/volumehelper:go_default_library",
        "//pkg/volume/util/volumepathhandler:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
@@ -35,10 +36,10 @@ go_test(
    name = "go_default_test",
    srcs = [
        "attacher_test.go",
        "aws_ebs_block_test.go",
        "aws_ebs_test.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/volume/aws_ebs",
    library = ":go_default_library",
    embed = [":go_default_library"],
    deps = [
        "//pkg/cloudprovider/providers/aws:go_default_library",
        "//pkg/util/mount:go_default_library",
27 vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/attacher.go (generated, vendored)
@@ -30,7 +30,6 @@ import (
	"k8s.io/kubernetes/pkg/util/mount"
	"k8s.io/kubernetes/pkg/volume"
	volumeutil "k8s.io/kubernetes/pkg/volume/util"
	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)

type awsElasticBlockStoreAttacher struct {
@@ -168,19 +167,15 @@ func (attacher *awsElasticBlockStoreAttacher) WaitForAttach(spec *volume.Spec, d
		select {
		case <-ticker.C:
			glog.V(5).Infof("Checking AWS Volume %q is attached.", volumeID)
			if devicePath != "" {
				devicePaths := getDiskByIdPaths(aws.KubernetesVolumeID(volumeSource.VolumeID), partition, devicePath)
				path, err := verifyDevicePath(devicePaths)
				if err != nil {
					// Log error, if any, and continue checking periodically. See issue #11321
					glog.Errorf("Error verifying AWS Volume (%q) is attached: %v", volumeID, err)
				} else if path != "" {
					// A device path has successfully been created for the PD
					glog.Infof("Successfully found attached AWS Volume %q.", volumeID)
					return path, nil
				}
			} else {
				glog.V(5).Infof("AWS Volume (%q) is not attached yet", volumeID)
			devicePaths := getDiskByIdPaths(aws.KubernetesVolumeID(volumeSource.VolumeID), partition, devicePath)
			path, err := verifyDevicePath(devicePaths)
			if err != nil {
				// Log error, if any, and continue checking periodically. See issue #11321
				glog.Errorf("Error verifying AWS Volume (%q) is attached: %v", volumeID, err)
			} else if path != "" {
				// A device path has successfully been created for the PD
				glog.Infof("Successfully found attached AWS Volume %q.", volumeID)
				return path, nil
			}
		case <-timer.C:
			return "", fmt.Errorf("Could not find attached AWS Volume %q. Timeout waiting for mount paths to be created.", volumeID)
@@ -223,8 +218,8 @@ func (attacher *awsElasticBlockStoreAttacher) MountDevice(spec *volume.Spec, dev
		options = append(options, "ro")
	}
	if notMnt {
		diskMounter := volumehelper.NewSafeFormatAndMountFromHost(awsElasticBlockStorePluginName, attacher.host)
		mountOptions := volume.MountOptionFromSpec(spec, options...)
		diskMounter := volumeutil.NewSafeFormatAndMountFromHost(awsElasticBlockStorePluginName, attacher.host)
		mountOptions := volumeutil.MountOptionFromSpec(spec, options...)
		err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions)
		if err != nil {
			os.Remove(deviceMountPath)
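The WaitForAttach hunk above drops the devicePath == "" special case: both branches performed the same probe, so the rewrite always derives the candidate by-id paths and checks them on every tick. Reduced to its skeleton, the loop is the usual ticker/timer poll (a condensed sketch of the code shown above, not the verbatim vendored function; ticker and timer setup elided):

	for {
		select {
		case <-ticker.C:
			// probe on every tick
			devicePaths := getDiskByIdPaths(volumeID, partition, devicePath)
			path, err := verifyDevicePath(devicePaths)
			if err != nil {
				// log and keep polling; transient errors are expected
				glog.Errorf("Error verifying AWS Volume (%q) is attached: %v", volumeID, err)
			} else if path != "" {
				return path, nil // device node appeared, attach complete
			}
		case <-timer.C:
			// overall deadline reached, give up
			return "", fmt.Errorf("timed out waiting for AWS Volume %q", volumeID)
		}
	}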
7 vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_ebs.go (generated, vendored)
@@ -34,7 +34,6 @@ import (
	kstrings "k8s.io/kubernetes/pkg/util/strings"
	"k8s.io/kubernetes/pkg/volume"
	"k8s.io/kubernetes/pkg/volume/util"
	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)

// This is the primary entrypoint for volume plugins.
@@ -134,7 +133,7 @@ func (plugin *awsElasticBlockStorePlugin) newMounterInternal(spec *volume.Spec,
		},
		fsType:      fsType,
		readOnly:    readOnly,
		diskMounter: volumehelper.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil
		diskMounter: util.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil
}

func (plugin *awsElasticBlockStorePlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
@@ -456,7 +455,7 @@ type awsElasticBlockStoreProvisioner struct {
var _ volume.Provisioner = &awsElasticBlockStoreProvisioner{}

func (c *awsElasticBlockStoreProvisioner) Provision() (*v1.PersistentVolume, error) {
	if !volume.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
	if !util.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
		return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
	}

@@ -475,7 +474,7 @@ func (c *awsElasticBlockStoreProvisioner) Provision() (*v1.PersistentVolume, err
			Name:   c.options.PVName,
			Labels: map[string]string{},
			Annotations: map[string]string{
				volumehelper.VolumeDynamicallyCreatedByKey: "aws-ebs-dynamic-provisioner",
				util.VolumeDynamicallyCreatedByKey: "aws-ebs-dynamic-provisioner",
			},
		},
		Spec: v1.PersistentVolumeSpec{
175 vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_ebs_block.go (generated, vendored, new file)
@@ -0,0 +1,175 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package aws_ebs

import (
	"fmt"
	"path"
	"path/filepath"
	"strconv"
	"strings"

	"github.com/golang/glog"
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
	"k8s.io/kubernetes/pkg/util/mount"
	kstrings "k8s.io/kubernetes/pkg/util/strings"
	"k8s.io/kubernetes/pkg/volume"
	"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
)

var _ volume.VolumePlugin = &awsElasticBlockStorePlugin{}
var _ volume.PersistentVolumePlugin = &awsElasticBlockStorePlugin{}
var _ volume.BlockVolumePlugin = &awsElasticBlockStorePlugin{}
var _ volume.DeletableVolumePlugin = &awsElasticBlockStorePlugin{}
var _ volume.ProvisionableVolumePlugin = &awsElasticBlockStorePlugin{}

func (plugin *awsElasticBlockStorePlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, mapPath string) (*volume.Spec, error) {
	pluginDir := plugin.host.GetVolumeDevicePluginDir(awsElasticBlockStorePluginName)
	blkutil := volumepathhandler.NewBlockVolumePathHandler()
	globalMapPathUUID, err := blkutil.FindGlobalMapPathUUIDFromPod(pluginDir, mapPath, podUID)
	if err != nil {
		return nil, err
	}
	glog.V(5).Infof("globalMapPathUUID: %s", globalMapPathUUID)

	globalMapPath := filepath.Dir(globalMapPathUUID)
	if len(globalMapPath) <= 1 {
		return nil, fmt.Errorf("failed to get volume plugin information from globalMapPathUUID: %v", globalMapPathUUID)
	}

	return getVolumeSpecFromGlobalMapPath(globalMapPath)
}

func getVolumeSpecFromGlobalMapPath(globalMapPath string) (*volume.Spec, error) {
	// Get volume spec information from globalMapPath
	// globalMapPath example:
	//   plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumeID}
	//   plugins/kubernetes.io/aws-ebs/volumeDevices/vol-XXXXXX
	vID := filepath.Base(globalMapPath)
	if len(vID) <= 1 {
		return nil, fmt.Errorf("failed to get volumeID from global path=%s", globalMapPath)
	}
	if !strings.Contains(vID, "vol-") {
		return nil, fmt.Errorf("failed to get volumeID from global path=%s, invalid volumeID format = %s", globalMapPath, vID)
	}
	block := v1.PersistentVolumeBlock
	awsVolume := &v1.PersistentVolume{
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: v1.PersistentVolumeSource{
				AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
					VolumeID: vID,
				},
			},
			VolumeMode: &block,
		},
	}

	return volume.NewSpecFromPersistentVolume(awsVolume, true), nil
}

// NewBlockVolumeMapper creates a new volume.BlockVolumeMapper from an API specification.
func (plugin *awsElasticBlockStorePlugin) NewBlockVolumeMapper(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.BlockVolumeMapper, error) {
	// If this is called via GenerateUnmapDeviceFunc(), pod is nil.
	// Pass empty string as dummy uid since uid isn't used in the case.
	var uid types.UID
	if pod != nil {
		uid = pod.UID
	}

	return plugin.newBlockVolumeMapperInternal(spec, uid, &AWSDiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}

func (plugin *awsElasticBlockStorePlugin) newBlockVolumeMapperInternal(spec *volume.Spec, podUID types.UID, manager ebsManager, mounter mount.Interface) (volume.BlockVolumeMapper, error) {
	ebs, readOnly, err := getVolumeSource(spec)
	if err != nil {
		return nil, err
	}

	volumeID := aws.KubernetesVolumeID(ebs.VolumeID)
	partition := ""
	if ebs.Partition != 0 {
		partition = strconv.Itoa(int(ebs.Partition))
	}

	return &awsElasticBlockStoreMapper{
		awsElasticBlockStore: &awsElasticBlockStore{
			podUID:    podUID,
			volName:   spec.Name(),
			volumeID:  volumeID,
			partition: partition,
			manager:   manager,
			mounter:   mounter,
			plugin:    plugin,
		},
		readOnly: readOnly}, nil
}

func (plugin *awsElasticBlockStorePlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (volume.BlockVolumeUnmapper, error) {
	return plugin.newUnmapperInternal(volName, podUID, &AWSDiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}

func (plugin *awsElasticBlockStorePlugin) newUnmapperInternal(volName string, podUID types.UID, manager ebsManager, mounter mount.Interface) (volume.BlockVolumeUnmapper, error) {
	return &awsElasticBlockStoreUnmapper{
		awsElasticBlockStore: &awsElasticBlockStore{
			podUID:  podUID,
			volName: volName,
			manager: manager,
			mounter: mounter,
			plugin:  plugin,
		}}, nil
}

func (c *awsElasticBlockStoreUnmapper) TearDownDevice(mapPath, devicePath string) error {
	return nil
}

type awsElasticBlockStoreUnmapper struct {
	*awsElasticBlockStore
}

var _ volume.BlockVolumeUnmapper = &awsElasticBlockStoreUnmapper{}

type awsElasticBlockStoreMapper struct {
	*awsElasticBlockStore
	readOnly bool
}

var _ volume.BlockVolumeMapper = &awsElasticBlockStoreMapper{}

func (b *awsElasticBlockStoreMapper) SetUpDevice() (string, error) {
	return "", nil
}

// GetGlobalMapPath returns global map path and error
// path: plugins/kubernetes.io/{PluginName}/volumeDevices/volumeID
//   plugins/kubernetes.io/aws-ebs/volumeDevices/vol-XXXXXX
func (ebs *awsElasticBlockStore) GetGlobalMapPath(spec *volume.Spec) (string, error) {
	volumeSource, _, err := getVolumeSource(spec)
	if err != nil {
		return "", err
	}
	return path.Join(ebs.plugin.host.GetVolumeDevicePluginDir(awsElasticBlockStorePluginName), string(volumeSource.VolumeID)), nil
}

// GetPodDeviceMapPath returns pod device map path and volume name
// path: pods/{podUid}/volumeDevices/kubernetes.io~aws
func (ebs *awsElasticBlockStore) GetPodDeviceMapPath() (string, string) {
	name := awsElasticBlockStorePluginName
	return ebs.plugin.host.GetPodVolumeDeviceDir(ebs.podUID, kstrings.EscapeQualifiedNameForDisk(name)), ebs.volName
}
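getVolumeSpecFromGlobalMapPath above leans entirely on the path layout: the volume ID is the last path segment and must look like an EBS ID before a block-mode PV spec is built from it. A self-contained illustration of that parsing (the path value is made up):

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func main() {
	// Layout documented above: plugins/kubernetes.io/aws-ebs/volumeDevices/{volumeID}
	globalMapPath := "plugins/kubernetes.io/aws-ebs/volumeDevices/vol-0123abcd"
	vID := filepath.Base(globalMapPath)
	fmt.Println(vID)                           // vol-0123abcd
	fmt.Println(strings.Contains(vID, "vol-")) // true, so the spec is accepted
}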
145 vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_ebs_block_test.go (generated, vendored, new file)
@@ -0,0 +1,145 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package aws_ebs

import (
	"os"
	"path"
	"testing"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	utiltesting "k8s.io/client-go/util/testing"
	"k8s.io/kubernetes/pkg/volume"
	volumetest "k8s.io/kubernetes/pkg/volume/testing"
)

const (
	testVolName    = "vol-1234"
	testPVName     = "pv1"
	testGlobalPath = "plugins/kubernetes.io/aws-ebs/volumeDevices/vol-1234"
	testPodPath    = "pods/poduid/volumeDevices/kubernetes.io~aws-ebs"
)

func TestGetVolumeSpecFromGlobalMapPath(t *testing.T) {
	// make our test path for fake GlobalMapPath
	// /tmp symbolized our pluginDir
	// /tmp/testGlobalPathXXXXX/plugins/kubernetes.io/gce-pd/volumeDevices/pdVol1
	tmpVDir, err := utiltesting.MkTmpdir("awsBlockTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	//deferred clean up
	defer os.RemoveAll(tmpVDir)

	expectedGlobalPath := path.Join(tmpVDir, testGlobalPath)

	//Bad Path
	badspec, err := getVolumeSpecFromGlobalMapPath("")
	if badspec != nil || err == nil {
		t.Fatalf("Expected not to get spec from GlobalMapPath but did")
	}

	// Good Path
	spec, err := getVolumeSpecFromGlobalMapPath(expectedGlobalPath)
	if spec == nil || err != nil {
		t.Fatalf("Failed to get spec from GlobalMapPath: %v", err)
	}
	if spec.PersistentVolume.Spec.AWSElasticBlockStore.VolumeID != testVolName {
		t.Errorf("Invalid volumeID from GlobalMapPath spec: %s", spec.PersistentVolume.Spec.AWSElasticBlockStore.VolumeID)
	}
	block := v1.PersistentVolumeBlock
	specMode := spec.PersistentVolume.Spec.VolumeMode
	if &specMode == nil {
		t.Errorf("Invalid volumeMode from GlobalMapPath spec: %v - %v", &specMode, block)
	}
	if *specMode != block {
		t.Errorf("Invalid volumeMode from GlobalMapPath spec: %v - %v", *specMode, block)
	}
}

func getTestVolume(readOnly bool, isBlock bool) *volume.Spec {
	pv := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name: testPVName,
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: v1.PersistentVolumeSource{
				AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
					VolumeID: testVolName,
				},
			},
		},
	}

	if isBlock {
		blockMode := v1.PersistentVolumeBlock
		pv.Spec.VolumeMode = &blockMode
	}
	return volume.NewSpecFromPersistentVolume(pv, readOnly)
}

func TestGetPodAndPluginMapPaths(t *testing.T) {
	tmpVDir, err := utiltesting.MkTmpdir("awsBlockTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	//deferred clean up
	defer os.RemoveAll(tmpVDir)

	expectedGlobalPath := path.Join(tmpVDir, testGlobalPath)
	expectedPodPath := path.Join(tmpVDir, testPodPath)

	spec := getTestVolume(false, true /*isBlock*/)
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpVDir, nil, nil))
	plug, err := plugMgr.FindMapperPluginByName(awsElasticBlockStorePluginName)
	if err != nil {
		os.RemoveAll(tmpVDir)
		t.Fatalf("Can't find the plugin by name: %q", awsElasticBlockStorePluginName)
	}
	if plug.GetPluginName() != awsElasticBlockStorePluginName {
		t.Fatalf("Wrong name: %s", plug.GetPluginName())
	}
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
	mapper, err := plug.NewBlockVolumeMapper(spec, pod, volume.VolumeOptions{})
	if err != nil {
		t.Fatalf("Failed to make a new Mounter: %v", err)
	}
	if mapper == nil {
		t.Fatalf("Got a nil Mounter")
	}

	//GetGlobalMapPath
	gMapPath, err := mapper.GetGlobalMapPath(spec)
	if err != nil || len(gMapPath) == 0 {
		t.Fatalf("Invalid path from GlobalMapPath spec: %s", spec.PersistentVolume.Spec.GCEPersistentDisk.PDName)
	}
	if gMapPath != expectedGlobalPath {
		t.Fatalf("Failed to get GlobalMapPath: %s %s", gMapPath, expectedGlobalPath)
	}

	//GetPodDeviceMapPath
	gDevicePath, gVolName := mapper.GetPodDeviceMapPath()
	if gDevicePath != expectedPodPath {
		t.Errorf("Got unexpected pod path: %s, expected %s", gDevicePath, expectedPodPath)
	}
	if gVolName != testPVName {
		t.Errorf("Got unexpected volNamne: %s, expected %s", gVolName, testPVName)
	}
}
7 vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_ebs_test.go (generated, vendored)
@@ -145,13 +145,6 @@ func TestPlugin(t *testing.T) {
			t.Errorf("SetUp() failed: %v", err)
		}
	}
	if _, err := os.Stat(path); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, volume path not created: %s", path)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}

	fakeManager = &fakePDManager{}
	unmounter, err := plug.(*awsElasticBlockStorePlugin).newUnmounterInternal("vol1", types.UID("poduid"), fakeManager, fakeMounter)
4 vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_util.go (generated, vendored)
@@ -80,12 +80,12 @@ func (util *AWSDiskUtil) CreateVolume(c *awsElasticBlockStoreProvisioner) (aws.K
	} else {
		tags = *c.options.CloudTags
	}
	tags["Name"] = volume.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 255) // AWS tags can have 255 characters
	tags["Name"] = volumeutil.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 255) // AWS tags can have 255 characters

	capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
	requestBytes := capacity.Value()
	// AWS works with gigabytes, convert to GiB with rounding up
	requestGB := int(volume.RoundUpSize(requestBytes, 1024*1024*1024))
	requestGB := int(volumeutil.RoundUpSize(requestBytes, 1024*1024*1024))
	volumeOptions := &aws.VolumeOptions{
		CapacityGB: requestGB,
		Tags:       tags,
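RoundUpSize, as the comment in the hunk says, converts a byte request into GiB rounding up, so a PVC never receives less capacity than it asked for. The arithmetic is plain ceiling division; a small self-contained reimplementation to show the behavior (my sketch, not the vendored helper):

package main

import "fmt"

// roundUpSize mirrors the semantics volumeutil.RoundUpSize is used for
// above: ceiling division of a byte request into allocation units.
func roundUpSize(volumeSizeBytes, allocationUnitBytes int64) int64 {
	return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes
}

func main() {
	const GiB = 1024 * 1024 * 1024
	fmt.Println(roundUpSize(1*GiB, GiB))   // 1
	fmt.Println(roundUpSize(1*GiB+1, GiB)) // 2: requests round up, never down
}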
36 vendor/k8s.io/kubernetes/pkg/volume/azure_dd/BUILD (generated, vendored)
@@ -11,15 +11,41 @@ go_library(
    srcs = [
        "attacher.go",
        "azure_common.go",
        "azure_common_unsupported.go",
        "azure_dd.go",
        "azure_mounter.go",
        "azure_provision.go",
    ] + select({
        "@io_bazel_rules_go//go/platform:linux_amd64": [
        "@io_bazel_rules_go//go/platform:android": [
            "azure_common_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:darwin": [
            "azure_common_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:dragonfly": [
            "azure_common_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:freebsd": [
            "azure_common_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:linux": [
            "azure_common_linux.go",
        ],
        "@io_bazel_rules_go//go/platform:windows_amd64": [
        "@io_bazel_rules_go//go/platform:nacl": [
            "azure_common_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:netbsd": [
            "azure_common_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:openbsd": [
            "azure_common_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:plan9": [
            "azure_common_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:solaris": [
            "azure_common_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:windows": [
            "azure_common_windows.go",
        ],
        "//conditions:default": [],
@@ -34,7 +60,6 @@ go_library(
        "//pkg/util/strings:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//pkg/volume/util/volumehelper:go_default_library",
        "//vendor/github.com/Azure/azure-sdk-for-go/arm/compute:go_default_library",
        "//vendor/github.com/Azure/azure-sdk-for-go/arm/storage:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
@@ -66,8 +91,7 @@ go_test(
        "azure_common_test.go",
        "azure_dd_test.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/volume/azure_dd",
    library = ":go_default_library",
    embed = [":go_default_library"],
    deps = [
        "//pkg/util/mount:go_default_library",
        "//pkg/volume:go_default_library",
19 vendor/k8s.io/kubernetes/pkg/volume/azure_dd/OWNERS (generated, vendored)
@@ -1,11 +1,18 @@
approvers:
- andyzhangx
- brendandburns
- feiskyer
- karataliu
- khenidak
- rootfs
reviewers:
- rootfs
- brendandburns
- saad-ali
- jsafrane
- jingxu97
- msau42
- andyzhangx
- brendandburns
- feiskyer
- jingxu97
- jsafrane
- msau42
- karataliu
- khenidak
- rootfs
- saad-ali
54 vendor/k8s.io/kubernetes/pkg/volume/azure_dd/attacher.go (generated, vendored)
@@ -17,13 +17,13 @@ limitations under the License.
package azure_dd

import (
	"context"
	"fmt"
	"os"
	"path"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"time"

	"github.com/Azure/azure-sdk-for-go/arm/compute"
@@ -36,8 +36,7 @@ import (
	"k8s.io/kubernetes/pkg/util/keymutex"
	"k8s.io/kubernetes/pkg/util/mount"
	"k8s.io/kubernetes/pkg/volume"
	volumeutil "k8s.io/kubernetes/pkg/volume/util"
	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
	"k8s.io/kubernetes/pkg/volume/util"
)

type azureDiskDetacher struct {
@@ -60,17 +59,14 @@ var getLunMutex = keymutex.NewKeyMutex()
func (a *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
	volumeSource, err := getVolumeSource(spec)
	if err != nil {
		glog.Warningf("failed to get azure disk spec")
		glog.Warningf("failed to get azure disk spec (%v)", err)
		return "", err
	}

	instanceid, err := a.cloud.InstanceID(nodeName)
	instanceid, err := a.cloud.InstanceID(context.TODO(), nodeName)
	if err != nil {
		glog.Warningf("failed to get azure instance id")
		return "", fmt.Errorf("failed to get azure instance id for node %q", nodeName)
	}
	if ind := strings.LastIndex(instanceid, "/"); ind >= 0 {
		instanceid = instanceid[(ind + 1):]
		glog.Warningf("failed to get azure instance id (%v)", err)
		return "", fmt.Errorf("failed to get azure instance id for node %q (%v)", nodeName, err)
	}

	diskController, err := getDiskController(a.plugin.host)
@@ -96,8 +92,8 @@ func (a *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (

		lun, err = diskController.GetNextDiskLun(nodeName)
		if err != nil {
			glog.Warningf("no LUN available for instance %q", nodeName)
			return "", fmt.Errorf("all LUNs are used, cannot attach volume %q to instance %q", volumeSource.DiskName, instanceid)
			glog.Warningf("no LUN available for instance %q (%v)", nodeName, err)
			return "", fmt.Errorf("all LUNs are used, cannot attach volume %q to instance %q (%v)", volumeSource.DiskName, instanceid, err)
		}
		glog.V(4).Infof("Trying to attach volume %q lun %d to node %q.", volumeSource.DataDiskURI, lun, nodeName)
		isManagedDisk := (*volumeSource.Kind == v1.AzureManagedDisk)
@@ -156,7 +152,7 @@ func (a *azureDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string,
	var err error
	lun, err := strconv.Atoi(devicePath)
	if err != nil {
		return "", fmt.Errorf("azureDisk - Wait for attach expect device path as a lun number, instead got: %s", devicePath)
		return "", fmt.Errorf("azureDisk - Wait for attach expect device path as a lun number, instead got: %s (%v)", devicePath, err)
	}

	volumeSource, err := getVolumeSource(spec)
@@ -180,7 +176,7 @@ func (a *azureDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string,

	// did we find it?
	if newDevicePath != "" {
		// the curent sequence k8s uses for unformated disk (check-disk, mount, fail, mkfs.extX) hangs on
		// the current sequence k8s uses for unformated disk (check-disk, mount, fail, mkfs.extX) hangs on
		// Azure Managed disk scsi interface. this is a hack and will be replaced once we identify and solve
		// the root case on Azure.
		formatIfNotFormatted(newDevicePath, *volumeSource.FSType, exec)
@@ -232,6 +228,19 @@ func (attacher *azureDiskAttacher) MountDevice(spec *volume.Spec, devicePath str
		}
	}

	if !notMnt {
		// testing original mount point, make sure the mount link is valid
		if _, err := (&osIOHandler{}).ReadDir(deviceMountPath); err != nil {
			// mount link is invalid, now unmount and remount later
			glog.Warningf("azureDisk - ReadDir %s failed with %v, unmount this directory", deviceMountPath, err)
			if err := mounter.Unmount(deviceMountPath); err != nil {
				glog.Errorf("azureDisk - Unmount deviceMountPath %s failed with %v", deviceMountPath, err)
				return err
			}
			notMnt = true
		}
	}

	volumeSource, err := getVolumeSource(spec)
	if err != nil {
		return err
@@ -239,8 +248,8 @@ func (attacher *azureDiskAttacher) MountDevice(spec *volume.Spec, devicePath str

	options := []string{}
	if notMnt {
		diskMounter := volumehelper.NewSafeFormatAndMountFromHost(azureDataDiskPluginName, attacher.plugin.host)
		mountOptions := volume.MountOptionFromSpec(spec, options...)
		diskMounter := util.NewSafeFormatAndMountFromHost(azureDataDiskPluginName, attacher.plugin.host)
		mountOptions := util.MountOptionFromSpec(spec, options...)
		err = diskMounter.FormatAndMount(devicePath, deviceMountPath, *volumeSource.FSType, mountOptions)
		if err != nil {
			if cleanErr := os.Remove(deviceMountPath); cleanErr != nil {
@@ -258,14 +267,11 @@ func (d *azureDiskDetacher) Detach(diskURI string, nodeName types.NodeName) erro
		return fmt.Errorf("invalid disk to detach: %q", diskURI)
	}

	instanceid, err := d.cloud.InstanceID(nodeName)
	instanceid, err := d.cloud.InstanceID(context.TODO(), nodeName)
	if err != nil {
		glog.Warningf("no instance id for node %q, skip detaching", nodeName)
		glog.Warningf("no instance id for node %q, skip detaching (%v)", nodeName, err)
		return nil
	}
	if ind := strings.LastIndex(instanceid, "/"); ind >= 0 {
		instanceid = instanceid[(ind + 1):]
	}

	glog.V(4).Infof("detach %v from node %q", diskURI, nodeName)

@@ -273,6 +279,10 @@ func (d *azureDiskDetacher) Detach(diskURI string, nodeName types.NodeName) erro
	if err != nil {
		return err
	}

	getLunMutex.LockKey(instanceid)
	defer getLunMutex.UnlockKey(instanceid)

	err = diskController.DetachDiskByName("", diskURI, nodeName)
	if err != nil {
		glog.Errorf("failed to detach azure disk %q, err %v", diskURI, err)
@@ -284,7 +294,7 @@ func (d *azureDiskDetacher) Detach(diskURI string, nodeName types.NodeName) erro

// UnmountDevice unmounts the volume on the node
func (detacher *azureDiskDetacher) UnmountDevice(deviceMountPath string) error {
	err := volumeutil.UnmountPath(deviceMountPath, detacher.plugin.host.GetMounter(detacher.plugin.GetPluginName()))
	err := util.UnmountPath(deviceMountPath, detacher.plugin.host.GetMounter(detacher.plugin.GetPluginName()))
	if err == nil {
		glog.V(4).Infof("azureDisk - Device %s was unmounted", deviceMountPath)
	} else {
9 vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common.go (generated, vendored)
@@ -35,9 +35,10 @@ import (
)

const (
	defaultFSType             = "ext4"
	defaultStorageAccountType = storage.StandardLRS
	defaultAzureDiskKind      = v1.AzureSharedBlobDisk
	defaultFSType                   = "ext4"
	defaultStorageAccountType       = storage.StandardLRS
	defaultAzureDiskKind            = v1.AzureSharedBlobDisk
	defaultAzureDataDiskCachingMode = v1.AzureDataDiskCachingNone
)

type dataDisk struct {
@@ -141,7 +142,7 @@ func normalizeStorageAccountType(storageAccountType string) (storage.SkuName, er

func normalizeCachingMode(cachingMode v1.AzureDataDiskCachingMode) (v1.AzureDataDiskCachingMode, error) {
	if cachingMode == "" {
		return v1.AzureDataDiskCachingReadWrite, nil
		return defaultAzureDataDiskCachingMode, nil
	}

	if !supportedCachingModes.Has(string(cachingMode)) {
39 vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common_linux.go (generated, vendored)
@@ -19,6 +19,7 @@ limitations under the License.
package azure_dd

import (
	"fmt"
	"path"
	"strconv"
	libstrings "strings"
@@ -45,6 +46,28 @@ func listAzureDiskPath(io ioHandler) []string {
	return azureDiskList
}

// getDiskLinkByDevName get disk link by device name from devLinkPath, e.g. /dev/disk/azure/, /dev/disk/by-id/
func getDiskLinkByDevName(io ioHandler, devLinkPath, devName string) (string, error) {
	dirs, err := io.ReadDir(devLinkPath)
	glog.V(12).Infof("azureDisk - begin to find %s from %s", devName, devLinkPath)
	if err == nil {
		for _, f := range dirs {
			diskPath := devLinkPath + f.Name()
			glog.V(12).Infof("azureDisk - begin to Readlink: %s", diskPath)
			link, linkErr := io.Readlink(diskPath)
			if linkErr != nil {
				glog.Warningf("azureDisk - read link (%s) error: %v", diskPath, linkErr)
				continue
			}
			if libstrings.HasSuffix(link, devName) {
				return diskPath, nil
			}
		}
		return "", fmt.Errorf("device name(%s) is not found under %s", devName, devLinkPath)
	}
	return "", fmt.Errorf("read %s error: %v", devLinkPath, err)
}

func scsiHostRescan(io ioHandler, exec mount.Exec) {
	scsi_path := "/sys/class/scsi_host/"
	if dirs, err := io.ReadDir(scsi_path); err == nil {
@@ -129,15 +152,25 @@ func findDiskByLunWithConstraint(lun int, io ioHandler, azureDisks []string) (st
			dir := path.Join(sys_path, name, "block")
			if dev, err := io.ReadDir(dir); err == nil {
				found := false
				devName := dev[0].Name()
				for _, diskName := range azureDisks {
					glog.V(12).Infof("azure disk - validating disk %q with sys disk %q", dev[0].Name(), diskName)
					if string(dev[0].Name()) == diskName {
					glog.V(12).Infof("azureDisk - validating disk %q with sys disk %q", devName, diskName)
					if devName == diskName {
						found = true
						break
					}
				}
				if !found {
					return "/dev/" + dev[0].Name(), nil
					devLinkPaths := []string{"/dev/disk/azure/scsi1/", "/dev/disk/by-id/"}
					for _, devLinkPath := range devLinkPaths {
						diskPath, err := getDiskLinkByDevName(io, devLinkPath, devName)
						if err == nil {
							glog.V(4).Infof("azureDisk - found %s by %s under %s", diskPath, devName, devLinkPath)
							return diskPath, nil
						}
						glog.Warningf("azureDisk - getDiskLinkByDevName by %s under %s failed, error: %v", devName, devLinkPath, err)
					}
					return "/dev/" + devName, nil
				}
			}
		}
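getDiskLinkByDevName above turns a kernel name like sdc into a stable udev alias by reading every symlink in a directory and matching the target's suffix; findDiskByLunWithConstraint then prefers /dev/disk/azure/scsi1/ and /dev/disk/by-id/ over a raw /dev/sdX path, which can be renumbered across reboots. The core of that lookup, reduced to standard-library calls (a sketch of the same logic without the ioHandler indirection and logging):

package sketch

import (
	"fmt"
	"io/ioutil"
	"os"
	"strings"
)

// diskLinkByDevName resolves a kernel device name (e.g. "sdc") to a stable
// symlink under devLinkPath, mirroring getDiskLinkByDevName above.
func diskLinkByDevName(devLinkPath, devName string) (string, error) {
	dirs, err := ioutil.ReadDir(devLinkPath)
	if err != nil {
		return "", fmt.Errorf("read %s error: %v", devLinkPath, err)
	}
	for _, f := range dirs {
		diskPath := devLinkPath + f.Name()
		link, err := os.Readlink(diskPath)
		if err != nil {
			continue // unreadable link; skip it, as the original does
		}
		if strings.HasSuffix(link, devName) {
			return diskPath, nil // e.g. /dev/disk/azure/scsi1/lun0 -> ../../sdc
		}
	}
	return "", fmt.Errorf("device name(%s) is not found under %s", devName, devLinkPath)
}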
2 vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd.go (generated, vendored)
@@ -26,7 +26,7 @@ import (
	"k8s.io/kubernetes/pkg/volume"
)

// interface exposed by the cloud provider implementing Disk functionlity
// interface exposed by the cloud provider implementing Disk functionality
type DiskController interface {
	CreateBlobDisk(dataDiskName string, storageAccountType storage.SkuName, sizeGB int) (string, error)
	DeleteBlobDisk(diskUri string) error
21 vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_mounter.go (generated, vendored)
@@ -86,8 +86,19 @@ func (m *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
		return err
	}
	if !mountPoint {
		glog.V(4).Infof("azureDisk - already mounted to target %s", dir)
		return nil
		// testing original mount point, make sure the mount link is valid
		_, err := (&osIOHandler{}).ReadDir(dir)
		if err == nil {
			glog.V(4).Infof("azureDisk - already mounted to target %s", dir)
			return nil
		}
		// mount link is invalid, now unmount and remount later
		glog.Warningf("azureDisk - ReadDir %s failed with %v, unmount this directory", dir, err)
		if err := mounter.Unmount(dir); err != nil {
			glog.Errorf("azureDisk - Unmount directory %s failed with %v", dir, err)
			return err
		}
		mountPoint = true
	}

	if runtime.GOOS != "windows" {
@@ -104,6 +115,10 @@ func (m *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
		options = append(options, "ro")
	}

	if m.options.MountOptions != nil {
		options = util.JoinMountOptions(m.options.MountOptions, options)
	}

	glog.V(4).Infof("azureDisk - Attempting to mount %s on %s", diskName, dir)
	isManagedDisk := (*volumeSource.Kind == v1.AzureManagedDisk)
	globalPDPath, err := makeGlobalPDPath(m.plugin.host, volumeSource.DataDiskURI, isManagedDisk)
@@ -140,7 +155,7 @@ func (m *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
			return fmt.Errorf("azureDisk - SetupAt:Mount:Failure error cleaning up (removing dir:%s) with error:%v original-mountErr:%v", dir, err, mountErr)
		}

		glog.V(2).Infof("azureDisk - Mount of disk:%s on dir:%s failed with mount error:%v post failure clean up was completed", diskName, dir, err, mountErr)
		glog.V(2).Infof("azureDisk - Mount of disk:%s on dir:%s failed with mount error:%v post failure clean up was completed", diskName, dir, mountErr)
		return mountErr
	}
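SetUpAt now applies the same defensive check that MountDevice gained earlier in this commit: a directory that still registers as a mount point but cannot be listed is treated as a stale mount link, unmounted, and the flag flipped so the normal mount path runs again. The pattern extracted into a helper (a sketch; mount.Interface is the kubelet mounter abstraction, and the helper name is mine):

package sketch

import (
	"io/ioutil"

	"k8s.io/kubernetes/pkg/util/mount"
)

// remountIfStale condenses the recovery step above: it reports true when
// the caller should (re)mount dir, either because nothing is mounted or
// because a stale mount was just removed.
func remountIfStale(mounter mount.Interface, dir string, notMnt bool) (bool, error) {
	if notMnt {
		return true, nil // nothing mounted yet; caller should mount
	}
	if _, err := ioutil.ReadDir(dir); err == nil {
		return false, nil // healthy existing mount; nothing to do
	}
	// the mount link is broken; unmount so the caller can remount cleanly
	if err := mounter.Unmount(dir); err != nil {
		return false, err
	}
	return true, nil
}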
7 vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_provision.go (generated, vendored)
@@ -24,6 +24,7 @@ import (
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/volume"
	"k8s.io/kubernetes/pkg/volume/util"
)

type azureDiskProvisioner struct {
@@ -65,7 +66,7 @@ func (d *azureDiskDeleter) Delete() error {
}

func (p *azureDiskProvisioner) Provision() (*v1.PersistentVolume, error) {
	if !volume.AccessModesContainedInAll(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) {
	if !util.AccessModesContainedInAll(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) {
		return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", p.options.PVC.Spec.AccessModes, p.plugin.GetAccessModes())
	}
	supportedModes := p.plugin.GetAccessModes()
@@ -93,10 +94,10 @@ func (p *azureDiskProvisioner) Provision() (*v1.PersistentVolume, error) {
		err error
	)
	// maxLength = 79 - (4 for ".vhd") = 75
	name := volume.GenerateVolumeName(p.options.ClusterName, p.options.PVName, 75)
	name := util.GenerateVolumeName(p.options.ClusterName, p.options.PVName, 75)
	capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
	requestBytes := capacity.Value()
	requestGB := int(volume.RoundUpSize(requestBytes, 1024*1024*1024))
	requestGB := int(util.RoundUpSize(requestBytes, 1024*1024*1024))

	for k, v := range p.options.Parameters {
		switch strings.ToLower(k) {
5 vendor/k8s.io/kubernetes/pkg/volume/azure_file/BUILD (generated, vendored)
@@ -22,7 +22,6 @@ go_library(
        "//pkg/util/strings:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//pkg/volume/util/volumehelper:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
@@ -35,14 +34,14 @@ go_library(
go_test(
    name = "go_default_test",
    srcs = ["azure_file_test.go"],
    importpath = "k8s.io/kubernetes/pkg/volume/azure_file",
    library = ":go_default_library",
    embed = [":go_default_library"],
    deps = [
        "//pkg/cloudprovider/providers/azure:go_default_library",
        "//pkg/cloudprovider/providers/fake:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/testing:go_default_library",
        "//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
13 vendor/k8s.io/kubernetes/pkg/volume/azure_file/OWNERS (generated, vendored)
@@ -1,11 +1,18 @@
approvers:
- andyzhangx
- brendandburns
- feiskyer
- karataliu
- khenidak
- rootfs
reviewers:
- rootfs
- andyzhangx
- brendandburns
- saad-ali
- feiskyer
- jsafrane
- jingxu97
- karataliu
- khenidak
- msau42
- andyzhangx
- rootfs
- saad-ali
54 vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_file.go (generated, vendored)
@@ -21,17 +21,17 @@ import (
	"os"
	"runtime"

	"github.com/golang/glog"
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/cloudprovider"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
	"k8s.io/kubernetes/pkg/util/mount"
	kstrings "k8s.io/kubernetes/pkg/util/strings"
	"k8s.io/kubernetes/pkg/volume"

	"github.com/golang/glog"
	"k8s.io/kubernetes/pkg/cloudprovider"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
	"k8s.io/kubernetes/pkg/volume/util"
	volutil "k8s.io/kubernetes/pkg/volume/util"
)

// ProbeVolumePlugins is the primary endpoint for volume plugins
@@ -45,6 +45,7 @@ type azureFilePlugin struct {

var _ volume.VolumePlugin = &azureFilePlugin{}
var _ volume.PersistentVolumePlugin = &azureFilePlugin{}
var _ volume.ExpandableVolumePlugin = &azureFilePlugin{}

const (
	azureFilePluginName = "kubernetes.io/azure-file"
@@ -121,7 +122,7 @@ func (plugin *azureFilePlugin) newMounterInternal(spec *volume.Spec, pod *v1.Pod
		secretName:   secretName,
		shareName:    share,
		readOnly:     readOnly,
		mountOptions: volume.MountOptionFromSpec(spec),
		mountOptions: volutil.MountOptionFromSpec(spec),
	}, nil
}

@@ -139,6 +140,41 @@ func (plugin *azureFilePlugin) newUnmounterInternal(volName string, podUID types
	}}, nil
}

func (plugin *azureFilePlugin) RequiresFSResize() bool {
	return false
}

func (plugin *azureFilePlugin) ExpandVolumeDevice(
	spec *volume.Spec,
	newSize resource.Quantity,
	oldSize resource.Quantity) (resource.Quantity, error) {

	if spec.PersistentVolume != nil || spec.PersistentVolume.Spec.AzureFile == nil {
		return oldSize, fmt.Errorf("invalid PV spec")
	}
	shareName := spec.PersistentVolume.Spec.AzureFile.ShareName
	azure, err := getAzureCloudProvider(plugin.host.GetCloudProvider())
	if err != nil {
		return oldSize, err
	}

	secretName, secretNamespace, err := getSecretNameAndNamespace(spec, spec.PersistentVolume.Spec.ClaimRef.Namespace)
	if err != nil {
		return oldSize, err
	}

	accountName, accountKey, err := (&azureSvc{}).GetAzureCredentials(plugin.host, secretNamespace, secretName)
	if err != nil {
		return oldSize, err
	}

	if err := azure.ResizeFileShare(accountName, accountKey, shareName, int(volutil.RoundUpToGiB(newSize))); err != nil {
		return oldSize, err
	}

	return newSize, nil
}

func (plugin *azureFilePlugin) ConstructVolumeSpec(volName, mountPath string) (*volume.Spec, error) {
	azureVolume := &v1.Volume{
		Name: volName,
@@ -226,8 +262,8 @@ func (b *azureFileMounter) SetUpAt(dir string, fsGroup *int64) error {
		if b.readOnly {
			options = append(options, "ro")
		}
		mountOptions = volume.JoinMountOptions(b.mountOptions, options)
		mountOptions = appendDefaultMountOptions(mountOptions)
		mountOptions = volutil.JoinMountOptions(b.mountOptions, options)
		mountOptions = appendDefaultMountOptions(mountOptions, fsGroup)
	}

	err = b.mounter.Mount(source, dir, "cifs", mountOptions)
@@ -270,7 +306,7 @@ func (c *azureFileUnmounter) TearDown() error {
}

func (c *azureFileUnmounter) TearDownAt(dir string) error {
	return util.UnmountPath(dir, c.mounter)
	return volutil.UnmountPath(dir, c.mounter)
}

func getVolumeSource(spec *volume.Spec) (string, bool, error) {
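The new ExpandVolumeDevice lets azure-file grow a volume in place: it resolves the storage account credentials from the secret named in the PV and asks the cloud provider to resize the share to the request rounded up to whole GiB; RequiresFSResize returns false because a CIFS share has no block filesystem to grow afterwards. (Note the nil guard as vendored, `spec.PersistentVolume != nil || ...`, reads inverted; a careful reader would expect `== nil` there.) The essential flow, compressed into a helper (a sketch reusing the azureCloudProvider interface from azure_provision.go below and the RoundUpToGiB name from this diff; not the vendored function):

// resizeShare compresses the expand flow above: credentials come from the
// PV's secret, and the share grows to ceil(newSize in GiB).
func resizeShare(azure azureCloudProvider, accountName, accountKey, shareName string, newSize resource.Quantity) (resource.Quantity, error) {
	if err := azure.ResizeFileShare(accountName, accountKey, shareName, int(volutil.RoundUpToGiB(newSize))); err != nil {
		return resource.Quantity{}, err
	}
	return newSize, nil
}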
49 vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_file_test.go (generated, vendored)
@@ -25,6 +25,7 @@ import (
	"strings"
	"testing"

	"github.com/Azure/go-autorest/autorest/to"
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
@@ -164,13 +165,6 @@ func testPlugin(t *testing.T, tmpDir string, volumeHost volume.VolumeHost) {
			t.Errorf("SetUp() failed: %v", err)
		}
	}
	if _, err := os.Stat(path); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, volume path not created: %s", path)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}

	unmounter, err := plug.(*azureFilePlugin).newUnmounterInternal("vol1", types.UID("poduid"), &mount.FakeMounter{})
	if err != nil {
@@ -364,32 +358,55 @@ func TestGetSecretNameAndNamespaceForPV(t *testing.T) {
func TestAppendDefaultMountOptions(t *testing.T) {
	tests := []struct {
		options  []string
		fsGroup  *int64
		expected []string
	}{
		{
			options:  []string{"dir_mode=0777"},
			expected: []string{"dir_mode=0777", fmt.Sprintf("%s=%s", fileMode, defaultFileMode), fmt.Sprintf("%s=%s", vers, defaultVers)},
			options: []string{"dir_mode=0777"},
			fsGroup: nil,
			expected: []string{"dir_mode=0777",
				fmt.Sprintf("%s=%s", fileMode, defaultFileMode),
				fmt.Sprintf("%s=%s", vers, defaultVers)},
		},
		{
			options:  []string{"file_mode=0777"},
			expected: []string{"file_mode=0777", fmt.Sprintf("%s=%s", dirMode, defaultDirMode), fmt.Sprintf("%s=%s", vers, defaultVers)},
			options: []string{"file_mode=0777"},
			fsGroup: to.Int64Ptr(0),
			expected: []string{"file_mode=0777",
				fmt.Sprintf("%s=%s", dirMode, defaultDirMode),
				fmt.Sprintf("%s=%s", vers, defaultVers),
				fmt.Sprintf("%s=0", gid)},
		},
		{
			options:  []string{"vers=2.1"},
			expected: []string{"vers=2.1", fmt.Sprintf("%s=%s", fileMode, defaultFileMode), fmt.Sprintf("%s=%s", dirMode, defaultDirMode)},
			options: []string{"vers=2.1"},
			fsGroup: to.Int64Ptr(1000),
			expected: []string{"vers=2.1",
				fmt.Sprintf("%s=%s", fileMode, defaultFileMode),
				fmt.Sprintf("%s=%s", dirMode, defaultDirMode),
				fmt.Sprintf("%s=1000", gid)},
		},
		{
			options:  []string{""},
			expected: []string{"", fmt.Sprintf("%s=%s", fileMode, defaultFileMode), fmt.Sprintf("%s=%s", dirMode, defaultDirMode), fmt.Sprintf("%s=%s", vers, defaultVers)},
			options: []string{""},
			expected: []string{"", fmt.Sprintf("%s=%s",
				fileMode, defaultFileMode),
				fmt.Sprintf("%s=%s", dirMode, defaultDirMode),
				fmt.Sprintf("%s=%s", vers, defaultVers)},
		},
		{
			options:  []string{"file_mode=0777", "dir_mode=0777"},
			expected: []string{"file_mode=0777", "dir_mode=0777", fmt.Sprintf("%s=%s", vers, defaultVers)},
		},
		{
			options: []string{"gid=2000"},
			fsGroup: to.Int64Ptr(1000),
			expected: []string{"gid=2000",
				fmt.Sprintf("%s=%s", fileMode, defaultFileMode),
				fmt.Sprintf("%s=%s", dirMode, defaultDirMode),
				"vers=3.0"},
		},
	}

	for _, test := range tests {
		result := appendDefaultMountOptions(test.options)
		result := appendDefaultMountOptions(test.options, test.fsGroup)
		if !reflect.DeepEqual(result, test.expected) {
			t.Errorf("input: %q, appendDefaultMountOptions result: %q, expected: %q", test.options, result, test.expected)
		}
20 vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_provision.go (generated, vendored)
@ -28,7 +28,7 @@ import (
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
utilstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
"k8s.io/kubernetes/pkg/volume/util"
)

var _ volume.DeletableVolumePlugin = &azureFilePlugin{}
@ -38,9 +38,11 @@ var _ volume.ProvisionableVolumePlugin = &azureFilePlugin{}
// azure cloud provider should implement it
type azureCloudProvider interface {
// create a file share
CreateFileShare(name, storageAccount, storageType, location string, requestGB int) (string, string, error)
CreateFileShare(shareName, accountName, accountType, location string, requestGiB int) (string, string, error)
// delete a file share
DeleteFileShare(accountName, key, name string) error
DeleteFileShare(accountName, accountKey, shareName string) error
// resize a file share
ResizeFileShare(accountName, accountKey, name string, sizeGiB int) error
}

type azureFileDeleter struct {
@ -130,18 +132,18 @@ type azureFileProvisioner struct {
var _ volume.Provisioner = &azureFileProvisioner{}

func (a *azureFileProvisioner) Provision() (*v1.PersistentVolume, error) {
if !volume.AccessModesContainedInAll(a.plugin.GetAccessModes(), a.options.PVC.Spec.AccessModes) {
if !util.AccessModesContainedInAll(a.plugin.GetAccessModes(), a.options.PVC.Spec.AccessModes) {
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", a.options.PVC.Spec.AccessModes, a.plugin.GetAccessModes())
}

var sku, location, account string

// File share name has a length limit of 63, and it cannot contain two consecutive '-'s.
name := volume.GenerateVolumeName(a.options.ClusterName, a.options.PVName, 63)
name := util.GenerateVolumeName(a.options.ClusterName, a.options.PVName, 63)
name = strings.Replace(name, "--", "-", -1)
capacity := a.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
requestBytes := capacity.Value()
requestGB := int(volume.RoundUpSize(requestBytes, 1024*1024*1024))
requestGiB := int(util.RoundUpSize(requestBytes, 1024*1024*1024))
secretNamespace := a.options.PVC.Namespace
// Apply ProvisionerParameters (case-insensitive). We leave validation of
// the values to the cloud provider.
@ -164,7 +166,7 @@ func (a *azureFileProvisioner) Provision() (*v1.PersistentVolume, error) {
return nil, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on Azure file")
}

account, key, err := a.azureProvider.CreateFileShare(name, account, sku, location, requestGB)
account, key, err := a.azureProvider.CreateFileShare(name, account, sku, location, requestGiB)
if err != nil {
return nil, err
}
@ -180,14 +182,14 @@ func (a *azureFileProvisioner) Provision() (*v1.PersistentVolume, error) {
Name: a.options.PVName,
Labels: map[string]string{},
Annotations: map[string]string{
volumehelper.VolumeDynamicallyCreatedByKey: "azure-file-dynamic-provisioner",
util.VolumeDynamicallyCreatedByKey: "azure-file-dynamic-provisioner",
},
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: a.options.PersistentVolumeReclaimPolicy,
AccessModes: a.options.PVC.Spec.AccessModes,
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", requestGB)),
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", requestGiB)),
},
PersistentVolumeSource: v1.PersistentVolumeSource{
AzureFile: &v1.AzureFilePersistentVolumeSource{
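Aside: the provisioning hunk above rounds the requested capacity up to whole GiB via util.RoundUpSize. A minimal standalone sketch of that rounding semantics (hypothetical helper name, not the vendored function itself):

package main

import "fmt"

// roundUpSize reports how many allocation units are needed to hold
// volumeSizeBytes, rounding up (RoundUpSize semantics as used above).
func roundUpSize(volumeSizeBytes, allocationUnitBytes int64) int64 {
	return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes
}

func main() {
	const GiB = int64(1024 * 1024 * 1024)
	fmt.Println(roundUpSize(5*GiB+1, GiB)) // prints 6: 5 GiB plus one byte needs 6 GiB
}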
17
vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_util.go
generated
vendored
@ -29,6 +29,7 @@ import (
const (
fileMode = "file_mode"
dirMode = "dir_mode"
gid = "gid"
vers = "vers"
defaultFileMode = "0755"
defaultDirMode = "0755"
@ -50,7 +51,7 @@ func (s *azureSvc) GetAzureCredentials(host volume.VolumeHost, nameSpace, secret
return "", "", fmt.Errorf("Cannot get kube client")
}

keys, err := kubeClient.Core().Secrets(nameSpace).Get(secretName, metav1.GetOptions{})
keys, err := kubeClient.CoreV1().Secrets(nameSpace).Get(secretName, metav1.GetOptions{})
if err != nil {
return "", "", fmt.Errorf("Couldn't get secret %v/%v", nameSpace, secretName)
}
@ -85,7 +86,7 @@ func (s *azureSvc) SetAzureCredentials(host volume.VolumeHost, nameSpace, accoun
},
Type: "Opaque",
}
_, err := kubeClient.Core().Secrets(nameSpace).Create(secret)
_, err := kubeClient.CoreV1().Secrets(nameSpace).Create(secret)
if errors.IsAlreadyExists(err) {
err = nil
}
@ -95,11 +96,12 @@ func (s *azureSvc) SetAzureCredentials(host volume.VolumeHost, nameSpace, accoun
return secretName, err
}

// check whether mountOptions contain file_mode and dir_mode, if not, append default mode
func appendDefaultMountOptions(mountOptions []string) []string {
// check whether mountOptions contain file_mode, dir_mode, vers, gid, if not, append default mode
func appendDefaultMountOptions(mountOptions []string, fsGroup *int64) []string {
fileModeFlag := false
dirModeFlag := false
versFlag := false
gidFlag := false

for _, mountOption := range mountOptions {
if strings.HasPrefix(mountOption, fileMode) {
@ -111,6 +113,9 @@ func appendDefaultMountOptions(mountOptions []string) []string {
if strings.HasPrefix(mountOption, vers) {
versFlag = true
}
if strings.HasPrefix(mountOption, gid) {
gidFlag = true
}
}

allMountOptions := mountOptions
@ -125,5 +130,9 @@ func appendDefaultMountOptions(mountOptions []string) []string {
if !versFlag {
allMountOptions = append(allMountOptions, fmt.Sprintf("%s=%s", vers, defaultVers))
}

if !gidFlag && fsGroup != nil {
allMountOptions = append(allMountOptions, fmt.Sprintf("%s=%d", gid, *fsGroup))
}
return allMountOptions
}
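Aside: a self-contained sketch of how the updated helper behaves (standalone main package mirroring the vendored logic; values are hypothetical): defaults for file_mode, dir_mode, and vers are appended only when absent, and gid is taken from the pod's fsGroup only when the caller did not already pass a gid option.

package main

import (
	"fmt"
	"strings"
)

const (
	fileMode        = "file_mode"
	dirMode         = "dir_mode"
	gid             = "gid"
	vers            = "vers"
	defaultFileMode = "0755"
	defaultDirMode  = "0755"
	defaultVers     = "3.0"
)

// appendDefaultMountOptions mirrors the vendored helper: append a default
// only for options the caller did not set.
func appendDefaultMountOptions(mountOptions []string, fsGroup *int64) []string {
	var fileModeFlag, dirModeFlag, versFlag, gidFlag bool
	for _, o := range mountOptions {
		fileModeFlag = fileModeFlag || strings.HasPrefix(o, fileMode)
		dirModeFlag = dirModeFlag || strings.HasPrefix(o, dirMode)
		versFlag = versFlag || strings.HasPrefix(o, vers)
		gidFlag = gidFlag || strings.HasPrefix(o, gid)
	}
	out := mountOptions
	if !fileModeFlag {
		out = append(out, fmt.Sprintf("%s=%s", fileMode, defaultFileMode))
	}
	if !dirModeFlag {
		out = append(out, fmt.Sprintf("%s=%s", dirMode, defaultDirMode))
	}
	if !versFlag {
		out = append(out, fmt.Sprintf("%s=%s", vers, defaultVers))
	}
	if !gidFlag && fsGroup != nil {
		out = append(out, fmt.Sprintf("%s=%d", gid, *fsGroup))
	}
	return out
}

func main() {
	g := int64(1000)
	fmt.Println(appendDefaultMountOptions([]string{"gid=2000"}, &g))
	// [gid=2000 file_mode=0755 dir_mode=0755 vers=3.0]
}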
3
vendor/k8s.io/kubernetes/pkg/volume/cephfs/BUILD
generated
vendored
@ -28,8 +28,7 @@ go_library(
go_test(
name = "go_default_test",
srcs = ["cephfs_test.go"],
importpath = "k8s.io/kubernetes/pkg/volume/cephfs",
library = ":go_default_library",
embed = [":go_default_library"],
deps = [
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
141
vendor/k8s.io/kubernetes/pkg/volume/cephfs/cephfs.go
generated
vendored
@ -19,6 +19,9 @@ package cephfs
import (
"fmt"
"os"
"os/exec"
"path"
"runtime"
"strings"

"github.com/golang/glog"
@ -100,7 +103,7 @@ func (plugin *cephfsPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.
if kubeClient == nil {
return nil, fmt.Errorf("Cannot get kube client")
}
secrets, err := kubeClient.Core().Secrets(secretNs).Get(secretName, metav1.GetOptions{})
secrets, err := kubeClient.CoreV1().Secrets(secretNs).Get(secretName, metav1.GetOptions{})
if err != nil {
err = fmt.Errorf("Couldn't get secret %v/%v err: %v", secretNs, secretName, err)
return nil, err
@ -145,7 +148,7 @@ func (plugin *cephfsPlugin) newMounterInternal(spec *volume.Spec, podUID types.U
readonly: readOnly,
mounter: mounter,
plugin: plugin,
mountOptions: volume.MountOptionFromSpec(spec),
mountOptions: util.MountOptionFromSpec(spec),
},
}, nil
}
@ -170,7 +173,7 @@ func (plugin *cephfsPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*
VolumeSource: v1.VolumeSource{
CephFS: &v1.CephFSVolumeSource{
Monitors: []string{},
Path: volumeName,
Path: mountPath,
},
},
}
@ -229,17 +232,38 @@ func (cephfsVolume *cephfsMounter) SetUpAt(dir string, fsGroup *int64) error {
if !notMnt {
return nil
}
os.MkdirAll(dir, 0750)

err = cephfsVolume.execMount(dir)
if err == nil {
return nil
if err := os.MkdirAll(dir, 0750); err != nil {
return err
}

// cleanup upon failure
util.UnmountPath(dir, cephfsVolume.mounter)
// return error
return err
// check whether it belongs to fuse, if not, default to use kernel mount.
if cephfsVolume.checkFuseMount() {
glog.V(4).Info("CephFS fuse mount.")
err = cephfsVolume.execFuseMount(dir)
// cleanup no matter if fuse mount fail.
keyringPath := cephfsVolume.GetKeyringPath()
_, StatErr := os.Stat(keyringPath)
if !os.IsNotExist(StatErr) {
os.RemoveAll(keyringPath)
}
if err == nil {
// cephfs fuse mount succeeded.
return nil
} else {
// if cephfs fuse mount failed, fallback to kernel mount.
glog.V(4).Infof("CephFS fuse mount failed: %v, fallback to kernel mount.", err)
}
}
glog.V(4).Info("CephFS kernel mount.")

err = cephfsVolume.execMount(dir)
if err != nil {
// cleanup upon failure.
util.UnmountPath(dir, cephfsVolume.mounter)
return err
}
return nil
}

type cephfsUnmounter struct {
@ -264,6 +288,14 @@ func (cephfsVolume *cephfs) GetPath() string {
return cephfsVolume.plugin.host.GetPodVolumeDir(cephfsVolume.podUID, utilstrings.EscapeQualifiedNameForDisk(name), cephfsVolume.volName)
}

// GetKeyringPath creates cephfuse keyring path
func (cephfsVolume *cephfs) GetKeyringPath() string {
name := cephfsPluginName
volumeDir := cephfsVolume.plugin.host.GetPodVolumeDir(cephfsVolume.podUID, utilstrings.EscapeQualifiedNameForDisk(name), cephfsVolume.volName)
volumeKeyringDir := volumeDir + "~keyring"
return volumeKeyringDir
}

func (cephfsVolume *cephfs) execMount(mountpoint string) error {
// cephfs mount option
ceph_opt := ""
@ -291,7 +323,7 @@ func (cephfsVolume *cephfs) execMount(mountpoint string) error {
}
src += hosts[i] + ":" + cephfsVolume.path

mountOptions := volume.JoinMountOptions(cephfsVolume.mountOptions, opt)
mountOptions := util.JoinMountOptions(cephfsVolume.mountOptions, opt)
if err := cephfsVolume.mounter.Mount(src, mountpoint, "ceph", mountOptions); err != nil {
return fmt.Errorf("CephFS: mount failed: %v", err)
}
@ -299,6 +331,91 @@ func (cephfsVolume *cephfs) execMount(mountpoint string) error {
return nil
}

func (cephfsMounter *cephfsMounter) checkFuseMount() bool {
execute := cephfsMounter.plugin.host.GetExec(cephfsMounter.plugin.GetPluginName())
switch runtime.GOOS {
case "linux":
if _, err := execute.Run("/usr/bin/test", "-x", "/sbin/mount.fuse.ceph"); err == nil {
glog.V(4).Info("/sbin/mount.fuse.ceph exists, it should be fuse mount.")
return true
}
return false
}
return false
}

func (cephfsVolume *cephfs) execFuseMount(mountpoint string) error {
// cephfs keyring file
keyring_file := ""
// override secretfile if secret is provided
if cephfsVolume.secret != "" {
// TODO: cephfs fuse currently doesn't support secret option,
// remove keyring file create once secret option is supported.
glog.V(4).Info("cephfs mount begin using fuse.")

keyringPath := cephfsVolume.GetKeyringPath()
os.MkdirAll(keyringPath, 0750)

payload := make(map[string]util.FileProjection, 1)
var fileProjection util.FileProjection

keyring := fmt.Sprintf("[client.%s]\nkey = %s\n", cephfsVolume.id, cephfsVolume.secret)

fileProjection.Data = []byte(keyring)
fileProjection.Mode = int32(0644)
fileName := cephfsVolume.id + ".keyring"

payload[fileName] = fileProjection

writerContext := fmt.Sprintf("cephfuse:%v.keyring", cephfsVolume.id)
writer, err := util.NewAtomicWriter(keyringPath, writerContext)
if err != nil {
glog.Errorf("failed to create atomic writer: %v", err)
return err
}

err = writer.Write(payload)
if err != nil {
glog.Errorf("failed to write payload to dir: %v", err)
return err
}

keyring_file = path.Join(keyringPath, fileName)

} else {
keyring_file = cephfsVolume.secret_file
}

// build src like mon1:6789,mon2:6789,mon3:6789:/
hosts := cephfsVolume.mon
l := len(hosts)
// pass all monitors and let ceph randomize and fail over
i := 0
src := ""
for i = 0; i < l-1; i++ {
src += hosts[i] + ","
}
src += hosts[i]

mountArgs := []string{}
mountArgs = append(mountArgs, "-k")
mountArgs = append(mountArgs, keyring_file)
mountArgs = append(mountArgs, "-m")
mountArgs = append(mountArgs, src)
mountArgs = append(mountArgs, mountpoint)
mountArgs = append(mountArgs, "-r")
mountArgs = append(mountArgs, cephfsVolume.path)

glog.V(4).Infof("Mounting cmd ceph-fuse with arguments (%s)", mountArgs)
command := exec.Command("ceph-fuse", mountArgs...)
output, err := command.CombinedOutput()
if err != nil || !(strings.Contains(string(output), "starting fuse")) {
return fmt.Errorf("Ceph-fuse failed: %v\narguments: %s\nOutput: %s\n", err, mountArgs, string(output))
}

return nil
}

func getVolumeSource(spec *volume.Spec) ([]string, string, string, string, bool, error) {
if spec.Volume != nil && spec.Volume.CephFS != nil {
mon := spec.Volume.CephFS.Monitors
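Aside: the execFuseMount hunk writes a transient client keyring before invoking ceph-fuse. A tiny standalone sketch of the keyring text it generates (id and secret are hypothetical placeholders):

package main

import "fmt"

func main() {
	id, secret := "admin", "AQBexampleSecret==" // hypothetical values
	// Same format string as the hunk above: an INI-style client section.
	keyring := fmt.Sprintf("[client.%s]\nkey = %s\n", id, secret)
	fmt.Print(keyring)
	// [client.admin]
	// key = AQBexampleSecret==
}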
2
vendor/k8s.io/kubernetes/pkg/volume/cephfs/cephfs_test.go
generated
vendored
@ -77,13 +77,13 @@ func TestPlugin(t *testing.T) {
}

mounter, err := plug.(*cephfsPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &mount.FakeMounter{}, "secrets")
volumePath := mounter.GetPath()
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Errorf("Got a nil Mounter")
}
volumePath := mounter.GetPath()
volpath := path.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~cephfs/vol1")
if volumePath != volpath {
t.Errorf("Got unexpected path: %s", volumePath)
4
vendor/k8s.io/kubernetes/pkg/volume/cinder/BUILD
generated
vendored
@ -24,7 +24,6 @@ go_library(
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
@ -43,8 +42,7 @@ go_test(
"attacher_test.go",
"cinder_test.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/cinder",
library = ":go_default_library",
embed = [":go_default_library"],
deps = [
"//pkg/cloudprovider:go_default_library",
"//pkg/util/mount:go_default_library",
4
vendor/k8s.io/kubernetes/pkg/volume/cinder/OWNERS
generated
vendored
@ -2,7 +2,7 @@ approvers:
- jsafrane
- anguslees
- dims
- FengyunPan
- FengyunPan2
reviewers:
- anguslees
- rootfs
@ -10,4 +10,4 @@ reviewers:
- jsafrane
- jingxu97
- msau42
- FengyunPan
- FengyunPan2
80
vendor/k8s.io/kubernetes/pkg/volume/cinder/attacher.go
generated
vendored
@ -17,6 +17,7 @@ limitations under the License.
package cinder

import (
"context"
"fmt"
"os"
"path"
@ -27,16 +28,14 @@ import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)

type cinderDiskAttacher struct {
host volume.VolumeHost
cinderProvider CinderProvider
cinderProvider BlockStorageProvider
}

var _ volume.Attacher = &cinderDiskAttacher{}
@ -44,20 +43,21 @@ var _ volume.Attacher = &cinderDiskAttacher{}
var _ volume.AttachableVolumePlugin = &cinderPlugin{}

const (
checkSleepDuration = 1 * time.Second
operationFinishInitDealy = 1 * time.Second
probeVolumeInitDelay = 1 * time.Second
probeVolumeFactor = 2.0
operationFinishInitDelay = 1 * time.Second
operationFinishFactor = 1.1
operationFinishSteps = 10
diskAttachInitDealy = 1 * time.Second
diskAttachInitDelay = 1 * time.Second
diskAttachFactor = 1.2
diskAttachSteps = 15
diskDetachInitDealy = 1 * time.Second
diskDetachInitDelay = 1 * time.Second
diskDetachFactor = 1.2
diskDetachSteps = 13
)

func (plugin *cinderPlugin) NewAttacher() (volume.Attacher, error) {
cinder, err := getCloudProvider(plugin.host.GetCloudProvider())
cinder, err := plugin.getCloudProvider()
if err != nil {
return nil, err
}
@ -74,7 +74,7 @@ func (plugin *cinderPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string

func (attacher *cinderDiskAttacher) waitOperationFinished(volumeID string) error {
backoff := wait.Backoff{
Duration: operationFinishInitDealy,
Duration: operationFinishInitDelay,
Factor: operationFinishFactor,
Steps: operationFinishSteps,
}
@ -99,7 +99,7 @@ func (attacher *cinderDiskAttacher) waitOperationFinished(volumeID string) error

func (attacher *cinderDiskAttacher) waitDiskAttached(instanceID, volumeID string) error {
backoff := wait.Backoff{
Duration: diskAttachInitDealy,
Duration: diskAttachInitDelay,
Factor: diskAttachFactor,
Steps: diskAttachSteps,
}
@ -186,23 +186,7 @@ func (attacher *cinderDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nod
volumeSpecMap[volumeSource.VolumeID] = spec
}

instanceID, err := attacher.nodeInstanceID(nodeName)
if err != nil {
if err == cloudprovider.InstanceNotFound {
// If node doesn't exist, OpenStack Nova will assume the volumes are not attached to it.
// Mark the volumes as detached and return false without error.
glog.Warningf("VolumesAreAttached: node %q does not exist.", nodeName)
for spec := range volumesAttachedCheck {
volumesAttachedCheck[spec] = false
}

return volumesAttachedCheck, nil
}

return volumesAttachedCheck, err
}

attachedResult, err := attacher.cinderProvider.DisksAreAttached(instanceID, volumeIDList)
attachedResult, err := attacher.cinderProvider.DisksAreAttachedByName(nodeName, volumeIDList)
if err != nil {
// Log error and continue with attach
glog.Errorf(
@ -231,14 +215,15 @@ func (attacher *cinderDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath
volumeID := volumeSource.VolumeID

if devicePath == "" {
return "", fmt.Errorf("WaitForAttach failed for Cinder disk %q: devicePath is empty.", volumeID)
return "", fmt.Errorf("WaitForAttach failed for Cinder disk %q: devicePath is empty", volumeID)
}

ticker := time.NewTicker(checkSleepDuration)
ticker := time.NewTicker(probeVolumeInitDelay)
defer ticker.Stop()
timer := time.NewTimer(timeout)
defer timer.Stop()

duration := probeVolumeInitDelay
for {
select {
case <-ticker.C:
@ -252,12 +237,15 @@ func (attacher *cinderDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath
if exists && err == nil {
glog.Infof("Successfully found attached Cinder disk %q at %v.", volumeID, devicePath)
return devicePath, nil
} else {
// Log an error, and continue checking periodically
glog.Errorf("Error: could not find attached Cinder disk %q (path: %q): %v", volumeID, devicePath, err)
}
// Log an error, and continue checking periodically
glog.Errorf("Error: could not find attached Cinder disk %q (path: %q): %v", volumeID, devicePath, err)
// Using exponential backoff instead of linear
ticker.Stop()
duration = time.Duration(float64(duration) * probeVolumeFactor)
ticker = time.NewTicker(duration)
case <-timer.C:
return "", fmt.Errorf("Could not find attached Cinder disk %q. Timeout waiting for mount paths to be created.", volumeID)
return "", fmt.Errorf("could not find attached Cinder disk %q. Timeout waiting for mount paths to be created", volumeID)
}
}
}
@ -297,8 +285,8 @@ func (attacher *cinderDiskAttacher) MountDevice(spec *volume.Spec, devicePath st
options = append(options, "ro")
}
if notMnt {
diskMounter := volumehelper.NewSafeFormatAndMountFromHost(cinderVolumePluginName, attacher.host)
mountOptions := volume.MountOptionFromSpec(spec, options...)
diskMounter := volumeutil.NewSafeFormatAndMountFromHost(cinderVolumePluginName, attacher.host)
mountOptions := volumeutil.MountOptionFromSpec(spec, options...)
err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions)
if err != nil {
os.Remove(deviceMountPath)
@ -310,13 +298,13 @@ func (attacher *cinderDiskAttacher) MountDevice(spec *volume.Spec, devicePath st

type cinderDiskDetacher struct {
mounter mount.Interface
cinderProvider CinderProvider
cinderProvider BlockStorageProvider
}

var _ volume.Detacher = &cinderDiskDetacher{}

func (plugin *cinderPlugin) NewDetacher() (volume.Detacher, error) {
cinder, err := getCloudProvider(plugin.host.GetCloudProvider())
cinder, err := plugin.getCloudProvider()
if err != nil {
return nil, err
}
@ -328,7 +316,7 @@ func (plugin *cinderPlugin) NewDetacher() (volume.Detacher, error) {

func (detacher *cinderDiskDetacher) waitOperationFinished(volumeID string) error {
backoff := wait.Backoff{
Duration: operationFinishInitDealy,
Duration: operationFinishInitDelay,
Factor: operationFinishFactor,
Steps: operationFinishSteps,
}
@ -353,7 +341,7 @@ func (detacher *cinderDiskDetacher) waitOperationFinished(volumeID string) error

func (detacher *cinderDiskDetacher) waitDiskDetached(instanceID, volumeID string) error {
backoff := wait.Backoff{
Duration: diskDetachInitDealy,
Duration: diskDetachInitDelay,
Factor: diskDetachFactor,
Steps: diskDetachSteps,
}
@ -375,20 +363,10 @@ func (detacher *cinderDiskDetacher) waitDiskDetached(instanceID, volumeID string

func (detacher *cinderDiskDetacher) Detach(volumeName string, nodeName types.NodeName) error {
volumeID := path.Base(volumeName)
instances, res := detacher.cinderProvider.Instances()
if !res {
return fmt.Errorf("failed to list openstack instances")
}
instanceID, err := instances.InstanceID(nodeName)
if ind := strings.LastIndex(instanceID, "/"); ind >= 0 {
instanceID = instanceID[(ind + 1):]
}

if err := detacher.waitOperationFinished(volumeID); err != nil {
return err
}

attached, err := detacher.cinderProvider.DiskIsAttached(instanceID, volumeID)
attached, instanceID, err := detacher.cinderProvider.DiskIsAttachedByName(nodeName, volumeID)
if err != nil {
// Log error and continue with detach
glog.Errorf(
@ -423,7 +401,7 @@ func (attacher *cinderDiskAttacher) nodeInstanceID(nodeName types.NodeName) (str
if !res {
return "", fmt.Errorf("failed to list openstack instances")
}
instanceID, err := instances.InstanceID(nodeName)
instanceID, err := instances.InstanceID(context.TODO(), nodeName)
if err != nil {
return "", err
}
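Aside: the WaitForAttach change above swaps a fixed-interval ticker for one whose period grows by probeVolumeFactor after each failed probe. A standalone sketch of that pattern (hypothetical probe function, not the vendored code):

package main

import (
	"errors"
	"fmt"
	"os"
	"time"
)

// probeWithBackoff polls probe() starting at initDelay, multiplying the
// ticker period by factor after every miss, until timeout elapses.
func probeWithBackoff(probe func() bool, initDelay time.Duration, factor float64, timeout time.Duration) error {
	ticker := time.NewTicker(initDelay)
	// Stop whichever ticker is current when we return.
	defer func() { ticker.Stop() }()
	timer := time.NewTimer(timeout)
	defer timer.Stop()

	duration := initDelay
	for {
		select {
		case <-ticker.C:
			if probe() {
				return nil
			}
			// Back off exponentially instead of polling at a fixed rate.
			ticker.Stop()
			duration = time.Duration(float64(duration) * factor)
			ticker = time.NewTicker(duration)
		case <-timer.C:
			return errors.New("timed out waiting for the device path")
		}
	}
}

func main() {
	err := probeWithBackoff(func() bool {
		_, statErr := os.Stat("/dev/hypothetical") // stand-in for the device check
		return statErr == nil
	}, time.Second, 2.0, 5*time.Second)
	fmt.Println(err)
}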
161
vendor/k8s.io/kubernetes/pkg/volume/cinder/attacher_test.go
generated
vendored
@ -17,6 +17,7 @@ limitations under the License.
package cinder

import (
"context"
"errors"
"reflect"
"testing"
@ -132,7 +133,7 @@ func TestAttachDetach(t *testing.T) {
name: "Attach_Positive",
instanceID: instanceID,
operationPending: operationPendingCall{volumeID, false, done, nil},
diskIsAttached: diskIsAttachedCall{instanceID, volumeID, false, nil},
diskIsAttached: diskIsAttachedCall{instanceID, nodeName, volumeID, false, nil},
attach: attachCall{instanceID, volumeID, "", nil},
diskPath: diskPathCall{instanceID, volumeID, "/dev/sda", nil},
test: func(testcase *testcase) (string, error) {
@ -147,7 +148,7 @@ func TestAttachDetach(t *testing.T) {
name: "Attach_Positive_AlreadyAttached",
instanceID: instanceID,
operationPending: operationPendingCall{volumeID, false, done, nil},
diskIsAttached: diskIsAttachedCall{instanceID, volumeID, true, nil},
diskIsAttached: diskIsAttachedCall{instanceID, nodeName, volumeID, true, nil},
diskPath: diskPathCall{instanceID, volumeID, "/dev/sda", nil},
test: func(testcase *testcase) (string, error) {
attacher := newAttacher(testcase)
@ -173,7 +174,7 @@ func TestAttachDetach(t *testing.T) {
name: "Attach_Negative",
instanceID: instanceID,
operationPending: operationPendingCall{volumeID, false, done, nil},
diskIsAttached: diskIsAttachedCall{instanceID, volumeID, false, diskCheckError},
diskIsAttached: diskIsAttachedCall{instanceID, nodeName, volumeID, false, diskCheckError},
attach: attachCall{instanceID, volumeID, "/dev/sda", attachError},
test: func(testcase *testcase) (string, error) {
attacher := newAttacher(testcase)
@ -187,7 +188,7 @@ func TestAttachDetach(t *testing.T) {
name: "Attach_Negative_DiskPatchFails",
instanceID: instanceID,
operationPending: operationPendingCall{volumeID, false, done, nil},
diskIsAttached: diskIsAttachedCall{instanceID, volumeID, false, nil},
diskIsAttached: diskIsAttachedCall{instanceID, nodeName, volumeID, false, nil},
attach: attachCall{instanceID, volumeID, "", nil},
diskPath: diskPathCall{instanceID, volumeID, "", diskPathError},
test: func(testcase *testcase) (string, error) {
@ -201,7 +202,7 @@ func TestAttachDetach(t *testing.T) {
{
name: "VolumesAreAttached_Positive",
instanceID: instanceID,
disksAreAttached: disksAreAttachedCall{instanceID, []string{volumeID}, map[string]bool{volumeID: true}, nil},
disksAreAttached: disksAreAttachedCall{instanceID, nodeName, []string{volumeID}, map[string]bool{volumeID: true}, nil},
test: func(testcase *testcase) (string, error) {
attacher := newAttacher(testcase)
attachments, err := attacher.VolumesAreAttached([]*volume.Spec{spec}, nodeName)
@ -214,7 +215,7 @@ func TestAttachDetach(t *testing.T) {
{
name: "VolumesAreAttached_Negative",
instanceID: instanceID,
disksAreAttached: disksAreAttachedCall{instanceID, []string{volumeID}, map[string]bool{volumeID: false}, nil},
disksAreAttached: disksAreAttachedCall{instanceID, nodeName, []string{volumeID}, map[string]bool{volumeID: false}, nil},
test: func(testcase *testcase) (string, error) {
attacher := newAttacher(testcase)
attachments, err := attacher.VolumesAreAttached([]*volume.Spec{spec}, nodeName)
@ -227,7 +228,7 @@ func TestAttachDetach(t *testing.T) {
{
name: "VolumesAreAttached_CinderFailed",
instanceID: instanceID,
disksAreAttached: disksAreAttachedCall{instanceID, []string{volumeID}, nil, disksCheckError},
disksAreAttached: disksAreAttachedCall{instanceID, nodeName, []string{volumeID}, nil, disksCheckError},
test: func(testcase *testcase) (string, error) {
attacher := newAttacher(testcase)
attachments, err := attacher.VolumesAreAttached([]*volume.Spec{spec}, nodeName)
@ -242,7 +243,7 @@ func TestAttachDetach(t *testing.T) {
name: "Detach_Positive",
instanceID: instanceID,
operationPending: operationPendingCall{volumeID, false, done, nil},
diskIsAttached: diskIsAttachedCall{instanceID, volumeID, true, nil},
diskIsAttached: diskIsAttachedCall{instanceID, nodeName, volumeID, true, nil},
detach: detachCall{instanceID, volumeID, nil},
test: func(testcase *testcase) (string, error) {
detacher := newDetacher(testcase)
@ -255,7 +256,7 @@ func TestAttachDetach(t *testing.T) {
name: "Detach_Positive_AlreadyDetached",
instanceID: instanceID,
operationPending: operationPendingCall{volumeID, false, done, nil},
diskIsAttached: diskIsAttachedCall{instanceID, volumeID, false, nil},
diskIsAttached: diskIsAttachedCall{instanceID, nodeName, volumeID, false, nil},
test: func(testcase *testcase) (string, error) {
detacher := newDetacher(testcase)
return "", detacher.Detach(volumeID, nodeName)
@ -267,7 +268,7 @@ func TestAttachDetach(t *testing.T) {
name: "Detach_Positive_CheckFails",
instanceID: instanceID,
operationPending: operationPendingCall{volumeID, false, done, nil},
diskIsAttached: diskIsAttachedCall{instanceID, volumeID, false, diskCheckError},
diskIsAttached: diskIsAttachedCall{instanceID, nodeName, volumeID, false, diskCheckError},
detach: detachCall{instanceID, volumeID, nil},
test: func(testcase *testcase) (string, error) {
detacher := newDetacher(testcase)
@ -280,7 +281,7 @@ func TestAttachDetach(t *testing.T) {
name: "Detach_Negative",
instanceID: instanceID,
operationPending: operationPendingCall{volumeID, false, done, nil},
diskIsAttached: diskIsAttachedCall{instanceID, volumeID, false, diskCheckError},
diskIsAttached: diskIsAttachedCall{instanceID, nodeName, volumeID, false, diskCheckError},
detach: detachCall{instanceID, volumeID, detachError},
test: func(testcase *testcase) (string, error) {
detacher := newDetacher(testcase)
@ -426,6 +427,7 @@ type operationPendingCall struct {

type diskIsAttachedCall struct {
instanceID string
nodeName types.NodeName
volumeID string
isAttached bool
ret error
@ -440,6 +442,7 @@ type diskPathCall struct {

type disksAreAttachedCall struct {
instanceID string
nodeName types.NodeName
volumeIDs []string
areAttached map[string]bool
ret error
@ -451,18 +454,18 @@ func (testcase *testcase) AttachDisk(instanceID, volumeID string) (string, error
if expected.volumeID == "" && expected.instanceID == "" {
// testcase.attach looks uninitialized, test did not expect to call
// AttachDisk
testcase.t.Errorf("Unexpected AttachDisk call!")
return "", errors.New("Unexpected AttachDisk call!")
testcase.t.Errorf("unexpected AttachDisk call")
return "", errors.New("unexpected AttachDisk call")
}

if expected.volumeID != volumeID {
testcase.t.Errorf("Unexpected AttachDisk call: expected volumeID %s, got %s", expected.volumeID, volumeID)
return "", errors.New("Unexpected AttachDisk call: wrong volumeID")
testcase.t.Errorf("unexpected AttachDisk call: expected volumeID %s, got %s", expected.volumeID, volumeID)
return "", errors.New("unexpected AttachDisk call: wrong volumeID")
}

if expected.instanceID != instanceID {
testcase.t.Errorf("Unexpected AttachDisk call: expected instanceID %s, got %s", expected.instanceID, instanceID)
return "", errors.New("Unexpected AttachDisk call: wrong instanceID")
testcase.t.Errorf("unexpected AttachDisk call: expected instanceID %s, got %s", expected.instanceID, instanceID)
return "", errors.New("unexpected AttachDisk call: wrong instanceID")
}

glog.V(4).Infof("AttachDisk call: %s, %s, returning %q, %v", volumeID, instanceID, expected.retDeviceName, expected.ret)
@ -477,18 +480,18 @@ func (testcase *testcase) DetachDisk(instanceID, volumeID string) error {
if expected.devicePath == "" && expected.instanceID == "" {
// testcase.detach looks uninitialized, test did not expect to call
// DetachDisk
testcase.t.Errorf("Unexpected DetachDisk call!")
return errors.New("Unexpected DetachDisk call!")
testcase.t.Errorf("unexpected DetachDisk call")
return errors.New("unexpected DetachDisk call")
}

if expected.devicePath != volumeID {
testcase.t.Errorf("Unexpected DetachDisk call: expected volumeID %s, got %s", expected.devicePath, volumeID)
return errors.New("Unexpected DetachDisk call: wrong volumeID")
testcase.t.Errorf("unexpected DetachDisk call: expected volumeID %s, got %s", expected.devicePath, volumeID)
return errors.New("unexpected DetachDisk call: wrong volumeID")
}

if expected.instanceID != instanceID {
testcase.t.Errorf("Unexpected DetachDisk call: expected instanceID %s, got %s", expected.instanceID, instanceID)
return errors.New("Unexpected DetachDisk call: wrong instanceID")
testcase.t.Errorf("unexpected DetachDisk call: expected instanceID %s, got %s", expected.instanceID, instanceID)
return errors.New("unexpected DetachDisk call: wrong instanceID")
}

glog.V(4).Infof("DetachDisk call: %s, %s, returning %v", volumeID, instanceID, expected.ret)
@ -525,18 +528,18 @@ func (testcase *testcase) DiskIsAttached(instanceID, volumeID string) (bool, err
if expected.volumeID == "" && expected.instanceID == "" {
// testcase.diskIsAttached looks uninitialized, test did not expect to
// call DiskIsAttached
testcase.t.Errorf("Unexpected DiskIsAttached call!")
return false, errors.New("Unexpected DiskIsAttached call!")
testcase.t.Errorf("unexpected DiskIsAttached call")
return false, errors.New("unexpected DiskIsAttached call")
}

if expected.volumeID != volumeID {
testcase.t.Errorf("Unexpected DiskIsAttached call: expected volumeID %s, got %s", expected.volumeID, volumeID)
return false, errors.New("Unexpected DiskIsAttached call: wrong volumeID")
testcase.t.Errorf("unexpected DiskIsAttached call: expected volumeID %s, got %s", expected.volumeID, volumeID)
return false, errors.New("unexpected DiskIsAttached call: wrong volumeID")
}

if expected.instanceID != instanceID {
testcase.t.Errorf("Unexpected DiskIsAttached call: expected instanceID %s, got %s", expected.instanceID, instanceID)
return false, errors.New("Unexpected DiskIsAttached call: wrong instanceID")
testcase.t.Errorf("unexpected DiskIsAttached call: expected instanceID %s, got %s", expected.instanceID, instanceID)
return false, errors.New("unexpected DiskIsAttached call: wrong instanceID")
}

glog.V(4).Infof("DiskIsAttached call: %s, %s, returning %v, %v", volumeID, instanceID, expected.isAttached, expected.ret)
@ -549,18 +552,18 @@ func (testcase *testcase) GetAttachmentDiskPath(instanceID, volumeID string) (st
if expected.volumeID == "" && expected.instanceID == "" {
// testcase.diskPath looks uninitialized, test did not expect to
// call GetAttachmentDiskPath
testcase.t.Errorf("Unexpected GetAttachmentDiskPath call!")
return "", errors.New("Unexpected GetAttachmentDiskPath call!")
testcase.t.Errorf("unexpected GetAttachmentDiskPath call")
return "", errors.New("unexpected GetAttachmentDiskPath call")
}

if expected.volumeID != volumeID {
testcase.t.Errorf("Unexpected GetAttachmentDiskPath call: expected volumeID %s, got %s", expected.volumeID, volumeID)
return "", errors.New("Unexpected GetAttachmentDiskPath call: wrong volumeID")
testcase.t.Errorf("unexpected GetAttachmentDiskPath call: expected volumeID %s, got %s", expected.volumeID, volumeID)
return "", errors.New("unexpected GetAttachmentDiskPath call: wrong volumeID")
}

if expected.instanceID != instanceID {
testcase.t.Errorf("Unexpected GetAttachmentDiskPath call: expected instanceID %s, got %s", expected.instanceID, instanceID)
return "", errors.New("Unexpected GetAttachmentDiskPath call: wrong instanceID")
testcase.t.Errorf("unexpected GetAttachmentDiskPath call: expected instanceID %s, got %s", expected.instanceID, instanceID)
return "", errors.New("unexpected GetAttachmentDiskPath call: wrong instanceID")
}

glog.V(4).Infof("GetAttachmentDiskPath call: %s, %s, returning %v, %v", volumeID, instanceID, expected.retPath, expected.ret)
@ -572,6 +575,46 @@ func (testcase *testcase) ShouldTrustDevicePath() bool {
return true
}

func (testcase *testcase) DiskIsAttachedByName(nodeName types.NodeName, volumeID string) (bool, string, error) {
expected := &testcase.diskIsAttached
instanceID := expected.instanceID
// If testcase call DetachDisk*, return false
if *testcase.attachOrDetach == detachStatus {
return false, instanceID, nil
}

// If testcase call AttachDisk*, return true
if *testcase.attachOrDetach == attachStatus {
return true, instanceID, nil
}

if expected.nodeName != nodeName {
testcase.t.Errorf("unexpected DiskIsAttachedByName call: expected nodename %s, got %s", expected.nodeName, nodeName)
return false, instanceID, errors.New("unexpected DiskIsAttachedByName call: wrong nodename")
}

if expected.volumeID == "" && expected.instanceID == "" {
// testcase.diskIsAttached looks uninitialized, test did not expect to
// call DiskIsAttached
testcase.t.Errorf("unexpected DiskIsAttachedByName call")
return false, instanceID, errors.New("unexpected DiskIsAttachedByName call")
}

if expected.volumeID != volumeID {
testcase.t.Errorf("unexpected DiskIsAttachedByName call: expected volumeID %s, got %s", expected.volumeID, volumeID)
return false, instanceID, errors.New("unexpected DiskIsAttachedByName call: wrong volumeID")
}

if expected.instanceID != instanceID {
testcase.t.Errorf("unexpected DiskIsAttachedByName call: expected instanceID %s, got %s", expected.instanceID, instanceID)
return false, instanceID, errors.New("unexpected DiskIsAttachedByName call: wrong instanceID")
}

glog.V(4).Infof("DiskIsAttachedByName call: %s, %s, returning %v, %v", volumeID, nodeName, expected.isAttached, expected.instanceID, expected.ret)

return expected.isAttached, expected.instanceID, expected.ret
}

func (testcase *testcase) CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, bool, error) {
return "", "", false, errors.New("Not implemented")
}
@ -626,36 +669,66 @@ func (testcase *testcase) DisksAreAttached(instanceID string, volumeIDs []string
return expected.areAttached, expected.ret
}

func (testcase *testcase) DisksAreAttachedByName(nodeName types.NodeName, volumeIDs []string) (map[string]bool, error) {
expected := &testcase.disksAreAttached
areAttached := make(map[string]bool)

instanceID := expected.instanceID
if expected.nodeName != nodeName {
testcase.t.Errorf("Unexpected DisksAreAttachedByName call: expected nodeName %s, got %s", expected.nodeName, nodeName)
return areAttached, errors.New("Unexpected DisksAreAttachedByName call: wrong nodename")
}
if len(expected.volumeIDs) == 0 && expected.instanceID == "" {
// testcase.volumeIDs looks uninitialized, test did not expect to call DisksAreAttached
testcase.t.Errorf("Unexpected DisksAreAttachedByName call!")
return areAttached, errors.New("Unexpected DisksAreAttachedByName call")
}

if !reflect.DeepEqual(expected.volumeIDs, volumeIDs) {
testcase.t.Errorf("Unexpected DisksAreAttachedByName call: expected volumeIDs %v, got %v", expected.volumeIDs, volumeIDs)
return areAttached, errors.New("Unexpected DisksAreAttachedByName call: wrong volumeID")
}

if expected.instanceID != instanceID {
testcase.t.Errorf("Unexpected DisksAreAttachedByName call: expected instanceID %s, got %s", expected.instanceID, instanceID)
return areAttached, errors.New("Unexpected DisksAreAttachedByName call: wrong instanceID")
}

glog.V(4).Infof("DisksAreAttachedByName call: %v, %s, returning %v, %v", volumeIDs, nodeName, expected.areAttached, expected.ret)

return expected.areAttached, expected.ret
}

// Implementation of fake cloudprovider.Instances
type instances struct {
instanceID string
}

func (instances *instances) NodeAddresses(name types.NodeName) ([]v1.NodeAddress, error) {
func (instances *instances) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.NodeAddress, error) {
return []v1.NodeAddress{}, errors.New("Not implemented")
}

func (instances *instances) NodeAddressesByProviderID(providerID string) ([]v1.NodeAddress, error) {
func (instances *instances) NodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error) {
return []v1.NodeAddress{}, errors.New("Not implemented")
}

func (instances *instances) ExternalID(name types.NodeName) (string, error) {
func (instances *instances) ExternalID(ctx context.Context, name types.NodeName) (string, error) {
return "", errors.New("Not implemented")
}

func (instances *instances) InstanceID(name types.NodeName) (string, error) {
func (instances *instances) InstanceID(ctx context.Context, name types.NodeName) (string, error) {
return instances.instanceID, nil
}

func (instances *instances) InstanceType(name types.NodeName) (string, error) {
func (instances *instances) InstanceType(ctx context.Context, name types.NodeName) (string, error) {
return "", errors.New("Not implemented")
}

func (instances *instances) InstanceTypeByProviderID(providerID string) (string, error) {
func (instances *instances) InstanceTypeByProviderID(ctx context.Context, providerID string) (string, error) {
return "", errors.New("Not implemented")
}

func (instances *instances) InstanceExistsByProviderID(providerID string) (bool, error) {
func (instances *instances) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) {
return false, errors.New("unimplemented")
}

@ -663,10 +736,10 @@ func (instances *instances) List(filter string) ([]types.NodeName, error) {
return []types.NodeName{}, errors.New("Not implemented")
}

func (instances *instances) AddSSHKeyToAllInstances(user string, keyData []byte) error {
func (instances *instances) AddSSHKeyToAllInstances(ctx context.Context, user string, keyData []byte) error {
return errors.New("Not implemented")
}

func (instances *instances) CurrentNodeName(hostname string) (types.NodeName, error) {
func (instances *instances) CurrentNodeName(ctx context.Context, hostname string) (types.NodeName, error) {
return "", errors.New("Not implemented")
}
55
vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder.go
generated
vendored
@ -34,15 +34,20 @@ import (
kstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)

// This is the primary entrypoint for volume plugins.
const (
// DefaultCloudConfigPath is the default path for cloud configuration
DefaultCloudConfigPath = "/etc/kubernetes/cloud-config"
)

// ProbeVolumePlugins is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&cinderPlugin{}}
}

type CinderProvider interface {
// BlockStorageProvider is the interface for accessing cinder functionality.
type BlockStorageProvider interface {
AttachDisk(instanceID, volumeID string) (string, error)
DetachDisk(instanceID, volumeID string) error
DeleteVolume(volumeID string) error
@ -52,7 +57,8 @@ type CinderProvider interface {
GetAttachmentDiskPath(instanceID, volumeID string) (string, error)
OperationPending(diskName string) (bool, string, error)
DiskIsAttached(instanceID, volumeID string) (bool, error)
DisksAreAttached(instanceID string, volumeIDs []string) (map[string]bool, error)
DiskIsAttachedByName(nodeName types.NodeName, volumeID string) (bool, string, error)
DisksAreAttachedByName(nodeName types.NodeName, volumeIDs []string) (map[string]bool, error)
ShouldTrustDevicePath() bool
Instances() (cloudprovider.Instances, bool)
ExpandVolume(volumeID string, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error)
@ -115,7 +121,7 @@ func (plugin *cinderPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
}

func (plugin *cinderPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
return plugin.newMounterInternal(spec, pod.UID, &CinderDiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
return plugin.newMounterInternal(spec, pod.UID, &DiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}

func (plugin *cinderPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager cdManager, mounter mount.Interface) (volume.Mounter, error) {
@ -138,11 +144,11 @@ func (plugin *cinderPlugin) newMounterInternal(spec *volume.Spec, podUID types.U
},
fsType: fsType,
readOnly: readOnly,
blockDeviceMounter: volumehelper.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil
blockDeviceMounter: util.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil
}

func (plugin *cinderPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
return plugin.newUnmounterInternal(volName, podUID, &CinderDiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
return plugin.newUnmounterInternal(volName, podUID, &DiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}

func (plugin *cinderPlugin) newUnmounterInternal(volName string, podUID types.UID, manager cdManager, mounter mount.Interface) (volume.Unmounter, error) {
@ -157,7 +163,7 @@ func (plugin *cinderPlugin) newUnmounterInternal(volName string, podUID types.UI
}

func (plugin *cinderPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
return plugin.newDeleterInternal(spec, &CinderDiskUtil{})
return plugin.newDeleterInternal(spec, &DiskUtil{})
}

func (plugin *cinderPlugin) newDeleterInternal(spec *volume.Spec, manager cdManager) (volume.Deleter, error) {
@ -174,7 +180,7 @@ func (plugin *cinderPlugin) newDeleterInternal(spec *volume.Spec, manager cdMana
}

func (plugin *cinderPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
return plugin.newProvisionerInternal(options, &CinderDiskUtil{})
return plugin.newProvisionerInternal(options, &DiskUtil{})
}

func (plugin *cinderPlugin) newProvisionerInternal(options volume.VolumeOptions, manager cdManager) (volume.Provisioner, error) {
@ -187,25 +193,30 @@ func (plugin *cinderPlugin) newProvisionerInternal(options volume.VolumeOptions,
}, nil
}

func getCloudProvider(cloudProvider cloudprovider.Interface) (CinderProvider, error) {
if cloud, ok := cloudProvider.(*openstack.OpenStack); ok && cloud != nil {
return cloud, nil
}
return nil, fmt.Errorf("wrong cloud type")
}

func (plugin *cinderPlugin) getCloudProvider() (CinderProvider, error) {
func (plugin *cinderPlugin) getCloudProvider() (BlockStorageProvider, error) {
cloud := plugin.host.GetCloudProvider()
if cloud == nil {
glog.Errorf("Cloud provider not initialized properly")
return nil, errors.New("Cloud provider not initialized properly")
if _, err := os.Stat(DefaultCloudConfigPath); err == nil {
var config *os.File
config, err = os.Open(DefaultCloudConfigPath)
if err != nil {
return nil, fmt.Errorf("unable to load OpenStack configuration from default path : %v", err)
}
defer config.Close()
cloud, err = cloudprovider.GetCloudProvider(openstack.ProviderName, config)
if err != nil {
return nil, fmt.Errorf("unable to create OpenStack cloud provider from default path : %v", err)
}
} else {
return nil, fmt.Errorf("OpenStack cloud provider was not initialized properly : %v", err)
}
}

switch cloud := cloud.(type) {
case *openstack.OpenStack:
return cloud, nil
default:
return nil, errors.New("Invalid cloud provider: expected OpenStack.")
return nil, errors.New("invalid cloud provider: expected OpenStack")
}
}

@ -489,7 +500,7 @@ type cinderVolumeProvisioner struct {
var _ volume.Provisioner = &cinderVolumeProvisioner{}

func (c *cinderVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
if !volume.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
if !util.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
}

@ -503,7 +514,7 @@ func (c *cinderVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
Name: c.options.PVName,
Labels: labels,
Annotations: map[string]string{
volumehelper.VolumeDynamicallyCreatedByKey: "cinder-dynamic-provisioner",
util.VolumeDynamicallyCreatedByKey: "cinder-dynamic-provisioner",
},
},
Spec: v1.PersistentVolumeSpec{
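Aside: the getCloudProvider rewrite above falls back to building the provider from DefaultCloudConfigPath when the host did not hand one over. A minimal sketch of that stat-then-open flow (hypothetical loader, not the kubernetes cloudprovider API):

package main

import (
	"fmt"
	"os"
)

const defaultCloudConfigPath = "/etc/kubernetes/cloud-config"

// openCloudConfig returns the config file when it exists, or a descriptive
// error, mirroring the fallback in the hunk above.
func openCloudConfig(path string) (*os.File, error) {
	if _, err := os.Stat(path); err != nil {
		return nil, fmt.Errorf("cloud provider was not initialized properly: %v", err)
	}
	f, err := os.Open(path)
	if err != nil {
		return nil, fmt.Errorf("unable to load configuration from %s: %v", path, err)
	}
	return f, nil
}

func main() {
	f, err := openCloudConfig(defaultCloudConfigPath)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()
	fmt.Println("loaded", f.Name())
}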
7
vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_test.go
generated
vendored
@ -172,13 +172,6 @@ func TestPlugin(t *testing.T) {
t.Errorf("SetUp() failed: %v", err)
}
}
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", path)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}

unmounter, err := plug.(*cinderPlugin).newUnmounterInternal("vol1", types.UID("poduid"), &fakePDManager{0}, &mount.FakeMounter{})
if err != nil {
41
vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_util.go
generated
vendored
@ -32,14 +32,16 @@ import (
clientset "k8s.io/client-go/kubernetes"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/pkg/volume"
volutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/utils/exec"
)

type CinderDiskUtil struct{}
// DiskUtil has utility/helper methods
type DiskUtil struct{}

// Attaches a disk specified by a volume.CinderPersistenDisk to the current kubelet.
// AttachDisk attaches a disk specified by a volume.CinderPersistenDisk to the current kubelet.
// Mounts the disk to its global path.
func (util *CinderDiskUtil) AttachDisk(b *cinderVolumeMounter, globalPDPath string) error {
func (util *DiskUtil) AttachDisk(b *cinderVolumeMounter, globalPDPath string) error {
options := []string{}
if b.readOnly {
options = append(options, "ro")
@ -98,8 +100,8 @@ func (util *CinderDiskUtil) AttachDisk(b *cinderVolumeMounter, globalPDPath stri
return nil
}

// Unmounts the device and detaches the disk from the kubelet's host machine.
func (util *CinderDiskUtil) DetachDisk(cd *cinderVolumeUnmounter) error {
// DetachDisk unmounts the device and detaches the disk from the kubelet's host machine.
func (util *DiskUtil) DetachDisk(cd *cinderVolumeUnmounter) error {
globalPDPath := makeGlobalPDName(cd.plugin.host, cd.pdName)
if err := cd.mounter.Unmount(globalPDPath); err != nil {
return err
@ -124,7 +126,8 @@ func (util *CinderDiskUtil) DetachDisk(cd *cinderVolumeUnmounter) error {
return nil
}

func (util *CinderDiskUtil) DeleteVolume(cd *cinderVolumeDeleter) error {
// DeleteVolume uses the cloud entrypoint to delete specified volume
func (util *DiskUtil) DeleteVolume(cd *cinderVolumeDeleter) error {
cloud, err := cd.plugin.getCloudProvider()
if err != nil {
return err
@ -158,7 +161,8 @@ func getZonesFromNodes(kubeClient clientset.Interface) (sets.String, error) {
return zones, nil
}

func (util *CinderDiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, volumeLabels map[string]string, fstype string, err error) {
// CreateVolume uses the cloud provider entrypoint for creating a volume
func (util *DiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, volumeLabels map[string]string, fstype string, err error) {
cloud, err := c.plugin.getCloudProvider()
if err != nil {
return "", 0, nil, "", err
@ -167,8 +171,8 @@ func (util *CinderDiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID s
capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
volSizeBytes := capacity.Value()
// Cinder works with gigabytes, convert to GiB with rounding up
volSizeGB := int(volume.RoundUpSize(volSizeBytes, 1024*1024*1024))
name := volume.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 255) // Cinder volume name can have up to 255 characters
volSizeGB := int(volutil.RoundUpSize(volSizeBytes, 1024*1024*1024))
name := volutil.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 255) // Cinder volume name can have up to 255 characters
vtype := ""
availability := ""
// Apply ProvisionerParameters (case-insensitive). We leave validation of
@ -200,7 +204,7 @@ func (util *CinderDiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID s
// if we did not get any zones, lets leave it blank and gophercloud will
// use zone "nova" as default
if len(zones) > 0 {
availability = volume.ChooseZoneForVolume(zones, c.options.PVC.Name)
availability = volutil.ChooseZoneForVolume(zones, c.options.PVC.Name)
}
}

@ -224,6 +228,17 @@ func probeAttachedVolume() error {
scsiHostRescan()

executor := exec.New()

// udevadm settle waits for udevd to process the device creation
// events for all hardware devices, thus ensuring that any device
// nodes have been created successfully before proceeding.
argsSettle := []string{"settle"}
cmdSettle := executor.Command("udevadm", argsSettle...)
_, errSettle := cmdSettle.CombinedOutput()
if errSettle != nil {
glog.Errorf("error running udevadm settle %v\n", errSettle)
}

args := []string{"trigger"}
cmd := executor.Command("udevadm", args...)
_, err := cmd.CombinedOutput()
@ -236,10 +251,10 @@ func probeAttachedVolume() error {
}

func scsiHostRescan() {
scsi_path := "/sys/class/scsi_host/"
if dirs, err := ioutil.ReadDir(scsi_path); err == nil {
scsiPath := "/sys/class/scsi_host/"
if dirs, err := ioutil.ReadDir(scsiPath); err == nil {
for _, f := range dirs {
name := scsi_path + f.Name() + "/scan"
name := scsiPath + f.Name() + "/scan"
data := []byte("- - -")
ioutil.WriteFile(name, data, 0666)
}
3
vendor/k8s.io/kubernetes/pkg/volume/configmap/BUILD
generated
vendored
@ -30,8 +30,7 @@ go_library(
go_test(
name = "go_default_test",
srcs = ["configmap_test.go"],
importpath = "k8s.io/kubernetes/pkg/volume/configmap",
library = ":go_default_library",
embed = [":go_default_library"],
deps = [
"//pkg/volume:go_default_library",
"//pkg/volume/empty_dir:go_default_library",

25
vendor/k8s.io/kubernetes/pkg/volume/configmap/configmap.go
generated
vendored
@@ -194,6 +194,9 @@ func (b *configMapVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
return err
}
if err := volumeutil.MakeNestedMountpoints(b.volName, dir, b.pod); err != nil {
return err
}

optional := b.source.Optional != nil && *b.source.Optional
configMap, err := b.getConfigMap(b.pod.Namespace, b.source.Name)
@@ -214,7 +217,7 @@ func (b *configMapVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
glog.V(3).Infof("Received configMap %v/%v containing (%v) pieces of data, %v total bytes",
b.pod.Namespace,
b.source.Name,
len(configMap.Data),
len(configMap.Data)+len(configMap.BinaryData),
totalBytes)

payload, err := MakePayload(b.source.Items, configMap, b.source.DefaultMode, optional)
@@ -250,7 +253,7 @@ func MakePayload(mappings []v1.KeyToPath, configMap *v1.ConfigMap, defaultMode *
return nil, fmt.Errorf("No defaultMode used, not even the default value for it")
}

payload := make(map[string]volumeutil.FileProjection, len(configMap.Data))
payload := make(map[string]volumeutil.FileProjection, (len(configMap.Data) + len(configMap.BinaryData)))
var fileProjection volumeutil.FileProjection

if len(mappings) == 0 {
@@ -259,17 +262,24 @@ func MakePayload(mappings []v1.KeyToPath, configMap *v1.ConfigMap, defaultMode *
fileProjection.Mode = *defaultMode
payload[name] = fileProjection
}
for name, data := range configMap.BinaryData {
fileProjection.Data = data
fileProjection.Mode = *defaultMode
payload[name] = fileProjection
}
} else {
for _, ktp := range mappings {
content, ok := configMap.Data[ktp.Key]
if !ok {
if stringData, ok := configMap.Data[ktp.Key]; ok {
fileProjection.Data = []byte(stringData)
} else if binaryData, ok := configMap.BinaryData[ktp.Key]; ok {
fileProjection.Data = binaryData
} else {
if optional {
continue
}
return nil, fmt.Errorf("configmap references non-existent config key: %s", ktp.Key)
}

fileProjection.Data = []byte(content)
if ktp.Mode != nil {
fileProjection.Mode = *ktp.Mode
} else {
@@ -287,6 +297,9 @@ func totalBytes(configMap *v1.ConfigMap) int {
for _, value := range configMap.Data {
totalSize += len(value)
}
for _, value := range configMap.BinaryData {
totalSize += len(value)
}

return totalSize
}
@@ -303,7 +316,7 @@ func (c *configMapVolumeUnmounter) TearDown() error {
}

func (c *configMapVolumeUnmounter) TearDownAt(dir string) error {
return volume.UnmountViaEmptyDir(dir, c.plugin.host, c.volName, wrappedVolumeSpec(), c.podUID)
return volumeutil.UnmountViaEmptyDir(dir, c.plugin.host, c.volName, wrappedVolumeSpec(), c.podUID)
}

func getVolumeSource(spec *volume.Spec) (*v1.ConfigMapVolumeSource, bool) {
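
Editor's note: the MakePayload changes above fold ConfigMap BinaryData into the projected payload alongside string Data, and size the map for both. A simplified, self-contained sketch of the merged-payload behavior (the type and function names here are illustrative, not the vendored API):

package main

import "fmt"

// FileProjection mirrors the shape used by the volume util package:
// file contents plus a file mode.
type FileProjection struct {
	Data []byte
	Mode int32
}

// makePayload merges string Data and BinaryData keys into one projected
// payload, applying the same default mode to every entry.
func makePayload(data map[string]string, binaryData map[string][]byte, defaultMode int32) map[string]FileProjection {
	payload := make(map[string]FileProjection, len(data)+len(binaryData))
	for name, value := range data {
		payload[name] = FileProjection{Data: []byte(value), Mode: defaultMode}
	}
	for name, value := range binaryData {
		payload[name] = FileProjection{Data: value, Mode: defaultMode}
	}
	return payload
}

func main() {
	p := makePayload(map[string]string{"bar": "bar"}, map[string][]byte{"foo": []byte("foo")}, 0644)
	fmt.Println(len(p)) // 2
}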

56
vendor/k8s.io/kubernetes/pkg/volume/configmap/configmap_test.go
generated
vendored
@@ -62,6 +62,38 @@ func TestMakePayload(t *testing.T) {
},
success: true,
},
{
name: "no overrides binary data",
configMap: &v1.ConfigMap{
BinaryData: map[string][]byte{
"foo": []byte("foo"),
"bar": []byte("bar"),
},
},
mode: 0644,
payload: map[string]util.FileProjection{
"foo": {Data: []byte("foo"), Mode: 0644},
"bar": {Data: []byte("bar"), Mode: 0644},
},
success: true,
},
{
name: "no overrides mixed data",
configMap: &v1.ConfigMap{
BinaryData: map[string][]byte{
"foo": []byte("foo"),
},
Data: map[string]string{
"bar": "bar",
},
},
mode: 0644,
payload: map[string]util.FileProjection{
"foo": {Data: []byte("foo"), Mode: 0644},
"bar": {Data: []byte("bar"), Mode: 0644},
},
success: true,
},
{
name: "basic 1",
mappings: []v1.KeyToPath{
@@ -466,13 +498,35 @@ func TestPluginOptional(t *testing.T) {
}
}

datadirSymlink := path.Join(volumePath, "..data")
datadir, err := os.Readlink(datadirSymlink)
if err != nil && os.IsNotExist(err) {
t.Fatalf("couldn't find volume path's data dir, %s", datadirSymlink)
} else if err != nil {
t.Fatalf("couldn't read symlink, %s", datadirSymlink)
}
datadirPath := path.Join(volumePath, datadir)

infos, err := ioutil.ReadDir(volumePath)
if err != nil {
t.Fatalf("couldn't find volume path, %s", volumePath)
}
if len(infos) != 0 {
t.Errorf("empty directory, %s, not found", volumePath)
for _, fi := range infos {
if fi.Name() != "..data" && fi.Name() != datadir {
t.Errorf("empty data directory, %s, is not empty. Contains: %s", datadirSymlink, fi.Name())
}
}
}

infos, err = ioutil.ReadDir(datadirPath)
if err != nil {
t.Fatalf("couldn't find volume data path, %s", datadirPath)
}
if len(infos) != 0 {
t.Errorf("empty data directory, %s, is not empty. Contains: %s", datadirSymlink, infos[0].Name())
}

doTestCleanAndTeardown(plugin, testPodUID, testVolumeName, volumePath, t)
}
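
Editor's note: the reworked assertions above walk the atomic-writer layout, where the volume directory holds a "..data" symlink pointing at a timestamped directory that contains the projected files. A small sketch of resolving that symlink (the path passed in main is hypothetical):

package main

import (
	"fmt"
	"os"
	"path"
)

// resolveDataDir follows the atomic writer's "..data" symlink and
// returns the directory that actually holds the projected files.
func resolveDataDir(volumePath string) (string, error) {
	datadirSymlink := path.Join(volumePath, "..data")
	datadir, err := os.Readlink(datadirSymlink)
	if err != nil {
		return "", fmt.Errorf("couldn't read symlink %s: %v", datadirSymlink, err)
	}
	return path.Join(volumePath, datadir), nil
}

func main() {
	if dir, err := resolveDataDir("/var/lib/kubelet/some-volume"); err == nil {
		fmt.Println("data dir:", dir)
	}
}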

14
vendor/k8s.io/kubernetes/pkg/volume/csi/BUILD
generated
vendored
@@ -7,6 +7,7 @@ go_library(
"csi_client.go",
"csi_mounter.go",
"csi_plugin.go",
"csi_util.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/csi",
visibility = ["//visibility:public"],
@@ -15,15 +16,16 @@ go_library(
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/github.com/container-storage-interface/spec/lib/go/csi:go_default_library",
"//vendor/github.com/container-storage-interface/spec/lib/go/csi/v0:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/storage/v1alpha1:go_default_library",
"//vendor/k8s.io/api/storage/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)
@@ -36,22 +38,22 @@ go_test(
"csi_mounter_test.go",
"csi_plugin_test.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/csi",
library = ":go_default_library",
embed = [":go_default_library"],
deps = [
"//pkg/volume:go_default_library",
"//pkg/volume/csi/fake:go_default_library",
"//pkg/volume/testing:go_default_library",
"//vendor/github.com/container-storage-interface/spec/lib/go/csi:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/storage/v1alpha1:go_default_library",
"//vendor/k8s.io/api/storage/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
],
)

408
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_attacher.go
generated
vendored
@@ -20,24 +20,37 @@ import (
"crypto/sha256"
"errors"
"fmt"
"os"
"path"
"path/filepath"
"strings"
"time"

"github.com/golang/glog"
grpctx "golang.org/x/net/context"

csipb "github.com/container-storage-interface/spec/lib/go/csi/v0"
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1alpha1"
storage "k8s.io/api/storage/v1beta1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/volume"
)

const (
persistentVolumeInGlobalPath = "pv"
globalMountInGlobalPath = "globalmount"
)

type csiAttacher struct {
plugin *csiPlugin
k8s kubernetes.Interface
waitSleepTime time.Duration

csiClient csiClient
}

// volume.Attacher methods
@@ -73,7 +86,7 @@ func (c *csiAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string
Status: storage.VolumeAttachmentStatus{Attached: false},
}

_, err = c.k8s.StorageV1alpha1().VolumeAttachments().Create(attachment)
_, err = c.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
alreadyExist := false
if err != nil {
if !apierrs.IsAlreadyExists(err) {
@@ -91,7 +104,7 @@ func (c *csiAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string

// probe for attachment update here
// NOTE: any error from waiting for attachment is logged only. This is because
// the primariy intent of the enclosing method is to create VolumeAttachment.
// the primary intent of the enclosing method is to create VolumeAttachment.
// DONOT return that error here as it is mitigated in attacher.WaitForAttach.
volAttachmentOK := true
if _, err := c.waitForVolumeAttachment(csiSource.VolumeHandle, attachID, csiTimeout); err != nil {
@@ -117,37 +130,79 @@ func (c *csiAttacher) WaitForAttach(spec *volume.Spec, attachID string, pod *v1.
func (c *csiAttacher) waitForVolumeAttachment(volumeHandle, attachID string, timeout time.Duration) (string, error) {
glog.V(4).Info(log("probing for updates from CSI driver for [attachment.ID=%v]", attachID))

ticker := time.NewTicker(c.waitSleepTime)
defer ticker.Stop()

timer := time.NewTimer(timeout) // TODO (vladimirvivien) investigate making this configurable
defer timer.Stop()

//TODO (vladimirvivien) instead of polling api-server, change to a api-server watch
return c.waitForVolumeAttachmentInternal(volumeHandle, attachID, timer, timeout)
}

func (c *csiAttacher) waitForVolumeAttachmentInternal(volumeHandle, attachID string, timer *time.Timer, timeout time.Duration) (string, error) {
glog.V(4).Info(log("probing VolumeAttachment [id=%v]", attachID))
attach, err := c.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
if err != nil {
glog.Error(log("attacher.WaitForAttach failed for volume [%s] (will continue to try): %v", volumeHandle, err))
return "", err
}
// if being deleted, fail fast
if attach.GetDeletionTimestamp() != nil {
glog.Error(log("VolumeAttachment [%s] has deletion timestamp, will not continue to wait for attachment", attachID))
return "", errors.New("volume attachment is being deleted")
}
// attachment OK
if attach.Status.Attached {
return attachID, nil
}
// driver reports attach error
attachErr := attach.Status.AttachError
if attachErr != nil {
glog.Error(log("attachment for %v failed: %v", volumeHandle, attachErr.Message))
return "", errors.New(attachErr.Message)
}

watcher, err := c.k8s.StorageV1beta1().VolumeAttachments().Watch(meta.SingleObject(meta.ObjectMeta{Name: attachID, ResourceVersion: attach.ResourceVersion}))
if err != nil {
return "", fmt.Errorf("watch error:%v for volume %v", err, volumeHandle)
}

ch := watcher.ResultChan()
defer watcher.Stop()

for {
select {
case <-ticker.C:
glog.V(4).Info(log("probing VolumeAttachment [id=%v]", attachID))
attach, err := c.k8s.StorageV1alpha1().VolumeAttachments().Get(attachID, meta.GetOptions{})
if err != nil {
glog.Error(log("attacher.WaitForAttach failed (will continue to try): %v", err))
continue
case event, ok := <-ch:
if !ok {
glog.Errorf("[attachment.ID=%v] watch channel had been closed", attachID)
return "", errors.New("volume attachment watch channel had been closed")
}
// if being deleted, fail fast
if attach.GetDeletionTimestamp() != nil {
glog.Error(log("VolumeAttachment [%s] has deletion timestamp, will not continue to wait for attachment", attachID))
return "", errors.New("volume attachment is being deleted")
}
// attachment OK
if attach.Status.Attached {
return attachID, nil
}
// driver reports attach error
attachErr := attach.Status.AttachError
if attachErr != nil {
glog.Error(log("attachment for %v failed: %v", volumeHandle, attachErr.Message))
return "", errors.New(attachErr.Message)

switch event.Type {
case watch.Added, watch.Modified:
attach, _ := event.Object.(*storage.VolumeAttachment)
// if being deleted, fail fast
if attach.GetDeletionTimestamp() != nil {
glog.Error(log("VolumeAttachment [%s] has deletion timestamp, will not continue to wait for attachment", attachID))
return "", errors.New("volume attachment is being deleted")
}
// attachment OK
if attach.Status.Attached {
return attachID, nil
}
// driver reports attach error
attachErr := attach.Status.AttachError
if attachErr != nil {
glog.Error(log("attachment for %v failed: %v", volumeHandle, attachErr.Message))
return "", errors.New(attachErr.Message)
}
case watch.Deleted:
// if deleted, fail fast
glog.Error(log("VolumeAttachment [%s] has been deleted, will not continue to wait for attachment", attachID))
return "", errors.New("volume attachment has been deleted")

case watch.Error:
// start another cycle
c.waitForVolumeAttachmentInternal(volumeHandle, attachID, timer, timeout)
}

case <-timer.C:
glog.Error(log("attacher.WaitForAttach timeout after %v [volume=%v; attachment.ID=%v]", timeout, volumeHandle, attachID))
return "", fmt.Errorf("attachment timeout for volume %v", volumeHandle)
@@ -173,7 +228,7 @@ func (c *csiAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.No

attachID := getAttachmentName(source.VolumeHandle, source.Driver, string(nodeName))
glog.V(4).Info(log("probing attachment status for VolumeAttachment %v", attachID))
attach, err := c.k8s.StorageV1alpha1().VolumeAttachments().Get(attachID, meta.GetOptions{})
attach, err := c.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
if err != nil {
glog.Error(log("attacher.VolumesAreAttached failed for attach.ID=%v: %v", attachID, err))
continue
@@ -186,12 +241,125 @@ func (c *csiAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.No
}

func (c *csiAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) {
glog.V(4).Info(log("attacher.GetDeviceMountPath is not implemented"))
return "", nil
glog.V(4).Info(log("attacher.GetDeviceMountPath(%v)", spec))
deviceMountPath, err := makeDeviceMountPath(c.plugin, spec)
if err != nil {
glog.Error(log("attacher.GetDeviceMountPath failed to make device mount path: %v", err))
return "", err
}
glog.V(4).Infof("attacher.GetDeviceMountPath succeeded, deviceMountPath: %s", deviceMountPath)
return deviceMountPath, nil
}

func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
glog.V(4).Info(log("attacher.MountDevice is not implemented"))
glog.V(4).Infof(log("attacher.MountDevice(%s, %s)", devicePath, deviceMountPath))

mounted, err := isDirMounted(c.plugin, deviceMountPath)
if err != nil {
glog.Error(log("attacher.MountDevice failed while checking mount status for dir [%s]", deviceMountPath))
return err
}

if mounted {
glog.V(4).Info(log("attacher.MountDevice skipping mount, dir already mounted [%s]", deviceMountPath))
return nil
}

// Setup
if spec == nil {
return fmt.Errorf("attacher.MountDevice failed, spec is nil")
}
csiSource, err := getCSISourceFromSpec(spec)
if err != nil {
glog.Error(log("attacher.MountDevice failed to get CSI persistent source: %v", err))
return err
}

if c.csiClient == nil {
if csiSource.Driver == "" {
return fmt.Errorf("attacher.MountDevice failed, driver name is empty")
}
addr := fmt.Sprintf(csiAddrTemplate, csiSource.Driver)
c.csiClient = newCsiDriverClient("unix", addr)
}
csi := c.csiClient

ctx, cancel := grpctx.WithTimeout(grpctx.Background(), csiTimeout)
defer cancel()
// Check whether "STAGE_UNSTAGE_VOLUME" is set
stageUnstageSet, err := hasStageUnstageCapability(ctx, csi)
if err != nil {
glog.Error(log("attacher.MountDevice failed to check STAGE_UNSTAGE_VOLUME: %v", err))
return err
}
if !stageUnstageSet {
glog.Infof(log("attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..."))
return nil
}

// Start MountDevice
if deviceMountPath == "" {
return fmt.Errorf("attacher.MountDevice failed, deviceMountPath is empty")
}

nodeName := string(c.plugin.host.GetNodeName())
attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, nodeName)

// search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
attachment, err := c.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
if err != nil {
glog.Error(log("attacher.MountDevice failed while getting volume attachment [id=%v]: %v", attachID, err))
return err
}

if attachment == nil {
glog.Error(log("unable to find VolumeAttachment [id=%s]", attachID))
return errors.New("no existing VolumeAttachment found")
}
publishVolumeInfo := attachment.Status.AttachmentMetadata

// create target_dir before call to NodeStageVolume
if err := os.MkdirAll(deviceMountPath, 0750); err != nil {
glog.Error(log("attacher.MountDevice failed to create dir %#v: %v", deviceMountPath, err))
return err
}
glog.V(4).Info(log("created target path successfully [%s]", deviceMountPath))

//TODO (vladimirvivien) implement better AccessModes mapping between k8s and CSI
accessMode := v1.ReadWriteOnce
if spec.PersistentVolume.Spec.AccessModes != nil {
accessMode = spec.PersistentVolume.Spec.AccessModes[0]
}

fsType := csiSource.FSType
if len(fsType) == 0 {
fsType = defaultFSType
}

nodeStageSecrets := map[string]string{}
if csiSource.NodeStageSecretRef != nil {
nodeStageSecrets = getCredentialsFromSecret(c.k8s, csiSource.NodeStageSecretRef)
}

err = csi.NodeStageVolume(ctx,
csiSource.VolumeHandle,
publishVolumeInfo,
deviceMountPath,
fsType,
accessMode,
nodeStageSecrets,
csiSource.VolumeAttributes)

if err != nil {
glog.Errorf(log("attacher.MountDevice failed: %v", err))
if err := removeMountDir(c.plugin, deviceMountPath); err != nil {
glog.Error(log("attacher.MountDevice failed to remove mount dir after a NodeStageVolume() error [%s]: %v", deviceMountPath, err))
return err
}
return err
}

glog.V(4).Infof(log("attacher.MountDevice successfully requested NodeStageVolume [%s]", deviceMountPath))
return nil
}

@@ -201,7 +369,7 @@ func (c *csiAttacher) Detach(volumeName string, nodeName types.NodeName) error {
// volumeName in format driverName<SEP>volumeHandle generated by plugin.GetVolumeName()
if volumeName == "" {
glog.Error(log("detacher.Detach missing value for parameter volumeName"))
return errors.New("missing exepected parameter volumeName")
return errors.New("missing expected parameter volumeName")
}
parts := strings.Split(volumeName, volNameSep)
if len(parts) != 2 {
@@ -212,7 +380,7 @@ func (c *csiAttacher) Detach(volumeName string, nodeName types.NodeName) error {
driverName := parts[0]
volID := parts[1]
attachID := getAttachmentName(volID, driverName, string(nodeName))
if err := c.k8s.StorageV1alpha1().VolumeAttachments().Delete(attachID, nil); err != nil {
if err := c.k8s.StorageV1beta1().VolumeAttachments().Delete(attachID, nil); err != nil {
glog.Error(log("detacher.Detach failed to delete VolumeAttachment [%s]: %v", attachID, err))
return err
}
@@ -224,49 +392,179 @@ func (c *csiAttacher) Detach(volumeName string, nodeName types.NodeName) error {
func (c *csiAttacher) waitForVolumeDetachment(volumeHandle, attachID string) error {
glog.V(4).Info(log("probing for updates from CSI driver for [attachment.ID=%v]", attachID))

ticker := time.NewTicker(c.waitSleepTime)
defer ticker.Stop()

timeout := c.waitSleepTime * 10
timer := time.NewTimer(timeout) // TODO (vladimirvivien) investigate making this configurable
defer timer.Stop()

//TODO (vladimirvivien) instead of polling api-server, change to a api-server watch
return c.waitForVolumeDetachmentInternal(volumeHandle, attachID, timer, timeout)
}

func (c *csiAttacher) waitForVolumeDetachmentInternal(volumeHandle, attachID string, timer *time.Timer, timeout time.Duration) error {
glog.V(4).Info(log("probing VolumeAttachment [id=%v]", attachID))
attach, err := c.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
if err != nil {
if apierrs.IsNotFound(err) {
//object deleted or never existed, done
glog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] not found, object deleted", attachID, volumeHandle))
return nil
}
glog.Error(log("detacher.WaitForDetach failed for volume [%s] (will continue to try): %v", volumeHandle, err))
return err
}
// driver reports attach error
detachErr := attach.Status.DetachError
if detachErr != nil {
glog.Error(log("detachment for VolumeAttachment [%v] for volume [%s] failed: %v", attachID, volumeHandle, detachErr.Message))
return errors.New(detachErr.Message)
}

watcher, err := c.k8s.StorageV1beta1().VolumeAttachments().Watch(meta.SingleObject(meta.ObjectMeta{Name: attachID, ResourceVersion: attach.ResourceVersion}))
if err != nil {
return fmt.Errorf("watch error:%v for volume %v", err, volumeHandle)
}
ch := watcher.ResultChan()
defer watcher.Stop()

for {
select {
case <-ticker.C:
glog.V(4).Info(log("probing VolumeAttachment [id=%v]", attachID))
attach, err := c.k8s.StorageV1alpha1().VolumeAttachments().Get(attachID, meta.GetOptions{})
if err != nil {
if apierrs.IsNotFound(err) {
//object deleted or never existed, done
glog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] not found, object deleted", attachID, volumeHandle))
return nil
}
glog.Error(log("detacher.WaitForDetach failed for volume [%s] (will continue to try): %v", volumeHandle, err))
continue
case event, ok := <-ch:
if !ok {
glog.Errorf("[attachment.ID=%v] watch channel had been closed", attachID)
return errors.New("volume attachment watch channel had been closed")
}

// driver reports attach error
detachErr := attach.Status.DetachError
if detachErr != nil {
glog.Error(log("detachment for VolumeAttachment [%v] for volume [%s] failed: %v", attachID, volumeHandle, detachErr.Message))
return errors.New(detachErr.Message)
switch event.Type {
case watch.Added, watch.Modified:
attach, _ := event.Object.(*storage.VolumeAttachment)
// driver reports attach error
detachErr := attach.Status.DetachError
if detachErr != nil {
glog.Error(log("detachment for VolumeAttachment [%v] for volume [%s] failed: %v", attachID, volumeHandle, detachErr.Message))
return errors.New(detachErr.Message)
}
case watch.Deleted:
//object deleted
glog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] has been deleted", attachID, volumeHandle))
return nil

case watch.Error:
// start another cycle
c.waitForVolumeDetachmentInternal(volumeHandle, attachID, timer, timeout)
}

case <-timer.C:
glog.Error(log("detacher.WaitForDetach timeout after %v [volume=%v; attachment.ID=%v]", timeout, volumeHandle, attachID))
return fmt.Errorf("detachment timed out for volume %v", volumeHandle)
return fmt.Errorf("detachment timeout for volume %v", volumeHandle)
}
}
}

func (c *csiAttacher) UnmountDevice(deviceMountPath string) error {
glog.V(4).Info(log("detacher.UnmountDevice is not implemented"))
glog.V(4).Info(log("attacher.UnmountDevice(%s)", deviceMountPath))

// Setup
driverName, volID, err := getDriverAndVolNameFromDeviceMountPath(c.k8s, deviceMountPath)
if err != nil {
glog.Errorf(log("attacher.UnmountDevice failed to get driver and volume name from device mount path: %v", err))
return err
}

if c.csiClient == nil {
addr := fmt.Sprintf(csiAddrTemplate, driverName)
c.csiClient = newCsiDriverClient("unix", addr)
}
csi := c.csiClient

ctx, cancel := grpctx.WithTimeout(grpctx.Background(), csiTimeout)
defer cancel()
// Check whether "STAGE_UNSTAGE_VOLUME" is set
stageUnstageSet, err := hasStageUnstageCapability(ctx, csi)
if err != nil {
glog.Errorf(log("attacher.UnmountDevice failed to check whether STAGE_UNSTAGE_VOLUME set: %v", err))
return err
}
if !stageUnstageSet {
glog.Infof(log("attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice..."))
return nil
}

// Start UnmountDevice
err = csi.NodeUnstageVolume(ctx,
volID,
deviceMountPath)

if err != nil {
glog.Errorf(log("attacher.UnmountDevice failed: %v", err))
return err
}

glog.V(4).Infof(log("attacher.UnmountDevice successfully requested NodeStageVolume [%s]", deviceMountPath))
return nil
}

func hasStageUnstageCapability(ctx grpctx.Context, csi csiClient) (bool, error) {
capabilities, err := csi.NodeGetCapabilities(ctx)
if err != nil {
return false, err
}

stageUnstageSet := false
if capabilities == nil {
return false, nil
}
for _, capability := range capabilities {
if capability.GetRpc().GetType() == csipb.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME {
stageUnstageSet = true
}
}
return stageUnstageSet, nil
}

// getAttachmentName returns csi-<sha252(volName,csiDriverName,NodeName>
func getAttachmentName(volName, csiDriverName, nodeName string) string {
result := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", volName, csiDriverName, nodeName)))
return fmt.Sprintf("csi-%x", result)
}

func makeDeviceMountPath(plugin *csiPlugin, spec *volume.Spec) (string, error) {
if spec == nil {
return "", fmt.Errorf("makeDeviceMountPath failed, spec is nil")
}

pvName := spec.PersistentVolume.Name
if pvName == "" {
return "", fmt.Errorf("makeDeviceMountPath failed, pv name empty")
}

return path.Join(plugin.host.GetPluginDir(plugin.GetPluginName()), persistentVolumeInGlobalPath, pvName, globalMountInGlobalPath), nil
}

func getDriverAndVolNameFromDeviceMountPath(k8s kubernetes.Interface, deviceMountPath string) (string, string, error) {
// deviceMountPath structure: /var/lib/kubelet/plugins/kubernetes.io/csi/pv/{pvname}/globalmount
dir := filepath.Dir(deviceMountPath)
if file := filepath.Base(deviceMountPath); file != globalMountInGlobalPath {
return "", "", fmt.Errorf("getDriverAndVolNameFromDeviceMountPath failed, path did not end in %s", globalMountInGlobalPath)
}
// dir is now /var/lib/kubelet/plugins/kubernetes.io/csi/pv/{pvname}
pvName := filepath.Base(dir)

// Get PV and check for errors
pv, err := k8s.CoreV1().PersistentVolumes().Get(pvName, meta.GetOptions{})
if err != nil {
return "", "", err
}
if pv == nil || pv.Spec.CSI == nil {
return "", "", fmt.Errorf("getDriverAndVolNameFromDeviceMountPath could not find CSI Persistent Volume Source for pv: %s", pvName)
}

// Get VolumeHandle and PluginName from pv
csiSource := pv.Spec.CSI
if csiSource.Driver == "" {
return "", "", fmt.Errorf("getDriverAndVolNameFromDeviceMountPath failed, driver name empty")
}
if csiSource.VolumeHandle == "" {
return "", "", fmt.Errorf("getDriverAndVolNameFromDeviceMountPath failed, VolumeHandle empty")
}

return csiSource.Driver, csiSource.VolumeHandle, nil
}
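
Editor's note: Detach and MountDevice above re-derive the VolumeAttachment object name instead of storing it; the scheme is the sha256-based getAttachmentName added in this file. Extracted as a standalone, runnable sketch:

package main

import (
	"crypto/sha256"
	"fmt"
)

// getAttachmentName reproduces the naming scheme from the hunk above:
// a deterministic "csi-<sha256 hex>" name derived from the volume handle,
// driver name, and node name, so attach and detach paths can recompute
// the same VolumeAttachment object name without extra bookkeeping.
func getAttachmentName(volName, csiDriverName, nodeName string) string {
	result := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", volName, csiDriverName, nodeName)))
	return fmt.Sprintf("csi-%x", result)
}

func main() {
	fmt.Println(getAttachmentName("vol-0", "test-driver", "node-1"))
}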

488
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_attacher_test.go
generated
vendored
@@ -19,14 +19,21 @@ package csi
import (
"fmt"
"os"
"path/filepath"
"testing"
"time"

storage "k8s.io/api/storage/v1alpha1"
storage "k8s.io/api/storage/v1beta1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/watch"
fakeclient "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/csi/fake"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)

func makeTestAttachment(attachID, nodeName, pvName string) *storage.VolumeAttachment {
@@ -50,15 +57,6 @@ func makeTestAttachment(attachID, nodeName, pvName string) *storage.VolumeAttach
}

func TestAttacherAttach(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer os.RemoveAll(tmpDir)

attacher, err := plug.NewAttacher()
if err != nil {
t.Fatalf("failed to create new attacher: %v", err)
}

csiAttacher := attacher.(*csiAttacher)

testCases := []struct {
name string
@@ -110,13 +108,24 @@ func TestAttacherAttach(t *testing.T) {

// attacher loop
for i, tc := range testCases {
t.Log("test case: ", tc.name)
t.Logf("test case: %s", tc.name)

plug, fakeWatcher, tmpDir := newTestWatchPlugin(t)
defer os.RemoveAll(tmpDir)

attacher, err := plug.NewAttacher()
if err != nil {
t.Fatalf("failed to create new attacher: %v", err)
}

csiAttacher := attacher.(*csiAttacher)

spec := volume.NewSpecFromPersistentVolume(makeTestPV(fmt.Sprintf("test-pv%d", i), 10, tc.driverName, tc.volumeName), false)

go func(id, nodename string, fail bool) {
attachID, err := csiAttacher.Attach(spec, types.NodeName(nodename))
if !fail && err != nil {
t.Error("was not expecting failure, but got err: ", err)
t.Errorf("expecting no failure, but got err: %v", err)
}
if attachID != id && !fail {
t.Errorf("expecting attachID %v, got %v", id, attachID)
@@ -129,7 +138,7 @@ func TestAttacherAttach(t *testing.T) {
// wait for attachment to be saved
var attach *storage.VolumeAttachment
for i := 0; i < 100; i++ {
attach, err = csiAttacher.k8s.StorageV1alpha1().VolumeAttachments().Get(tc.attachID, meta.GetOptions{})
attach, err = csiAttacher.k8s.StorageV1beta1().VolumeAttachments().Get(tc.attachID, meta.GetOptions{})
if err != nil {
if apierrs.IsNotFound(err) {
<-ticker.C
@@ -143,18 +152,21 @@ func TestAttacherAttach(t *testing.T) {
}

if attach == nil {
t.Error("attachment not found")
}
attach.Status.Attached = true
_, err = csiAttacher.k8s.StorageV1alpha1().VolumeAttachments().Update(attach)
if err != nil {
t.Error(err)
t.Logf("attachment not found for id:%v", tc.attachID)
} else {
attach.Status.Attached = true
_, err = csiAttacher.k8s.StorageV1beta1().VolumeAttachments().Update(attach)
if err != nil {
t.Error(err)
}
fakeWatcher.Modify(attach)
}
}
}

func TestAttacherWaitForVolumeAttachment(t *testing.T) {
plug, tmpDir := newTestPlugin(t)

plug, fakeWatcher, tmpDir := newTestWatchPlugin(t)
defer os.RemoveAll(tmpDir)

attacher, err := plug.NewAttacher()
@@ -165,42 +177,92 @@ func TestAttacherWaitForVolumeAttachment(t *testing.T) {
nodeName := "test-node"

testCases := []struct {
name string
attached bool
attachErr *storage.VolumeError
sleepTime time.Duration
timeout time.Duration
shouldFail bool
name string
initAttached bool
finalAttached bool
trigerWatchEventTime time.Duration
initAttachErr *storage.VolumeError
finalAttachErr *storage.VolumeError
sleepTime time.Duration
timeout time.Duration
shouldFail bool
}{
{name: "attach ok", attached: true, sleepTime: 10 * time.Millisecond, timeout: 50 * time.Millisecond},
{name: "attachment error", attachErr: &storage.VolumeError{Message: "missing volume"}, sleepTime: 10 * time.Millisecond, timeout: 30 * time.Millisecond},
{name: "time ran out", attached: false, sleepTime: 5 * time.Millisecond},
{
name: "attach success at get",
initAttached: true,
sleepTime: 10 * time.Millisecond,
timeout: 50 * time.Millisecond,
shouldFail: false,
},
{
name: "attachment error ant get",
initAttachErr: &storage.VolumeError{Message: "missing volume"},
sleepTime: 10 * time.Millisecond,
timeout: 30 * time.Millisecond,
shouldFail: true,
},
{
name: "attach success at watch",
initAttached: false,
finalAttached: true,
trigerWatchEventTime: 5 * time.Millisecond,
timeout: 50 * time.Millisecond,
sleepTime: 5 * time.Millisecond,
shouldFail: false,
},
{
name: "attachment error ant watch",
initAttached: false,
finalAttached: false,
finalAttachErr: &storage.VolumeError{Message: "missing volume"},
trigerWatchEventTime: 5 * time.Millisecond,
sleepTime: 10 * time.Millisecond,
timeout: 30 * time.Millisecond,
shouldFail: true,
},
{
name: "time ran out",
initAttached: false,
finalAttached: true,
trigerWatchEventTime: 100 * time.Millisecond,
timeout: 50 * time.Millisecond,
sleepTime: 5 * time.Millisecond,
shouldFail: true,
},
}

for i, tc := range testCases {
fakeWatcher.Reset()
t.Logf("running test: %v", tc.name)
pvName := fmt.Sprintf("test-pv-%d", i)
volID := fmt.Sprintf("test-vol-%d", i)
attachID := getAttachmentName(volID, testDriver, nodeName)
attachment := makeTestAttachment(attachID, nodeName, pvName)
attachment.Status.Attached = tc.attached
attachment.Status.AttachError = tc.attachErr
attachment.Status.Attached = tc.initAttached
attachment.Status.AttachError = tc.initAttachErr
csiAttacher.waitSleepTime = tc.sleepTime
_, err := csiAttacher.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
if err != nil {
t.Fatalf("failed to attach: %v", err)
}

go func() {
_, err := csiAttacher.k8s.StorageV1alpha1().VolumeAttachments().Create(attachment)
if err != nil {
t.Fatalf("failed to attach: %v", err)
}
}()
// after timeout, fakeWatcher will be closed by csiAttacher.waitForVolumeAttachment
if tc.trigerWatchEventTime > 0 && tc.trigerWatchEventTime < tc.timeout {
go func() {
time.Sleep(tc.trigerWatchEventTime)
attachment.Status.Attached = tc.finalAttached
attachment.Status.AttachError = tc.finalAttachErr
fakeWatcher.Modify(attachment)
}()
}

retID, err := csiAttacher.waitForVolumeAttachment(volID, attachID, tc.timeout)
if tc.shouldFail && err == nil {
t.Error("expecting failure, but err is nil")
}
if tc.attachErr != nil {
if tc.attachErr.Message != err.Error() {
t.Errorf("expecting error [%v], got [%v]", tc.attachErr.Message, err.Error())
if tc.initAttachErr != nil {
if tc.initAttachErr.Message != err.Error() {
t.Errorf("expecting error [%v], got [%v]", tc.initAttachErr.Message, err.Error())
}
}
if err == nil && retID != attachID {
@@ -239,7 +301,7 @@ func TestAttacherVolumesAreAttached(t *testing.T) {
attachID := getAttachmentName(volName, testDriver, nodeName)
attachment := makeTestAttachment(attachID, nodeName, pv.GetName())
attachment.Status.Attached = stat
_, err := csiAttacher.k8s.StorageV1alpha1().VolumeAttachments().Create(attachment)
_, err := csiAttacher.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
if err != nil {
t.Fatalf("failed to attach: %v", err)
}
@@ -268,14 +330,7 @@ func TestAttacherVolumesAreAttached(t *testing.T) {
}

func TestAttacherDetach(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer os.RemoveAll(tmpDir)

attacher, err := plug.NewAttacher()
if err != nil {
t.Fatalf("failed to create new attacher: %v", err)
}
csiAttacher := attacher.(*csiAttacher)
nodeName := "test-node"
testCases := []struct {
name string
@@ -289,10 +344,20 @@ func TestAttacherDetach(t *testing.T) {
}

for _, tc := range testCases {
t.Logf("running test: %v", tc.name)
plug, fakeWatcher, tmpDir := newTestWatchPlugin(t)
defer os.RemoveAll(tmpDir)

attacher, err0 := plug.NewAttacher()
if err0 != nil {
t.Fatalf("failed to create new attacher: %v", err0)
}
csiAttacher := attacher.(*csiAttacher)

pv := makeTestPV("test-pv", 10, testDriver, tc.volID)
spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
attachment := makeTestAttachment(tc.attachID, nodeName, "test-pv")
_, err := csiAttacher.k8s.StorageV1alpha1().VolumeAttachments().Create(attachment)
_, err := csiAttacher.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
if err != nil {
t.Fatalf("failed to attach: %v", err)
}
@@ -300,6 +365,9 @@ func TestAttacherDetach(t *testing.T) {
if err != nil {
t.Errorf("test case %s failed: %v", tc.name, err)
}
go func() {
fakeWatcher.Delete(attachment)
}()
err = csiAttacher.Detach(volumeName, types.NodeName(nodeName))
if tc.shouldFail && err == nil {
t.Fatal("expecting failure, but err = nil")
@@ -307,7 +375,7 @@ func TestAttacherDetach(t *testing.T) {
if !tc.shouldFail && err != nil {
t.Fatalf("unexpected err: %v", err)
}
attach, err := csiAttacher.k8s.StorageV1alpha1().VolumeAttachments().Get(tc.attachID, meta.GetOptions{})
attach, err := csiAttacher.k8s.StorageV1beta1().VolumeAttachments().Get(tc.attachID, meta.GetOptions{})
if err != nil {
if !apierrs.IsNotFound(err) {
t.Fatalf("unexpected err: %v", err)
@@ -319,3 +387,323 @@ func TestAttacherDetach(t *testing.T) {
}
}
}

func TestAttacherGetDeviceMountPath(t *testing.T) {
// Setup
// Create a new attacher
plug, _, tmpDir := newTestWatchPlugin(t)
defer os.RemoveAll(tmpDir)
attacher, err0 := plug.NewAttacher()
if err0 != nil {
t.Fatalf("failed to create new attacher: %v", err0)
}
csiAttacher := attacher.(*csiAttacher)

pluginDir := csiAttacher.plugin.host.GetPluginDir(plug.GetPluginName())

testCases := []struct {
testName string
pvName string
expectedMountPath string
shouldFail bool
}{
{
testName: "normal test",
pvName: "test-pv1",
expectedMountPath: pluginDir + "/pv/test-pv1/globalmount",
},
{
testName: "no pv name",
pvName: "",
expectedMountPath: pluginDir + "/pv/test-pv1/globalmount",
shouldFail: true,
},
}

for _, tc := range testCases {
t.Logf("Running test case: %s", tc.testName)
var spec *volume.Spec

// Create spec
pv := makeTestPV(tc.pvName, 10, testDriver, "testvol")
spec = volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)

// Run
mountPath, err := csiAttacher.GetDeviceMountPath(spec)

// Verify
if err != nil && !tc.shouldFail {
t.Errorf("test should not fail, but error occurred: %v", err)
} else if err == nil {
if tc.shouldFail {
t.Errorf("test should fail, but no error occurred")
} else if mountPath != tc.expectedMountPath {
t.Errorf("mountPath does not equal expectedMountPath. Got: %s. Expected: %s", mountPath, tc.expectedMountPath)
}
}
}
}

func TestAttacherMountDevice(t *testing.T) {
testCases := []struct {
testName string
volName string
devicePath string
deviceMountPath string
stageUnstageSet bool
shouldFail bool
}{
{
testName: "normal",
volName: "test-vol1",
devicePath: "path1",
deviceMountPath: "path2",
stageUnstageSet: true,
},
{
testName: "no vol name",
volName: "",
devicePath: "path1",
deviceMountPath: "path2",
stageUnstageSet: true,
shouldFail: true,
},
{
testName: "no device path",
volName: "test-vol1",
devicePath: "",
deviceMountPath: "path2",
stageUnstageSet: true,
shouldFail: true,
},
{
testName: "no device mount path",
volName: "test-vol1",
devicePath: "path1",
deviceMountPath: "",
stageUnstageSet: true,
shouldFail: true,
},
{
testName: "stage_unstage cap not set",
volName: "test-vol1",
devicePath: "path1",
deviceMountPath: "path2",
stageUnstageSet: false,
},
{
testName: "stage_unstage not set no vars should not fail",
stageUnstageSet: false,
},
}

for _, tc := range testCases {
t.Logf("Running test case: %s", tc.testName)
var spec *volume.Spec
pvName := "test-pv"

// Setup
// Create a new attacher
plug, fakeWatcher, tmpDir := newTestWatchPlugin(t)
defer os.RemoveAll(tmpDir)
attacher, err0 := plug.NewAttacher()
if err0 != nil {
t.Fatalf("failed to create new attacher: %v", err0)
}
csiAttacher := attacher.(*csiAttacher)
csiAttacher.csiClient = setupClient(t, tc.stageUnstageSet)

nodeName := string(csiAttacher.plugin.host.GetNodeName())

// Create spec
pv := makeTestPV(pvName, 10, testDriver, tc.volName)
spec = volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)

attachID := getAttachmentName(tc.volName, testDriver, nodeName)

// Set up volume attachment
attachment := makeTestAttachment(attachID, nodeName, pvName)
_, err := csiAttacher.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
if err != nil {
t.Fatalf("failed to attach: %v", err)
}
go func() {
fakeWatcher.Delete(attachment)
}()

// Run
err = csiAttacher.MountDevice(spec, tc.devicePath, tc.deviceMountPath)

// Verify
if err != nil {
if !tc.shouldFail {
t.Errorf("test should not fail, but error occurred: %v", err)
}
return
}
if err == nil && tc.shouldFail {
t.Errorf("test should fail, but no error occurred")
}

// Verify call goes through all the way
numStaged := 1
if !tc.stageUnstageSet {
numStaged = 0
}

cdc := csiAttacher.csiClient.(*csiDriverClient)
staged := cdc.nodeClient.(*fake.NodeClient).GetNodeStagedVolumes()
if len(staged) != numStaged {
t.Errorf("got wrong number of staged volumes, expecting %v got: %v", numStaged, len(staged))
}
if tc.stageUnstageSet {
gotPath, ok := staged[tc.volName]
if !ok {
t.Errorf("could not find staged volume: %s", tc.volName)
}
if gotPath != tc.deviceMountPath {
t.Errorf("expected mount path: %s. got: %s", tc.deviceMountPath, gotPath)
}
}
}
}

func TestAttacherUnmountDevice(t *testing.T) {
testCases := []struct {
testName string
volID string
deviceMountPath string
stageUnstageSet bool
shouldFail bool
}{
{
testName: "normal",
volID: "project/zone/test-vol1",
deviceMountPath: "/tmp/csi-test049507108/plugins/csi/pv/test-pv-name/globalmount",
stageUnstageSet: true,
},
{
testName: "no device mount path",
volID: "project/zone/test-vol1",
deviceMountPath: "",
stageUnstageSet: true,
shouldFail: true,
},
{
testName: "missing part of device mount path",
volID: "project/zone/test-vol1",
deviceMountPath: "/tmp/csi-test049507108/plugins/csi/pv/test-pv-name/globalmount",
stageUnstageSet: true,
shouldFail: true,
},
{
testName: "test volume name mismatch",
volID: "project/zone/test-vol1",
deviceMountPath: "/tmp/csi-test049507108/plugins/csi/pv/test-pv-name/globalmount",
stageUnstageSet: true,
shouldFail: true,
},
{
testName: "stage_unstage not set",
volID: "project/zone/test-vol1",
deviceMountPath: "/tmp/csi-test049507108/plugins/csi/pv/test-pv-name/globalmount",
stageUnstageSet: false,
},
{
testName: "stage_unstage not set no vars should not fail",
stageUnstageSet: false,
},
}

for _, tc := range testCases {
t.Logf("Running test case: %s", tc.testName)
// Setup
// Create a new attacher
plug, _, tmpDir := newTestWatchPlugin(t)
defer os.RemoveAll(tmpDir)
attacher, err0 := plug.NewAttacher()
if err0 != nil {
t.Fatalf("failed to create new attacher: %v", err0)
}
csiAttacher := attacher.(*csiAttacher)
csiAttacher.csiClient = setupClient(t, tc.stageUnstageSet)

// Add the volume to NodeStagedVolumes
cdc := csiAttacher.csiClient.(*csiDriverClient)
cdc.nodeClient.(*fake.NodeClient).AddNodeStagedVolume(tc.volID, tc.deviceMountPath)

// Make the PV for this object
dir := filepath.Dir(tc.deviceMountPath)
// dir is now /var/lib/kubelet/plugins/kubernetes.io/csi/pv/{pvname}
pvName := filepath.Base(dir)
pv := makeTestPV(pvName, 5, "csi", tc.volID)
_, err := csiAttacher.k8s.CoreV1().PersistentVolumes().Create(pv)
if err != nil && !tc.shouldFail {
t.Fatalf("Failed to create PV: %v", err)
}

// Run
err = csiAttacher.UnmountDevice(tc.deviceMountPath)

// Verify
if err != nil {
if !tc.shouldFail {
t.Errorf("test should not fail, but error occurred: %v", err)
}
return
}
if err == nil && tc.shouldFail {
t.Errorf("test should fail, but no error occurred")
}

// Verify call goes through all the way
expectedSet := 0
if !tc.stageUnstageSet {
expectedSet = 1
}
staged := cdc.nodeClient.(*fake.NodeClient).GetNodeStagedVolumes()
if len(staged) != expectedSet {
t.Errorf("got wrong number of staged volumes, expecting %v got: %v", expectedSet, len(staged))
}

_, ok := staged[tc.volID]
if ok && tc.stageUnstageSet {
t.Errorf("found unexpected staged volume: %s", tc.volID)
} else if !ok && !tc.stageUnstageSet {
t.Errorf("could not find expected staged volume: %s", tc.volID)
}

}
}

// create a plugin mgr to load plugins and setup a fake client
func newTestWatchPlugin(t *testing.T) (*csiPlugin, *watch.FakeWatcher, string) {
tmpDir, err := utiltesting.MkTmpdir("csi-test")
if err != nil {
t.Fatalf("can't create temp dir: %v", err)
}

fakeClient := fakeclient.NewSimpleClientset()
fakeWatcher := watch.NewFake()
fakeClient.Fake.PrependWatchReactor("*", core.DefaultWatchReactor(fakeWatcher, nil))
fakeClient.Fake.WatchReactionChain = fakeClient.Fake.WatchReactionChain[:1]
host := volumetest.NewFakeVolumeHost(
tmpDir,
fakeClient,
nil,
)
plugMgr := &volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host)

plug, err := plugMgr.FindPluginByName(csiPluginName)
if err != nil {
t.Fatalf("can't find plugin %v", csiPluginName)
}

csiPlug, ok := plug.(*csiPlugin)
if !ok {
t.Fatalf("cannot assert plugin to be type csiPlugin")
}

return csiPlug, fakeWatcher, tmpDir
}
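
Editor's note: the rewritten tests above drive the attacher's new watch path by prepending a watch.FakeWatcher as a watch reactor on the fake clientset and injecting Modify/Delete events by hand. A minimal, runnable sketch of that injection pattern (the Pod is just a convenient runtime.Object; any API object would do):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/watch"
)

func main() {
	fw := watch.NewFake()
	defer fw.Stop()

	// FakeWatcher sends block until a receiver is ready, so the event
	// is injected from a goroutine, just as the tests above do.
	go fw.Modify(&v1.Pod{})

	event := <-fw.ResultChan()
	fmt.Println(event.Type) // MODIFIED
}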

195
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_client.go
generated
vendored
@@ -17,13 +17,11 @@ limitations under the License.
package csi

import (
"bytes"
"errors"
"fmt"
"net"
"time"

csipb "github.com/container-storage-interface/spec/lib/go/csi"
csipb "github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/golang/glog"
grpctx "golang.org/x/net/context"
"google.golang.org/grpc"
@@ -31,19 +29,34 @@ import (
)

type csiClient interface {
AssertSupportedVersion(ctx grpctx.Context, ver *csipb.Version) error
NodeProbe(ctx grpctx.Context, ver *csipb.Version) error
NodePublishVolume(
ctx grpctx.Context,
volumeid string,
readOnly bool,
stagingTargetPath string,
targetPath string,
accessMode api.PersistentVolumeAccessMode,
volumeInfo map[string]string,
volumeAttribs map[string]string,
nodePublishSecrets map[string]string,
fsType string,
) error
NodeUnpublishVolume(ctx grpctx.Context, volID string, targetPath string) error
NodeUnpublishVolume(
ctx grpctx.Context,
volID string,
targetPath string,
) error
NodeStageVolume(ctx grpctx.Context,
volID string,
publishVolumeInfo map[string]string,
stagingTargetPath string,
fsType string,
accessMode api.PersistentVolumeAccessMode,
nodeStageSecrets map[string]string,
volumeAttribs map[string]string,
) error
NodeUnstageVolume(ctx grpctx.Context, volID, stagingTargetPath string) error
NodeGetCapabilities(ctx grpctx.Context) ([]*csipb.NodeServiceCapability, error)
}

// csiClient encapsulates all csi-plugin methods
@@ -89,68 +102,16 @@ func (c *csiDriverClient) assertConnection() error {
return nil
}

// AssertSupportedVersion ensures driver supports specified spec version.
// If version is not supported, the assertion fails with an error.
// This test should be done early during the storage operation flow to avoid
// unnecessary calls later.
func (c *csiDriverClient) AssertSupportedVersion(ctx grpctx.Context, ver *csipb.Version) error {
if c.versionAsserted {
if !c.versionSupported {
return fmt.Errorf("version %s not supported", verToStr(ver))
}
return nil
}

if err := c.assertConnection(); err != nil {
c.versionAsserted = false
return err
}

glog.V(4).Info(log("asserting version supported by driver"))
rsp, err := c.idClient.GetSupportedVersions(ctx, &csipb.GetSupportedVersionsRequest{})
if err != nil {
c.versionAsserted = false
return err
}

supported := false
vers := rsp.GetSupportedVersions()
glog.V(4).Info(log("driver reports %d versions supported: %s", len(vers), versToStr(vers)))

for _, v := range vers {
//TODO (vladimirvivien) use more lenient/heuristic for exact or match of ranges etc
if verToStr(v) == verToStr(ver) {
supported = true
break
}
}

c.versionAsserted = true
c.versionSupported = supported

if !supported {
return fmt.Errorf("version %s not supported", verToStr(ver))
}

glog.V(4).Info(log("version %s supported", verToStr(ver)))
return nil
}

func (c *csiDriverClient) NodeProbe(ctx grpctx.Context, ver *csipb.Version) error {
glog.V(4).Info(log("sending NodeProbe rpc call to csi driver: [version %v]", ver))
req := &csipb.NodeProbeRequest{Version: ver}
_, err := c.nodeClient.NodeProbe(ctx, req)
return err
}

func (c *csiDriverClient) NodePublishVolume(
ctx grpctx.Context,
volID string,
readOnly bool,
stagingTargetPath string,
targetPath string,
accessMode api.PersistentVolumeAccessMode,
volumeInfo map[string]string,
volumeAttribs map[string]string,
nodePublishSecrets map[string]string,
fsType string,
) error {
glog.V(4).Info(log("calling NodePublishVolume rpc [volid=%s,target_path=%s]", volID, targetPath))
@@ -166,13 +127,12 @@ func (c *csiDriverClient) NodePublishVolume(
}

req := &csipb.NodePublishVolumeRequest{
Version: csiVersion,
VolumeId: volID,
TargetPath: targetPath,
Readonly: readOnly,
PublishVolumeInfo: volumeInfo,
VolumeAttributes: volumeAttribs,

VolumeId: volID,
TargetPath: targetPath,
Readonly: readOnly,
PublishInfo: volumeInfo,
VolumeAttributes: volumeAttribs,
NodePublishSecrets: nodePublishSecrets,
VolumeCapability: &csipb.VolumeCapability{
AccessMode: &csipb.VolumeCapability_AccessMode{
Mode: asCSIAccessMode(accessMode),
@@ -184,6 +144,9 @@ func (c *csiDriverClient) NodePublishVolume(
},
},
}
if stagingTargetPath != "" {
req.StagingTargetPath = stagingTargetPath
}

_, err := c.nodeClient.NodePublishVolume(ctx, req)
return err
@@ -203,7 +166,6 @@ func (c *csiDriverClient) NodeUnpublishVolume(ctx grpctx.Context, volID string,
}

req := &csipb.NodeUnpublishVolumeRequest{
Version: csiVersion,
VolumeId: volID,
TargetPath: targetPath,
}
@@ -212,6 +174,84 @@ func (c *csiDriverClient) NodeUnpublishVolume(ctx grpctx.Context, volID string,
return err
}

func (c *csiDriverClient) NodeStageVolume(ctx grpctx.Context,
volID string,
publishInfo map[string]string,
stagingTargetPath string,
fsType string,
accessMode api.PersistentVolumeAccessMode,
nodeStageSecrets map[string]string,
volumeAttribs map[string]string,
) error {
glog.V(4).Info(log("calling NodeStageVolume rpc [volid=%s,staging_target_path=%s]", volID, stagingTargetPath))
if volID == "" {
return errors.New("missing volume id")
}
if stagingTargetPath == "" {
return errors.New("missing staging target path")
}
if err := c.assertConnection(); err != nil {
glog.Errorf("%v: failed to assert a connection: %v", csiPluginName, err)
return err
}

req := &csipb.NodeStageVolumeRequest{
VolumeId: volID,
PublishInfo: publishInfo,
StagingTargetPath: stagingTargetPath,
VolumeCapability: &csipb.VolumeCapability{
AccessMode: &csipb.VolumeCapability_AccessMode{
Mode: asCSIAccessMode(accessMode),
},
AccessType: &csipb.VolumeCapability_Mount{
Mount: &csipb.VolumeCapability_MountVolume{
FsType: fsType,
},
},
},
NodeStageSecrets: nodeStageSecrets,
VolumeAttributes: volumeAttribs,
}

_, err := c.nodeClient.NodeStageVolume(ctx, req)
return err
}

func (c *csiDriverClient) NodeUnstageVolume(ctx grpctx.Context, volID, stagingTargetPath string) error {
glog.V(4).Info(log("calling NodeUnstageVolume rpc [volid=%s,staging_target_path=%s]", volID, stagingTargetPath))
if volID == "" {
|
||||
return errors.New("missing volume id")
|
||||
}
|
||||
if stagingTargetPath == "" {
|
||||
return errors.New("missing staging target path")
|
||||
}
|
||||
if err := c.assertConnection(); err != nil {
|
||||
glog.Errorf("%v: failed to assert a connection: %v", csiPluginName, err)
|
||||
return err
|
||||
}
|
||||
|
||||
req := &csipb.NodeUnstageVolumeRequest{
|
||||
VolumeId: volID,
|
||||
StagingTargetPath: stagingTargetPath,
|
||||
}
|
||||
_, err := c.nodeClient.NodeUnstageVolume(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *csiDriverClient) NodeGetCapabilities(ctx grpctx.Context) ([]*csipb.NodeServiceCapability, error) {
|
||||
glog.V(4).Info(log("calling NodeGetCapabilities rpc"))
|
||||
if err := c.assertConnection(); err != nil {
|
||||
glog.Errorf("%v: failed to assert a connection: %v", csiPluginName, err)
|
||||
return nil, err
|
||||
}
|
||||
req := &csipb.NodeGetCapabilitiesRequest{}
|
||||
resp, err := c.nodeClient.NodeGetCapabilities(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp.GetCapabilities(), nil
|
||||
}
|
||||
|
||||
func asCSIAccessMode(am api.PersistentVolumeAccessMode) csipb.VolumeCapability_AccessMode_Mode {
|
||||
switch am {
|
||||
case api.ReadWriteOnce:
|
||||
@ -223,22 +263,3 @@ func asCSIAccessMode(am api.PersistentVolumeAccessMode) csipb.VolumeCapability_A
|
||||
}
|
||||
return csipb.VolumeCapability_AccessMode_UNKNOWN
|
||||
}
|
||||
|
||||
func verToStr(ver *csipb.Version) string {
|
||||
if ver == nil {
|
||||
return ""
|
||||
}
|
||||
return fmt.Sprintf("%d.%d.%d", ver.GetMajor(), ver.GetMinor(), ver.GetPatch())
|
||||
}
|
||||
|
||||
func versToStr(vers []*csipb.Version) string {
|
||||
if vers == nil {
|
||||
return ""
|
||||
}
|
||||
str := bytes.NewBufferString("[")
|
||||
for _, v := range vers {
|
||||
str.WriteString(fmt.Sprintf("{%s};", verToStr(v)))
|
||||
}
|
||||
str.WriteString("]")
|
||||
return str.String()
|
||||
}
|
||||
|
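For orientation, the ordering these node RPCs imply — stage a volume once per node, then publish it into each pod's target path — can be sketched with a trimmed-down stand-in interface. This is a minimal illustration only; nodeClient, fakeNode, and mountVolume are hypothetical names, not part of the vendored package.

package main

import (
	"context"
	"fmt"
)

// nodeClient mirrors the shape of the csiClient interface above, trimmed
// to the two calls involved in mounting a volume.
type nodeClient interface {
	NodeStageVolume(ctx context.Context, volID, stagingPath, fsType string) error
	NodePublishVolume(ctx context.Context, volID, stagingPath, targetPath string) error
}

// fakeNode is a stand-in driver that records which volumes were staged.
type fakeNode struct{ staged map[string]string }

func (f *fakeNode) NodeStageVolume(_ context.Context, volID, stagingPath, _ string) error {
	f.staged[volID] = stagingPath // device formatted and mounted once per node
	return nil
}

func (f *fakeNode) NodePublishVolume(_ context.Context, volID, stagingPath, targetPath string) error {
	if f.staged[volID] != stagingPath {
		return fmt.Errorf("volume %s not staged at %s", volID, stagingPath)
	}
	return nil // would bind-mount stagingPath into the pod's targetPath
}

// mountVolume shows the expected call order: stage, then publish.
func mountVolume(ctx context.Context, c nodeClient, volID, stagingPath, targetPath string) error {
	if err := c.NodeStageVolume(ctx, volID, stagingPath, "ext4"); err != nil {
		return fmt.Errorf("stage: %v", err)
	}
	if err := c.NodePublishVolume(ctx, volID, stagingPath, targetPath); err != nil {
		return fmt.Errorf("publish: %v", err)
	}
	return nil
}

func main() {
	n := &fakeNode{staged: map[string]string{}}
	err := mountVolume(context.Background(), n, "vol-1", "/staging/vol-1", "/pods/p1/volumes/vol-1/mount")
	fmt.Println("mount error:", err)
}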
132
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_client_test.go
generated
vendored
@ -20,70 +20,24 @@ import (
"errors"
"testing"

csipb "github.com/container-storage-interface/spec/lib/go/csi"
grpctx "golang.org/x/net/context"
"google.golang.org/grpc"
api "k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/volume/csi/fake"
)

func setupClient(t *testing.T) *csiDriverClient {
func setupClient(t *testing.T, stageUnstageSet bool) *csiDriverClient {
client := newCsiDriverClient("unix", "/tmp/test.sock")
client.conn = new(grpc.ClientConn) //avoids creating conn object

// setup mock grpc clients
client.idClient = fake.NewIdentityClient()
client.nodeClient = fake.NewNodeClient()
client.nodeClient = fake.NewNodeClient(stageUnstageSet)
client.ctrlClient = fake.NewControllerClient()

return client
}

func TestClientAssertSupportedVersion(t *testing.T) {
testCases := []struct {
testName string
ver *csipb.Version
mustFail bool
err error
}{
{testName: "supported version", ver: &csipb.Version{Major: 0, Minor: 1, Patch: 0}},
{testName: "unsupported version", ver: &csipb.Version{Major: 0, Minor: 0, Patch: 0}, mustFail: true},
{testName: "grpc error", ver: &csipb.Version{Major: 0, Minor: 1, Patch: 0}, mustFail: true, err: errors.New("grpc error")},
}

for _, tc := range testCases {
t.Log("case: ", tc.testName)
client := setupClient(t)
client.idClient.(*fake.IdentityClient).SetNextError(tc.err)
err := client.AssertSupportedVersion(grpctx.Background(), tc.ver)
if tc.mustFail && err == nil {
t.Error("must fail, but err = nil")
}
}
}

func TestClientNodeProbe(t *testing.T) {
testCases := []struct {
testName string
ver *csipb.Version
mustFail bool
err error
}{
{testName: "supported version", ver: &csipb.Version{Major: 0, Minor: 1, Patch: 0}},
{testName: "grpc error", ver: &csipb.Version{Major: 0, Minor: 1, Patch: 0}, mustFail: true, err: errors.New("grpc error")},
}

for _, tc := range testCases {
t.Log("case: ", tc.testName)
client := setupClient(t)
client.nodeClient.(*fake.NodeClient).SetNextError(tc.err)
err := client.NodeProbe(grpctx.Background(), tc.ver)
if tc.mustFail && err == nil {
t.Error("must fail, but err = nil")
}
}
}

func TestClientNodePublishVolume(t *testing.T) {
testCases := []struct {
name string
@ -100,24 +54,26 @@ func TestClientNodePublishVolume(t *testing.T) {
{name: "grpc error", volID: "vol-test", targetPath: "/test/path", mustFail: true, err: errors.New("grpc error")},
}

client := setupClient(t)
client := setupClient(t, false)

for _, tc := range testCases {
t.Log("case: ", tc.name)
t.Logf("test case: %s", tc.name)
client.nodeClient.(*fake.NodeClient).SetNextError(tc.err)
err := client.NodePublishVolume(
grpctx.Background(),
tc.volID,
false,
"",
tc.targetPath,
api.ReadWriteOnce,
map[string]string{"device": "/dev/null"},
map[string]string{"attr0": "val0"},
map[string]string{},
tc.fsType,
)

if tc.mustFail && err == nil {
t.Error("must fail, but err is nil: ", err)
t.Error("test must fail, but err is nil")
}
}
}
@ -136,14 +92,82 @@ func TestClientNodeUnpublishVolume(t *testing.T) {
{name: "grpc error", volID: "vol-test", targetPath: "/test/path", mustFail: true, err: errors.New("grpc error")},
}

client := setupClient(t)
client := setupClient(t, false)

for _, tc := range testCases {
t.Log("case: ", tc.name)
t.Logf("test case: %s", tc.name)
client.nodeClient.(*fake.NodeClient).SetNextError(tc.err)
err := client.NodeUnpublishVolume(grpctx.Background(), tc.volID, tc.targetPath)
if tc.mustFail && err == nil {
t.Error("must fail, but err is nil: ", err)
t.Error("test must fail, but err is nil")
}
}
}

func TestClientNodeStageVolume(t *testing.T) {
testCases := []struct {
name string
volID string
stagingTargetPath string
fsType string
secret map[string]string
mustFail bool
err error
}{
{name: "test ok", volID: "vol-test", stagingTargetPath: "/test/path", fsType: "ext4"},
{name: "missing volID", stagingTargetPath: "/test/path", mustFail: true},
{name: "missing target path", volID: "vol-test", mustFail: true},
{name: "bad fs", volID: "vol-test", stagingTargetPath: "/test/path", fsType: "badfs", mustFail: true},
{name: "grpc error", volID: "vol-test", stagingTargetPath: "/test/path", mustFail: true, err: errors.New("grpc error")},
}

client := setupClient(t, false)

for _, tc := range testCases {
t.Logf("Running test case: %s", tc.name)
client.nodeClient.(*fake.NodeClient).SetNextError(tc.err)
err := client.NodeStageVolume(
grpctx.Background(),
tc.volID,
map[string]string{"device": "/dev/null"},
tc.stagingTargetPath,
tc.fsType,
api.ReadWriteOnce,
tc.secret,
map[string]string{"attr0": "val0"},
)

if tc.mustFail && err == nil {
t.Error("test must fail, but err is nil")
}
}
}

func TestClientNodeUnstageVolume(t *testing.T) {
testCases := []struct {
name string
volID string
stagingTargetPath string
mustFail bool
err error
}{
{name: "test ok", volID: "vol-test", stagingTargetPath: "/test/path"},
{name: "missing volID", stagingTargetPath: "/test/path", mustFail: true},
{name: "missing target path", volID: "vol-test", mustFail: true},
{name: "grpc error", volID: "vol-test", stagingTargetPath: "/test/path", mustFail: true, err: errors.New("grpc error")},
}

client := setupClient(t, false)

for _, tc := range testCases {
t.Logf("Running test case: %s", tc.name)
client.nodeClient.(*fake.NodeClient).SetNextError(tc.err)
err := client.NodeUnstageVolume(
grpctx.Background(),
tc.volID, tc.stagingTargetPath,
)
if tc.mustFail && err == nil {
t.Error("test must fail, but err is nil")
}
}
}
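The tests above all follow the same table-driven error-injection pattern: seed the fake with SetNextError, invoke the client, and assert that mustFail cases return an error. A minimal self-contained sketch of the pattern; flakyClient is hypothetical, standing in for the fake node/identity clients.

package main

import (
	"errors"
	"fmt"
)

// flakyClient mimics the fakes above: SetNextError queues an error that
// the next RPC-like call returns. Illustration only.
type flakyClient struct{ nextErr error }

func (c *flakyClient) SetNextError(err error) { c.nextErr = err }

func (c *flakyClient) Call() error { return c.nextErr }

func main() {
	testCases := []struct {
		name     string
		err      error
		mustFail bool
	}{
		{name: "test ok"},
		{name: "grpc error", err: errors.New("grpc error"), mustFail: true},
	}

	client := &flakyClient{}
	for _, tc := range testCases {
		client.SetNextError(tc.err) // overwrite before each case, as the tests do
		err := client.Call()
		if tc.mustFail && err == nil {
			fmt.Printf("%s: test must fail, but err is nil\n", tc.name)
			continue
		}
		fmt.Printf("%s: ok (err=%v)\n", tc.name, err)
	}
}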
104
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter.go
generated
vendored
@ -34,6 +34,8 @@ import (
"k8s.io/kubernetes/pkg/volume/util"
)

const defaultFSType = "ext4"

//TODO (vladimirvivien) move this in a central loc later
var (
volDataKey = struct {
@ -85,8 +87,6 @@ func getTargetPath(uid types.UID, specVolumeID string, host volume.VolumeHost) s
var _ volume.Mounter = &csiMountMgr{}

func (c *csiMountMgr) CanMount() error {
//TODO (vladimirvivien) use this method to probe controller using CSI.NodeProbe() call
// to ensure Node service is ready in the CSI plugin
return nil
}

@ -114,29 +114,31 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error {
return err
}

ctx, cancel := grpctx.WithTimeout(grpctx.Background(), csiTimeout)
defer cancel()

csi := c.csiClient
nodeName := string(c.plugin.host.GetNodeName())
attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, nodeName)

// ensure version is supported
if err := csi.AssertSupportedVersion(ctx, csiVersion); err != nil {
glog.Error(log("mounter.SetUpAt failed to assert version: %v", err))
ctx, cancel := grpctx.WithTimeout(grpctx.Background(), csiTimeout)
defer cancel()
// Check for STAGE_UNSTAGE_VOLUME set and populate deviceMountPath if so
deviceMountPath := ""
stageUnstageSet, err := hasStageUnstageCapability(ctx, csi)
if err != nil {
glog.Error(log("mounter.SetUpAt failed to check for STAGE_UNSTAGE_VOLUME capabilty: %v", err))
return err
}

// probe driver
// TODO (vladimirvivien) move probe call where it is done only when it is needed.
if err := csi.NodeProbe(ctx, csiVersion); err != nil {
glog.Error(log("mounter.SetUpAt failed to probe driver: %v", err))
return err
if stageUnstageSet {
deviceMountPath, err = makeDeviceMountPath(c.plugin, c.spec)
if err != nil {
glog.Error(log("mounter.SetUpAt failed to make device mount path: %v", err))
return err
}
}

// search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
if c.volumeInfo == nil {
attachment, err := c.k8s.StorageV1alpha1().VolumeAttachments().Get(attachID, meta.GetOptions{})
attachment, err := c.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
if err != nil {
glog.Error(log("mounter.SetupAt failed while getting volume attachment [id=%v]: %v", attachID, err))
return err
@ -149,14 +151,7 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error {
c.volumeInfo = attachment.Status.AttachmentMetadata
}

// get volume attributes
// TODO: for alpha vol atttributes are passed via PV.Annotations
// Beta will fix that
attribs, err := getVolAttribsFromSpec(c.spec)
if err != nil {
glog.Error(log("mounter.SetUpAt failed to extract volume attributes from PV annotations: %v", err))
return err
}
attribs := csiSource.VolumeAttributes

// create target_dir before call to NodePublish
if err := os.MkdirAll(dir, 0750); err != nil {
@ -189,15 +184,25 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error {
accessMode = c.spec.PersistentVolume.Spec.AccessModes[0]
}

fsType := csiSource.FSType
if len(fsType) == 0 {
fsType = defaultFSType
}
nodePublishSecrets := map[string]string{}
if csiSource.NodePublishSecretRef != nil {
nodePublishSecrets = getCredentialsFromSecret(c.k8s, csiSource.NodePublishSecretRef)
}
err = csi.NodePublishVolume(
ctx,
c.volumeID,
c.readOnly,
deviceMountPath,
dir,
accessMode,
c.volumeInfo,
attribs,
"ext4", //TODO needs to be sourced from PV or somewhere else
nodePublishSecrets,
fsType,
)

if err != nil {
@ -240,10 +245,15 @@ func (c *csiMountMgr) TearDownAt(dir string) error {
}

if !mounted {
glog.V(4).Info(log("unmounter.Teardown skipping unmout, dir not mounted [%s]", dir))
glog.V(4).Info(log("unmounter.Teardown skipping unmount, dir not mounted [%s]", dir))
return nil
}

if err != nil {
glog.Error(log("mounter.TearDownAt failed to get CSI persistent source: %v", err))
return err
}

// load volume info from file
dataDir := path.Dir(dir) // dropoff /mount at end
data, err := loadVolumeData(dataDir, volDataFileName)
@ -267,51 +277,22 @@ func (c *csiMountMgr) TearDownAt(dir string) error {

csi := c.csiClient

// TODO make all assertion calls private within the client itself
if err := csi.AssertSupportedVersion(ctx, csiVersion); err != nil {
glog.Errorf(log("mounter.SetUpAt failed to assert version: %v", err))
return err
}

if err := csi.NodeUnpublishVolume(ctx, volID, dir); err != nil {
glog.Errorf(log("mounter.SetUpAt failed: %v", err))
glog.Errorf(log("mounter.TearDownAt failed: %v", err))
return err
}

// clean mount point dir
if err := removeMountDir(c.plugin, dir); err != nil {
glog.Error(log("mounter.SetUpAt failed to clean mount dir [%s]: %v", dir, err))
glog.Error(log("mounter.TearDownAt failed to clean mount dir [%s]: %v", dir, err))
return err
}
glog.V(4).Infof(log("mounte.SetUpAt successfully unmounted dir [%s]", dir))
glog.V(4).Infof(log("mounte.TearDownAt successfully unmounted dir [%s]", dir))

return nil
}

// getVolAttribsFromSpec exracts CSI VolumeAttributes information from PV.Annotations
// using key csi.kubernetes.io/volume-attributes. The annotation value is expected
// to be a JSON-encoded object of form {"key0":"val0",...,"keyN":"valN"}
func getVolAttribsFromSpec(spec *volume.Spec) (map[string]string, error) {
if spec == nil {
return nil, errors.New("missing volume spec")
}
annotations := spec.PersistentVolume.GetAnnotations()
if annotations == nil {
return nil, nil // no annotations found
}
jsonAttribs := annotations[csiVolAttribsAnnotationKey]
if jsonAttribs == "" {
return nil, nil // csi annotation not found
}
attribs := map[string]string{}
if err := json.Unmarshal([]byte(jsonAttribs), &attribs); err != nil {
glog.Error(log("error parsing csi PV.Annotation [%s]=%s: %v", csiVolAttribsAnnotationKey, jsonAttribs, err))
return nil, err
}
return attribs, nil
}

// saveVolumeData persists parameter data as json file using the locagion
// saveVolumeData persists parameter data as json file using the location
// generated by /var/lib/kubelet/pods/<podID>/volumes/kubernetes.io~csi/<specVolId>/volume_data.json
func saveVolumeData(p *csiPlugin, podUID types.UID, specVolID string, data map[string]string) error {
dir := getTargetPath(podUID, specVolID, p.host)
@ -390,12 +371,19 @@ func removeMountDir(plug *csiPlugin, mountPath string) error {
return err
}
// remove volume data file as well
dataFile := path.Join(path.Dir(mountPath), volDataFileName)
volPath := path.Dir(mountPath)
dataFile := path.Join(volPath, volDataFileName)
glog.V(4).Info(log("also deleting volume info data file [%s]", dataFile))
if err := os.Remove(dataFile); err != nil && !os.IsNotExist(err) {
glog.Error(log("failed to delete volume data file [%s]: %v", dataFile, err))
return err
}
// remove volume path
glog.V(4).Info(log("deleting volume path [%s]", volPath))
if err := os.Remove(volPath); err != nil && !os.IsNotExist(err) {
glog.Error(log("failed to delete volume path [%s]: %v", volPath, err))
return err
}
}
return nil
}
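SetUpAt above calls hasStageUnstageCapability, which this hunk references but does not show. A plausible sketch of that helper, assuming the v0 csipb generated getters and the csiClient interface defined earlier; the vendored implementation may differ in detail.

package csi

import (
	csipb "github.com/container-storage-interface/spec/lib/go/csi/v0"
	grpctx "golang.org/x/net/context"
)

// hasStageUnstageCapability reports whether the driver advertises the
// STAGE_UNSTAGE_VOLUME node service capability (sketch only).
func hasStageUnstageCapability(ctx grpctx.Context, csi csiClient) (bool, error) {
	capabilities, err := csi.NodeGetCapabilities(ctx)
	if err != nil {
		return false, err
	}
	for _, capability := range capabilities {
		if capability.GetRpc().GetType() == csipb.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME {
			return true, nil
		}
	}
	return false, nil
}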
71
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter_test.go
generated
vendored
@ -26,7 +26,7 @@ import (
"testing"

api "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1alpha1"
storage "k8s.io/api/storage/v1beta1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
fakeclient "k8s.io/client-go/kubernetes/fake"
@ -64,7 +64,7 @@ func TestMounterGetPath(t *testing.T) {
},
}
for _, tc := range testCases {
t.Log("test case:", tc.name)
t.Logf("test case: %s", tc.name)
pv := makeTestPV(tc.specVolumeName, 10, testDriver, testVol)
spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
mounter, err := plug.NewMounter(
@ -78,7 +78,7 @@ func TestMounterGetPath(t *testing.T) {
csiMounter := mounter.(*csiMountMgr)

path := csiMounter.GetPath()
t.Log("*** GetPath: ", path)
t.Logf("*** GetPath: %s", path)

if tc.path != path {
t.Errorf("expecting path %s, got %s", tc.path, path)
@ -114,7 +114,7 @@ func TestMounterSetUp(t *testing.T) {
}

csiMounter := mounter.(*csiMountMgr)
csiMounter.csiClient = setupClient(t)
csiMounter.csiClient = setupClient(t, false)

attachID := getAttachmentName(csiMounter.volumeID, csiMounter.driverName, string(plug.host.GetNodeName()))

@ -135,7 +135,7 @@ func TestMounterSetUp(t *testing.T) {
DetachError: nil,
},
}
_, err = csiMounter.k8s.StorageV1alpha1().VolumeAttachments().Create(attachment)
_, err = csiMounter.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
if err != nil {
t.Fatalf("failed to setup VolumeAttachment: %v", err)
}
@ -172,7 +172,7 @@ func TestUnmounterTeardown(t *testing.T) {
}

csiUnmounter := unmounter.(*csiMountMgr)
csiUnmounter.csiClient = setupClient(t)
csiUnmounter.csiClient = setupClient(t, false)

dir := csiUnmounter.GetPath()

@ -186,7 +186,7 @@ func TestUnmounterTeardown(t *testing.T) {
"test-pv",
map[string]string{volDataKey.specVolID: "test-pv", volDataKey.driverName: "driver", volDataKey.volHandle: "vol-handle"},
); err != nil {
t.Fatal("failed to save volume data:", err)
t.Fatalf("failed to save volume data: %v", err)
}

err = csiUnmounter.TearDownAt(dir)
@ -202,53 +202,6 @@ func TestUnmounterTeardown(t *testing.T) {

}

func TestGetVolAttribsFromSpec(t *testing.T) {
testCases := []struct {
name string
annotations map[string]string
attribs map[string]string
shouldFail bool
}{
{
name: "attribs ok",
annotations: map[string]string{"key0": "val0", csiVolAttribsAnnotationKey: `{"k0":"attr0","k1":"attr1","k2":"attr2"}`, "keyN": "valN"},
attribs: map[string]string{"k0": "attr0", "k1": "attr1", "k2": "attr2"},
},

{
name: "missing attribs",
annotations: map[string]string{"key0": "val0", "keyN": "valN"},
},
{
name: "missing annotations",
},
{
name: "bad json",
annotations: map[string]string{"key0": "val0", csiVolAttribsAnnotationKey: `{"k0""attr0","k1":"attr1,"k2":"attr2"`, "keyN": "valN"},
attribs: map[string]string{"k0": "attr0", "k1": "attr1", "k2": "attr2"},
shouldFail: true,
},
}
spec := volume.NewSpecFromPersistentVolume(makeTestPV("test-pv", 10, testDriver, testVol), false)
for _, tc := range testCases {
t.Log("test case:", tc.name)
spec.PersistentVolume.Annotations = tc.annotations
attribs, err := getVolAttribsFromSpec(spec)
if !tc.shouldFail && err != nil {
t.Error("test case should not fail, but err != nil", err)
}
eq := true
for k, v := range attribs {
if tc.attribs[k] != v {
eq = false
}
}
if !eq {
t.Errorf("expecting attribs %#v, but got %#v", tc.attribs, attribs)
}
}
}

func TestSaveVolumeData(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer os.RemoveAll(tmpDir)
@ -262,7 +215,7 @@ func TestSaveVolumeData(t *testing.T) {
}

for i, tc := range testCases {
t.Log("test case:", tc.name)
t.Logf("test case: %s", tc.name)
specVolID := fmt.Sprintf("spec-volid-%d", i)
mountDir := path.Join(getTargetPath(testPodUID, specVolID, plug.host), "/mount")
if err := os.MkdirAll(mountDir, 0755); err != nil && !os.IsNotExist(err) {
@ -272,24 +225,24 @@ func TestSaveVolumeData(t *testing.T) {
err := saveVolumeData(plug, testPodUID, specVolID, tc.data)

if !tc.shouldFail && err != nil {
t.Error("unexpected failure: ", err)
t.Errorf("unexpected failure: %v", err)
}
// did file get created
dataDir := getTargetPath(testPodUID, specVolID, plug.host)
file := path.Join(dataDir, volDataFileName)
if _, err := os.Stat(file); err != nil {
t.Error("failed to create data dir:", err)
t.Errorf("failed to create data dir: %v", err)
}

// validate content
data, err := ioutil.ReadFile(file)
if !tc.shouldFail && err != nil {
t.Error("failed to read data file:", err)
t.Errorf("failed to read data file: %v", err)
}

jsonData := new(bytes.Buffer)
if err := json.NewEncoder(jsonData).Encode(tc.data); err != nil {
t.Error("failed to encode json:", err)
t.Errorf("failed to encode json: %v", err)
}
if string(data) != jsonData.String() {
t.Errorf("expecting encoded data %v, got %v", string(data), jsonData)
32
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_plugin.go
generated
vendored
@ -19,10 +19,8 @@ package csi
import (
"errors"
"fmt"
"regexp"
"time"

csipb "github.com/container-storage-interface/spec/lib/go/csi"
"github.com/golang/glog"
api "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -32,8 +30,7 @@ import (
)

const (
csiPluginName = "kubernetes.io/csi"
csiVolAttribsAnnotationKey = "csi.volume.kubernetes.io/volume-attributes"
csiPluginName = "kubernetes.io/csi"

// TODO (vladimirvivien) implement a more dynamic way to discover
// the unix domain socket path for each installed csi driver.
@ -45,12 +42,6 @@ const (
volDataFileName = "vol_data.json"
)

var (
// csiVersion supported csi version
csiVersion = &csipb.Version{Major: 0, Minor: 1, Patch: 0}
driverNameRexp = regexp.MustCompile(`^[A-Za-z]+(\.?-?_?[A-Za-z0-9-])+$`)
)

type csiPlugin struct {
host volume.VolumeHost
}
@ -85,12 +76,6 @@ func (p *csiPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
return "", err
}

//TODO (vladimirvivien) this validation should be done at the API validation check
if !isDriverNameValid(csi.Driver) {
glog.Error(log("plugin.GetVolumeName failed to create volume name: invalid csi driver name %s", csi.Driver))
return "", errors.New("invalid csi driver name")
}

// return driverName<separator>volumeHandle
return fmt.Sprintf("%s%s%s", csi.Driver, volNameSep, csi.VolumeHandle), nil
}
@ -114,13 +99,6 @@ func (p *csiPlugin) NewMounter(
return nil, err
}

// TODO (vladimirvivien) consider moving this check in API validation
// check Driver name to conform to CSI spec
if !isDriverNameValid(pvSource.Driver) {
glog.Error(log("driver name does not conform to CSI spec: %s", pvSource.Driver))
return nil, errors.New("driver name is invalid")
}

// before it is used in any paths such as socket etc
addr := fmt.Sprintf(csiAddrTemplate, pvSource.Driver)
glog.V(4).Infof(log("setting up mounter for [volume=%v,driver=%v]", pvSource.VolumeHandle, pvSource.Driver))
@ -243,11 +221,3 @@ func getCSISourceFromSpec(spec *volume.Spec) (*api.CSIPersistentVolumeSource, er
func log(msg string, parts ...interface{}) string {
return fmt.Sprintf(fmt.Sprintf("%s: %s", csiPluginName, msg), parts...)
}

// isDriverNameValid validates the driverName using CSI spec
func isDriverNameValid(name string) bool {
if len(name) == 0 || len(name) > 63 {
return false
}
return driverNameRexp.MatchString(name)
}
39
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_plugin_test.go
generated
vendored
@ -64,8 +64,7 @@ func newTestPlugin(t *testing.T) (*csiPlugin, string) {
func makeTestPV(name string, sizeGig int, driverName, volID string) *api.PersistentVolume {
return &api.PersistentVolume{
ObjectMeta: meta.ObjectMeta{
Name: name,
Namespace: testns,
Name: name,
},
Spec: api.PersistentVolumeSpec{
AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
@ -230,11 +229,11 @@ func TestPluginNewUnmounter(t *testing.T) {
csiUnmounter := unmounter.(*csiMountMgr)

if err != nil {
t.Fatalf("Failed to make a new Mounter: %v", err)
t.Fatalf("Failed to make a new Unmounter: %v", err)
}

if csiUnmounter == nil {
t.Fatal("failed to create CSI mounter")
t.Fatal("failed to create CSI Unmounter")
}

if csiUnmounter.podUID != testPodUID {
@ -243,36 +242,6 @@ func TestPluginNewUnmounter(t *testing.T) {

}

func TestValidateDriverName(t *testing.T) {
testCases := []struct {
name string
driverName string
valid bool
}{

{"ok no punctuations", "comgooglestoragecsigcepd", true},
{"ok dot only", "io.kubernetes.storage.csi.flex", true},
{"ok dash only", "io-kubernetes-storage-csi-flex", true},
{"ok underscore only", "io_kubernetes_storage_csi_flex", true},
{"ok dot underscores", "io.kubernetes.storage_csi.flex", true},
{"ok dot dash underscores", "io.kubernetes-storage.csi_flex", true},

{"invalid length 0", "", false},
{"invalid length > 63", "comgooglestoragecsigcepdcomgooglestoragecsigcepdcomgooglestoragecsigcepdcomgooglestoragecsigcepd", false},
{"invalid start char", "_comgooglestoragecsigcepd", false},
{"invalid end char", "comgooglestoragecsigcepd/", false},
{"invalid separators", "com/google/storage/csi~gcepd", false},
}

for _, tc := range testCases {
t.Logf("test case: %v", tc.name)
drValid := isDriverNameValid(tc.driverName)
if tc.valid != drValid {
t.Errorf("expecting driverName %s as valid=%t, but got valid=%t", tc.driverName, tc.valid, drValid)
}
}
}

func TestPluginNewAttacher(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer os.RemoveAll(tmpDir)
@ -305,6 +274,6 @@ func TestPluginNewDetacher(t *testing.T) {
t.Error("plugin not set for detacher")
}
if csiDetacher.k8s == nil {
t.Error("Kubernetes client not set for attacher")
t.Error("Kubernetes client not set for detacher")
}
}
38
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_util.go
generated
vendored
Normal file
@ -0,0 +1,38 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package csi

import (
"github.com/golang/glog"
api "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)

func getCredentialsFromSecret(k8s kubernetes.Interface, secretRef *api.SecretReference) map[string]string {
credentials := map[string]string{}
secret, err := k8s.CoreV1().Secrets(secretRef.Namespace).Get(secretRef.Name, meta.GetOptions{})
if err != nil {
glog.Warningf("failed to find the secret %s in the namespace %s with error: %v\n", secretRef.Name, secretRef.Namespace, err)
return credentials
}
for key, value := range secret.Data {
credentials[key] = string(value)
}

return credentials
}
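The mounter hunk earlier resolves NodePublishSecretRef through this helper. A small, self-contained usage sketch against a fake clientset; the names and values are illustrative, and the Get signature matches the client-go vintage vendored in this tree.

package main

import (
	"fmt"

	api "k8s.io/api/core/v1"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
	fakeclient "k8s.io/client-go/kubernetes/fake"
)

func main() {
	// Seed a fake clientset with a secret, then resolve it the way
	// getCredentialsFromSecret does.
	k8s := fakeclient.NewSimpleClientset(&api.Secret{
		ObjectMeta: meta.ObjectMeta{Name: "csi-creds", Namespace: "default"},
		Data:       map[string][]byte{"user": []byte("admin")},
	})
	ref := &api.SecretReference{Name: "csi-creds", Namespace: "default"}
	secret, err := k8s.CoreV1().Secrets(ref.Namespace).Get(ref.Name, meta.GetOptions{})
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	credentials := map[string]string{}
	for key, value := range secret.Data {
		credentials[key] = string(value)
	}
	fmt.Println(credentials["user"]) // prints: admin
}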
2
vendor/k8s.io/kubernetes/pkg/volume/csi/fake/BUILD
generated
vendored
@ -6,7 +6,7 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/volume/csi/fake",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/container-storage-interface/spec/lib/go/csi:go_default_library",
"//vendor/github.com/container-storage-interface/spec/lib/go/csi/v0:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
],
130
vendor/k8s.io/kubernetes/pkg/volume/csi/fake/fake_client.go
generated
vendored
@ -23,7 +23,7 @@ import (

"google.golang.org/grpc"

csipb "github.com/container-storage-interface/spec/lib/go/csi"
csipb "github.com/container-storage-interface/spec/lib/go/csi/v0"
grpctx "golang.org/x/net/context"
)

@ -42,39 +42,36 @@ func (f *IdentityClient) SetNextError(err error) {
f.nextErr = err
}

// GetSupportedVersions returns supported version
func (f *IdentityClient) GetSupportedVersions(ctx grpctx.Context, req *csipb.GetSupportedVersionsRequest, opts ...grpc.CallOption) (*csipb.GetSupportedVersionsResponse, error) {
// short circuit with an error
if f.nextErr != nil {
return nil, f.nextErr
}

rsp := &csipb.GetSupportedVersionsResponse{
SupportedVersions: []*csipb.Version{
{Major: 0, Minor: 0, Patch: 1},
{Major: 0, Minor: 1, Patch: 0},
{Major: 1, Minor: 0, Patch: 0},
{Major: 1, Minor: 0, Patch: 1},
{Major: 1, Minor: 1, Patch: 1},
},
}
return rsp, nil
}

// GetPluginInfo returns plugin info
func (f *IdentityClient) GetPluginInfo(ctx context.Context, in *csipb.GetPluginInfoRequest, opts ...grpc.CallOption) (*csipb.GetPluginInfoResponse, error) {
return nil, nil
}

// GetPluginCapabilities implements csi method
func (f *IdentityClient) GetPluginCapabilities(ctx context.Context, in *csipb.GetPluginCapabilitiesRequest, opts ...grpc.CallOption) (*csipb.GetPluginCapabilitiesResponse, error) {
return nil, nil
}

// Probe implements csi method
func (f *IdentityClient) Probe(ctx context.Context, in *csipb.ProbeRequest, opts ...grpc.CallOption) (*csipb.ProbeResponse, error) {
return nil, nil
}

// NodeClient returns CSI node client
type NodeClient struct {
nodePublishedVolumes map[string]string
nodeStagedVolumes map[string]string
stageUnstageSet bool
nextErr error
}

// NewNodeClient returns fake node client
func NewNodeClient() *NodeClient {
return &NodeClient{nodePublishedVolumes: make(map[string]string)}
func NewNodeClient(stageUnstageSet bool) *NodeClient {
return &NodeClient{
nodePublishedVolumes: make(map[string]string),
nodeStagedVolumes: make(map[string]string),
stageUnstageSet: stageUnstageSet,
}
}

// SetNextError injects next expected error
@ -87,6 +84,15 @@ func (f *NodeClient) GetNodePublishedVolumes() map[string]string {
return f.nodePublishedVolumes
}

// GetNodeStagedVolumes returns node staged volumes
func (f *NodeClient) GetNodeStagedVolumes() map[string]string {
return f.nodeStagedVolumes
}

func (f *NodeClient) AddNodeStagedVolume(volID, deviceMountPath string) {
f.nodeStagedVolumes[volID] = deviceMountPath
}

// NodePublishVolume implements CSI NodePublishVolume
func (f *NodeClient) NodePublishVolume(ctx grpctx.Context, req *csipb.NodePublishVolumeRequest, opts ...grpc.CallOption) (*csipb.NodePublishVolumeResponse, error) {

@ -103,23 +109,12 @@ func (f *NodeClient) NodePublishVolume(ctx grpctx.Context, req *csipb.NodePublis
fsTypes := "ext4|xfs|zfs"
fsType := req.GetVolumeCapability().GetMount().GetFsType()
if !strings.Contains(fsTypes, fsType) {
return nil, errors.New("invlid fstype")
return nil, errors.New("invalid fstype")
}
f.nodePublishedVolumes[req.GetVolumeId()] = req.GetTargetPath()
return &csipb.NodePublishVolumeResponse{}, nil
}

// NodeProbe implements csi NodeProbe
func (f *NodeClient) NodeProbe(ctx context.Context, req *csipb.NodeProbeRequest, opts ...grpc.CallOption) (*csipb.NodeProbeResponse, error) {
if f.nextErr != nil {
return nil, f.nextErr
}
if req.Version == nil {
return nil, errors.New("missing version")
}
return &csipb.NodeProbeResponse{}, nil
}

// NodeUnpublishVolume implements csi method
func (f *NodeClient) NodeUnpublishVolume(ctx context.Context, req *csipb.NodeUnpublishVolumeRequest, opts ...grpc.CallOption) (*csipb.NodeUnpublishVolumeResponse, error) {
if f.nextErr != nil {
@ -136,13 +131,71 @@ func (f *NodeClient) NodeUnpublishVolume(ctx context.Context, req *csipb.NodeUnp
return &csipb.NodeUnpublishVolumeResponse{}, nil
}

// GetNodeID implements method
func (f *NodeClient) GetNodeID(ctx context.Context, in *csipb.GetNodeIDRequest, opts ...grpc.CallOption) (*csipb.GetNodeIDResponse, error) {
// NodeStagevolume implements csi method
func (f *NodeClient) NodeStageVolume(ctx context.Context, req *csipb.NodeStageVolumeRequest, opts ...grpc.CallOption) (*csipb.NodeStageVolumeResponse, error) {
if f.nextErr != nil {
return nil, f.nextErr
}

if req.GetVolumeId() == "" {
return nil, errors.New("missing volume id")
}
if req.GetStagingTargetPath() == "" {
return nil, errors.New("missing staging target path")
}

fsType := ""
fsTypes := "ext4|xfs|zfs"
mounted := req.GetVolumeCapability().GetMount()
if mounted != nil {
fsType = mounted.GetFsType()
}
if !strings.Contains(fsTypes, fsType) {
return nil, errors.New("invalid fstype")
}

f.nodeStagedVolumes[req.GetVolumeId()] = req.GetStagingTargetPath()
return &csipb.NodeStageVolumeResponse{}, nil
}

// NodeUnstageVolume implements csi method
func (f *NodeClient) NodeUnstageVolume(ctx context.Context, req *csipb.NodeUnstageVolumeRequest, opts ...grpc.CallOption) (*csipb.NodeUnstageVolumeResponse, error) {
if f.nextErr != nil {
return nil, f.nextErr
}

if req.GetVolumeId() == "" {
return nil, errors.New("missing volume id")
}
if req.GetStagingTargetPath() == "" {
return nil, errors.New("missing staging target path")
}

delete(f.nodeStagedVolumes, req.GetVolumeId())
return &csipb.NodeUnstageVolumeResponse{}, nil
}

// NodeGetId implements method
func (f *NodeClient) NodeGetId(ctx context.Context, in *csipb.NodeGetIdRequest, opts ...grpc.CallOption) (*csipb.NodeGetIdResponse, error) {
return nil, nil
}

// NodeGetCapabilities implements csi method
func (f *NodeClient) NodeGetCapabilities(ctx context.Context, in *csipb.NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*csipb.NodeGetCapabilitiesResponse, error) {
resp := &csipb.NodeGetCapabilitiesResponse{
Capabilities: []*csipb.NodeServiceCapability{
{
Type: &csipb.NodeServiceCapability_Rpc{
Rpc: &csipb.NodeServiceCapability_RPC{
Type: csipb.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME,
},
},
},
},
}
if f.stageUnstageSet {
return resp, nil
}
return nil, nil
}

@ -223,8 +276,3 @@ func (f *ControllerClient) ListVolumes(ctx context.Context, in *csipb.ListVolume
func (f *ControllerClient) GetCapacity(ctx context.Context, in *csipb.GetCapacityRequest, opts ...grpc.CallOption) (*csipb.GetCapacityResponse, error) {
return nil, nil
}

// ControllerProbe implements csi method
func (f *ControllerClient) ControllerProbe(ctx context.Context, in *csipb.ControllerProbeRequest, opts ...grpc.CallOption) (*csipb.ControllerProbeResponse, error) {
return nil, nil
}
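A short illustration of the stageUnstageSet toggle above: with it set, the fake advertises STAGE_UNSTAGE_VOLUME; without it, NodeGetCapabilities returns nothing, which is what setupClient(t, false) exercises in the tests. A minimal sketch reusing the fake package; the nil-safe GetCapabilities getter makes the second case print an empty list.

package main

import (
	"fmt"

	csipb "github.com/container-storage-interface/spec/lib/go/csi/v0"
	grpctx "golang.org/x/net/context"
	"k8s.io/kubernetes/pkg/volume/csi/fake"
)

func main() {
	for _, set := range []bool{true, false} {
		client := fake.NewNodeClient(set)
		resp, err := client.NodeGetCapabilities(grpctx.Background(), &csipb.NodeGetCapabilitiesRequest{})
		fmt.Printf("stageUnstageSet=%v capabilities=%v err=%v\n", set, resp.GetCapabilities(), err)
	}
}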
3
vendor/k8s.io/kubernetes/pkg/volume/downwardapi/BUILD
generated
vendored
@ -26,8 +26,7 @@ go_library(
go_test(
name = "go_default_test",
srcs = ["downwardapi_test.go"],
importpath = "k8s.io/kubernetes/pkg/volume/downwardapi",
library = ":go_default_library",
embed = [":go_default_library"],
deps = [
"//pkg/fieldpath:go_default_library",
"//pkg/volume:go_default_library",
5
vendor/k8s.io/kubernetes/pkg/volume/downwardapi/downwardapi.go
generated
vendored
@ -183,6 +183,9 @@ func (b *downwardAPIVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
glog.Errorf("Unable to setup downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error())
return err
}
if err := volumeutil.MakeNestedMountpoints(b.volName, dir, *b.pod); err != nil {
return err
}

data, err := CollectData(b.source.Items, b.pod, b.plugin.host, b.source.DefaultMode)
if err != nil {
@ -283,7 +286,7 @@ func (c *downwardAPIVolumeUnmounter) TearDown() error {
}

func (c *downwardAPIVolumeUnmounter) TearDownAt(dir string) error {
return volume.UnmountViaEmptyDir(dir, c.plugin.host, c.volName, wrappedVolumeSpec(), c.podUID)
return volumeutil.UnmountViaEmptyDir(dir, c.plugin.host, c.volName, wrappedVolumeSpec(), c.podUID)
}

func (b *downwardAPIVolumeMounter) getMetaDir() string {
42
vendor/k8s.io/kubernetes/pkg/volume/empty_dir/BUILD
generated
vendored
@ -11,11 +11,40 @@ go_library(
srcs = [
"doc.go",
"empty_dir.go",
"empty_dir_unsupported.go",
] + select({
"@io_bazel_rules_go//go/platform:linux_amd64": [
"@io_bazel_rules_go//go/platform:android": [
"empty_dir_unsupported.go",
],
"@io_bazel_rules_go//go/platform:darwin": [
"empty_dir_unsupported.go",
],
"@io_bazel_rules_go//go/platform:dragonfly": [
"empty_dir_unsupported.go",
],
"@io_bazel_rules_go//go/platform:freebsd": [
"empty_dir_unsupported.go",
],
"@io_bazel_rules_go//go/platform:linux": [
"empty_dir_linux.go",
],
"@io_bazel_rules_go//go/platform:nacl": [
"empty_dir_unsupported.go",
],
"@io_bazel_rules_go//go/platform:netbsd": [
"empty_dir_unsupported.go",
],
"@io_bazel_rules_go//go/platform:openbsd": [
"empty_dir_unsupported.go",
],
"@io_bazel_rules_go//go/platform:plan9": [
"empty_dir_unsupported.go",
],
"@io_bazel_rules_go//go/platform:solaris": [
"empty_dir_unsupported.go",
],
"@io_bazel_rules_go//go/platform:windows": [
"empty_dir_unsupported.go",
],
"//conditions:default": [],
}),
importpath = "k8s.io/kubernetes/pkg/volume/empty_dir",
@ -31,7 +60,7 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
] + select({
"@io_bazel_rules_go//go/platform:linux_amd64": [
"@io_bazel_rules_go//go/platform:linux": [
"//vendor/golang.org/x/sys/unix:go_default_library",
],
"//conditions:default": [],
@ -41,15 +70,14 @@ go_library(
go_test(
name = "go_default_test",
srcs = select({
"@io_bazel_rules_go//go/platform:linux_amd64": [
"@io_bazel_rules_go//go/platform:linux": [
"empty_dir_test.go",
],
"//conditions:default": [],
}),
importpath = "k8s.io/kubernetes/pkg/volume/empty_dir",
library = ":go_default_library",
embed = [":go_default_library"],
deps = select({
"@io_bazel_rules_go//go/platform:linux_amd64": [
"@io_bazel_rules_go//go/platform:linux": [
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
20
vendor/k8s.io/kubernetes/pkg/volume/empty_dir/empty_dir.go
generated
vendored
@ -155,20 +155,12 @@ func (plugin *emptyDirPlugin) ConstructVolumeSpec(volName, mountPath string) (*v
type mountDetector interface {
// GetMountMedium determines what type of medium a given path is backed
// by and whether that path is a mount point. For example, if this
// returns (mediumMemory, false, nil), the caller knows that the path is
// returns (v1.StorageMediumMemory, false, nil), the caller knows that the path is
// on a memory FS (tmpfs on Linux) but is not the root mountpoint of
// that tmpfs.
GetMountMedium(path string) (storageMedium, bool, error)
GetMountMedium(path string) (v1.StorageMedium, bool, error)
}

type storageMedium int

const (
mediumUnknown storageMedium = 0 // assume anything we don't explicitly handle is this
mediumMemory storageMedium = 1 // memory (e.g. tmpfs on linux)
mediumHugepages storageMedium = 2 // hugepages
)

// EmptyDir volumes are temporary directories exposed to the pod.
// These do not persist beyond the lifetime of a pod.
type emptyDir struct {
@ -257,7 +249,7 @@ func (ed *emptyDir) setupTmpfs(dir string) error {
}
// If the directory is a mountpoint with medium memory, there is no
// work to do since we are already in the desired state.
if isMnt && medium == mediumMemory {
if isMnt && medium == v1.StorageMediumMemory {
return nil
}

@ -280,7 +272,7 @@ func (ed *emptyDir) setupHugepages(dir string) error {
}
// If the directory is a mountpoint with medium hugepages, there is no
// work to do since we are already in the desired state.
if isMnt && medium == mediumHugepages {
if isMnt && medium == v1.StorageMediumHugePages {
return nil
}

@ -388,10 +380,10 @@ func (ed *emptyDir) TearDownAt(dir string) error {
return err
}
if isMnt {
if medium == mediumMemory {
if medium == v1.StorageMediumMemory {
ed.medium = v1.StorageMediumMemory
return ed.teardownTmpfsOrHugetlbfs(dir)
} else if medium == mediumHugepages {
} else if medium == v1.StorageMediumHugePages {
ed.medium = v1.StorageMediumHugePages
return ed.teardownTmpfsOrHugetlbfs(dir)
}
14
vendor/k8s.io/kubernetes/pkg/volume/empty_dir/empty_dir_linux.go
generated
vendored
@ -23,6 +23,8 @@ import (

"github.com/golang/glog"
"golang.org/x/sys/unix"

"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/util/mount"
)

@ -37,22 +39,22 @@ type realMountDetector struct {
mounter mount.Interface
}

func (m *realMountDetector) GetMountMedium(path string) (storageMedium, bool, error) {
func (m *realMountDetector) GetMountMedium(path string) (v1.StorageMedium, bool, error) {
glog.V(5).Infof("Determining mount medium of %v", path)
notMnt, err := m.mounter.IsLikelyNotMountPoint(path)
if err != nil {
return 0, false, fmt.Errorf("IsLikelyNotMountPoint(%q): %v", path, err)
return v1.StorageMediumDefault, false, fmt.Errorf("IsLikelyNotMountPoint(%q): %v", path, err)
}
buf := unix.Statfs_t{}
if err := unix.Statfs(path, &buf); err != nil {
return 0, false, fmt.Errorf("statfs(%q): %v", path, err)
return v1.StorageMediumDefault, false, fmt.Errorf("statfs(%q): %v", path, err)
}

glog.V(5).Infof("Statfs_t of %v: %+v", path, buf)
if buf.Type == linuxTmpfsMagic {
return mediumMemory, !notMnt, nil
return v1.StorageMediumMemory, !notMnt, nil
} else if int64(buf.Type) == linuxHugetlbfsMagic {
return mediumHugepages, !notMnt, nil
return v1.StorageMediumHugePages, !notMnt, nil
}
return mediumUnknown, !notMnt, nil
return v1.StorageMediumDefault, !notMnt, nil
}
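The detection above compares Statfs_t.Type against filesystem magic numbers (linuxTmpfsMagic is defined near the top of this file; the kernel's TMPFS_MAGIC is 0x01021994). A standalone, Linux-only sketch of the same check against /dev/shm, which is normally tmpfs:

// +build linux

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

const tmpfsMagic = 0x01021994 // TMPFS_MAGIC from the kernel headers

func main() {
	buf := unix.Statfs_t{}
	if err := unix.Statfs("/dev/shm", &buf); err != nil {
		fmt.Println("statfs:", err)
		return
	}
	// Report whether the path sits on a memory-backed filesystem.
	fmt.Printf("type=%#x tmpfs=%v\n", buf.Type, int64(buf.Type) == tmpfsMagic)
}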
8
vendor/k8s.io/kubernetes/pkg/volume/empty_dir/empty_dir_test.go
generated
vendored
@ -66,11 +66,11 @@ func TestCanSupport(t *testing.T) {
}

type fakeMountDetector struct {
medium storageMedium
medium v1.StorageMedium
isMount bool
}

func (fake *fakeMountDetector) GetMountMedium(path string) (storageMedium, bool, error) {
func (fake *fakeMountDetector) GetMountMedium(path string) (v1.StorageMedium, bool, error) {
return fake.medium, fake.isMount, nil
}

@ -196,9 +196,9 @@ func doTestPlugin(t *testing.T, config pluginTestConfig) {
physicalMounter.ResetLog()

// Make an unmounter for the volume
teardownMedium := mediumUnknown
teardownMedium := v1.StorageMediumDefault
if config.medium == v1.StorageMediumMemory {
teardownMedium = mediumMemory
teardownMedium = v1.StorageMediumMemory
}
unmounterMountDetector := &fakeMountDetector{medium: teardownMedium, isMount: config.shouldBeMountedBeforeTeardown}
unmounter, err := plug.(*emptyDirPlugin).newUnmounterInternal(volumeName, types.UID("poduid"), &physicalMounter, unmounterMountDetector)
5
vendor/k8s.io/kubernetes/pkg/volume/empty_dir/empty_dir_unsupported.go
generated
vendored
@ -19,6 +19,7 @@ limitations under the License.
package empty_dir

import (
"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/util/mount"
)

@ -27,6 +28,6 @@ type realMountDetector struct {
mounter mount.Interface
}

func (m *realMountDetector) GetMountMedium(path string) (storageMedium, bool, error) {
return mediumUnknown, false, nil
func (m *realMountDetector) GetMountMedium(path string) (v1.StorageMedium, bool, error) {
return v1.StorageMediumDefault, false, nil
}
6
vendor/k8s.io/kubernetes/pkg/volume/fc/BUILD
generated
vendored
@ -22,7 +22,7 @@ go_library(
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//pkg/volume/util/volumepathhandler:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@ -37,12 +37,12 @@ go_test(
"fc_test.go",
"fc_util_test.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/fc",
library = ":go_default_library",
embed = [":go_default_library"],
deps = [
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
20
vendor/k8s.io/kubernetes/pkg/volume/fc/attacher.go
generated
vendored
@ -31,7 +31,6 @@ import (
|
||||
"k8s.io/kubernetes/pkg/util/mount"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
volumeutil "k8s.io/kubernetes/pkg/volume/util"
|
||||
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
|
||||
)
|
||||
|
||||
type fcAttacher struct {
|
||||
@ -113,7 +112,7 @@ func (attacher *fcAttacher) MountDevice(spec *volume.Spec, devicePath string, de
|
||||
}
|
||||
if notMnt {
|
||||
diskMounter := &mount.SafeFormatAndMount{Interface: mounter, Exec: attacher.host.GetExec(fcPluginName)}
|
||||
mountOptions := volume.MountOptionFromSpec(spec, options...)
|
||||
mountOptions := volumeutil.MountOptionFromSpec(spec, options...)
|
||||
err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions)
|
||||
if err != nil {
|
||||
os.Remove(deviceMountPath)
|
||||
@ -189,7 +188,7 @@ func volumeSpecToMounter(spec *volume.Spec, host volume.VolumeHost) (*fcDiskMoun
|
||||
}
|
||||
// TODO: remove feature gate check after no longer needed
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
|
||||
volumeMode, err := volumehelper.GetVolumeMode(spec)
|
||||
volumeMode, err := volumeutil.GetVolumeMode(spec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -199,14 +198,16 @@ func volumeSpecToMounter(spec *volume.Spec, host volume.VolumeHost) (*fcDiskMoun
|
||||
fsType: fc.FSType,
|
||||
volumeMode: volumeMode,
|
||||
readOnly: readOnly,
|
||||
mounter: volumehelper.NewSafeFormatAndMountFromHost(fcPluginName, host),
|
||||
mounter: volumeutil.NewSafeFormatAndMountFromHost(fcPluginName, host),
|
||||
deviceUtil: volumeutil.NewDeviceHandler(volumeutil.NewIOHandler()),
|
||||
}, nil
|
||||
}
|
||||
return &fcDiskMounter{
|
||||
fcDisk: fcDisk,
|
||||
fsType: fc.FSType,
|
||||
readOnly: readOnly,
|
||||
mounter: volumehelper.NewSafeFormatAndMountFromHost(fcPluginName, host),
|
||||
fcDisk: fcDisk,
|
||||
fsType: fc.FSType,
|
||||
readOnly: readOnly,
|
||||
mounter: volumeutil.NewSafeFormatAndMountFromHost(fcPluginName, host),
|
||||
deviceUtil: volumeutil.NewDeviceHandler(volumeutil.NewIOHandler()),
|
||||
}, nil
|
||||
}
|
||||
|
||||
@ -215,6 +216,7 @@ func volumeSpecToUnmounter(mounter mount.Interface) *fcDiskUnmounter {
|
||||
fcDisk: &fcDisk{
|
||||
io: &osIOHandler{},
|
||||
},
|
||||
mounter: mounter,
|
||||
mounter: mounter,
|
||||
deviceUtil: volumeutil.NewDeviceHandler(volumeutil.NewIOHandler()),
|
||||
}
|
||||
}
|
||||
|
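Most of the churn in this file is the relocation of helpers from pkg/volume and pkg/volume/util/volumehelper into pkg/volume/util. A minimal sketch of calling the relocated MountOptionFromSpec in isolation; the PV and the extra "ro" option are hypothetical, and the exact ordering of the merged slice is up to the helper:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/volume"
	volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

func main() {
	// A PV carrying explicit mount options; the helper merges them with
	// options the caller already computed ("ro" here).
	pv := &v1.PersistentVolume{
		Spec: v1.PersistentVolumeSpec{
			MountOptions: []string{"noatime"},
		},
	}
	spec := volume.NewSpecFromPersistentVolume(pv, true)
	fmt.Println(volumeutil.MountOptionFromSpec(spec, "ro")) // e.g. [ro noatime]
}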
4
vendor/k8s.io/kubernetes/pkg/volume/fc/disk_manager.go
generated
vendored
4
vendor/k8s.io/kubernetes/pkg/volume/fc/disk_manager.go
generated
vendored
@ -31,7 +31,9 @@ type diskManager interface {
	// Attaches the disk to the kubelet's host machine.
	AttachDisk(b fcDiskMounter) (string, error)
	// Detaches the disk from the kubelet's host machine.
	DetachDisk(disk fcDiskUnmounter, devName string) error
	DetachDisk(disk fcDiskUnmounter, devicePath string) error
	// Detaches the block disk from the kubelet's host machine.
	DetachBlockFCDisk(disk fcDiskUnmapper, mntPath, devicePath string) error
}

// utility to mount a disk based filesystem
127
vendor/k8s.io/kubernetes/pkg/volume/fc/fc.go
generated
vendored
127
vendor/k8s.io/kubernetes/pkg/volume/fc/fc.go
generated
vendored
@ -18,6 +18,7 @@ package fc

import (
	"fmt"
	"os"
	"strconv"
	"strings"

@ -31,7 +32,7 @@ import (
	utilstrings "k8s.io/kubernetes/pkg/util/strings"
	"k8s.io/kubernetes/pkg/volume"
	"k8s.io/kubernetes/pkg/volume/util"
	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
	"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
)

// This is the primary entrypoint for volume plugins.
@ -81,11 +82,7 @@ func (plugin *fcPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
}

func (plugin *fcPlugin) CanSupport(spec *volume.Spec) bool {
	if (spec.Volume != nil && spec.Volume.FC == nil) || (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.FC == nil) {
		return false
	}

	return true
	return (spec.Volume != nil && spec.Volume.FC != nil) || (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.FC != nil)
}

func (plugin *fcPlugin) RequiresRemount() bool {
@ -136,7 +133,7 @@ func (plugin *fcPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID,
	}
	// TODO: remove feature gate check after no longer needed
	if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
		volumeMode, err := volumehelper.GetVolumeMode(spec)
		volumeMode, err := util.GetVolumeMode(spec)
		if err != nil {
			return nil, err
		}
@ -147,13 +144,15 @@ func (plugin *fcPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID,
			volumeMode: volumeMode,
			readOnly:   readOnly,
			mounter:    &mount.SafeFormatAndMount{Interface: mounter, Exec: exec},
			deviceUtil: util.NewDeviceHandler(util.NewIOHandler()),
		}, nil
	}
	return &fcDiskMounter{
		fcDisk:   fcDisk,
		fsType:   fc.FSType,
		readOnly: readOnly,
		mounter:  &mount.SafeFormatAndMount{Interface: mounter, Exec: exec},
		fcDisk:     fcDisk,
		fsType:     fc.FSType,
		readOnly:   readOnly,
		mounter:    &mount.SafeFormatAndMount{Interface: mounter, Exec: exec},
		deviceUtil: util.NewDeviceHandler(util.NewIOHandler()),
	}, nil

}
@ -189,8 +188,9 @@ func (plugin *fcPlugin) newBlockVolumeMapperInternal(spec *volume.Spec, podUID t
		manager: manager,
		io:      &osIOHandler{},
		plugin:  plugin},
		readOnly: readOnly,
		mounter:  &mount.SafeFormatAndMount{Interface: mounter, Exec: exec},
		readOnly:   readOnly,
		mounter:    &mount.SafeFormatAndMount{Interface: mounter, Exec: exec},
		deviceUtil: util.NewDeviceHandler(util.NewIOHandler()),
	}, nil
}

@ -208,7 +208,8 @@ func (plugin *fcPlugin) newUnmounterInternal(volName string, podUID types.UID, m
		plugin: plugin,
		io:     &osIOHandler{},
		},
		mounter: mounter,
		mounter:    mounter,
		deviceUtil: util.NewDeviceHandler(util.NewIOHandler()),
	}, nil
}

@ -225,42 +226,91 @@ func (plugin *fcPlugin) newUnmapperInternal(volName string, podUID types.UID, ma
		plugin: plugin,
		io:     &osIOHandler{},
		},
		deviceUtil: util.NewDeviceHandler(util.NewIOHandler()),
	}, nil
}

func (plugin *fcPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
	fcVolume := &v1.Volume{
		Name: volumeName,
		VolumeSource: v1.VolumeSource{
			FC: &v1.FCVolumeSource{},
		},
	// Find globalPDPath from pod volume directory(mountPath)
	// examples:
	//   mountPath: pods/{podUid}/volumes/kubernetes.io~fc/{volumeName}
	//   globalPDPath : plugins/kubernetes.io/fc/50060e801049cfd1-lun-0
	var globalPDPath string
	mounter := plugin.host.GetMounter(plugin.GetPluginName())
	paths, err := mount.GetMountRefs(mounter, mountPath)
	if err != nil {
		return nil, err
	}
	for _, path := range paths {
		if strings.Contains(path, plugin.host.GetPluginDir(fcPluginName)) {
			globalPDPath = path
			break
		}
	}
	// Couldn't fetch globalPDPath
	if len(globalPDPath) == 0 {
		return nil, fmt.Errorf("couldn't fetch globalPDPath. failed to obtain volume spec")
	}
	arr := strings.Split(globalPDPath, "/")
	if len(arr) < 1 {
		return nil, fmt.Errorf("failed to retrieve volume plugin information from globalPDPath: %v", globalPDPath)
	}
	volumeInfo := arr[len(arr)-1]
	// Create volume from wwn+lun or wwid
	var fcVolume *v1.Volume
	if strings.Contains(volumeInfo, "-lun-") {
		wwnLun := strings.Split(volumeInfo, "-lun-")
		if len(wwnLun) < 2 {
			return nil, fmt.Errorf("failed to retrieve TargetWWN and Lun. volumeInfo is invalid: %v", volumeInfo)
		}
		lun, err := strconv.Atoi(wwnLun[1])
		if err != nil {
			return nil, err
		}
		lun32 := int32(lun)
		fcVolume = &v1.Volume{
			Name: volumeName,
			VolumeSource: v1.VolumeSource{
				FC: &v1.FCVolumeSource{TargetWWNs: []string{wwnLun[0]}, Lun: &lun32},
			},
		}
		glog.V(5).Infof("ConstructVolumeSpec: TargetWWNs: %v, Lun: %v",
			fcVolume.VolumeSource.FC.TargetWWNs, *fcVolume.VolumeSource.FC.Lun)
	} else {
		fcVolume = &v1.Volume{
			Name: volumeName,
			VolumeSource: v1.VolumeSource{
				FC: &v1.FCVolumeSource{WWIDs: []string{volumeInfo}},
			},
		}
		glog.V(5).Infof("ConstructVolumeSpec: WWIDs: %v", fcVolume.VolumeSource.FC.WWIDs)
	}
	return volume.NewSpecFromVolume(fcVolume), nil
}

// ConstructBlockVolumeSpec creates a new volume.Spec with following steps.
//   - Searchs a file whose name is {pod uuid} under volume plugin directory.
//   - Searches a file whose name is {pod uuid} under volume plugin directory.
//   - If a file is found, then retreives volumePluginDependentPath from globalMapPathUUID.
//   - Once volumePluginDependentPath is obtained, store volume information to VolumeSource
// examples:
//   mapPath: pods/{podUid}}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName}
//   mapPath: pods/{podUid}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName}
//   globalMapPathUUID : plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{pod uuid}
func (plugin *fcPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, mapPath string) (*volume.Spec, error) {
	pluginDir := plugin.host.GetVolumeDevicePluginDir(fcPluginName)
	blkutil := util.NewBlockVolumePathHandler()
	blkutil := volumepathhandler.NewBlockVolumePathHandler()
	globalMapPathUUID, err := blkutil.FindGlobalMapPathUUIDFromPod(pluginDir, mapPath, podUID)
	if err != nil {
		return nil, err
	}
	glog.V(5).Infof("globalMapPathUUID: %v, err: %v", globalMapPathUUID, err)

	// Retreive volumePluginDependentPath from globalMapPathUUID
	// Retrieve volumePluginDependentPath from globalMapPathUUID
	// globalMapPathUUID examples:
	//   wwn+lun: plugins/kubernetes.io/fc/volumeDevices/50060e801049cfd1-lun-0/{pod uuid}
	//   wwid: plugins/kubernetes.io/fc/volumeDevices/3600508b400105e210000900000490000/{pod uuid}
	arr := strings.Split(globalMapPathUUID, "/")
	if len(arr) < 2 {
		return nil, fmt.Errorf("Fail to retreive volume plugin information from globalMapPathUUID: %v", globalMapPathUUID)
		return nil, fmt.Errorf("Fail to retrieve volume plugin information from globalMapPathUUID: %v", globalMapPathUUID)
	}
	l := len(arr) - 2
	volumeInfo := arr[l]
@ -278,7 +328,7 @@ func (plugin *fcPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, m
		v1.FCVolumeSource{TargetWWNs: []string{wwnLun[0]}, Lun: &lun32})
	glog.V(5).Infof("ConstructBlockVolumeSpec: TargetWWNs: %v, Lun: %v",
		fcPV.Spec.PersistentVolumeSource.FC.TargetWWNs,
		fcPV.Spec.PersistentVolumeSource.FC.Lun)
		*fcPV.Spec.PersistentVolumeSource.FC.Lun)
	} else {
		fcPV = createPersistentVolumeFromFCVolumeSource(volumeName,
			v1.FCVolumeSource{WWIDs: []string{volumeInfo}})
@ -328,6 +378,7 @@ type fcDiskMounter struct {
	fsType     string
	volumeMode v1.PersistentVolumeMode
	mounter    *mount.SafeFormatAndMount
	deviceUtil util.DeviceUtil
}

var _ volume.Mounter = &fcDiskMounter{}
@ -362,7 +413,8 @@ func (b *fcDiskMounter) SetUpAt(dir string, fsGroup *int64) error {

type fcDiskUnmounter struct {
	*fcDisk
	mounter mount.Interface
	mounter    mount.Interface
	deviceUtil util.DeviceUtil
}

var _ volume.Unmounter = &fcDiskUnmounter{}
@ -380,8 +432,9 @@ func (c *fcDiskUnmounter) TearDownAt(dir string) error {
// Block Volumes Support
type fcDiskMapper struct {
	*fcDisk
	readOnly bool
	mounter  mount.Interface
	readOnly   bool
	mounter    mount.Interface
	deviceUtil util.DeviceUtil
}

var _ volume.BlockVolumeMapper = &fcDiskMapper{}
@ -392,18 +445,22 @@ func (b *fcDiskMapper) SetUpDevice() (string, error) {

type fcDiskUnmapper struct {
	*fcDisk
	deviceUtil util.DeviceUtil
}

var _ volume.BlockVolumeUnmapper = &fcDiskUnmapper{}

func (c *fcDiskUnmapper) TearDownDevice(_, devicePath string) error {
	// Remove scsi device from the node.
	if !strings.HasPrefix(devicePath, "/dev/") {
		return fmt.Errorf("fc detach disk: invalid device name: %s", devicePath)
func (c *fcDiskUnmapper) TearDownDevice(mapPath, devicePath string) error {
	err := c.manager.DetachBlockFCDisk(*c, mapPath, devicePath)
	if err != nil {
		return fmt.Errorf("fc: failed to detach disk: %s\nError: %v", mapPath, err)
	}
	arr := strings.Split(devicePath, "/")
	dev := arr[len(arr)-1]
	removeFromScsiSubsystem(dev, c.io)
	glog.V(4).Infof("fc: %q is unmounted, deleting the directory", mapPath)
	err = os.RemoveAll(mapPath)
	if err != nil {
		return fmt.Errorf("fc: failed to delete the directory: %s\nError: %v", mapPath, err)
	}
	glog.V(4).Infof("fc: successfully detached disk: %s", mapPath)
	return nil
}
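ConstructVolumeSpec above rebuilds the FC volume source purely from the last component of the global PD path, branching on whether it looks like {wwn}-lun-{lun} or a bare wwid. A standalone sketch of that parsing step (the sample value is taken from the comments in the diff):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Last component of a global PD path such as
	// plugins/kubernetes.io/fc/50060e801049cfd1-lun-0
	volumeInfo := "50060e801049cfd1-lun-0"

	if strings.Contains(volumeInfo, "-lun-") {
		wwnLun := strings.SplitN(volumeInfo, "-lun-", 2)
		lun, err := strconv.Atoi(wwnLun[1])
		if err != nil {
			panic(err)
		}
		fmt.Printf("TargetWWN=%s Lun=%d\n", wwnLun[0], int32(lun)) // TargetWWN=50060e801049cfd1 Lun=0
	} else {
		fmt.Printf("WWID=%s\n", volumeInfo) // wwid-style volume name
	}
}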
105
vendor/k8s.io/kubernetes/pkg/volume/fc/fc_test.go
generated
vendored
105
vendor/k8s.io/kubernetes/pkg/volume/fc/fc_test.go
generated
vendored
@ -19,6 +19,8 @@ package fc
import (
	"fmt"
	"os"
	"strconv"
	"strings"
	"testing"

	"k8s.io/api/core/v1"
@ -48,9 +50,24 @@ func TestCanSupport(t *testing.T) {
	if plug.GetPluginName() != "kubernetes.io/fc" {
		t.Errorf("Wrong name: %s", plug.GetPluginName())
	}
	if plug.CanSupport(&volume.Spec{}) {
		t.Errorf("Expected false")
	}
	if plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
		t.Errorf("Expected false")
	}
	if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{FC: &v1.FCVolumeSource{}}}}) {
		t.Errorf("Expected true")
	}
	if plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{}}}) {
		t.Errorf("Expected false")
	}
	if plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{}}}}) {
		t.Errorf("Expected false")
	}
	if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{FC: &v1.FCVolumeSource{}}}}}) {
		t.Errorf("Expected true")
	}
}

func TestGetAccessModes(t *testing.T) {
@ -120,6 +137,15 @@ func (fake *fakeDiskManager) DetachDisk(c fcDiskUnmounter, mntPath string) error
	return nil
}

func (fake *fakeDiskManager) DetachBlockFCDisk(c fcDiskUnmapper, mapPath, devicePath string) error {
	err := os.RemoveAll(mapPath)
	if err != nil {
		return err
	}
	fake.detachCalled = true
	return nil
}

func doTestPlugin(t *testing.T, spec *volume.Spec) {
	tmpDir, err := utiltesting.MkTmpdir("fc_test")
	if err != nil {
@ -162,13 +188,6 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
			t.Errorf("SetUp() failed: %v", err)
		}
	}
	if _, err := os.Stat(path); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, volume path not created: %s", path)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}

	fakeManager2 := NewFakeDiskManager()
	defer fakeManager2.Cleanup()
@ -403,3 +422,75 @@ func Test_getWwnsLunWwidsError(t *testing.T) {
		t.Errorf("unexpected fc disk found")
	}
}

func Test_ConstructVolumeSpec(t *testing.T) {
	fm := &mount.FakeMounter{
		MountPoints: []mount.MountPoint{
			{Device: "/dev/sdb", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod1"},
			{Device: "/dev/sdb", Path: "/var/lib/kubelet/plugins/kubernetes.io/fc/50060e801049cfd1-lun-0"},
			{Device: "/dev/sdc", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod2"},
			{Device: "/dev/sdc", Path: "/var/lib/kubelet/plugins/kubernetes.io/fc/volumeDevices/3600508b400105e210000900000490000"},
		},
	}
	mountPaths := []string{
		"/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod1",
		"/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod2",
	}
	for _, path := range mountPaths {
		refs, _ := mount.GetMountRefs(fm, path)
		var globalPDPath string
		for _, ref := range refs {
			if strings.Contains(ref, "kubernetes.io/fc") {
				globalPDPath = ref
				break
			}
		}
		if len(globalPDPath) == 0 {
			t.Errorf("couldn't fetch mountrefs")
		}
		arr := strings.Split(globalPDPath, "/")
		if len(arr) < 1 {
			t.Errorf("failed to retrieve volume plugin information from globalPDPath: %v", globalPDPath)
		}
		volumeInfo := arr[len(arr)-1]
		if strings.Contains(volumeInfo, "-lun-") {
			wwnLun := strings.Split(volumeInfo, "-lun-")
			if len(wwnLun) < 2 {
				t.Errorf("failed to retrieve TargetWWN and Lun. volumeInfo is invalid: %v", volumeInfo)
			}
			lun, _ := strconv.Atoi(wwnLun[1])
			lun32 := int32(lun)
			if wwnLun[0] != "50060e801049cfd1" || lun32 != 0 {
				t.Errorf("failed to retrieve TargetWWN and Lun")
			}
		} else {
			if volumeInfo != "3600508b400105e210000900000490000" {
				t.Errorf("failed to retrieve WWIDs")
			}
		}
	}
}

func Test_ConstructVolumeSpecNoRefs(t *testing.T) {
	fm := &mount.FakeMounter{
		MountPoints: []mount.MountPoint{
			{Device: "/dev/sdd", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod1"},
		},
	}
	mountPaths := []string{
		"/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod1",
	}
	for _, path := range mountPaths {
		refs, _ := mount.GetMountRefs(fm, path)
		var globalPDPath string
		for _, ref := range refs {
			if strings.Contains(ref, "kubernetes.io/fc") {
				globalPDPath = ref
				break
			}
		}
		if len(globalPDPath) != 0 {
			t.Errorf("invalid globalPDPath")
		}
	}
}
210
vendor/k8s.io/kubernetes/pkg/volume/fc/fc_util.go
generated
vendored
210
vendor/k8s.io/kubernetes/pkg/volume/fc/fc_util.go
generated
vendored
@ -29,6 +29,8 @@ import (
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/pkg/volume"
	volumeutil "k8s.io/kubernetes/pkg/volume/util"
	"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
)

type ioHandler interface {
@ -40,6 +42,11 @@ type ioHandler interface {

type osIOHandler struct{}

const (
	byPath = "/dev/disk/by-path/"
	byID   = "/dev/disk/by-id/"
)

func (handler *osIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) {
	return ioutil.ReadDir(dirname)
}
@ -53,37 +60,17 @@ func (handler *osIOHandler) WriteFile(filename string, data []byte, perm os.File
	return ioutil.WriteFile(filename, data, perm)
}

// given a disk path like /dev/sdx, find the devicemapper parent
// TODO #23192 Convert this code to use the generic code in ../util
// which is used by the iSCSI implementation
func findMultipathDeviceMapper(disk string, io ioHandler) string {
	sys_path := "/sys/block/"
	if dirs, err := io.ReadDir(sys_path); err == nil {
		for _, f := range dirs {
			name := f.Name()
			if strings.HasPrefix(name, "dm-") {
				if _, err1 := io.Lstat(sys_path + name + "/slaves/" + disk); err1 == nil {
					return "/dev/" + name
				}
			}
		}
	}
	return ""
}

// given a wwn and lun, find the device and associated devicemapper parent
func findDisk(wwn, lun string, io ioHandler) (string, string) {
func findDisk(wwn, lun string, io ioHandler, deviceUtil volumeutil.DeviceUtil) (string, string) {
	fc_path := "-fc-0x" + wwn + "-lun-" + lun
	dev_path := "/dev/disk/by-path/"
	dev_path := byPath
	if dirs, err := io.ReadDir(dev_path); err == nil {
		for _, f := range dirs {
			name := f.Name()
			if strings.Contains(name, fc_path) {
				if disk, err1 := io.EvalSymlinks(dev_path + name); err1 == nil {
					arr := strings.Split(disk, "/")
					l := len(arr) - 1
					dev := arr[l]
					dm := findMultipathDeviceMapper(dev, io)
					dm := deviceUtil.FindMultipathDeviceForDevice(disk)
					glog.Infof("fc: find disk: %v, dm: %v", disk, dm)
					return disk, dm
				}
			}
@ -93,7 +80,7 @@ func findDisk(wwn, lun string, io ioHandler) (string, string) {
}

// given a wwid, find the device and associated devicemapper parent
func findDiskWWIDs(wwid string, io ioHandler) (string, string) {
func findDiskWWIDs(wwid string, io ioHandler, deviceUtil volumeutil.DeviceUtil) (string, string) {
	// Example wwid format:
	//   3600508b400105e210000900000490000
	//   <VENDOR NAME> <IDENTIFIER NUMBER>
@ -104,7 +91,7 @@ func findDiskWWIDs(wwid string, io ioHandler) (string, string) {
	// underscore when wwid is exposed under /dev/by-id.

	fc_path := "scsi-" + wwid
	dev_id := "/dev/disk/by-id/"
	dev_id := byID
	if dirs, err := io.ReadDir(dev_id); err == nil {
		for _, f := range dirs {
			name := f.Name()
@ -114,10 +101,8 @@ func findDiskWWIDs(wwid string, io ioHandler) (string, string) {
				glog.V(2).Infof("fc: failed to find a corresponding disk from symlink[%s], error %v", dev_id+name, err)
				return "", ""
			}
			arr := strings.Split(disk, "/")
			l := len(arr) - 1
			dev := arr[l]
			dm := findMultipathDeviceMapper(dev, io)
			dm := deviceUtil.FindMultipathDeviceForDevice(disk)
			glog.Infof("fc: find disk: %v, dm: %v", disk, dm)
			return disk, dm
		}
	}
@ -197,9 +182,9 @@ func searchDisk(b fcDiskMounter) (string, error) {
	for true {
		for _, diskId := range diskIds {
			if len(wwns) != 0 {
				disk, dm = findDisk(diskId, lun, io)
				disk, dm = findDisk(diskId, lun, io, b.deviceUtil)
			} else {
				disk, dm = findDiskWWIDs(diskId, io)
				disk, dm = findDiskWWIDs(diskId, io, b.deviceUtil)
			}
			// if multipath device is found, break
			if dm != "" {
@ -265,13 +250,160 @@ func (util *FCUtil) AttachDisk(b fcDiskMounter) (string, error) {
	return devicePath, err
}

func (util *FCUtil) DetachDisk(c fcDiskUnmounter, devName string) error {
	// Remove scsi device from the node.
	if !strings.HasPrefix(devName, "/dev/") {
		return fmt.Errorf("fc detach disk: invalid device name: %s", devName)
// DetachDisk removes scsi device file such as /dev/sdX from the node.
func (util *FCUtil) DetachDisk(c fcDiskUnmounter, devicePath string) error {
	var devices []string
	// devicePath might be like /dev/mapper/mpathX. Find destination.
	dstPath, err := c.io.EvalSymlinks(devicePath)
	if err != nil {
		return err
	}
	// Find slave
	if strings.HasPrefix(dstPath, "/dev/dm-") {
		devices = c.deviceUtil.FindSlaveDevicesOnMultipath(dstPath)
	} else {
		// Add single devicepath to devices
		devices = append(devices, dstPath)
	}
	glog.V(4).Infof("fc: DetachDisk devicePath: %v, dstPath: %v, devices: %v", devicePath, dstPath, devices)
	var lastErr error
	for _, device := range devices {
		err := util.detachFCDisk(c.io, device)
		if err != nil {
			glog.Errorf("fc: detachFCDisk failed. device: %v err: %v", device, err)
			lastErr = fmt.Errorf("fc: detachFCDisk failed. device: %v err: %v", device, err)
		}
	}
	if lastErr != nil {
		glog.Errorf("fc: last error occurred during detach disk:\n%v", lastErr)
		return lastErr
	}
	arr := strings.Split(devName, "/")
	dev := arr[len(arr)-1]
	removeFromScsiSubsystem(dev, c.io)
	return nil
}

// detachFCDisk removes scsi device file such as /dev/sdX from the node.
func (util *FCUtil) detachFCDisk(io ioHandler, devicePath string) error {
	// Remove scsi device from the node.
	if !strings.HasPrefix(devicePath, "/dev/") {
		return fmt.Errorf("fc detach disk: invalid device name: %s", devicePath)
	}
	arr := strings.Split(devicePath, "/")
	dev := arr[len(arr)-1]
	removeFromScsiSubsystem(dev, io)
	return nil
}

// DetachBlockFCDisk detaches a volume from kubelet node, removes scsi device file
// such as /dev/sdX from the node, and then removes loopback for the scsi device.
func (util *FCUtil) DetachBlockFCDisk(c fcDiskUnmapper, mapPath, devicePath string) error {
	// Check if devicePath is valid
	if len(devicePath) != 0 {
		if pathExists, pathErr := checkPathExists(devicePath); !pathExists || pathErr != nil {
			return pathErr
		}
	} else {
		// TODO: FC plugin can't obtain the devicePath from kubelet because devicePath
		// in volume object isn't updated when volume is attached to kubelet node.
		glog.Infof("fc: devicePath is empty. Try to retrieve FC configuration from global map path: %v", mapPath)
	}

	// Check if global map path is valid
	// global map path examples:
	//   wwn+lun: plugins/kubernetes.io/fc/volumeDevices/50060e801049cfd1-lun-0/
	//   wwid: plugins/kubernetes.io/fc/volumeDevices/3600508b400105e210000900000490000/
	if pathExists, pathErr := checkPathExists(mapPath); !pathExists || pathErr != nil {
		return pathErr
	}

	// Retrieve volume plugin dependent path like '50060e801049cfd1-lun-0' from global map path
	arr := strings.Split(mapPath, "/")
	if len(arr) < 1 {
		return fmt.Errorf("Fail to retrieve volume plugin information from global map path: %v", mapPath)
	}
	volumeInfo := arr[len(arr)-1]

	// Search symbolick link which matches volumeInfo under /dev/disk/by-path or /dev/disk/by-id
	// then find destination device path from the link
	searchPath := byID
	if strings.Contains(volumeInfo, "-lun-") {
		searchPath = byPath
	}
	fis, err := ioutil.ReadDir(searchPath)
	if err != nil {
		return err
	}
	for _, fi := range fis {
		if strings.Contains(fi.Name(), volumeInfo) {
			devicePath = path.Join(searchPath, fi.Name())
			glog.V(5).Infof("fc: updated devicePath: %s", devicePath)
			break
		}
	}
	if len(devicePath) == 0 {
		return fmt.Errorf("fc: failed to find corresponding device from searchPath: %v", searchPath)
	}
	dstPath, err := c.io.EvalSymlinks(devicePath)
	if err != nil {
		return err
	}
	glog.V(4).Infof("fc: find destination device path from symlink: %v", dstPath)

	// Get loopback device which takes fd lock for device beofore detaching a volume from node.
	// TODO: This is a workaround for issue #54108
	// Currently local attach plugins such as FC, iSCSI, RBD can't obtain devicePath during
	// GenerateUnmapDeviceFunc() in operation_generator. As a result, these plugins fail to get
	// and remove loopback device then it will be remained on kubelet node. To avoid the problem,
	// local attach plugins needs to remove loopback device during TearDownDevice().
	var devices []string
	blkUtil := volumepathhandler.NewBlockVolumePathHandler()
	dm := c.deviceUtil.FindMultipathDeviceForDevice(dstPath)
	if len(dm) != 0 {
		dstPath = dm
	}
	loop, err := volumepathhandler.BlockVolumePathHandler.GetLoopDevice(blkUtil, dstPath)
	if err != nil {
		if err.Error() != volumepathhandler.ErrDeviceNotFound {
			return fmt.Errorf("fc: failed to get loopback for destination path: %v, err: %v", dstPath, err)
		}
		glog.Warning("fc: loopback for destination path: %s not found", dstPath)
	}

	// Detach volume from kubelet node
	if len(dm) != 0 {
		// Find all devices which are managed by multipath
		devices = c.deviceUtil.FindSlaveDevicesOnMultipath(dm)
	} else {
		// Add single device path to devices
		devices = append(devices, dstPath)
	}
	var lastErr error
	for _, device := range devices {
		err = util.detachFCDisk(c.io, device)
		if err != nil {
			glog.Errorf("fc: detachFCDisk failed. device: %v err: %v", device, err)
			lastErr = fmt.Errorf("fc: detachFCDisk failed. device: %v err: %v", device, err)
		}
	}
	if lastErr != nil {
		glog.Errorf("fc: last error occurred during detach disk:\n%v", lastErr)
		return lastErr
	}
	if len(loop) != 0 {
		// The volume was successfully detached from node. We can safely remove the loopback.
		err = volumepathhandler.BlockVolumePathHandler.RemoveLoopDevice(blkUtil, loop)
		if err != nil {
			return fmt.Errorf("fc: failed to remove loopback :%v, err: %v", loop, err)
		}
	}
	return nil
}

func checkPathExists(path string) (bool, error) {
	if pathExists, pathErr := volumeutil.PathExists(path); pathErr != nil {
		return pathExists, fmt.Errorf("Error checking if path exists: %v", pathErr)
	} else if !pathExists {
		glog.Warningf("Warning: Unmap skipped because path does not exist: %v", path)
		return pathExists, nil
	}
	return true, nil
}
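The rewritten DetachDisk first resolves the incoming path (which may be a /dev/mapper/mpathX alias) and then fans out to every slave device when it lands on a device-mapper node. A self-contained sketch of that resolution step; the slavesOf callback stands in for DeviceUtil.FindSlaveDevicesOnMultipath, and /dev/null is used only so the example runs anywhere:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// resolveDevices mirrors the shape of FCUtil.DetachDisk's first step:
// resolve a possibly-multipath device path to the set of block devices
// that actually need removing.
func resolveDevices(devicePath string, slavesOf func(string) []string) ([]string, error) {
	dstPath, err := filepath.EvalSymlinks(devicePath)
	if err != nil {
		return nil, err
	}
	if strings.HasPrefix(dstPath, "/dev/dm-") {
		return slavesOf(dstPath), nil // e.g. [/dev/sdb /dev/sdc]
	}
	return []string{dstPath}, nil // single non-multipath device
}

func main() {
	devs, err := resolveDevices("/dev/null", func(string) []string { return nil })
	fmt.Println(devs, err) // [/dev/null] <nil>
}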
4
vendor/k8s.io/kubernetes/pkg/volume/fc/fc_util_test.go
generated
vendored
4
vendor/k8s.io/kubernetes/pkg/volume/fc/fc_util_test.go
generated
vendored
@ -20,6 +20,8 @@ import (
	"os"
	"testing"
	"time"

	"k8s.io/kubernetes/pkg/volume/util"
)

type fakeFileInfo struct {
@ -91,6 +93,7 @@ func TestSearchDisk(t *testing.T) {
		lun: "0",
		io:  &fakeIOHandler{},
		},
		deviceUtil: util.NewDeviceHandler(util.NewIOHandler()),
	}
	devicePath, error := searchDisk(fakeMounter)
	// if no disk matches input wwn and lun, exit
@ -105,6 +108,7 @@ func TestSearchDiskWWID(t *testing.T) {
		wwids: []string{"3600508b400105e210000900000490000"},
		io:    &fakeIOHandler{},
		},
		deviceUtil: util.NewDeviceHandler(util.NewIOHandler()),
	}
	devicePath, error := searchDisk(fakeMounter)
	// if no disk matches input wwid, exit
3
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/BUILD
generated
vendored
3
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/BUILD
generated
vendored
@ -54,8 +54,7 @@ go_test(
        "probe_test.go",
        "unmounter_test.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/volume/flexvolume",
    library = ":go_default_library",
    embed = [":go_default_library"],
    deps = [
        "//pkg/util/filesystem:go_default_library",
        "//pkg/util/mount:go_default_library",
13
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/attacher-defaults.go
generated
vendored
13
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/attacher-defaults.go
generated
vendored
@ -48,7 +48,16 @@ func (a *attacherDefaults) GetDeviceMountPath(spec *volume.Spec, mountsDir strin
// MountDevice is part of the volume.Attacher interface
func (a *attacherDefaults) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string, mounter mount.Interface) error {
	glog.Warning(logPrefix(a.plugin.flexVolumePlugin), "using default MountDevice for volume ", spec.Name, ", device ", devicePath, ", deviceMountPath ", deviceMountPath)
	volSource, readOnly := getVolumeSource(spec)

	volSourceFSType, err := getFSType(spec)
	if err != nil {
		return err
	}

	readOnly, err := getReadOnly(spec)
	if err != nil {
		return err
	}

	options := make([]string, 0)

@ -60,5 +69,5 @@ func (a *attacherDefaults) MountDevice(spec *volume.Spec, devicePath string, dev

	diskMounter := &mount.SafeFormatAndMount{Interface: mounter, Exec: a.plugin.host.GetExec(a.plugin.GetPluginName())}

	return diskMounter.FormatAndMount(devicePath, deviceMountPath, volSource.FSType, options)
	return diskMounter.FormatAndMount(devicePath, deviceMountPath, volSourceFSType, options)
}
2
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/common_test.go
generated
vendored
2
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/common_test.go
generated
vendored
@ -119,7 +119,7 @@ func fakePersistentVolumeSpec() *volume.Spec {
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: v1.PersistentVolumeSource{
				FlexVolume: &v1.FlexVolumeSource{
				FlexVolume: &v1.FlexPersistentVolumeSource{
					Driver:   "kubernetes.io/fakeAttacher",
					ReadOnly: false,
				},
13
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/detacher.go
generated
vendored
13
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/detacher.go
generated
vendored
@ -19,7 +19,6 @@ package flexvolume
import (
	"fmt"
	"os"
	"time"

	"github.com/golang/glog"
	"k8s.io/apimachinery/pkg/types"
@ -47,18 +46,6 @@ func (d *flexVolumeDetacher) Detach(volumeName string, hostName types.NodeName)
	return err
}

// WaitForDetach is part of the volume.Detacher interface.
func (d *flexVolumeDetacher) WaitForDetach(devicePath string, timeout time.Duration) error {
	call := d.plugin.NewDriverCallWithTimeout(waitForDetachCmd, timeout)
	call.Append(devicePath)

	_, err := call.Run()
	if isCmdNotSupportedErr(err) {
		return (*detacherDefaults)(d).WaitForDetach(devicePath, timeout)
	}
	return err
}

// UnmountDevice is part of the volume.Detacher interface.
func (d *flexVolumeDetacher) UnmountDevice(deviceMountPath string) error {
22
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/driver-call.go
generated
vendored
22
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/driver-call.go
generated
vendored
@ -39,7 +39,6 @@ const (
	mountDeviceCmd = "mountdevice"

	detachCmd        = "detach"
	waitForDetachCmd = "waitfordetach"
	unmountDeviceCmd = "unmountdevice"

	mountCmd = "mount"
@ -162,10 +161,25 @@ func (dc *DriverCall) Run() (*DriverStatus, error) {
type OptionsForDriver map[string]string

func NewOptionsForDriver(spec *volume.Spec, host volume.VolumeHost, extraOptions map[string]string) (OptionsForDriver, error) {
	volSource, readOnly := getVolumeSource(spec)

	volSourceFSType, err := getFSType(spec)
	if err != nil {
		return nil, err
	}

	readOnly, err := getReadOnly(spec)
	if err != nil {
		return nil, err
	}

	volSourceOptions, err := getOptions(spec)
	if err != nil {
		return nil, err
	}

	options := map[string]string{}

	options[optionFSType] = volSource.FSType
	options[optionFSType] = volSourceFSType

	if readOnly {
		options[optionReadWrite] = "ro"
@ -179,7 +193,7 @@ func NewOptionsForDriver(spec *volume.Spec, host volume.VolumeHost, extraOptions
		options[key] = value
	}

	for key, value := range volSource.Options {
	for key, value := range volSourceOptions {
		options[key] = value
	}

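NewOptionsForDriver now builds its map from the individual accessors instead of one volume-source struct. A sketch of how the layers combine; the key strings mirror the plugin's optionFSType/optionReadWrite constants as an assumption, and all inputs are made up:

package main

import "fmt"

func buildOptions(fsType string, readOnly bool, volOptions, extra map[string]string) map[string]string {
	options := map[string]string{}
	options["kubernetes.io/fsType"] = fsType
	if readOnly {
		options["kubernetes.io/readwrite"] = "ro"
	} else {
		options["kubernetes.io/readwrite"] = "rw"
	}
	for k, v := range extra { // controller-supplied extras
		options[k] = v
	}
	for k, v := range volOptions { // per-volume options applied last
		options[k] = v
	}
	return options
}

func main() {
	fmt.Println(buildOptions("ext4", true, map[string]string{"foo": "bar"}, nil))
}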
2
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/flexvolume_test.go
generated
vendored
2
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/flexvolume_test.go
generated
vendored
@ -185,7 +185,7 @@ func TestCanSupport(t *testing.T) {
	if !plugin.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{FlexVolume: &v1.FlexVolumeSource{Driver: "kubernetes.io/fakeAttacher"}}}}) {
		t.Errorf("Expected true")
	}
	if !plugin.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{FlexVolume: &v1.FlexVolumeSource{Driver: "kubernetes.io/fakeAttacher"}}}}}) {
	if !plugin.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{FlexVolume: &v1.FlexPersistentVolumeSource{Driver: "kubernetes.io/fakeAttacher"}}}}}) {
		t.Errorf("Expected true")
	}
	if plugin.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
23
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/plugin.go
generated
vendored
23
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/plugin.go
generated
vendored
@ -30,6 +30,7 @@ import (
	"k8s.io/kubernetes/pkg/util/mount"
	utilstrings "k8s.io/kubernetes/pkg/util/strings"
	"k8s.io/kubernetes/pkg/volume"
	"k8s.io/kubernetes/pkg/volume/util"
	"k8s.io/utils/exec"
)

@ -103,7 +104,7 @@ func (plugin *flexVolumePlugin) getExecutable() string {
	execName := parts[len(parts)-1]
	execPath := path.Join(plugin.execPath, execName)
	if runtime.GOOS == "windows" {
		execPath = volume.GetWindowsPath(execPath)
		execPath = util.GetWindowsPath(execPath)
	}
	return execPath
}
@ -137,8 +138,11 @@ func (plugin *flexVolumePlugin) GetVolumeName(spec *volume.Spec) (string, error)

// CanSupport is part of the volume.VolumePlugin interface.
func (plugin *flexVolumePlugin) CanSupport(spec *volume.Spec) bool {
	source, _ := getVolumeSource(spec)
	return (source != nil) && (source.Driver == plugin.driverName)
	sourceDriver, err := getDriver(spec)
	if err != nil {
		return false
	}
	return sourceDriver == plugin.driverName
}

// RequiresRemount is part of the volume.VolumePlugin interface.
@ -161,10 +165,19 @@ func (plugin *flexVolumePlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ vo

// newMounterInternal is the internal mounter routine to build the volume.
func (plugin *flexVolumePlugin) newMounterInternal(spec *volume.Spec, pod *api.Pod, mounter mount.Interface, runner exec.Interface) (volume.Mounter, error) {
	source, readOnly := getVolumeSource(spec)
	sourceDriver, err := getDriver(spec)
	if err != nil {
		return nil, err
	}

	readOnly, err := getReadOnly(spec)
	if err != nil {
		return nil, err
	}

	return &flexVolumeMounter{
		flexVolume: &flexVolume{
			driverName: source.Driver,
			driverName: sourceDriver,
			execPath:   plugin.getExecutable(),
			mounter:    mounter,
			plugin:     plugin,
80
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/util.go
generated
vendored
80
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/util.go
generated
vendored
@ -22,15 +22,18 @@ import (
	"os"

	"github.com/golang/glog"
	api "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/util/mount"
	"k8s.io/kubernetes/pkg/volume"
	"k8s.io/kubernetes/pkg/volume/util"
)

func addSecretsToOptions(options map[string]string, spec *volume.Spec, namespace string, driverName string, host volume.VolumeHost) error {
	fv, _ := getVolumeSource(spec)
	if fv.SecretRef == nil {
	secretName, secretNamespace, err := getSecretNameAndNamespace(spec, namespace)
	if err != nil {
		return err
	}

	if len(secretName) == 0 || len(secretNamespace) == 0 {
		return nil
	}

@ -39,9 +42,9 @@ func addSecretsToOptions(options map[string]string, spec *volume.Spec, namespace
		return fmt.Errorf("Cannot get kube client")
	}

	secrets, err := util.GetSecretForPV(namespace, fv.SecretRef.Name, driverName, host.GetKubeClient())
	secrets, err := util.GetSecretForPV(secretNamespace, secretName, driverName, host.GetKubeClient())
	if err != nil {
		err = fmt.Errorf("Couldn't get secret %v/%v err: %v", namespace, fv.SecretRef.Name, err)
		err = fmt.Errorf("Couldn't get secret %v/%v err: %v", secretNamespace, secretName, err)
		return err
	}
	for name, data := range secrets {
@ -52,15 +55,68 @@ func addSecretsToOptions(options map[string]string, spec *volume.Spec, namespace
	return nil
}

func getVolumeSource(spec *volume.Spec) (volumeSource *api.FlexVolumeSource, readOnly bool) {
var notFlexVolume = fmt.Errorf("not a flex volume")

func getDriver(spec *volume.Spec) (string, error) {
	if spec.Volume != nil && spec.Volume.FlexVolume != nil {
		volumeSource = spec.Volume.FlexVolume
		readOnly = volumeSource.ReadOnly
	} else if spec.PersistentVolume != nil {
		volumeSource = spec.PersistentVolume.Spec.FlexVolume
		readOnly = spec.ReadOnly
		return spec.Volume.FlexVolume.Driver, nil
	}
	return
	if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.FlexVolume != nil {
		return spec.PersistentVolume.Spec.FlexVolume.Driver, nil
	}
	return "", notFlexVolume
}

func getFSType(spec *volume.Spec) (string, error) {
	if spec.Volume != nil && spec.Volume.FlexVolume != nil {
		return spec.Volume.FlexVolume.FSType, nil
	}
	if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.FlexVolume != nil {
		return spec.PersistentVolume.Spec.FlexVolume.FSType, nil
	}
	return "", notFlexVolume
}

func getSecretNameAndNamespace(spec *volume.Spec, podNamespace string) (string, string, error) {
	if spec.Volume != nil && spec.Volume.FlexVolume != nil {
		if spec.Volume.FlexVolume.SecretRef == nil {
			return "", "", nil
		}
		return spec.Volume.FlexVolume.SecretRef.Name, podNamespace, nil
	}
	if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.FlexVolume != nil {
		if spec.PersistentVolume.Spec.FlexVolume.SecretRef == nil {
			return "", "", nil
		}
		secretName := spec.PersistentVolume.Spec.FlexVolume.SecretRef.Name
		secretNamespace := spec.PersistentVolume.Spec.FlexVolume.SecretRef.Namespace
		if len(secretNamespace) == 0 {
			secretNamespace = podNamespace
		}
		return secretName, secretNamespace, nil
	}
	return "", "", notFlexVolume
}

func getReadOnly(spec *volume.Spec) (bool, error) {
	if spec.Volume != nil && spec.Volume.FlexVolume != nil {
		return spec.Volume.FlexVolume.ReadOnly, nil
	}
	if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.FlexVolume != nil {
		// ReadOnly is specified at the PV level
		return spec.ReadOnly, nil
	}
	return false, notFlexVolume
}

func getOptions(spec *volume.Spec) (map[string]string, error) {
	if spec.Volume != nil && spec.Volume.FlexVolume != nil {
		return spec.Volume.FlexVolume.Options, nil
	}
	if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.FlexVolume != nil {
		return spec.PersistentVolume.Spec.FlexVolume.Options, nil
	}
	return nil, notFlexVolume
}

func prepareForMount(mounter mount.Interface, deviceMountPath string) (bool, error) {
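The refactor above replaces the single getVolumeSource helper with per-field accessors that handle both in-line volumes and PVs and fail loudly on non-flex specs. A self-contained sketch of the pattern using simplified stand-in types (the real accessors operate on volume.Spec and the v1 FlexVolume sources):

package main

import (
	"errors"
	"fmt"
)

var errNotFlex = errors.New("not a flex volume")

// Simplified stand-ins for volume.Spec and the two FlexVolume sources.
type flexSource struct{ Driver, FSType string }
type spec struct {
	Volume           *flexSource // in-line pod volume
	PersistentVolume *flexSource // PV-backed volume
}

// getDriver works for either source; callers like CanSupport can simply
// treat an error as "cannot support".
func getDriver(s *spec) (string, error) {
	if s.Volume != nil {
		return s.Volume.Driver, nil
	}
	if s.PersistentVolume != nil {
		return s.PersistentVolume.Driver, nil
	}
	return "", errNotFlex
}

func main() {
	d, err := getDriver(&spec{Volume: &flexSource{Driver: "kubernetes.io/fakeAttacher"}})
	fmt.Println(d, err) // kubernetes.io/fakeAttacher <nil>
	_, err = getDriver(&spec{})
	fmt.Println(err) // not a flex volume
}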
4
vendor/k8s.io/kubernetes/pkg/volume/flocker/BUILD
generated
vendored
4
vendor/k8s.io/kubernetes/pkg/volume/flocker/BUILD
generated
vendored
@ -21,7 +21,6 @@ go_library(
        "//pkg/util/strings:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//pkg/volume/util/volumehelper:go_default_library",
        "//vendor/github.com/clusterhq/flocker-go:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
@ -39,8 +38,7 @@ go_test(
        "flocker_util_test.go",
        "flocker_volume_test.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/volume/flocker",
    library = ":go_default_library",
    embed = [":go_default_library"],
    deps = [
        "//pkg/util/mount:go_default_library",
        "//pkg/volume:go_default_library",
2
vendor/k8s.io/kubernetes/pkg/volume/flocker/flocker_test.go
generated
vendored
2
vendor/k8s.io/kubernetes/pkg/volume/flocker/flocker_test.go
generated
vendored
@ -338,7 +338,7 @@ func (m mockFlockerClient) UpdatePrimaryForDataset(primaryUUID, datasetID string
}

/*
TODO: reenable after refactor
TODO: re-enable after refactor
func TestSetUpAtInternal(t *testing.T) {
	const dir = "dir"
	mockPath := "expected-to-be-set-properly" // package var
5
vendor/k8s.io/kubernetes/pkg/volume/flocker/flocker_util.go
generated
vendored
5
vendor/k8s.io/kubernetes/pkg/volume/flocker/flocker_util.go
generated
vendored
@ -22,7 +22,8 @@ import (

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/rand"
	"k8s.io/kubernetes/pkg/volume"

	volutil "k8s.io/kubernetes/pkg/volume/util"

	flockerapi "github.com/clusterhq/flocker-go"
	"github.com/golang/glog"
@ -73,7 +74,7 @@ func (util *FlockerUtil) CreateVolume(c *flockerVolumeProvisioner) (datasetUUID

	capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
	requestBytes := capacity.Value()
	volumeSizeGB = int(volume.RoundUpSize(requestBytes, 1024*1024*1024))
	volumeSizeGB = int(volutil.RoundUpSize(requestBytes, 1024*1024*1024))

	createOptions := &flockerapi.CreateDatasetOptions{
		MaximumSize: requestBytes,
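RoundUpSize is ceiling division of the requested bytes by the allocation unit. A worked sketch of the arithmetic; the helper body below matches the documented contract but is written out here rather than copied from the vendored code:

package main

import "fmt"

// roundUpSize: the smallest number of allocation units that can hold
// volumeSizeBytes.
func roundUpSize(volumeSizeBytes, allocationUnitBytes int64) int64 {
	return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes
}

func main() {
	const GiB = 1024 * 1024 * 1024
	// A 1.5Gi claim provisions a 2GiB Flocker dataset.
	fmt.Println(roundUpSize(1536*1024*1024, GiB)) // 2
}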
6
vendor/k8s.io/kubernetes/pkg/volume/flocker/flocker_volume.go
generated
vendored
6
vendor/k8s.io/kubernetes/pkg/volume/flocker/flocker_volume.go
generated
vendored
@ -23,7 +23,7 @@ import (
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/volume"
	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
	"k8s.io/kubernetes/pkg/volume/util"
)

type volumeManager interface {
@ -55,7 +55,7 @@ type flockerVolumeProvisioner struct {
var _ volume.Provisioner = &flockerVolumeProvisioner{}

func (c *flockerVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
	if !volume.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
	if !util.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
		return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
	}

@ -77,7 +77,7 @@ func (c *flockerVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
			Name:   c.options.PVName,
			Labels: map[string]string{},
			Annotations: map[string]string{
				volumehelper.VolumeDynamicallyCreatedByKey: "flocker-dynamic-provisioner",
				util.VolumeDynamicallyCreatedByKey: "flocker-dynamic-provisioner",
			},
		},
		Spec: v1.PersistentVolumeSpec{
12
vendor/k8s.io/kubernetes/pkg/volume/gce_pd/BUILD
generated
vendored
12
vendor/k8s.io/kubernetes/pkg/volume/gce_pd/BUILD
generated
vendored
@ -12,23 +12,27 @@ go_library(
        "attacher.go",
        "doc.go",
        "gce_pd.go",
        "gce_pd_block.go",
        "gce_util.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/volume/gce_pd",
    deps = [
        "//pkg/cloudprovider:go_default_library",
        "//pkg/cloudprovider/providers/gce:go_default_library",
        "//pkg/features:go_default_library",
        "//pkg/kubelet/apis:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/util/strings:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//pkg/volume/util/volumehelper:go_default_library",
        "//pkg/volume/util/volumepathhandler:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/utils/exec:go_default_library",
    ],
)
@ -37,14 +41,16 @@ go_test(
    name = "go_default_test",
    srcs = [
        "attacher_test.go",
        "gce_pd_block_test.go",
        "gce_pd_test.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/volume/gce_pd",
    library = ":go_default_library",
    embed = [":go_default_library"],
    deps = [
        "//pkg/kubelet/apis:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/testing:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
9
vendor/k8s.io/kubernetes/pkg/volume/gce_pd/attacher.go
generated
vendored
9
vendor/k8s.io/kubernetes/pkg/volume/gce_pd/attacher.go
generated
vendored
@ -32,7 +32,6 @@ import (
	"k8s.io/kubernetes/pkg/util/mount"
	"k8s.io/kubernetes/pkg/volume"
	volumeutil "k8s.io/kubernetes/pkg/volume/util"
	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)

type gcePersistentDiskAttacher struct {
@ -88,7 +87,7 @@ func (attacher *gcePersistentDiskAttacher) Attach(spec *volume.Spec, nodeName ty
		// Volume is already attached to node.
		glog.Infof("Attach operation is successful. PD %q is already attached to node %q.", pdName, nodeName)
	} else {
		if err := attacher.gceDisks.AttachDisk(pdName, nodeName, readOnly); err != nil {
		if err := attacher.gceDisks.AttachDisk(pdName, nodeName, readOnly, isRegionalPD(spec)); err != nil {
			glog.Errorf("Error attaching PD %q to node %q: %+v", pdName, nodeName, err)
			return "", err
		}
@ -103,7 +102,7 @@ func (attacher *gcePersistentDiskAttacher) VolumesAreAttached(specs []*volume.Sp
	pdNameList := []string{}
	for _, spec := range specs {
		volumeSource, _, err := getVolumeSource(spec)
		// If error is occured, skip this volume and move to the next one
		// If error is occurred, skip this volume and move to the next one
		if err != nil {
			glog.Errorf("Error getting volume (%q) source : %v", spec.Name(), err)
			continue
@ -209,8 +208,8 @@ func (attacher *gcePersistentDiskAttacher) MountDevice(spec *volume.Spec, device
		options = append(options, "ro")
	}
	if notMnt {
		diskMounter := volumehelper.NewSafeFormatAndMountFromHost(gcePersistentDiskPluginName, attacher.host)
		mountOptions := volume.MountOptionFromSpec(spec, options...)
		diskMounter := volumeutil.NewSafeFormatAndMountFromHost(gcePersistentDiskPluginName, attacher.host)
		mountOptions := volumeutil.MountOptionFromSpec(spec, options...)
		err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions)
		if err != nil {
			os.Remove(deviceMountPath)
60
vendor/k8s.io/kubernetes/pkg/volume/gce_pd/attacher_test.go
generated
vendored
60
vendor/k8s.io/kubernetes/pkg/volume/gce_pd/attacher_test.go
generated
vendored
@ -29,6 +29,8 @@ import (
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func TestGetDeviceName_Volume(t *testing.T) {
|
||||
@ -48,7 +50,7 @@ func TestGetDeviceName_Volume(t *testing.T) {
|
||||
func TestGetDeviceName_PersistentVolume(t *testing.T) {
|
||||
plugin := newPlugin()
|
||||
name := "my-pd-pv"
|
||||
spec := createPVSpec(name, true)
|
||||
spec := createPVSpec(name, true, nil)
|
||||
|
||||
deviceName, err := plugin.GetVolumeName(spec)
|
||||
if err != nil {
|
||||
@ -74,10 +76,39 @@ type testcase struct {
|
||||
expectedReturn error
|
||||
}
|
||||
|
||||
func TestAttachDetachRegional(t *testing.T) {
|
||||
diskName := "disk"
|
||||
nodeName := types.NodeName("instance")
|
||||
readOnly := false
|
||||
regional := true
|
||||
spec := createPVSpec(diskName, readOnly, []string{"zone1", "zone2"})
|
||||
// Successful Attach call
|
||||
testcase := testcase{
|
||||
name: "Attach_Regional_Positive",
|
||||
diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, nil},
|
||||
attach: attachCall{diskName, nodeName, readOnly, regional, nil},
|
||||
test: func(testcase *testcase) error {
|
||||
attacher := newAttacher(testcase)
|
||||
devicePath, err := attacher.Attach(spec, nodeName)
|
||||
if devicePath != "/dev/disk/by-id/google-disk" {
|
||||
return fmt.Errorf("devicePath incorrect. Expected<\"/dev/disk/by-id/google-disk\"> Actual: <%q>", devicePath)
|
||||
}
|
||||
return err
|
||||
},
|
||||
}
|
||||
|
||||
err := testcase.test(&testcase)
|
||||
if err != testcase.expectedReturn {
|
||||
t.Errorf("%s failed: expected err=%q, got %q", testcase.name, testcase.expectedReturn.Error(), err.Error())
|
||||
}
|
||||
t.Logf("Test %q succeeded", testcase.name)
|
||||
}
|
||||
|
||||
func TestAttachDetach(t *testing.T) {
|
||||
diskName := "disk"
|
||||
nodeName := types.NodeName("instance")
|
||||
readOnly := false
|
    regional := false
    spec := createVolSpec(diskName, readOnly)
    attachError := errors.New("Fake attach error")
    detachError := errors.New("Fake detach error")
@@ -87,7 +118,7 @@ func TestAttachDetach(t *testing.T) {
        {
            name:           "Attach_Positive",
            diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, nil},
            attach:         attachCall{diskName, nodeName, readOnly, nil},
            attach:         attachCall{diskName, nodeName, readOnly, regional, nil},
            test: func(testcase *testcase) error {
                attacher := newAttacher(testcase)
                devicePath, err := attacher.Attach(spec, nodeName)
@@ -116,7 +147,7 @@ func TestAttachDetach(t *testing.T) {
        {
            name:           "Attach_Positive_CheckFails",
            diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, diskCheckError},
            attach:         attachCall{diskName, nodeName, readOnly, nil},
            attach:         attachCall{diskName, nodeName, readOnly, regional, nil},
            test: func(testcase *testcase) error {
                attacher := newAttacher(testcase)
                devicePath, err := attacher.Attach(spec, nodeName)
@@ -131,7 +162,7 @@ func TestAttachDetach(t *testing.T) {
        {
            name:           "Attach_Negative",
            diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, diskCheckError},
            attach:         attachCall{diskName, nodeName, readOnly, attachError},
            attach:         attachCall{diskName, nodeName, readOnly, regional, attachError},
            test: func(testcase *testcase) error {
                attacher := newAttacher(testcase)
                devicePath, err := attacher.Attach(spec, nodeName)
@@ -238,8 +269,8 @@ func createVolSpec(name string, readOnly bool) *volume.Spec {
        }
    }

func createPVSpec(name string, readOnly bool) *volume.Spec {
    return &volume.Spec{
func createPVSpec(name string, readOnly bool, zones []string) *volume.Spec {
    spec := &volume.Spec{
        PersistentVolume: &v1.PersistentVolume{
            Spec: v1.PersistentVolumeSpec{
                PersistentVolumeSource: v1.PersistentVolumeSource{
@@ -251,6 +282,15 @@ func createPVSpec(name string, readOnly bool) *volume.Spec {
            },
        },
    }

    if zones != nil {
        zonesLabel := strings.Join(zones, kubeletapis.LabelMultiZoneDelimiter)
        spec.PersistentVolume.ObjectMeta.Labels = map[string]string{
            kubeletapis.LabelZoneFailureDomain: zonesLabel,
        }
    }

    return spec
}

// Fake GCE implementation
@@ -259,6 +299,7 @@ type attachCall struct {
    diskName string
    nodeName types.NodeName
    readOnly bool
    regional bool
    ret      error
}

@@ -275,7 +316,7 @@ type diskIsAttachedCall struct {
    ret error
}

func (testcase *testcase) AttachDisk(diskName string, nodeName types.NodeName, readOnly bool) error {
func (testcase *testcase) AttachDisk(diskName string, nodeName types.NodeName, readOnly bool, regional bool) error {
    expected := &testcase.attach

    if expected.diskName == "" && expected.nodeName == "" {
@@ -300,6 +341,11 @@ func (testcase *testcase) AttachDisk(diskName string, nodeName types.NodeName, r
        return errors.New("Unexpected AttachDisk call: wrong readOnly")
    }

    if expected.regional != regional {
        testcase.t.Errorf("Unexpected AttachDisk call: expected regional %v, got %v", expected.regional, regional)
        return errors.New("Unexpected AttachDisk call: wrong regional")
    }

    glog.V(4).Infof("AttachDisk call: %s, %s, %v, returning %v", diskName, nodeName, readOnly, expected.ret)

    return expected.ret
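The zone handling added to createPVSpec above mirrors how the plugin later decides a PV is regional: the replica zones are joined into a single failure-domain label value, and splitting that value back apart yields more than one zone. A minimal, self-contained sketch of that round trip; the label key and "__" delimiter are assumed to match the upstream kubeletapis constants, and the zone names are illustrative only:

package main

import (
    "fmt"
    "strings"
)

// Assumed stand-ins for kubeletapis.LabelZoneFailureDomain and
// kubeletapis.LabelMultiZoneDelimiter used in the diff above.
const (
    labelZoneFailureDomain  = "failure-domain.beta.kubernetes.io/zone"
    labelMultiZoneDelimiter = "__"
)

func main() {
    zones := []string{"us-central1-a", "us-central1-b"}

    // What createPVSpec does: encode the replica zones into one label value.
    labels := map[string]string{
        labelZoneFailureDomain: strings.Join(zones, labelMultiZoneDelimiter),
    }

    // What isRegionalPD (later in this commit) does: split and count.
    split := strings.Split(labels[labelZoneFailureDomain], labelMultiZoneDelimiter)
    fmt.Println(len(split) > 1) // true => treated as a regional PD
}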
7 vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_pd.go generated vendored
@@ -31,7 +31,6 @@ import (
    kstrings "k8s.io/kubernetes/pkg/util/strings"
    "k8s.io/kubernetes/pkg/volume"
    "k8s.io/kubernetes/pkg/volume/util"
    "k8s.io/kubernetes/pkg/volume/util/volumehelper"
)

// This is the primary entrypoint for volume plugins.
@@ -398,7 +397,7 @@ type gcePersistentDiskProvisioner struct {
var _ volume.Provisioner = &gcePersistentDiskProvisioner{}

func (c *gcePersistentDiskProvisioner) Provision() (*v1.PersistentVolume, error) {
    if !volume.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
    if !util.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
        return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
    }

@@ -416,14 +415,14 @@ func (c *gcePersistentDiskProvisioner) Provision() (*v1.PersistentVolume, error)
            Name:   c.options.PVName,
            Labels: map[string]string{},
            Annotations: map[string]string{
                volumehelper.VolumeDynamicallyCreatedByKey: "gce-pd-dynamic-provisioner",
                util.VolumeDynamicallyCreatedByKey: "gce-pd-dynamic-provisioner",
            },
        },
        Spec: v1.PersistentVolumeSpec{
            PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy,
            AccessModes:                   c.options.PVC.Spec.AccessModes,
            Capacity: v1.ResourceList{
                v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
                v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dG", sizeGB)),
            },
            PersistentVolumeSource: v1.PersistentVolumeSource{
                GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
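The "%dGi" to "%dG" change above is more than cosmetic: Kubernetes resource quantities parse "G" as 10^9 bytes and "Gi" as 2^30, so the capacity recorded on the provisioned PV changes, matching GCE's GB-based disk sizing (this is also why the gce_pd_test.go hunk further down compares against 100*util.GB). A quick check of the arithmetic, as a hedged sketch:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/api/resource"
)

func main() {
    sizeGB := 100
    gi := resource.MustParse(fmt.Sprintf("%dGi", sizeGB)) // binary suffix
    g := resource.MustParse(fmt.Sprintf("%dG", sizeGB))   // decimal suffix

    fmt.Println(gi.Value()) // 107374182400 (100 * 2^30)
    fmt.Println(g.Value())  // 100000000000 (100 * 10^9), matches GCE's GB sizing
}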
169 vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_pd_block.go generated vendored Normal file
@@ -0,0 +1,169 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package gce_pd

import (
    "fmt"
    "path"
    "path/filepath"
    "strconv"

    "github.com/golang/glog"
    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/kubernetes/pkg/util/mount"
    kstrings "k8s.io/kubernetes/pkg/util/strings"
    "k8s.io/kubernetes/pkg/volume"
    "k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
)

var _ volume.VolumePlugin = &gcePersistentDiskPlugin{}
var _ volume.PersistentVolumePlugin = &gcePersistentDiskPlugin{}
var _ volume.BlockVolumePlugin = &gcePersistentDiskPlugin{}
var _ volume.DeletableVolumePlugin = &gcePersistentDiskPlugin{}
var _ volume.ProvisionableVolumePlugin = &gcePersistentDiskPlugin{}
var _ volume.ExpandableVolumePlugin = &gcePersistentDiskPlugin{}

func (plugin *gcePersistentDiskPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, mapPath string) (*volume.Spec, error) {
    pluginDir := plugin.host.GetVolumeDevicePluginDir(gcePersistentDiskPluginName)
    blkutil := volumepathhandler.NewBlockVolumePathHandler()
    globalMapPathUUID, err := blkutil.FindGlobalMapPathUUIDFromPod(pluginDir, mapPath, podUID)
    if err != nil {
        return nil, err
    }
    glog.V(5).Infof("globalMapPathUUID: %v, err: %v", globalMapPathUUID, err)

    globalMapPath := filepath.Dir(globalMapPathUUID)
    if len(globalMapPath) <= 1 {
        return nil, fmt.Errorf("failed to get volume plugin information from globalMapPathUUID: %v", globalMapPathUUID)
    }

    return getVolumeSpecFromGlobalMapPath(globalMapPath)
}

func getVolumeSpecFromGlobalMapPath(globalMapPath string) (*volume.Spec, error) {
    // Get volume spec information from globalMapPath
    // globalMapPath example:
    //   plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumeID}
    //   plugins/kubernetes.io/gce-pd/volumeDevices/vol-XXXXXX
    pdName := filepath.Base(globalMapPath)
    if len(pdName) <= 1 {
        return nil, fmt.Errorf("failed to get pd name from global path=%s", globalMapPath)
    }
    block := v1.PersistentVolumeBlock
    gceVolume := &v1.PersistentVolume{
        Spec: v1.PersistentVolumeSpec{
            PersistentVolumeSource: v1.PersistentVolumeSource{
                GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
                    PDName: pdName,
                },
            },
            VolumeMode: &block,
        },
    }

    return volume.NewSpecFromPersistentVolume(gceVolume, true), nil
}

// NewBlockVolumeMapper creates a new volume.BlockVolumeMapper from an API specification.
func (plugin *gcePersistentDiskPlugin) NewBlockVolumeMapper(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.BlockVolumeMapper, error) {
    // If this is called via GenerateUnmapDeviceFunc(), pod is nil.
    // Pass empty string as dummy uid since uid isn't used in the case.
    var uid types.UID
    if pod != nil {
        uid = pod.UID
    }

    return plugin.newBlockVolumeMapperInternal(spec, uid, &GCEDiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}

func (plugin *gcePersistentDiskPlugin) newBlockVolumeMapperInternal(spec *volume.Spec, podUID types.UID, manager pdManager, mounter mount.Interface) (volume.BlockVolumeMapper, error) {
    volumeSource, readOnly, err := getVolumeSource(spec)
    if err != nil {
        return nil, err
    }
    pdName := volumeSource.PDName
    partition := ""
    if volumeSource.Partition != 0 {
        partition = strconv.Itoa(int(volumeSource.Partition))
    }

    return &gcePersistentDiskMapper{
        gcePersistentDisk: &gcePersistentDisk{
            volName:   spec.Name(),
            podUID:    podUID,
            pdName:    pdName,
            partition: partition,
            manager:   manager,
            mounter:   mounter,
            plugin:    plugin,
        },
        readOnly: readOnly}, nil
}

func (plugin *gcePersistentDiskPlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (volume.BlockVolumeUnmapper, error) {
    return plugin.newUnmapperInternal(volName, podUID, &GCEDiskUtil{})
}

func (plugin *gcePersistentDiskPlugin) newUnmapperInternal(volName string, podUID types.UID, manager pdManager) (volume.BlockVolumeUnmapper, error) {
    return &gcePersistentDiskUnmapper{
        gcePersistentDisk: &gcePersistentDisk{
            volName: volName,
            podUID:  podUID,
            pdName:  volName,
            manager: manager,
            plugin:  plugin,
        }}, nil
}

func (c *gcePersistentDiskUnmapper) TearDownDevice(mapPath, devicePath string) error {
    return nil
}

type gcePersistentDiskUnmapper struct {
    *gcePersistentDisk
}

var _ volume.BlockVolumeUnmapper = &gcePersistentDiskUnmapper{}

type gcePersistentDiskMapper struct {
    *gcePersistentDisk
    readOnly bool
}

var _ volume.BlockVolumeMapper = &gcePersistentDiskMapper{}

func (b *gcePersistentDiskMapper) SetUpDevice() (string, error) {
    return "", nil
}

// GetGlobalMapPath returns global map path and error
// path: plugins/kubernetes.io/{PluginName}/volumeDevices/pdName
func (pd *gcePersistentDisk) GetGlobalMapPath(spec *volume.Spec) (string, error) {
    volumeSource, _, err := getVolumeSource(spec)
    if err != nil {
        return "", err
    }
    return path.Join(pd.plugin.host.GetVolumeDevicePluginDir(gcePersistentDiskPluginName), string(volumeSource.PDName)), nil
}

// GetPodDeviceMapPath returns pod device map path and volume name
// path: pods/{podUid}/volumeDevices/kubernetes.io~aws
func (pd *gcePersistentDisk) GetPodDeviceMapPath() (string, string) {
    name := gcePersistentDiskPluginName
    return pd.plugin.host.GetPodVolumeDeviceDir(pd.podUID, kstrings.EscapeQualifiedNameForDisk(name)), pd.volName
}
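getVolumeSpecFromGlobalMapPath above recovers the PD name purely from path structure: the caller first strips the trailing per-pod element with filepath.Dir, and the last remaining element is the disk name. A standalone sketch of the same parsing, with a made-up path (the trailing "poduid123" element is a hypothetical stand-in for what FindGlobalMapPathUUIDFromPod returns):

package main

import (
    "fmt"
    "path/filepath"
)

func main() {
    // Layout from the comment in the diff:
    //   plugins/kubernetes.io/gce-pd/volumeDevices/{pdName}/...
    globalMapPathUUID := "plugins/kubernetes.io/gce-pd/volumeDevices/pdVol1/poduid123"

    globalMapPath := filepath.Dir(globalMapPathUUID) // drop the per-pod element
    pdName := filepath.Base(globalMapPath)           // last element is the PD name

    fmt.Println(globalMapPath) // plugins/kubernetes.io/gce-pd/volumeDevices/pdVol1
    fmt.Println(pdName)        // pdVol1
}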
145 vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_pd_block_test.go generated vendored Normal file
@@ -0,0 +1,145 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package gce_pd

import (
    "os"
    "path"
    "testing"

    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    utiltesting "k8s.io/client-go/util/testing"
    "k8s.io/kubernetes/pkg/volume"
    volumetest "k8s.io/kubernetes/pkg/volume/testing"
)

const (
    testPdName     = "pdVol1"
    testPVName     = "pv1"
    testGlobalPath = "plugins/kubernetes.io/gce-pd/volumeDevices/pdVol1"
    testPodPath    = "pods/poduid/volumeDevices/kubernetes.io~gce-pd"
)

func TestGetVolumeSpecFromGlobalMapPath(t *testing.T) {
    // make our test path for fake GlobalMapPath
    // /tmp symbolized our pluginDir
    // /tmp/testGlobalPathXXXXX/plugins/kubernetes.io/gce-pd/volumeDevices/pdVol1
    tmpVDir, err := utiltesting.MkTmpdir("gceBlockTest")
    if err != nil {
        t.Fatalf("can't make a temp dir: %v", err)
    }
    //deferred clean up
    defer os.RemoveAll(tmpVDir)

    expectedGlobalPath := path.Join(tmpVDir, testGlobalPath)

    //Bad Path
    badspec, err := getVolumeSpecFromGlobalMapPath("")
    if badspec != nil || err == nil {
        t.Errorf("Expected not to get spec from GlobalMapPath but did")
    }

    // Good Path
    spec, err := getVolumeSpecFromGlobalMapPath(expectedGlobalPath)
    if spec == nil || err != nil {
        t.Fatalf("Failed to get spec from GlobalMapPath: %v", err)
    }
    if spec.PersistentVolume.Spec.GCEPersistentDisk.PDName != testPdName {
        t.Errorf("Invalid pdName from GlobalMapPath spec: %s", spec.PersistentVolume.Spec.GCEPersistentDisk.PDName)
    }
    block := v1.PersistentVolumeBlock
    specMode := spec.PersistentVolume.Spec.VolumeMode
    if &specMode == nil {
        t.Errorf("Invalid volumeMode from GlobalMapPath spec: %v expected: %v", &specMode, block)
    }
    if *specMode != block {
        t.Errorf("Invalid volumeMode from GlobalMapPath spec: %v expected: %v", *specMode, block)
    }
}

func getTestVolume(readOnly bool, path string, isBlock bool) *volume.Spec {
    pv := &v1.PersistentVolume{
        ObjectMeta: metav1.ObjectMeta{
            Name: testPVName,
        },
        Spec: v1.PersistentVolumeSpec{
            PersistentVolumeSource: v1.PersistentVolumeSource{
                GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
                    PDName: testPdName,
                },
            },
        },
    }

    if isBlock {
        blockMode := v1.PersistentVolumeBlock
        pv.Spec.VolumeMode = &blockMode
    }
    return volume.NewSpecFromPersistentVolume(pv, readOnly)
}

func TestGetPodAndPluginMapPaths(t *testing.T) {
    tmpVDir, err := utiltesting.MkTmpdir("gceBlockTest")
    if err != nil {
        t.Fatalf("can't make a temp dir: %v", err)
    }
    //deferred clean up
    defer os.RemoveAll(tmpVDir)

    expectedGlobalPath := path.Join(tmpVDir, testGlobalPath)
    expectedPodPath := path.Join(tmpVDir, testPodPath)

    spec := getTestVolume(false, tmpVDir, true /*isBlock*/)
    plugMgr := volume.VolumePluginMgr{}
    plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpVDir, nil, nil))
    plug, err := plugMgr.FindMapperPluginByName(gcePersistentDiskPluginName)
    if err != nil {
        os.RemoveAll(tmpVDir)
        t.Fatalf("Can't find the plugin by name: %q", gcePersistentDiskPluginName)
    }
    if plug.GetPluginName() != gcePersistentDiskPluginName {
        t.Fatalf("Wrong name: %s", plug.GetPluginName())
    }
    pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
    mapper, err := plug.NewBlockVolumeMapper(spec, pod, volume.VolumeOptions{})
    if err != nil {
        t.Fatalf("Failed to make a new Mounter: %v", err)
    }
    if mapper == nil {
        t.Fatalf("Got a nil Mounter")
    }

    //GetGlobalMapPath
    gMapPath, err := mapper.GetGlobalMapPath(spec)
    if err != nil || len(gMapPath) == 0 {
        t.Fatalf("Invalid GlobalMapPath from spec: %s", spec.PersistentVolume.Spec.GCEPersistentDisk.PDName)
    }
    if gMapPath != expectedGlobalPath {
        t.Errorf("Failed to get GlobalMapPath: %s %s", gMapPath, expectedGlobalPath)
    }

    //GetPodDeviceMapPath
    gDevicePath, gVolName := mapper.GetPodDeviceMapPath()
    if gDevicePath != expectedPodPath {
        t.Errorf("Got unexpected pod path: %s, expected %s", gDevicePath, expectedPodPath)
    }
    if gVolName != testPVName {
        t.Errorf("Got unexpected volNamne: %s, expected %s", gVolName, testPVName)
    }
}
10 vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_pd_test.go generated vendored
@@ -30,6 +30,7 @@ import (
    "k8s.io/kubernetes/pkg/util/mount"
    "k8s.io/kubernetes/pkg/volume"
    volumetest "k8s.io/kubernetes/pkg/volume/testing"
    "k8s.io/kubernetes/pkg/volume/util"
)

func TestCanSupport(t *testing.T) {
@@ -138,13 +139,6 @@ func TestPlugin(t *testing.T) {
            t.Errorf("SetUp() failed: %v", err)
        }
    }
    if _, err := os.Stat(path); err != nil {
        if os.IsNotExist(err) {
            t.Errorf("SetUp() failed, volume path not created: %s", path)
        } else {
            t.Errorf("SetUp() failed: %v", err)
        }
    }

    fakeManager = &fakePDManager{}
    unmounter, err := plug.(*gcePersistentDiskPlugin).newUnmounterInternal("vol1", types.UID("poduid"), fakeManager, fakeMounter)
@@ -183,7 +177,7 @@ func TestPlugin(t *testing.T) {
    }
    cap := persistentSpec.Spec.Capacity[v1.ResourceStorage]
    size := cap.Value()
    if size != 100*1024*1024*1024 {
    if size != 100*util.GB {
        t.Errorf("Provision() returned unexpected volume size: %v", size)
    }
125 vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_util.go generated vendored
@@ -26,8 +26,11 @@ import (
    "github.com/golang/glog"
    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/util/sets"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    "k8s.io/kubernetes/pkg/cloudprovider"
    gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
    "k8s.io/kubernetes/pkg/features"
    kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
    "k8s.io/kubernetes/pkg/volume"
    volumeutil "k8s.io/kubernetes/pkg/volume/util"
    "k8s.io/utils/exec"
@@ -40,11 +43,13 @@ const (
    diskPartitionSuffix = "-part"
    diskSDPath          = "/dev/sd"
    diskSDPattern       = "/dev/sd*"
    regionalPDZonesAuto = "auto" // "replica-zones: auto" means Kubernetes will select zones for RePD
    maxChecks           = 60
    maxRetries          = 10
    checkSleepDuration  = time.Second
    maxRegionalPDZones  = 2

    // Replication type constants must be lower case.
    replicationTypeNone       = "none"
    replicationTypeRegionalPD = "regional-pd"
)

// These variables are modified only in unit tests and should be constant
@@ -79,21 +84,21 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (strin
        return "", 0, nil, "", err
    }

    name := volume.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 63) // GCE PD name can have up to 63 characters
    name := volumeutil.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 63) // GCE PD name can have up to 63 characters
    capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
    requestBytes := capacity.Value()
    // GCE works with gigabytes, convert to GiB with rounding up
    requestGB := volume.RoundUpSize(requestBytes, 1024*1024*1024)
    // GCE PDs are allocated in chunks of GBs (not GiBs)
    requestGB := volumeutil.RoundUpToGB(capacity)

    // Apply Parameters (case-insensitive). We leave validation of
    // the values to the cloud provider.
    // Apply Parameters.
    // Values for parameter "replication-type" are canonicalized to lower case.
    // Values for other parameters are case-insensitive, and we leave validation of these values
    // to the cloud provider.
    diskType := ""
    configuredZone := ""
    configuredZones := ""
    configuredReplicaZones := ""
    zonePresent := false
    zonesPresent := false
    replicaZonesPresent := false
    replicationType := replicationTypeNone
    fstype := ""
    for k, v := range c.options.Parameters {
        switch strings.ToLower(k) {
@@ -105,9 +110,13 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (strin
        case "zones":
            zonesPresent = true
            configuredZones = v
        case "replica-zones":
            replicaZonesPresent = true
            configuredReplicaZones = v
        case "replication-type":
            if !utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) {
                return "", 0, nil, "",
                    fmt.Errorf("the %q option for volume plugin %v is only supported with the %q Kubernetes feature gate enabled",
                        k, c.plugin.GetPluginName(), features.GCERegionalPersistentDisk)
            }
            replicationType = strings.ToLower(v)
        case volume.VolumeParameterFSType:
            fstype = v
        default:
@@ -115,10 +124,14 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (strin
        }
    }

    if ((zonePresent || zonesPresent) && replicaZonesPresent) ||
        (zonePresent && zonesPresent) {
        // 011, 101, 111, 110
        return "", 0, nil, "", fmt.Errorf("a combination of zone, zones, and replica-zones StorageClass parameters must not be used at the same time")
    if zonePresent && zonesPresent {
        return "", 0, nil, "", fmt.Errorf("the 'zone' and 'zones' StorageClass parameters must not be used at the same time")
    }

    if replicationType == replicationTypeRegionalPD && zonePresent {
        // If a user accidentally types 'zone' instead of 'zones', we want to throw an error
        // instead of assuming that 'zones' is empty and proceed by randomly selecting zones.
        return "", 0, nil, "", fmt.Errorf("the '%s' replication type does not support the 'zone' parameter; use 'zones' instead", replicationTypeRegionalPD)
    }

    // TODO: implement PVC.Selector parsing
@@ -126,18 +139,13 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (strin
        return "", 0, nil, "", fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on GCE")
    }

    if !zonePresent && !zonesPresent && replicaZonesPresent {
        // 001 - "replica-zones" specified
        replicaZones, err := volumeutil.ZonesToSet(configuredReplicaZones)
        if err != nil {
            return "", 0, nil, "", err
        }

    switch replicationType {
    case replicationTypeRegionalPD:
        err = createRegionalPD(
            name,
            c.options.PVC.Name,
            diskType,
            replicaZones,
            configuredZones,
            requestGB,
            c.options.CloudTags,
            cloud)
@@ -147,10 +155,11 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (strin
        }

        glog.V(2).Infof("Successfully created Regional GCE PD volume %s", name)
    } else {

    case replicationTypeNone:
        var zones sets.String
        if !zonePresent && !zonesPresent {
            // 000 - neither "zone", "zones", or "replica-zones" specified
            // 00 - neither "zone" or "zones" specified
            // Pick a zone randomly selected from all active zones where
            // Kubernetes cluster has a node.
            zones, err = cloud.GetAllCurrentZones()
@@ -159,21 +168,21 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (strin
                return "", 0, nil, "", err
            }
        } else if !zonePresent && zonesPresent {
            // 010 - "zones" specified
            // 01 - "zones" specified
            // Pick a zone randomly selected from specified set.
            if zones, err = volumeutil.ZonesToSet(configuredZones); err != nil {
                return "", 0, nil, "", err
            }
        } else if zonePresent && !zonesPresent {
            // 100 - "zone" specified
            // 10 - "zone" specified
            // Use specified zone
            if err := volume.ValidateZone(configuredZone); err != nil {
            if err := volumeutil.ValidateZone(configuredZone); err != nil {
                return "", 0, nil, "", err
            }
            zones = make(sets.String)
            zones.Insert(configuredZone)
        }
        zone := volume.ChooseZoneForVolume(zones, c.options.PVC.Name)
        zone := volumeutil.ChooseZoneForVolume(zones, c.options.PVC.Name)

        if err := cloud.CreateDisk(
            name,
@@ -186,6 +195,9 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (strin
        }

        glog.V(2).Infof("Successfully created single-zone GCE PD volume %s", name)

    default:
        return "", 0, nil, "", fmt.Errorf("replication-type of '%s' is not supported", replicationType)
    }

    labels, err := cloud.GetAutoLabelsForPD(name, "" /* zone */)
@@ -202,32 +214,41 @@ func createRegionalPD(
    diskName string,
    pvcName string,
    diskType string,
    replicaZones sets.String,
    zonesString string,
    requestGB int64,
    cloudTags *map[string]string,
    cloud *gcecloud.GCECloud) error {

    autoZoneSelection := false
    if replicaZones.Len() != maxRegionalPDZones {
        replicaZonesList := replicaZones.UnsortedList()
        if replicaZones.Len() == 1 && replicaZonesList[0] == regionalPDZonesAuto {
            // User requested automatic zone selection.
            autoZoneSelection = true
        } else {
            return fmt.Errorf(
                "replica-zones specifies %d zones. It must specify %d zones or the keyword \"auto\" to let Kubernetes select zones.",
                replicaZones.Len(),
                maxRegionalPDZones)
    var replicaZones sets.String
    var err error

    if zonesString == "" {
        // Consider all zones
        replicaZones, err = cloud.GetAllCurrentZones()
        if err != nil {
            glog.V(2).Infof("error getting zone information from GCE: %v", err)
            return err
        }
    } else {
        replicaZones, err = volumeutil.ZonesToSet(zonesString)
        if err != nil {
            return err
        }
    }

    selectedReplicaZones := replicaZones
    if autoZoneSelection {
        selectedReplicaZones = volume.ChooseZonesForVolume(
    zoneCount := replicaZones.Len()
    var selectedReplicaZones sets.String
    if zoneCount < maxRegionalPDZones {
        return fmt.Errorf("cannot specify only %d zone(s) for Regional PDs.", zoneCount)
    } else if zoneCount == maxRegionalPDZones {
        selectedReplicaZones = replicaZones
    } else {
        // Must randomly select zones
        selectedReplicaZones = volumeutil.ChooseZonesForVolume(
            replicaZones, pvcName, maxRegionalPDZones)
    }

    if err := cloud.CreateRegionalDisk(
    if err = cloud.CreateRegionalDisk(
        diskName,
        diskType,
        selectedReplicaZones,
@@ -357,3 +378,13 @@ func udevadmChangeToDrive(drivePath string) error {
    }
    return nil
}

// Checks whether the given GCE PD volume spec is associated with a regional PD.
func isRegionalPD(spec *volume.Spec) bool {
    if spec.PersistentVolume != nil {
        zonesLabel := spec.PersistentVolume.Labels[kubeletapis.LabelZoneFailureDomain]
        zones := strings.Split(zonesLabel, kubeletapis.LabelMultiZoneDelimiter)
        return len(zones) > 1
    }
    return false
}
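The rewritten createRegionalPD above reduces replica-zone selection to three cases: fewer than two candidate zones is an error, exactly two are used as-is, and more than two are narrowed down deterministically. A dependency-free sketch of that logic under stated assumptions; upstream uses volumeutil.ChooseZonesForVolume, which hashes the PVC name across a sets.String, whereas this stand-in simply takes the first two entries of a slice:

package main

import "fmt"

const maxRegionalPDZones = 2 // from the const block in the diff above

// selectReplicaZones mirrors the zone-count branches in createRegionalPD.
// The "take the first two" default case is an illustrative stand-in for
// the hashed selection done by volumeutil.ChooseZonesForVolume.
func selectReplicaZones(zones []string, pvcName string) ([]string, error) {
    switch {
    case len(zones) < maxRegionalPDZones:
        return nil, fmt.Errorf("cannot specify only %d zone(s) for Regional PDs", len(zones))
    case len(zones) == maxRegionalPDZones:
        return zones, nil
    default:
        return zones[:maxRegionalPDZones], nil // stand-in for hashed selection
    }
}

func main() {
    zones := []string{"us-central1-a", "us-central1-b", "us-central1-c"}
    selected, err := selectReplicaZones(zones, "my-pvc")
    fmt.Println(selected, err) // [us-central1-a us-central1-b] <nil>
}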
8 vendor/k8s.io/kubernetes/pkg/volume/git_repo/BUILD generated vendored
@@ -14,29 +14,27 @@ go_library(
    ],
    importpath = "k8s.io/kubernetes/pkg/volume/git_repo",
    deps = [
        "//pkg/util/mount:go_default_library",
        "//pkg/util/strings:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/utils/exec:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["git_repo_test.go"],
    importpath = "k8s.io/kubernetes/pkg/volume/git_repo",
    library = ":go_default_library",
    embed = [":go_default_library"],
    deps = [
        "//pkg/util/mount:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/empty_dir:go_default_library",
        "//pkg/volume/testing:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/utils/exec:go_default_library",
        "//vendor/k8s.io/utils/exec/testing:go_default_library",
    ],
)
24 vendor/k8s.io/kubernetes/pkg/volume/git_repo/git_repo.go generated vendored
@@ -24,10 +24,10 @@ import (

    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/kubernetes/pkg/util/mount"
    utilstrings "k8s.io/kubernetes/pkg/util/strings"
    "k8s.io/kubernetes/pkg/volume"
    volumeutil "k8s.io/kubernetes/pkg/volume/util"
    "k8s.io/utils/exec"
)

// This is the primary entrypoint for volume plugins.
@@ -100,7 +100,8 @@ func (plugin *gitRepoPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, opts vol
        source:   spec.Volume.GitRepo.Repository,
        revision: spec.Volume.GitRepo.Revision,
        target:   spec.Volume.GitRepo.Directory,
        exec:     exec.New(),
        mounter:  plugin.host.GetMounter(plugin.GetPluginName()),
        exec:     plugin.host.GetExec(plugin.GetPluginName()),
        opts:     opts,
    }, nil
}
@@ -149,7 +150,8 @@ type gitRepoVolumeMounter struct {
    source   string
    revision string
    target   string
    exec     exec.Interface
    mounter  mount.Interface
    exec     mount.Exec
    opts     volume.VolumeOptions
}

@@ -195,7 +197,7 @@ func (b *gitRepoVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
    if len(b.target) != 0 {
        args = append(args, b.target)
    }
    if output, err := b.execCommand("git", args, dir); err != nil {
    if output, err := b.execGit(args, dir); err != nil {
        return fmt.Errorf("failed to exec 'git %s': %s: %v",
            strings.Join(args, " "), output, err)
    }
@@ -225,10 +227,10 @@ func (b *gitRepoVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
        return fmt.Errorf("unexpected directory contents: %v", files)
    }

    if output, err := b.execCommand("git", []string{"checkout", b.revision}, subdir); err != nil {
    if output, err := b.execGit([]string{"checkout", b.revision}, subdir); err != nil {
        return fmt.Errorf("failed to exec 'git checkout %s': %s: %v", b.revision, output, err)
    }
    if output, err := b.execCommand("git", []string{"reset", "--hard"}, subdir); err != nil {
    if output, err := b.execGit([]string{"reset", "--hard"}, subdir); err != nil {
        return fmt.Errorf("failed to exec 'git reset --hard': %s: %v", output, err)
    }

@@ -242,10 +244,10 @@ func (b *gitRepoVolumeMounter) getMetaDir() string {
    return path.Join(b.plugin.host.GetPodPluginDir(b.podUID, utilstrings.EscapeQualifiedNameForDisk(gitRepoPluginName)), b.volName)
}

func (b *gitRepoVolumeMounter) execCommand(command string, args []string, dir string) ([]byte, error) {
    cmd := b.exec.Command(command, args...)
    cmd.SetDir(dir)
    return cmd.CombinedOutput()
func (b *gitRepoVolumeMounter) execGit(args []string, dir string) ([]byte, error) {
    // run git -C <dir> <args>
    fullArgs := append([]string{"-C", dir}, args...)
    return b.exec.Run("git", fullArgs...)
}

// gitRepoVolumeUnmounter cleans git repo volumes.
@@ -262,7 +264,7 @@ func (c *gitRepoVolumeUnmounter) TearDown() error {

// TearDownAt simply deletes everything in the directory.
func (c *gitRepoVolumeUnmounter) TearDownAt(dir string) error {
    return volume.UnmountViaEmptyDir(dir, c.plugin.host, c.volName, wrappedVolumeSpec(), c.podUID)
    return volumeutil.UnmountViaEmptyDir(dir, c.plugin.host, c.volName, wrappedVolumeSpec(), c.podUID)
}

func getVolumeSource(spec *volume.Spec) (*v1.GitRepoVolumeSource, bool) {
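The execGit helper above replaces the old SetDir call with git's own -C flag, so the simpler mount.Exec interface (which has no working-directory knob) can still run commands "inside" the checkout. The equivalence, illustrated with the standard library's os/exec and a hypothetical directory path:

package main

import (
    "fmt"
    "os/exec"
)

func main() {
    dir := "/tmp/volume-dir" // hypothetical checkout directory
    args := []string{"reset", "--hard"}

    // Old approach: set the working directory on the command itself.
    withDir := exec.Command("git", args...)
    withDir.Dir = dir

    // New approach from the diff: let git change directory via -C.
    fullArgs := append([]string{"-C", dir}, args...)
    viaC := exec.Command("git", fullArgs...)

    fmt.Println(withDir.Args, "run in", withDir.Dir) // [git reset --hard] run in /tmp/volume-dir
    fmt.Println(viaC.Args)                           // [git -C /tmp/volume-dir reset --hard]
}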
182 vendor/k8s.io/kubernetes/pkg/volume/git_repo/git_repo_test.go generated vendored
@@ -28,11 +28,16 @@ import (
    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/kubernetes/pkg/util/mount"
    "k8s.io/kubernetes/pkg/volume"
    "k8s.io/kubernetes/pkg/volume/empty_dir"
    volumetest "k8s.io/kubernetes/pkg/volume/testing"
    "k8s.io/utils/exec"
    fakeexec "k8s.io/utils/exec/testing"
)

const (
    gitUrl            = "https://github.com/kubernetes/kubernetes.git"
    revision          = "2a30ce65c5ab586b98916d83385c5983edd353a1"
    gitRepositoryName = "kubernetes"
)

func newTestHost(t *testing.T) (string, volume.VolumeHost) {
@@ -62,23 +67,18 @@ func TestCanSupport(t *testing.T) {
}

// Expected command
type expectedCommand struct {
    // The git command
    cmd []string
    // The dir of git command is executed
    dir string
type expectedCommand []string

type testScenario struct {
    name              string
    vol               *v1.Volume
    repositoryDir     string
    expecteds         []expectedCommand
    isExpectedFailure bool
}

func TestPlugin(t *testing.T) {
    gitUrl := "https://github.com/kubernetes/kubernetes.git"
    revision := "2a30ce65c5ab586b98916d83385c5983edd353a1"

    scenarios := []struct {
        name              string
        vol               *v1.Volume
        expecteds         []expectedCommand
        isExpectedFailure bool
    }{
    scenarios := []testScenario{
        {
            name: "target-dir",
            vol: &v1.Volume{
@@ -91,19 +91,11 @@ func TestPlugin(t *testing.T) {
                },
            },
        },
        repositoryDir: "target_dir",
        expecteds: []expectedCommand{
            {
                cmd: []string{"git", "clone", gitUrl, "target_dir"},
                dir: "",
            },
            {
                cmd: []string{"git", "checkout", revision},
                dir: "/target_dir",
            },
            {
                cmd: []string{"git", "reset", "--hard"},
                dir: "/target_dir",
            },
            []string{"git", "-C", "volume-dir", "clone", gitUrl, "target_dir"},
            []string{"git", "-C", "volume-dir/target_dir", "checkout", revision},
            []string{"git", "-C", "volume-dir/target_dir", "reset", "--hard"},
        },
        isExpectedFailure: false,
    },
@@ -118,11 +110,9 @@ func TestPlugin(t *testing.T) {
                },
            },
        },
        repositoryDir: "target_dir",
        expecteds: []expectedCommand{
            {
                cmd: []string{"git", "clone", gitUrl, "target_dir"},
                dir: "",
            },
            []string{"git", "-C", "volume-dir", "clone", gitUrl, "target_dir"},
        },
        isExpectedFailure: false,
    },
@@ -136,11 +126,9 @@ func TestPlugin(t *testing.T) {
                },
            },
        },
        repositoryDir: "kubernetes",
        expecteds: []expectedCommand{
            {
                cmd: []string{"git", "clone", gitUrl},
                dir: "",
            },
            []string{"git", "-C", "volume-dir", "clone", gitUrl},
        },
        isExpectedFailure: false,
    },
@@ -156,19 +144,11 @@ func TestPlugin(t *testing.T) {
                },
            },
        },
        repositoryDir: "kubernetes",
        expecteds: []expectedCommand{
            {
                cmd: []string{"git", "clone", gitUrl},
                dir: "",
            },
            {
                cmd: []string{"git", "checkout", revision},
                dir: "/kubernetes",
            },
            {
                cmd: []string{"git", "reset", "--hard"},
                dir: "/kubernetes",
            },
            []string{"git", "-C", "volume-dir", "clone", gitUrl},
            []string{"git", "-C", "volume-dir/kubernetes", "checkout", revision},
            []string{"git", "-C", "volume-dir/kubernetes", "reset", "--hard"},
        },
        isExpectedFailure: false,
    },
@@ -184,19 +164,11 @@ func TestPlugin(t *testing.T) {
                },
            },
        },
        repositoryDir: "",
        expecteds: []expectedCommand{
            {
                cmd: []string{"git", "clone", gitUrl, "."},
                dir: "",
            },
            {
                cmd: []string{"git", "checkout", revision},
                dir: "",
            },
            {
                cmd: []string{"git", "reset", "--hard"},
                dir: "",
            },
            []string{"git", "-C", "volume-dir", "clone", gitUrl, "."},
            []string{"git", "-C", "volume-dir", "checkout", revision},
            []string{"git", "-C", "volume-dir", "reset", "--hard"},
        },
        isExpectedFailure: false,
    },
@@ -214,12 +186,7 @@ func TestPlugin(t *testing.T) {

    }

func doTestPlugin(scenario struct {
    name              string
    vol               *v1.Volume
    expecteds         []expectedCommand
    isExpectedFailure bool
}, t *testing.T) []error {
func doTestPlugin(scenario testScenario, t *testing.T) []error {
    allErrs := []error{}

    plugMgr := volume.VolumePluginMgr{}
@@ -311,73 +278,42 @@ func doTestPlugin(scenario testScenario, t *testing.T) []error {
    return allErrs
}

func doTestSetUp(scenario struct {
    name              string
    vol               *v1.Volume
    expecteds         []expectedCommand
    isExpectedFailure bool
}, mounter volume.Mounter) []error {
func doTestSetUp(scenario testScenario, mounter volume.Mounter) []error {
    expecteds := scenario.expecteds
    allErrs := []error{}

    // Construct combined outputs from expected commands
    var fakeOutputs []fakeexec.FakeCombinedOutputAction
    var fcmd fakeexec.FakeCmd
    for _, expected := range expecteds {
        if expected.cmd[1] == "clone" {
            fakeOutputs = append(fakeOutputs, func() ([]byte, error) {
                // git clone, it creates new dir/files
                os.MkdirAll(path.Join(fcmd.Dirs[0], expected.dir), 0750)
                return []byte{}, nil
            })
        } else {
            // git checkout || git reset, they create nothing
            fakeOutputs = append(fakeOutputs, func() ([]byte, error) {
                return []byte{}, nil
            })
    var commandLog []expectedCommand
    execCallback := func(cmd string, args ...string) ([]byte, error) {
        if len(args) < 2 {
            return nil, fmt.Errorf("expected at least 2 arguments, got %q", args)
        }
        if args[0] != "-C" {
            return nil, fmt.Errorf("expected the first argument to be \"-C\", got %q", args[0])
        }
        // command is 'git -C <dir> <command> <args>
        gitDir := args[1]
        gitCommand := args[2]
        if gitCommand == "clone" {
            // Clone creates a directory
            if scenario.repositoryDir != "" {
                os.MkdirAll(path.Join(gitDir, scenario.repositoryDir), 0750)
            }
        }
        // add the command to log with de-randomized gitDir
        args[1] = strings.Replace(gitDir, mounter.GetPath(), "volume-dir", 1)
        cmdline := append([]string{cmd}, args...)
        commandLog = append(commandLog, cmdline)
        return []byte{}, nil
    }
    fcmd = fakeexec.FakeCmd{
        CombinedOutputScript: fakeOutputs,
    }

    // Construct fake exec outputs from fcmd
    var fakeAction []fakeexec.FakeCommandAction
    for i := 0; i < len(expecteds); i++ {
        fakeAction = append(fakeAction, func(cmd string, args ...string) exec.Cmd {
            return fakeexec.InitFakeCmd(&fcmd, cmd, args...)
        })

    }
    fake := fakeexec.FakeExec{
        CommandScript: fakeAction,
    }

    g := mounter.(*gitRepoVolumeMounter)
    g.exec = &fake
    g.mounter = &mount.FakeMounter{}
    g.exec = mount.NewFakeExec(execCallback)

    g.SetUp(nil)

    if fake.CommandCalls != len(expecteds) {
    if !reflect.DeepEqual(expecteds, commandLog) {
        allErrs = append(allErrs,
            fmt.Errorf("unexpected command calls in scenario: expected %d, saw: %d", len(expecteds), fake.CommandCalls))
    }
    var expectedCmds [][]string
    for _, expected := range expecteds {
        expectedCmds = append(expectedCmds, expected.cmd)
    }
    if !reflect.DeepEqual(expectedCmds, fcmd.CombinedOutputLog) {
        allErrs = append(allErrs,
            fmt.Errorf("unexpected commands: %v, expected: %v", fcmd.CombinedOutputLog, expectedCmds))
    }

    var expectedPaths []string
    for _, expected := range expecteds {
        expectedPaths = append(expectedPaths, g.GetPath()+expected.dir)
    }
    if len(fcmd.Dirs) != len(expectedPaths) || !reflect.DeepEqual(expectedPaths, fcmd.Dirs) {
        allErrs = append(allErrs,
            fmt.Errorf("unexpected directories: %v, expected: %v", fcmd.Dirs, expectedPaths))
            fmt.Errorf("unexpected commands: %v, expected: %v", commandLog, expecteds))
    }

    return allErrs
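The rewritten test fixture above swaps a scripted FakeCmd for a single callback that both simulates the side effect of `git clone` and logs every invocation for later comparison. A dependency-free sketch of that capture pattern, with hypothetical names standing in for mount.Exec and mount.NewFakeExec:

package main

import (
    "fmt"
    "reflect"
)

// execFunc stands in for the mount.Exec Run method used in the diff above.
type execFunc func(cmd string, args ...string) ([]byte, error)

func main() {
    var commandLog [][]string

    // The fake: record the full command line, succeed unconditionally.
    fake := execFunc(func(cmd string, args ...string) ([]byte, error) {
        commandLog = append(commandLog, append([]string{cmd}, args...))
        return []byte{}, nil
    })

    fake("git", "-C", "volume-dir", "clone", "https://example.invalid/repo.git")

    expected := [][]string{{"git", "-C", "volume-dir", "clone", "https://example.invalid/repo.git"}}
    fmt.Println(reflect.DeepEqual(expected, commandLog)) // true
}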
5 vendor/k8s.io/kubernetes/pkg/volume/glusterfs/BUILD generated vendored
@@ -21,7 +21,6 @@ go_library(
        "//pkg/util/strings:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//pkg/volume/util/volumehelper:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/github.com/heketi/heketi/client/api/go-client:go_default_library",
        "//vendor/github.com/heketi/heketi/pkg/glusterfs/api:go_default_library",
@@ -32,6 +31,7 @@ go_library(
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
    ],
)
@@ -42,8 +42,7 @@ go_test(
        "glusterfs_minmax_test.go",
        "glusterfs_test.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/volume/glusterfs",
    library = ":go_default_library",
    embed = [":go_default_library"],
    deps = [
        "//pkg/util/mount:go_default_library",
        "//pkg/volume:go_default_library",
329 vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs.go generated vendored
@@ -36,13 +36,13 @@ import (
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/apimachinery/pkg/util/uuid"
    clientset "k8s.io/client-go/kubernetes"
    v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
    "k8s.io/kubernetes/pkg/util/mount"
    "k8s.io/kubernetes/pkg/util/strings"
    "k8s.io/kubernetes/pkg/volume"
    volutil "k8s.io/kubernetes/pkg/volume/util"
    "k8s.io/kubernetes/pkg/volume/util/volumehelper"
)

// ProbeVolumePlugins is the primary entrypoint for volume plugins.
@@ -74,6 +74,7 @@ const (
    gciLinuxGlusterMountBinaryPath = "/sbin/mount.glusterfs"
    defaultGidMin                  = 2000
    defaultGidMax                  = math.MaxInt32

    // absoluteGidMin/Max are currently the same as the
    // default values, but they play a different role and
    // could take a different value. Only thing we need is:
@@ -84,6 +85,7 @@ const (
    heketiAnn      = "heketi-dynamic-provisioner"
    glusterTypeAnn = "gluster.org/type"
    glusterDescAnn = "Gluster-Internal: Dynamically provisioned PV"
    heketiVolIDAnn = "gluster.kubernetes.io/heketi-volume-id"
)

func (plugin *glusterfsPlugin) Init(host volume.VolumeHost) error {
@@ -141,9 +143,13 @@ func (plugin *glusterfsPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode
}

func (plugin *glusterfsPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
    source, _ := plugin.getGlusterVolumeSource(spec)
    source, _, err := getVolumeSource(spec)
    if err != nil {
        glog.Errorf("failed to get gluster volumesource: %v", err)
        return nil, err
    }
    epName := source.EndpointsName
    // PVC/POD is in same ns.
    // PVC/POD is in same namespace.
    podNs := pod.Namespace
    kubeClient := plugin.host.GetKubeClient()
    if kubeClient == nil {
@@ -151,35 +157,27 @@ func (plugin *glusterfsPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volu
    }
    ep, err := kubeClient.CoreV1().Endpoints(podNs).Get(epName, metav1.GetOptions{})
    if err != nil {
        glog.Errorf("failed to get endpoints %s[%v]", epName, err)
        glog.Errorf("failed to get endpoint %s: %v", epName, err)
        return nil, err
    }
    glog.V(1).Infof("glusterfs pv endpoint %v", ep)
    glog.V(4).Infof("glusterfs pv endpoint %v", ep)
    return plugin.newMounterInternal(spec, ep, pod, plugin.host.GetMounter(plugin.GetPluginName()))
}

func (plugin *glusterfsPlugin) getGlusterVolumeSource(spec *volume.Spec) (*v1.GlusterfsVolumeSource, bool) {
    // Glusterfs volumes used directly in a pod have a ReadOnly flag set by the pod author.
    // Glusterfs volumes used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
    if spec.Volume != nil && spec.Volume.Glusterfs != nil {
        return spec.Volume.Glusterfs, spec.Volume.Glusterfs.ReadOnly
    }
    return spec.PersistentVolume.Spec.Glusterfs, spec.ReadOnly
}

func (plugin *glusterfsPlugin) newMounterInternal(spec *volume.Spec, ep *v1.Endpoints, pod *v1.Pod, mounter mount.Interface) (volume.Mounter, error) {
    source, readOnly := plugin.getGlusterVolumeSource(spec)
    source, readOnly, _ := getVolumeSource(spec)
    return &glusterfsMounter{
        glusterfs: &glusterfs{
            volName: spec.Name(),
            mounter: mounter,
            pod:     pod,
            plugin:  plugin,
            volName:         spec.Name(),
            mounter:         mounter,
            pod:             pod,
            plugin:          plugin,
            MetricsProvider: volume.NewMetricsStatFS(plugin.host.GetPodVolumeDir(pod.UID, strings.EscapeQualifiedNameForDisk(glusterfsPluginName), spec.Name())),
        },
        hosts:        ep,
        path:         source.Path,
        readOnly:     readOnly,
        mountOptions: volume.MountOptionFromSpec(spec),
        mountOptions: volutil.MountOptionFromSpec(spec),
    }, nil
}

@@ -189,16 +187,17 @@ func (plugin *glusterfsPlugin) NewUnmounter(volName string, podUID types.UID) (v

func (plugin *glusterfsPlugin) newUnmounterInternal(volName string, podUID types.UID, mounter mount.Interface) (volume.Unmounter, error) {
    return &glusterfsUnmounter{&glusterfs{
        volName: volName,
        mounter: mounter,
        pod:     &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: podUID}},
        plugin:  plugin,
        volName:         volName,
        mounter:         mounter,
        pod:             &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: podUID}},
        plugin:          plugin,
        MetricsProvider: volume.NewMetricsStatFS(plugin.host.GetPodVolumeDir(podUID, strings.EscapeQualifiedNameForDisk(glusterfsPluginName), volName)),
    }}, nil
}

func (plugin *glusterfsPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {

    // To reconstrcut volume spec we need endpoint where fetching endpoint from mount
    // To reconstruct volume spec we need endpoint where fetching endpoint from mount
    // string looks to be impossible, so returning error.

    return nil, fmt.Errorf("impossible to reconstruct glusterfs volume spec from volume mountpath")
@@ -210,7 +209,7 @@ type glusterfs struct {
    pod     *v1.Pod
    mounter mount.Interface
    plugin  *glusterfsPlugin
    volume.MetricsNil
    volume.MetricsProvider
}

type glusterfsMounter struct {
@@ -238,7 +237,7 @@ func (b *glusterfsMounter) CanMount() error {
    exe := b.plugin.host.GetExec(b.plugin.GetPluginName())
    switch runtime.GOOS {
    case "linux":
        if _, err := exe.Run("/bin/ls", gciLinuxGlusterMountBinaryPath); err != nil {
        if _, err := exe.Run("test", "-x", gciLinuxGlusterMountBinaryPath); err != nil {
            return fmt.Errorf("Required binary %s is missing", gciLinuxGlusterMountBinaryPath)
        }
    }
@@ -259,8 +258,9 @@ func (b *glusterfsMounter) SetUpAt(dir string, fsGroup *int64) error {
    if !notMnt {
        return nil
    }

    os.MkdirAll(dir, 0750)
    if err := os.MkdirAll(dir, 0750); err != nil {
        return err
    }
    err = b.setUpAtInternal(dir)
    if err == nil {
        return nil
@@ -300,7 +300,7 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error {

    p := path.Join(b.glusterfs.plugin.host.GetPluginDir(glusterfsPluginName), b.glusterfs.volName)
    if err := os.MkdirAll(p, 0750); err != nil {
        return fmt.Errorf("Error creating directory %v: %v", p, err)
        return fmt.Errorf("failed to create directory %v: %v", p, err)
    }

    // adding log-level ERROR to remove noise
@@ -312,7 +312,7 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error {

    var addrlist []string
    if b.hosts == nil {
        return fmt.Errorf("glusterfs: endpoint is nil")
        return fmt.Errorf("glusterfs endpoint is nil in mounter")
    }
    addr := sets.String{}
    if b.hosts.Subsets != nil {
@@ -327,18 +327,18 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error {

    }
    options = append(options, "backup-volfile-servers="+dstrings.Join(addrlist[:], ":"))
    mountOptions := volume.JoinMountOptions(b.mountOptions, options)
    mountOptions := volutil.JoinMountOptions(b.mountOptions, options)

    // with `backup-volfile-servers` mount option in place, it is not required to
    // iterate over all the servers in the addrlist. A mount attempt with this option
    // will fetch all the servers mentioned in the backup-volfile-servers list.
    // Refer backup-volfile-servers @ https://access.redhat.com/documentation/en-US/Red_Hat_Storage/3/html/Administration_Guide/sect-Native_Client.html
    // Refer to backup-volfile-servers @ http://docs.gluster.org/en/latest/Administrator%20Guide/Setting%20Up%20Clients/

    if (len(addrlist) > 0) && (addrlist[0] != "") {
        ip := addrlist[0]
        errs = b.mounter.Mount(ip+":"+b.path, dir, "glusterfs", mountOptions)
        if errs == nil {
            glog.Infof("glusterfs: successfully mounted %s", dir)
            glog.Infof("successfully mounted directory %s", dir)
            return nil
        }
|
||||
|
||||
}
|
||||
|
||||
func getVolumeSource(
|
||||
spec *volume.Spec) (*v1.GlusterfsVolumeSource, bool, error) {
|
||||
func getVolumeSource(spec *volume.Spec) (*v1.GlusterfsVolumeSource, bool, error) {
|
||||
if spec.Volume != nil && spec.Volume.Glusterfs != nil {
|
||||
return spec.Volume.Glusterfs, spec.Volume.Glusterfs.ReadOnly, nil
|
||||
} else if spec.PersistentVolume != nil &&
|
||||
@ -402,17 +401,18 @@ func (plugin *glusterfsPlugin) newProvisionerInternal(options volume.VolumeOptio
|
||||
}
|
||||
|
||||
type provisionerConfig struct {
|
||||
url string
|
||||
user string
|
||||
userKey string
|
||||
secretNamespace string
|
||||
secretName string
|
||||
secretValue string
|
||||
clusterID string
|
||||
gidMin int
|
||||
gidMax int
|
||||
volumeType gapi.VolumeDurabilityInfo
|
||||
volumeOptions []string
|
||||
url string
|
||||
user string
|
||||
userKey string
|
||||
secretNamespace string
|
||||
secretName string
|
||||
secretValue string
|
||||
clusterID string
|
||||
gidMin int
|
||||
gidMax int
|
||||
volumeType gapi.VolumeDurabilityInfo
|
||||
volumeOptions []string
|
||||
volumeNamePrefix string
|
||||
}
|
||||
|
||||
type glusterfsVolumeProvisioner struct {
|
||||
@ -424,11 +424,11 @@ type glusterfsVolumeProvisioner struct {
|
||||
func convertGid(gidString string) (int, error) {
|
||||
gid64, err := strconv.ParseInt(gidString, 10, 32)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to parse gid %v ", gidString)
|
||||
return 0, fmt.Errorf("failed to parse gid %v: %v", gidString, err)
|
||||
}
|
||||
|
||||
if gid64 < 0 {
|
||||
return 0, fmt.Errorf("negative GIDs are not allowed: %v", gidString)
|
||||
return 0, fmt.Errorf("negative GIDs %v are not allowed", gidString)
|
||||
}
|
||||
|
||||
// ParseInt returns a int64, but since we parsed only
|
||||
@ -441,7 +441,7 @@ func convertVolumeParam(volumeString string) (int, error) {
|
||||
|
||||
count, err := strconv.Atoi(volumeString)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to parse %q", volumeString)
|
||||
return 0, fmt.Errorf("failed to parse volumestring %q: %v", volumeString, err)
|
||||
}
|
||||
|
||||
if count < 0 {
|
||||
@ -490,7 +490,7 @@ func (plugin *glusterfsPlugin) collectGids(className string, gidTable *MinMaxAll
|
||||
}
|
||||
pvList, err := kubeClient.CoreV1().PersistentVolumes().List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
|
||||
if err != nil {
|
||||
glog.Errorf("failed to get existing persistent volumes")
|
||||
glog.Error("failed to get existing persistent volumes")
|
||||
return err
|
||||
}
|
||||
|
||||
@ -501,24 +501,24 @@ func (plugin *glusterfsPlugin) collectGids(className string, gidTable *MinMaxAll
|
||||
|
||||
pvName := pv.ObjectMeta.Name
|
||||
|
||||
gidStr, ok := pv.Annotations[volumehelper.VolumeGidAnnotationKey]
|
||||
gidStr, ok := pv.Annotations[volutil.VolumeGidAnnotationKey]
|
||||
|
||||
if !ok {
|
||||
glog.Warningf("no GID found in pv '%v'", pvName)
|
||||
glog.Warningf("no GID found in pv %v", pvName)
|
||||
continue
|
||||
}
|
||||
|
||||
gid, err := convertGid(gidStr)
|
||||
if err != nil {
|
||||
glog.Error(err)
|
||||
glog.Errorf("failed to parse gid %s: %v", gidStr, err)
|
||||
continue
|
||||
}
|
||||
|
||||
_, err = gidTable.Allocate(gid)
|
 		if err == ErrConflict {
-			glog.Warningf("GID %v found in pv %v was already allocated", gid)
+			glog.Warningf("GID %v found in pv %v was already allocated", gid, pvName)
 		} else if err != nil {
-			glog.Errorf("failed to store gid %v found in pv '%v': %v", gid, pvName, err)
+			glog.Errorf("failed to store gid %v found in pv %v: %v", gid, pvName, err)
 			return err
 		}
 	}
@@ -563,7 +563,6 @@ func (plugin *glusterfsPlugin) getGidTable(className string, min int, max int) (
 	}
 
 	// if in the meantime a table appeared, use it
-
 	plugin.gidTableLock.Lock()
 	defer plugin.gidTableLock.Unlock()
 
@@ -583,7 +582,7 @@ func (plugin *glusterfsPlugin) getGidTable(className string, min int, max int) (
 }
 
 func (d *glusterfsVolumeDeleter) getGid() (int, bool, error) {
-	gidStr, ok := d.spec.Annotations[volumehelper.VolumeGidAnnotationKey]
+	gidStr, ok := d.spec.Annotations[volutil.VolumeGidAnnotationKey]
 
 	if !ok {
 		return 0, false, nil
@@ -595,9 +594,14 @@ func (d *glusterfsVolumeDeleter) getGid() (int, bool, error) {
 }
 
 func (d *glusterfsVolumeDeleter) Delete() error {
-	glog.V(2).Infof("delete volume: %s ", d.glusterfsMounter.path)
+	glog.V(2).Infof("delete volume %s", d.glusterfsMounter.path)
+
 	volumeName := d.glusterfsMounter.path
-	volumeID := dstrings.TrimPrefix(volumeName, volPrefix)
+	volumeID, err := getVolumeID(d.spec, volumeName)
+	if err != nil {
+		return fmt.Errorf("failed to get volumeID: %v", err)
+	}
+
 	class, err := volutil.GetClassForVolume(d.plugin.host.GetKubeClient(), d.spec)
 	if err != nil {
 		return err
@@ -609,7 +613,7 @@ func (d *glusterfsVolumeDeleter) Delete() error {
 	}
 	d.provisionerConfig = *cfg
 
-	glog.V(4).Infof("deleting volume %q with configuration %+v", volumeID, d.provisionerConfig)
+	glog.V(4).Infof("deleting volume %q", volumeID)
 
 	gid, exists, err := d.getGid()
 	if err != nil {
@@ -628,17 +632,17 @@ func (d *glusterfsVolumeDeleter) Delete() error {
 
 	cli := gcli.NewClient(d.url, d.user, d.secretValue)
 	if cli == nil {
-		glog.Errorf("failed to create glusterfs rest client")
-		return fmt.Errorf("failed to create glusterfs rest client, REST server authentication failed")
+		glog.Errorf("failed to create glusterfs REST client")
+		return fmt.Errorf("failed to create glusterfs REST client, REST server authentication failed")
 	}
 	err = cli.VolumeDelete(volumeID)
 	if err != nil {
-		glog.Errorf("error when deleting the volume :%v", err)
+		glog.Errorf("failed to delete volume %s: %v", volumeName, err)
 		return err
 	}
 	glog.V(2).Infof("volume %s deleted successfully", volumeName)
 
-	//Deleter takes endpoint and endpointnamespace from pv spec.
+	//Deleter takes endpoint and namespace from pv spec.
 	pvSpec := d.spec.Spec
 	var dynamicEndpoint, dynamicNamespace string
 	if pvSpec.ClaimRef == nil {
@@ -653,18 +657,18 @@ func (d *glusterfsVolumeDeleter) Delete() error {
 		if pvSpec.Glusterfs.EndpointsName != "" {
 			dynamicEndpoint = pvSpec.Glusterfs.EndpointsName
 		}
-		glog.V(3).Infof("dynamic namespace and endpoint : [%v/%v]", dynamicNamespace, dynamicEndpoint)
+		glog.V(3).Infof("dynamic namespace and endpoint %v/%v", dynamicNamespace, dynamicEndpoint)
 		err = d.deleteEndpointService(dynamicNamespace, dynamicEndpoint)
 		if err != nil {
-			glog.Errorf("error when deleting endpoint/service :%v", err)
+			glog.Errorf("failed to delete endpoint/service %v/%v: %v", dynamicNamespace, dynamicEndpoint, err)
 		} else {
-			glog.V(1).Infof("endpoint: %v and service: %v deleted successfully ", dynamicNamespace, dynamicEndpoint)
+			glog.V(1).Infof("endpoint %v/%v is deleted successfully ", dynamicNamespace, dynamicEndpoint)
 		}
 	}
 	return nil
 }
 
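Note on the Delete() change above: the volume ID is now resolved through the new getVolumeID() helper (added further down in this diff) instead of blindly trimming the "vol_" prefix. A minimal standalone sketch of that lookup order; the annotation key value here is an assumption for illustration, the real constants live in glusterfs.go:

    package main

    import (
        "fmt"
        "strings"
    )

    // Assumed values for illustration only.
    const (
        heketiVolIDAnn = "gluster.kubernetes.io/heketi-volume-id"
        volPrefix      = "vol_"
    )

    // getVolumeIDSketch mirrors the new lookup order: prefer the heketi
    // volume ID stored as a PV annotation, fall back to stripping the
    // "vol_" prefix from the volume path.
    func getVolumeIDSketch(annotations map[string]string, volumeName string) (string, error) {
        volumeID := annotations[heketiVolIDAnn]
        if volumeID == "" {
            volumeID = strings.TrimPrefix(volumeName, volPrefix)
        }
        if volumeID == "" {
            return "", fmt.Errorf("volume ID is empty")
        }
        return volumeID, nil
    }

    func main() {
        id, _ := getVolumeIDSketch(nil, "vol_0042")
        fmt.Println(id) // 0042
    }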
 func (p *glusterfsVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
-	if !volume.AccessModesContainedInAll(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) {
+	if !volutil.AccessModesContainedInAll(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) {
 		return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", p.options.PVC.Spec.AccessModes, p.plugin.GetAccessModes())
 	}
 
@@ -693,21 +697,23 @@ func (p *glusterfsVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
 		return nil, fmt.Errorf("failed to reserve GID from table: %v", err)
 	}
 
-	glog.V(2).Infof("Allocated GID [%d] for PVC %s", gid, p.options.PVC.Name)
+	glog.V(2).Infof("Allocated GID %d for PVC %s", gid, p.options.PVC.Name)
 
-	glusterfs, sizeGB, err := p.CreateVolume(gid)
+	glusterfs, sizeGiB, volID, err := p.CreateVolume(gid)
 	if err != nil {
 		if releaseErr := gidTable.Release(gid); releaseErr != nil {
-			glog.Errorf("error when releasing GID in storageclass: %s", scName)
+			glog.Errorf("error when releasing GID in storageclass %s: %v", scName, releaseErr)
 		}
 
-		glog.Errorf("create volume error: %v.", err)
-		return nil, fmt.Errorf("create volume error: %v", err)
+		glog.Errorf("failed to create volume: %v", err)
+		return nil, fmt.Errorf("failed to create volume: %v", err)
 	}
+	mode := v1.PersistentVolumeFilesystem
 	pv := new(v1.PersistentVolume)
 	pv.Spec.PersistentVolumeSource.Glusterfs = glusterfs
 	pv.Spec.PersistentVolumeReclaimPolicy = p.options.PersistentVolumeReclaimPolicy
 	pv.Spec.AccessModes = p.options.PVC.Spec.AccessModes
+	pv.Spec.VolumeMode = &mode
 	if len(pv.Spec.AccessModes) == 0 {
 		pv.Spec.AccessModes = p.plugin.GetAccessModes()
 	}
@@ -716,74 +722,83 @@ func (p *glusterfsVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
 	gidStr := strconv.FormatInt(int64(gid), 10)
 
 	pv.Annotations = map[string]string{
-		volumehelper.VolumeGidAnnotationKey:        gidStr,
-		volumehelper.VolumeDynamicallyCreatedByKey: heketiAnn,
-		glusterTypeAnn:                             "file",
-		"Description":                              glusterDescAnn,
-		v1.MountOptionAnnotation:                   "auto_unmount",
+		volutil.VolumeGidAnnotationKey:        gidStr,
+		volutil.VolumeDynamicallyCreatedByKey: heketiAnn,
+		glusterTypeAnn:                        "file",
+		"Description":                         glusterDescAnn,
+		v1.MountOptionAnnotation:              "auto_unmount",
+		heketiVolIDAnn:                        volID,
 	}
 
 	pv.Spec.Capacity = v1.ResourceList{
-		v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dG", sizeGB)),
+		v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGiB)),
 	}
 	return pv, nil
 }
 
-func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsVolumeSource, size int, err error) {
+func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsVolumeSource, size int, volID string, err error) {
 	var clusterIDs []string
+	customVolumeName := ""
 	capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
-	volSizeBytes := capacity.Value()
-	// Glusterfs creates volumes in units of GBs
-	sz := int(volume.RoundUpSize(volSizeBytes, 1000*1000*1000))
-	glog.V(2).Infof("create volume of size: %d bytes and configuration %+v", volSizeBytes, p.provisionerConfig)
+
+	// GlusterFS/heketi creates volumes in units of GiB.
+	sz := int(volutil.RoundUpToGiB(capacity))
+	glog.V(2).Infof("create volume of size %dGiB", sz)
+
 	if p.url == "" {
 		glog.Errorf("REST server endpoint is empty")
-		return nil, 0, fmt.Errorf("failed to create glusterfs REST client, REST URL is empty")
+		return nil, 0, "", fmt.Errorf("failed to create glusterfs REST client, REST URL is empty")
 	}
 	cli := gcli.NewClient(p.url, p.user, p.secretValue)
 	if cli == nil {
-		glog.Errorf("failed to create glusterfs rest client")
-		return nil, 0, fmt.Errorf("failed to create glusterfs REST client, REST server authentication failed")
+		glog.Errorf("failed to create glusterfs REST client")
+		return nil, 0, "", fmt.Errorf("failed to create glusterfs REST client, REST server authentication failed")
 	}
 	if p.provisionerConfig.clusterID != "" {
 		clusterIDs = dstrings.Split(p.clusterID, ",")
-		glog.V(4).Infof("provided clusterIDs: %v", clusterIDs)
+		glog.V(4).Infof("provided clusterIDs %v", clusterIDs)
 	}
+
+	if p.provisionerConfig.volumeNamePrefix != "" {
+		customVolumeName = fmt.Sprintf("%s_%s_%s_%s", p.provisionerConfig.volumeNamePrefix, p.options.PVC.Namespace, p.options.PVC.Name, uuid.NewUUID())
+	}
+
 	gid64 := int64(gid)
-	volumeReq := &gapi.VolumeCreateRequest{Size: sz, Clusters: clusterIDs, Gid: gid64, Durability: p.volumeType, GlusterVolumeOptions: p.volumeOptions}
+	volumeReq := &gapi.VolumeCreateRequest{Size: sz, Name: customVolumeName, Clusters: clusterIDs, Gid: gid64, Durability: p.volumeType, GlusterVolumeOptions: p.volumeOptions}
 	volume, err := cli.VolumeCreate(volumeReq)
 	if err != nil {
-		glog.Errorf("error creating volume %v ", err)
-		return nil, 0, fmt.Errorf("error creating volume %v", err)
+		glog.Errorf("failed to create volume: %v", err)
+		return nil, 0, "", fmt.Errorf("failed to create volume: %v", err)
 	}
-	glog.V(1).Infof("volume with size: %d and name: %s created", volume.Size, volume.Name)
+	glog.V(1).Infof("volume with size %d and name %s created", volume.Size, volume.Name)
+	volID = volume.Id
 	dynamicHostIps, err := getClusterNodes(cli, volume.Cluster)
 	if err != nil {
-		glog.Errorf("error [%v] when getting cluster nodes for volume %s", err, volume)
-		return nil, 0, fmt.Errorf("error [%v] when getting cluster nodes for volume %s", err, volume)
+		glog.Errorf("failed to get cluster nodes for volume %s: %v", volume, err)
+		return nil, 0, "", fmt.Errorf("failed to get cluster nodes for volume %s: %v", volume, err)
 	}
+
 	// The 'endpointname' is created in form of 'glusterfs-dynamic-<claimname>'.
 	// createEndpointService() checks for this 'endpoint' existence in PVC's namespace and
-	// If not found, it create an endpoint and svc using the IPs we dynamically picked at time
+	// If not found, it create an endpoint and service using the IPs we dynamically picked at time
 	// of volume creation.
 	epServiceName := dynamicEpSvcPrefix + p.options.PVC.Name
 	epNamespace := p.options.PVC.Namespace
 	endpoint, service, err := p.createEndpointService(epNamespace, epServiceName, dynamicHostIps, p.options.PVC.Name)
 	if err != nil {
-		glog.Errorf("failed to create endpoint/service: %v", err)
+		glog.Errorf("failed to create endpoint/service %v/%v: %v", epNamespace, epServiceName, err)
 		deleteErr := cli.VolumeDelete(volume.Id)
 		if deleteErr != nil {
-			glog.Errorf("error when deleting the volume :%v , manual deletion required", deleteErr)
+			glog.Errorf("failed to delete volume: %v, manual deletion of the volume required", deleteErr)
 		}
-		return nil, 0, fmt.Errorf("failed to create endpoint/service %v", err)
+		return nil, 0, "", fmt.Errorf("failed to create endpoint/service %v/%v: %v", epNamespace, epServiceName, err)
 	}
-	glog.V(3).Infof("dynamic ep %v and svc : %v ", endpoint, service)
+	glog.V(3).Infof("dynamic endpoint %v and service %v ", endpoint, service)
 	return &v1.GlusterfsVolumeSource{
 		EndpointsName: endpoint.Name,
 		Path:          volume.Name,
 		ReadOnly:      false,
-	}, sz, nil
+	}, sz, volID, nil
 }
 
 func (p *glusterfsVolumeProvisioner) createEndpointService(namespace string, epServiceName string, hostips []string, pvcname string) (endpoint *v1.Endpoints, service *v1.Service, err error) {
@@ -811,12 +826,12 @@ func (p *glusterfsVolumeProvisioner) createEndpointService(namespace string, epS
 	}
 	_, err = kubeClient.CoreV1().Endpoints(namespace).Create(endpoint)
 	if err != nil && errors.IsAlreadyExists(err) {
-		glog.V(1).Infof("endpoint [%s] already exist in namespace [%s]", endpoint, namespace)
+		glog.V(1).Infof("endpoint %s already exist in namespace %s", endpoint, namespace)
 		err = nil
 	}
 	if err != nil {
 		glog.Errorf("failed to create endpoint: %v", err)
-		return nil, nil, fmt.Errorf("error creating endpoint: %v", err)
+		return nil, nil, fmt.Errorf("failed to create endpoint: %v", err)
 	}
 	service = &v1.Service{
 		ObjectMeta: metav1.ObjectMeta{
@@ -831,7 +846,7 @@ func (p *glusterfsVolumeProvisioner) createEndpointService(namespace string, epS
 			{Protocol: "TCP", Port: 1}}}}
 	_, err = kubeClient.CoreV1().Services(namespace).Create(service)
 	if err != nil && errors.IsAlreadyExists(err) {
-		glog.V(1).Infof("service [%s] already exist in namespace [%s]", service, namespace)
+		glog.V(1).Infof("service %s already exist in namespace %s", service, namespace)
 		err = nil
 	}
 	if err != nil {
@@ -848,10 +863,10 @@ func (d *glusterfsVolumeDeleter) deleteEndpointService(namespace string, epServi
 	}
 	err = kubeClient.CoreV1().Services(namespace).Delete(epServiceName, nil)
 	if err != nil {
-		glog.Errorf("error deleting service %s/%s: %v", namespace, epServiceName, err)
-		return fmt.Errorf("error deleting service %s/%s: %v", namespace, epServiceName, err)
+		glog.Errorf("failed to delete service %s/%s: %v", namespace, epServiceName, err)
+		return fmt.Errorf("failed to delete service %s/%s: %v", namespace, epServiceName, err)
 	}
-	glog.V(1).Infof("service/endpoint %s/%s deleted successfully", namespace, epServiceName)
+	glog.V(1).Infof("service/endpoint: %s/%s deleted successfully", namespace, epServiceName)
 	return nil
 }
 
@@ -859,7 +874,7 @@ func (d *glusterfsVolumeDeleter) deleteEndpointService(namespace string, epServi
 func parseSecret(namespace, secretName string, kubeClient clientset.Interface) (string, error) {
 	secretMap, err := volutil.GetSecretForPV(namespace, secretName, glusterfsPluginName, kubeClient)
 	if err != nil {
-		glog.Errorf("failed to get secret %s/%s: %v", namespace, secretName, err)
+		glog.Errorf("failed to get secret: %s/%s: %v", namespace, secretName, err)
 		return "", fmt.Errorf("failed to get secret %s/%s: %v", namespace, secretName, err)
 	}
 	if len(secretMap) == 0 {
@@ -872,12 +887,12 @@ func parseSecret(namespace, secretName string, kubeClient clientset.Interface) (
 		}
 		secret = v
 	}
 
 	// If not found, the last secret in the map wins as done before
 	return secret, nil
 }
+
 // getClusterNodes() returns the cluster nodes of a given cluster
-
 func getClusterNodes(cli *gcli.Client, cluster string) (dynamicHostIps []string, err error) {
 	clusterinfo, err := cli.ClusterInfo(cluster)
 	if err != nil {
@@ -889,15 +904,15 @@ func getClusterNodes(cli *gcli.Client, cluster string) (dynamicHostIps []string,
 	// of the cluster on which provisioned volume belongs to, as there can be multiple
 	// clusters.
 	for _, node := range clusterinfo.Nodes {
-		nodei, err := cli.NodeInfo(string(node))
+		nodeInfo, err := cli.NodeInfo(string(node))
 		if err != nil {
-			glog.Errorf(" failed to get hostip: %v", err)
-			return nil, fmt.Errorf("failed to get hostip: %v", err)
+			glog.Errorf("failed to get host ipaddress: %v", err)
+			return nil, fmt.Errorf("failed to get host ipaddress: %v", err)
 		}
-		ipaddr := dstrings.Join(nodei.NodeAddRequest.Hostnames.Storage, "")
+		ipaddr := dstrings.Join(nodeInfo.NodeAddRequest.Hostnames.Storage, "")
 		dynamicHostIps = append(dynamicHostIps, ipaddr)
 	}
-	glog.V(3).Infof("hostlist :%v", dynamicHostIps)
+	glog.V(3).Infof("host list :%v", dynamicHostIps)
 	if len(dynamicHostIps) == 0 {
 		glog.Errorf("no hosts found: %v", err)
 		return nil, fmt.Errorf("no hosts found: %v", err)
@@ -905,7 +920,7 @@ func getClusterNodes(cli *gcli.Client, cluster string) (dynamicHostIps []string,
 	return dynamicHostIps, nil
 }
 
-// parseClassParameters parses StorageClass.Parameters
+// parseClassParameters parses StorageClass parameters.
 func parseClassParameters(params map[string]string, kubeClient clientset.Interface) (*provisionerConfig, error) {
 	var cfg provisionerConfig
 	var err error
@@ -915,6 +930,7 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa
 	authEnabled := true
 	parseVolumeType := ""
 	parseVolumeOptions := ""
+	parseVolumeNamePrefix := ""
 
 	for k, v := range params {
 		switch dstrings.ToLower(k) {
@@ -937,7 +953,7 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa
 		case "gidmin":
 			parseGidMin, err := convertGid(v)
 			if err != nil {
-				return nil, fmt.Errorf("invalid value %q for volume plugin %s", k, glusterfsPluginName)
+				return nil, fmt.Errorf("invalid gidMin value %q for volume plugin %s", k, glusterfsPluginName)
 			}
 			if parseGidMin < absoluteGidMin {
 				return nil, fmt.Errorf("gidMin must be >= %v", absoluteGidMin)
@@ -949,7 +965,7 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa
 		case "gidmax":
 			parseGidMax, err := convertGid(v)
 			if err != nil {
-				return nil, fmt.Errorf("invalid value %q for volume plugin %s", k, glusterfsPluginName)
+				return nil, fmt.Errorf("invalid gidMax value %q for volume plugin %s", k, glusterfsPluginName)
 			}
 			if parseGidMax < absoluteGidMin {
 				return nil, fmt.Errorf("gidMax must be >= %v", absoluteGidMin)
@@ -965,7 +981,10 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa
 			if len(v) != 0 {
 				parseVolumeOptions = v
 			}
-
+		case "volumenameprefix":
+			if len(v) != 0 {
+				parseVolumeNamePrefix = v
+			}
 		default:
 			return nil, fmt.Errorf("invalid option %q for volume plugin %s", k, glusterfsPluginName)
 		}
@@ -985,7 +1004,7 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa
 		if len(parseVolumeTypeInfo) >= 2 {
 			newReplicaCount, err := convertVolumeParam(parseVolumeTypeInfo[1])
 			if err != nil {
-				return nil, fmt.Errorf("error [%v] when parsing value %q of option '%s' for volume plugin %s", err, parseVolumeTypeInfo[1], "volumetype", glusterfsPluginName)
+				return nil, fmt.Errorf("error parsing volumeType %q: %s", parseVolumeTypeInfo[1], err)
 			}
 			cfg.volumeType = gapi.VolumeDurabilityInfo{Type: gapi.DurabilityReplicate, Replicate: gapi.ReplicaDurability{Replica: newReplicaCount}}
 		} else {
@@ -995,11 +1014,11 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa
 		if len(parseVolumeTypeInfo) >= 3 {
 			newDisperseData, err := convertVolumeParam(parseVolumeTypeInfo[1])
 			if err != nil {
-				return nil, fmt.Errorf("error [%v] when parsing value %q of option '%s' for volume plugin %s", parseVolumeTypeInfo[1], err, "volumetype", glusterfsPluginName)
+				return nil, fmt.Errorf("error parsing volumeType %q: %s", parseVolumeTypeInfo[1], err)
 			}
 			newDisperseRedundancy, err := convertVolumeParam(parseVolumeTypeInfo[2])
 			if err != nil {
-				return nil, fmt.Errorf("error [%v] when parsing value %q of option '%s' for volume plugin %s", err, parseVolumeTypeInfo[2], "volumetype", glusterfsPluginName)
+				return nil, fmt.Errorf("error parsing volumeType %q: %s", parseVolumeTypeInfo[2], err)
 			}
 			cfg.volumeType = gapi.VolumeDurabilityInfo{Type: gapi.DurabilityEC, Disperse: gapi.DisperseDurability{Data: newDisperseData, Redundancy: newDisperseRedundancy}}
 		} else {
@@ -1020,6 +1039,7 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa
 	}
 
 	if len(cfg.secretName) != 0 || len(cfg.secretNamespace) != 0 {
+
 		// secretName + Namespace has precedence over userKey
 		if len(cfg.secretName) != 0 && len(cfg.secretNamespace) != 0 {
 			cfg.secretValue, err = parseSecret(cfg.secretNamespace, cfg.secretName, kubeClient)
@@ -1040,23 +1060,52 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa
 	if len(parseVolumeOptions) != 0 {
 		volOptions := dstrings.Split(parseVolumeOptions, ",")
 		if len(volOptions) == 0 {
-			return nil, fmt.Errorf("StorageClass for provisioner %q must have valid ( for eg, 'client.ssl on') volume option", glusterfsPluginName)
+			return nil, fmt.Errorf("StorageClass for provisioner %q must have valid (for e.g., 'client.ssl on') volume option", glusterfsPluginName)
 		}
 		cfg.volumeOptions = volOptions
+
 	}
 
+	if len(parseVolumeNamePrefix) != 0 {
+		if dstrings.Contains(parseVolumeNamePrefix, "_") {
+			return nil, fmt.Errorf("Storageclass parameter 'volumenameprefix' should not contain '_' in its value")
+		}
+		cfg.volumeNamePrefix = parseVolumeNamePrefix
+	}
 	return &cfg, nil
 }
 
+// getVolumeID returns volumeID from the PV or volumename.
+func getVolumeID(pv *v1.PersistentVolume, volumeName string) (string, error) {
+	volumeID := ""
+
+	// Get volID from pvspec if available, else fill it from volumename.
+	if pv != nil {
+		if pv.Annotations[heketiVolIDAnn] != "" {
+			volumeID = pv.Annotations[heketiVolIDAnn]
+		} else {
+			volumeID = dstrings.TrimPrefix(volumeName, volPrefix)
+		}
+	} else {
+		return volumeID, fmt.Errorf("provided PV spec is nil")
+	}
+	if volumeID == "" {
+		return volumeID, fmt.Errorf("volume ID is empty")
+	}
+	return volumeID, nil
+}
+
 func (plugin *glusterfsPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize resource.Quantity, oldSize resource.Quantity) (resource.Quantity, error) {
 	pvSpec := spec.PersistentVolume.Spec
-	glog.V(2).Infof("Request to expand volume: %s ", pvSpec.Glusterfs.Path)
 	volumeName := pvSpec.Glusterfs.Path
+	glog.V(2).Infof("Received request to expand volume %s", volumeName)
+	volumeID, err := getVolumeID(spec.PersistentVolume, volumeName)
 
-	// Fetch the volume for expansion.
-	volumeID := dstrings.TrimPrefix(volumeName, volPrefix)
+	if err != nil {
+		return oldSize, fmt.Errorf("failed to get volumeID for volume %s: %v", volumeName, err)
+	}
 
-	//Get details of SC.
+	//Get details of StorageClass.
 	class, err := volutil.GetClassForVolume(plugin.host.GetKubeClient(), spec.PersistentVolume)
 	if err != nil {
 		return oldSize, err
@@ -1066,30 +1115,44 @@ func (plugin *glusterfsPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize res
 		return oldSize, err
 	}
 
-	glog.V(4).Infof("Expanding volume %q with configuration %+v", volumeID, cfg)
+	glog.V(4).Infof("expanding volume: %q with configuration: %+v", volumeID, cfg)
 
 	//Create REST server connection
 	cli := gcli.NewClient(cfg.url, cfg.user, cfg.secretValue)
 	if cli == nil {
-		glog.Errorf("failed to create glusterfs rest client")
-		return oldSize, fmt.Errorf("failed to create glusterfs rest client, REST server authentication failed")
+		glog.Errorf("failed to create glusterfs REST client")
+		return oldSize, fmt.Errorf("failed to create glusterfs REST client, REST server authentication failed")
	}
 
 	// Find out delta size
 	expansionSize := (newSize.Value() - oldSize.Value())
-	expansionSizeGB := int(volume.RoundUpSize(expansionSize, 1000*1000*1000))
+	expansionSizeGiB := int(volutil.RoundUpSize(expansionSize, volutil.GIB))
+
+	// Find out requested Size
+	requestGiB := volutil.RoundUpToGiB(newSize)
+
+	//Check the existing volume size
+	currentVolumeInfo, err := cli.VolumeInfo(volumeID)
+	if err != nil {
+		glog.Errorf("error when fetching details of volume %s: %v", volumeName, err)
+		return oldSize, err
+	}
+
+	if int64(currentVolumeInfo.Size) >= requestGiB {
+		return newSize, nil
+	}
 
 	// Make volume expansion request
-	volumeExpandReq := &gapi.VolumeExpandRequest{Size: expansionSizeGB}
+	volumeExpandReq := &gapi.VolumeExpandRequest{Size: expansionSizeGiB}
 
 	// Expand the volume
 	volumeInfoRes, err := cli.VolumeExpand(volumeID, volumeExpandReq)
 	if err != nil {
-		glog.Errorf("error when expanding the volume :%v", err)
+		glog.Errorf("failed to expand volume %s: %v", volumeName, err)
 		return oldSize, err
 	}
 
 	glog.V(2).Infof("volume %s expanded to new size %d successfully", volumeName, volumeInfoRes.Size)
-	newVolumeSize := resource.MustParse(fmt.Sprintf("%dG", volumeInfoRes.Size))
+	newVolumeSize := resource.MustParse(fmt.Sprintf("%dGi", volumeInfoRes.Size))
 	return newVolumeSize, nil
 }
 
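Note on the sizing changes above: provisioning and expansion move from decimal gigabytes (RoundUpSize with 1000*1000*1000, "%dG") to binary GiB (RoundUpToGiB, "%dGi"). A minimal sketch of the round-up arithmetic the new code relies on; the real helper takes a resource.Quantity rather than a raw byte count:

    package main

    import "fmt"

    const GiB int64 = 1024 * 1024 * 1024

    // roundUpToGiB returns the smallest whole number of GiB that can hold
    // sizeBytes, which is what makes the "%dGi" capacity string safe.
    func roundUpToGiB(sizeBytes int64) int64 {
        return (sizeBytes + GiB - 1) / GiB
    }

    func main() {
        // A 5G (decimal) PVC request fits in a 5GiB heketi volume:
        fmt.Println(roundUpToGiB(5 * 1000 * 1000 * 1000)) // 5
        // One byte over a GiB boundary rounds up:
        fmt.Println(roundUpToGiB(GiB + 1)) // 2
    }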
8
vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs_util.go
generated
vendored
@@ -27,24 +27,24 @@ import (
 // readGlusterLog will take the last 2 lines of the log file
 // on failure of gluster SetUp and return those so kubelet can
 // properly expose them
-// return nil on any failure
+// return error on any failure
 func readGlusterLog(path string, podName string) error {
 
 	var line1 string
 	var line2 string
 	linecount := 0
 
-	glog.Infof("glusterfs: failure, now attempting to read the gluster log for pod %s", podName)
+	glog.Infof("failure, now attempting to read the gluster log for pod %s", podName)
 
 	// Check and make sure path exists
 	if len(path) == 0 {
-		return fmt.Errorf("glusterfs: log file does not exist for pod: %s", podName)
+		return fmt.Errorf("log file does not exist for pod %s", podName)
 	}
 
 	// open the log file
 	file, err := os.Open(path)
 	if err != nil {
-		return fmt.Errorf("glusterfs: could not open log file for pod: %s", podName)
+		return fmt.Errorf("could not open log file for pod %s", podName)
 	}
 	defer file.Close()
 
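For context on readGlusterLog above: its job is to surface the last two lines of the gluster mount log when SetUp fails. A self-contained sketch of that tail-two-lines scan; the helper name and log path here are illustrative, not the vendored code:

    package main

    import (
        "bufio"
        "fmt"
        "os"
    )

    func lastTwoLines(path string) (string, string, error) {
        file, err := os.Open(path)
        if err != nil {
            return "", "", fmt.Errorf("could not open log file %s: %v", path, err)
        }
        defer file.Close()

        var line1, line2 string
        scanner := bufio.NewScanner(file)
        for scanner.Scan() {
            line1, line2 = line2, scanner.Text() // slide a two-line window
        }
        return line1, line2, scanner.Err()
    }

    func main() {
        l1, l2, err := lastTwoLines("/var/log/glusterfs/glusterfs.log")
        if err != nil {
            fmt.Println(err)
            return
        }
        fmt.Printf("last two log lines:\n%s\n%s\n", l1, l2)
    }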
6
vendor/k8s.io/kubernetes/pkg/volume/host_path/BUILD
generated
vendored
@@ -16,7 +16,8 @@ go_library(
     deps = [
         "//pkg/util/mount:go_default_library",
         "//pkg/volume:go_default_library",
-        "//pkg/volume/util/volumehelper:go_default_library",
+        "//pkg/volume/util:go_default_library",
+        "//pkg/volume/util/recyclerclient:go_default_library",
         "//pkg/volume/validation:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@@ -28,8 +29,7 @@ go_library(
 go_test(
     name = "go_default_test",
     srcs = ["host_path_test.go"],
-    importpath = "k8s.io/kubernetes/pkg/volume/host_path",
-    library = ":go_default_library",
+    embed = [":go_default_library"],
     deps = [
         "//pkg/util/file:go_default_library",
         "//pkg/util/mount:go_default_library",
11
vendor/k8s.io/kubernetes/pkg/volume/host_path/host_path.go
generated
vendored
@@ -27,7 +27,8 @@ import (
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/kubernetes/pkg/util/mount"
 	"k8s.io/kubernetes/pkg/volume"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
+	"k8s.io/kubernetes/pkg/volume/util"
+	"k8s.io/kubernetes/pkg/volume/util/recyclerclient"
 	"k8s.io/kubernetes/pkg/volume/validation"
 )
 
@@ -129,13 +130,13 @@ func (plugin *hostPathPlugin) NewUnmounter(volName string, podUID types.UID) (vo
 // Recycle recycles/scrubs clean a HostPath volume.
 // Recycle blocks until the pod has completed or any error occurs.
 // HostPath recycling only works in single node clusters and is meant for testing purposes only.
-func (plugin *hostPathPlugin) Recycle(pvName string, spec *volume.Spec, eventRecorder volume.RecycleEventRecorder) error {
+func (plugin *hostPathPlugin) Recycle(pvName string, spec *volume.Spec, eventRecorder recyclerclient.RecycleEventRecorder) error {
 	if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.HostPath == nil {
 		return fmt.Errorf("spec.PersistentVolumeSource.HostPath is nil")
 	}
 
 	pod := plugin.config.RecyclerPodTemplate
-	timeout := volume.CalculateTimeoutForVolume(plugin.config.RecyclerMinimumTimeout, plugin.config.RecyclerTimeoutIncrement, spec.PersistentVolume)
+	timeout := util.CalculateTimeoutForVolume(plugin.config.RecyclerMinimumTimeout, plugin.config.RecyclerTimeoutIncrement, spec.PersistentVolume)
 	// overrides
 	pod.Spec.ActiveDeadlineSeconds = &timeout
 	pod.Spec.Volumes[0].VolumeSource = v1.VolumeSource{
@@ -143,7 +144,7 @@ func (plugin *hostPathPlugin) Recycle(pvName string, spec *volume.Spec, eventRec
 			Path: spec.PersistentVolume.Spec.HostPath.Path,
 		},
 	}
-	return volume.RecycleVolumeByWatchingPodUntilCompletion(pvName, pod, plugin.host.GetKubeClient(), eventRecorder)
+	return recyclerclient.RecycleVolumeByWatchingPodUntilCompletion(pvName, pod, plugin.host.GetKubeClient(), eventRecorder)
 }
 
 func (plugin *hostPathPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
@@ -272,7 +273,7 @@ func (r *hostPathProvisioner) Provision() (*v1.PersistentVolume, error) {
 		ObjectMeta: metav1.ObjectMeta{
 			Name: r.options.PVName,
 			Annotations: map[string]string{
-				volumehelper.VolumeDynamicallyCreatedByKey: "hostpath-dynamic-provisioner",
+				util.VolumeDynamicallyCreatedByKey: "hostpath-dynamic-provisioner",
 			},
 		},
 		Spec: v1.PersistentVolumeSpec{
 
6
vendor/k8s.io/kubernetes/pkg/volume/iscsi/BUILD
generated
vendored
@@ -17,14 +17,17 @@ go_library(
     ],
     importpath = "k8s.io/kubernetes/pkg/volume/iscsi",
     deps = [
+        "//pkg/features:go_default_library",
         "//pkg/util/mount:go_default_library",
         "//pkg/util/strings:go_default_library",
         "//pkg/volume:go_default_library",
         "//pkg/volume/util:go_default_library",
+        "//pkg/volume/util/volumepathhandler:go_default_library",
         "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
+        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
     ],
 )
 
@@ -34,8 +37,7 @@ go_test(
         "iscsi_test.go",
         "iscsi_util_test.go",
     ],
-    importpath = "k8s.io/kubernetes/pkg/volume/iscsi",
-    library = ":go_default_library",
+    embed = [":go_default_library"],
     deps = [
         "//pkg/util/mount:go_default_library",
         "//pkg/volume:go_default_library",
119
vendor/k8s.io/kubernetes/pkg/volume/iscsi/attacher.go
generated
vendored
@@ -19,13 +19,13 @@ package iscsi
 import (
 	"fmt"
 	"os"
-	"strconv"
 	"time"
 
 	"github.com/golang/glog"
 	"k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/util/mount"
 	"k8s.io/kubernetes/pkg/volume"
 	volumeutil "k8s.io/kubernetes/pkg/volume/util"
@@ -66,7 +66,7 @@ func (attacher *iscsiAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName
 }
 
 func (attacher *iscsiAttacher) WaitForAttach(spec *volume.Spec, devicePath string, pod *v1.Pod, timeout time.Duration) (string, error) {
-	mounter, err := attacher.volumeSpecToMounter(spec, attacher.host, pod)
+	mounter, err := volumeSpecToMounter(spec, attacher.host, pod)
 	if err != nil {
 		glog.Warningf("failed to get iscsi mounter: %v", err)
 		return "", err
@@ -76,7 +76,7 @@ func (attacher *iscsiAttacher) WaitForAttach(spec *volume.Spec, devicePath strin
 
 func (attacher *iscsiAttacher) GetDeviceMountPath(
 	spec *volume.Spec) (string, error) {
-	mounter, err := attacher.volumeSpecToMounter(spec, attacher.host, nil)
+	mounter, err := volumeSpecToMounter(spec, attacher.host, nil)
 	if err != nil {
 		glog.Warningf("failed to get iscsi mounter: %v", err)
 		return "", err
@@ -112,7 +112,7 @@ func (attacher *iscsiAttacher) MountDevice(spec *volume.Spec, devicePath string,
 	}
 	if notMnt {
 		diskMounter := &mount.SafeFormatAndMount{Interface: mounter, Exec: attacher.host.GetExec(iscsiPluginName)}
-		mountOptions := volume.MountOptionFromSpec(spec, options...)
+		mountOptions := volumeutil.MountOptionFromSpec(spec, options...)
 		err = diskMounter.FormatAndMount(devicePath, deviceMountPath, fsType, mountOptions)
 		if err != nil {
 			os.Remove(deviceMountPath)
@@ -143,7 +143,7 @@ func (detacher *iscsiDetacher) Detach(volumeName string, nodeName types.NodeName
 }
 
 func (detacher *iscsiDetacher) UnmountDevice(deviceMountPath string) error {
-	unMounter := detacher.volumeSpecToUnmounter(detacher.mounter)
+	unMounter := volumeSpecToUnmounter(detacher.mounter, detacher.host)
 	err := detacher.manager.DetachDisk(*unMounter, deviceMountPath)
 	if err != nil {
 		return fmt.Errorf("iscsi: failed to detach disk: %s\nError: %v", deviceMountPath, err)
@@ -157,94 +157,49 @@ func (detacher *iscsiDetacher) UnmountDevice(deviceMountPath string) error {
 	return nil
 }
 
-func (attacher *iscsiAttacher) volumeSpecToMounter(spec *volume.Spec, host volume.VolumeHost, pod *v1.Pod) (*iscsiDiskMounter, error) {
+func volumeSpecToMounter(spec *volume.Spec, host volume.VolumeHost, pod *v1.Pod) (*iscsiDiskMounter, error) {
 	var secret map[string]string
-	var bkportal []string
 	readOnly, fsType, err := getISCSIVolumeInfo(spec)
 	if err != nil {
 		return nil, err
 	}
+	var podUID types.UID
+	if pod != nil {
-		chapDiscovery, err := getISCSIDiscoveryCHAPInfo(spec)
+		secret, err = createSecretMap(spec, &iscsiPlugin{host: host}, pod.Namespace)
 		if err != nil {
 			return nil, err
 		}
-		chapSession, err := getISCSISessionCHAPInfo(spec)
+		podUID = pod.UID
+	}
+	iscsiDisk, err := createISCSIDisk(spec,
+		podUID,
+		&iscsiPlugin{host: host},
+		&ISCSIUtil{},
+		secret,
+	)
 	if err != nil {
 		return nil, err
 	}
+	exec := host.GetExec(iscsiPluginName)
+	// TODO: remove feature gate check after no longer needed
+	if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
+		volumeMode, err := volumeutil.GetVolumeMode(spec)
+		if err != nil {
+			return nil, err
+		}
-	if chapDiscovery || chapSession {
-		secretName, secretNamespace, err := getISCSISecretNameAndNamespace(spec, pod.Namespace)
-		if err != nil {
-			return nil, err
-		}
-		if len(secretNamespace) == 0 || len(secretName) == 0 {
-			return nil, fmt.Errorf("CHAP enabled but secret name or namespace is empty")
-		}
-		// if secret is provided, retrieve it
-		kubeClient := host.GetKubeClient()
-		if kubeClient == nil {
-			return nil, fmt.Errorf("Cannot get kube client")
-		}
-		secretObj, err := kubeClient.Core().Secrets(secretNamespace).Get(secretName, metav1.GetOptions{})
-		if err != nil {
-			err = fmt.Errorf("Couldn't get secret %v/%v error: %v", secretNamespace, secretName, err)
-			return nil, err
-		}
-		secret = make(map[string]string)
-		for name, data := range secretObj.Data {
-			glog.V(6).Infof("retrieving CHAP secret name: %s", name)
-			secret[name] = string(data)
-		}
-	}
 
+		glog.V(5).Infof("iscsi: VolumeSpecToMounter volumeMode %s", volumeMode)
+		return &iscsiDiskMounter{
+			iscsiDisk:  iscsiDisk,
+			fsType:     fsType,
+			volumeMode: volumeMode,
+			readOnly:   readOnly,
+			mounter:    &mount.SafeFormatAndMount{Interface: host.GetMounter(iscsiPluginName), Exec: exec},
+			exec:       exec,
+			deviceUtil: volumeutil.NewDeviceHandler(volumeutil.NewIOHandler()),
+		}, nil
+	}
-	tp, portals, iqn, lunStr, err := getISCSITargetInfo(spec)
-	if err != nil {
-		return nil, err
-	}
-
-	lun := strconv.Itoa(int(lunStr))
-	portal := portalMounter(tp)
-	bkportal = append(bkportal, portal)
-	for _, p := range portals {
-		bkportal = append(bkportal, portalMounter(string(p)))
-	}
-
-	iface, initiatorNamePtr, err := getISCSIInitiatorInfo(spec)
-	if err != nil {
-		return nil, err
-	}
-
-	var initiatorName string
-	if initiatorNamePtr != nil {
-		initiatorName = *initiatorNamePtr
-	}
-	chapDiscovery, err := getISCSIDiscoveryCHAPInfo(spec)
-	if err != nil {
-		return nil, err
-	}
-	chapSession, err := getISCSISessionCHAPInfo(spec)
-	if err != nil {
-		return nil, err
-	}
-	exec := attacher.host.GetExec(iscsiPluginName)
-
 	return &iscsiDiskMounter{
-		iscsiDisk: &iscsiDisk{
-			plugin: &iscsiPlugin{
-				host: host,
-			},
-			VolName:        spec.Name(),
-			Portals:        bkportal,
-			Iqn:            iqn,
-			lun:            lun,
-			Iface:          iface,
-			chap_discovery: chapDiscovery,
-			chap_session:   chapSession,
-			secret:         secret,
-			InitiatorName:  initiatorName,
-			manager:        &ISCSIUtil{}},
+		iscsiDisk:  iscsiDisk,
 		fsType:     fsType,
 		readOnly:   readOnly,
 		mounter:    &mount.SafeFormatAndMount{Interface: host.GetMounter(iscsiPluginName), Exec: exec},
@@ -253,8 +208,8 @@ func (attacher *iscsiAttacher) volumeSpecToMounter(spec *volume.Spec, host volum
 	}, nil
 }
 
-func (detacher *iscsiDetacher) volumeSpecToUnmounter(mounter mount.Interface) *iscsiDiskUnmounter {
-	exec := detacher.host.GetExec(iscsiPluginName)
+func volumeSpecToUnmounter(mounter mount.Interface, host volume.VolumeHost) *iscsiDiskUnmounter {
+	exec := host.GetExec(iscsiPluginName)
 	return &iscsiDiskUnmounter{
 		iscsiDisk: &iscsiDisk{
 			plugin: &iscsiPlugin{},
 
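The attacher refactor above turns volumeSpecToMounter/volumeSpecToUnmounter into package-level functions (so the block-volume code can reuse them) and keeps the volume-mode path behind the BlockVolume feature gate. A toy sketch of that gate-check pattern; the featureGate type below is a stand-in for illustration, not the real k8s.io/apiserver gate:

    package main

    import "fmt"

    type feature string

    const blockVolume feature = "BlockVolume"

    type featureGate map[feature]bool

    func (g featureGate) Enabled(f feature) bool { return g[f] }

    // volumeModeFor mirrors the shape of the check in volumeSpecToMounter:
    // the spec's volume mode is only honored while the gate is enabled,
    // otherwise the filesystem default applies.
    func volumeModeFor(gate featureGate, specMode string) string {
        if gate.Enabled(blockVolume) {
            return specMode
        }
        return "Filesystem"
    }

    func main() {
        fmt.Println(volumeModeFor(featureGate{blockVolume: true}, "Block")) // Block
        fmt.Println(volumeModeFor(featureGate{}, "Block"))                  // Filesystem
    }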
9
vendor/k8s.io/kubernetes/pkg/volume/iscsi/disk_manager.go
generated
vendored
@@ -22,20 +22,25 @@ import (
 	"github.com/golang/glog"
 	"k8s.io/kubernetes/pkg/util/mount"
 	"k8s.io/kubernetes/pkg/volume"
+	"k8s.io/kubernetes/pkg/volume/util"
 )
 
 // Abstract interface to disk operations.
 type diskManager interface {
 	MakeGlobalPDName(disk iscsiDisk) string
+	MakeGlobalVDPDName(disk iscsiDisk) string
 	// Attaches the disk to the kubelet's host machine.
 	AttachDisk(b iscsiDiskMounter) (string, error)
 	// Detaches the disk from the kubelet's host machine.
 	DetachDisk(disk iscsiDiskUnmounter, mntPath string) error
+	// Detaches the block disk from the kubelet's host machine.
+	DetachBlockISCSIDisk(disk iscsiDiskUnmapper, mntPath string) error
 }
 
 // utility to mount a disk based filesystem
+// globalPDPath: global mount path like, /var/lib/kubelet/plugins/kubernetes.io/iscsi/{ifaceName}/{portal-some_iqn-lun-lun_id}
+// volPath: pod volume dir path like, /var/lib/kubelet/pods/{podUID}/volumes/kubernetes.io~iscsi/{volumeName}
 func diskSetUp(manager diskManager, b iscsiDiskMounter, volPath string, mounter mount.Interface, fsGroup *int64) error {
 	// TODO: handle failed mounts here.
 	notMnt, err := mounter.IsLikelyNotMountPoint(volPath)
 	if err != nil && !os.IsNotExist(err) {
 		glog.Errorf("cannot validate mountpoint: %s", volPath)
@@ -59,7 +64,7 @@ func diskSetUp(manager diskManager, b iscsiDiskMounter, volPath string, mounter
 		b.iscsiDisk.Iface = b.iscsiDisk.Portals[0] + ":" + b.iscsiDisk.VolName
 	}
 	globalPDPath := manager.MakeGlobalPDName(*b.iscsiDisk)
-	mountOptions := volume.JoinMountOptions(b.mountOptions, options)
+	mountOptions := util.JoinMountOptions(b.mountOptions, options)
 	err = mounter.Mount(globalPDPath, volPath, "", mountOptions)
 	if err != nil {
 		glog.Errorf("Failed to bind mount: source:%s, target:%s, err:%v", globalPDPath, volPath, err)
 
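diskSetUp above merges the PV's own mountOptions with the plugin's bind-mount options via util.JoinMountOptions. A sketch of what such a join plausibly does, namely a union with de-duplication; the vendored helper may differ in ordering details:

    package main

    import (
        "fmt"
        "sort"
    )

    func joinMountOptions(userOptions, systemOptions []string) []string {
        seen := map[string]struct{}{}
        for _, o := range append(systemOptions, userOptions...) {
            seen[o] = struct{}{}
        }
        out := make([]string, 0, len(seen))
        for o := range seen {
            out = append(out, o)
        }
        sort.Strings(out) // deterministic order for the mount invocation
        return out
    }

    func main() {
        fmt.Println(joinMountOptions([]string{"ro"}, []string{"bind", "ro"}))
        // Output: [bind ro]
    }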
414
vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi.go
generated
vendored
@ -18,6 +18,8 @@ package iscsi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
@ -29,6 +31,7 @@ import (
|
||||
utilstrings "k8s.io/kubernetes/pkg/util/strings"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
ioutil "k8s.io/kubernetes/pkg/volume/util"
|
||||
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
|
||||
)
|
||||
|
||||
// This is the primary entrypoint for volume plugins.
|
||||
@ -42,6 +45,7 @@ type iscsiPlugin struct {
|
||||
|
||||
var _ volume.VolumePlugin = &iscsiPlugin{}
|
||||
var _ volume.PersistentVolumePlugin = &iscsiPlugin{}
|
||||
var _ volume.BlockVolumePlugin = &iscsiPlugin{}
|
||||
|
||||
const (
|
||||
iscsiPluginName = "kubernetes.io/iscsi"
|
||||
@ -66,11 +70,7 @@ func (plugin *iscsiPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
|
||||
}
|
||||
|
||||
func (plugin *iscsiPlugin) CanSupport(spec *volume.Spec) bool {
|
||||
if (spec.Volume != nil && spec.Volume.ISCSI == nil) || (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.ISCSI == nil) {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
return (spec.Volume != nil && spec.Volume.ISCSI != nil) || (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.ISCSI != nil)
|
||||
}
|
||||
|
||||
func (plugin *iscsiPlugin) RequiresRemount() bool {
|
||||
@ -93,109 +93,71 @@ func (plugin *iscsiPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
|
||||
}
|
||||
|
||||
func (plugin *iscsiPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
|
||||
// Inject real implementations here, test through the internal function.
|
||||
var secret map[string]string
|
||||
if pod == nil {
|
||||
return nil, fmt.Errorf("nil pod")
|
||||
}
|
||||
chapDiscover, err := getISCSIDiscoveryCHAPInfo(spec)
|
||||
secret, err := createSecretMap(spec, plugin, pod.Namespace)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
chapSession, err := getISCSISessionCHAPInfo(spec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if chapDiscover || chapSession {
|
||||
secretName, secretNamespace, err := getISCSISecretNameAndNamespace(spec, pod.Namespace)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(secretName) > 0 && len(secretNamespace) > 0 {
|
||||
// if secret is provideded, retrieve it
|
||||
kubeClient := plugin.host.GetKubeClient()
|
||||
if kubeClient == nil {
|
||||
return nil, fmt.Errorf("Cannot get kube client")
|
||||
}
|
||||
secretObj, err := kubeClient.Core().Secrets(secretNamespace).Get(secretName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
err = fmt.Errorf("Couldn't get secret %v/%v error: %v", secretNamespace, secretName, err)
|
||||
return nil, err
|
||||
}
|
||||
secret = make(map[string]string)
|
||||
for name, data := range secretObj.Data {
|
||||
glog.V(4).Infof("retrieving CHAP secret name: %s", name)
|
||||
secret[name] = string(data)
|
||||
}
|
||||
}
|
||||
}
|
||||
return plugin.newMounterInternal(spec, pod.UID, &ISCSIUtil{}, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName()), secret)
|
||||
}
|
||||
|
||||
func (plugin *iscsiPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface, exec mount.Exec, secret map[string]string) (volume.Mounter, error) {
|
||||
// iscsi volumes used directly in a pod have a ReadOnly flag set by the pod author.
|
||||
// iscsi volumes used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
|
||||
readOnly, fsType, err := getISCSIVolumeInfo(spec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tp, portals, iqn, lunStr, err := getISCSITargetInfo(spec)
|
||||
iscsiDisk, err := createISCSIDisk(spec, podUID, plugin, manager, secret)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
lun := strconv.Itoa(int(lunStr))
|
||||
portal := portalMounter(tp)
|
||||
var bkportal []string
|
||||
bkportal = append(bkportal, portal)
|
||||
for _, p := range portals {
|
||||
bkportal = append(bkportal, portalMounter(string(p)))
|
||||
}
|
||||
|
||||
iface, initiatorNamePtr, err := getISCSIInitiatorInfo(spec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var initiatorName string
|
||||
if initiatorNamePtr != nil {
|
||||
initiatorName = *initiatorNamePtr
|
||||
}
|
||||
chapDiscovery, err := getISCSIDiscoveryCHAPInfo(spec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
chapSession, err := getISCSISessionCHAPInfo(spec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &iscsiDiskMounter{
|
||||
iscsiDisk: &iscsiDisk{
|
||||
podUID: podUID,
|
||||
VolName: spec.Name(),
|
||||
Portals: bkportal,
|
||||
Iqn: iqn,
|
||||
lun: lun,
|
||||
Iface: iface,
|
||||
chap_discovery: chapDiscovery,
|
||||
chap_session: chapSession,
|
||||
secret: secret,
|
||||
InitiatorName: initiatorName,
|
||||
manager: manager,
|
||||
plugin: plugin},
|
||||
iscsiDisk: iscsiDisk,
|
||||
fsType: fsType,
|
||||
readOnly: readOnly,
|
||||
mounter: &mount.SafeFormatAndMount{Interface: mounter, Exec: exec},
|
||||
exec: exec,
|
||||
deviceUtil: ioutil.NewDeviceHandler(ioutil.NewIOHandler()),
|
||||
mountOptions: volume.MountOptionFromSpec(spec),
|
||||
mountOptions: ioutil.MountOptionFromSpec(spec),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// NewBlockVolumeMapper creates a new volume.BlockVolumeMapper from an API specification.
|
||||
func (plugin *iscsiPlugin) NewBlockVolumeMapper(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.BlockVolumeMapper, error) {
|
||||
// If this is called via GenerateUnmapDeviceFunc(), pod is nil.
|
||||
// Pass empty string as dummy uid since uid isn't used in the case.
|
||||
var uid types.UID
|
||||
var secret map[string]string
|
||||
var err error
|
||||
if pod != nil {
|
||||
uid = pod.UID
|
||||
secret, err = createSecretMap(spec, plugin, pod.Namespace)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return plugin.newBlockVolumeMapperInternal(spec, uid, &ISCSIUtil{}, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName()), secret)
|
||||
}
|
||||
|
||||
func (plugin *iscsiPlugin) newBlockVolumeMapperInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface, exec mount.Exec, secret map[string]string) (volume.BlockVolumeMapper, error) {
|
||||
readOnly, _, err := getISCSIVolumeInfo(spec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
iscsiDisk, err := createISCSIDisk(spec, podUID, plugin, manager, secret)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &iscsiDiskMapper{
|
||||
iscsiDisk: iscsiDisk,
|
||||
readOnly: readOnly,
|
||||
exec: exec,
|
||||
deviceUtil: ioutil.NewDeviceHandler(ioutil.NewIOHandler()),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (plugin *iscsiPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
|
||||
// Inject real implementations here, test through the internal function.
|
||||
return plugin.newUnmounterInternal(volName, podUID, &ISCSIUtil{}, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName()))
|
||||
}
|
||||
|
||||
@ -212,25 +174,88 @@ func (plugin *iscsiPlugin) newUnmounterInternal(volName string, podUID types.UID
|
||||
}, nil
|
||||
}
|
||||
|
||||
// NewBlockVolumeUnmapper creates a new volume.BlockVolumeUnmapper from recoverable state.
|
||||
func (plugin *iscsiPlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (volume.BlockVolumeUnmapper, error) {
|
||||
return plugin.newUnmapperInternal(volName, podUID, &ISCSIUtil{}, plugin.host.GetExec(plugin.GetPluginName()))
|
||||
}
|
||||
|
||||
func (plugin *iscsiPlugin) newUnmapperInternal(volName string, podUID types.UID, manager diskManager, exec mount.Exec) (volume.BlockVolumeUnmapper, error) {
|
||||
return &iscsiDiskUnmapper{
|
||||
iscsiDisk: &iscsiDisk{
|
||||
podUID: podUID,
|
||||
VolName: volName,
|
||||
manager: manager,
|
||||
plugin: plugin,
|
||||
},
|
||||
exec: exec,
|
||||
deviceUtil: ioutil.NewDeviceHandler(ioutil.NewIOHandler()),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (plugin *iscsiPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
|
||||
// Find globalPDPath from pod volume directory(mountPath)
|
||||
var globalPDPath string
|
||||
mounter := plugin.host.GetMounter(plugin.GetPluginName())
|
||||
paths, err := mount.GetMountRefs(mounter, mountPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, path := range paths {
|
||||
if strings.Contains(path, plugin.host.GetPluginDir(iscsiPluginName)) {
|
||||
globalPDPath = path
|
||||
break
|
||||
}
|
||||
}
|
||||
// Couldn't fetch globalPDPath
|
||||
if len(globalPDPath) == 0 {
|
||||
return nil, fmt.Errorf("couldn't fetch globalPDPath. failed to obtain volume spec")
|
||||
}
|
||||
|
||||
// Obtain iscsi disk configurations from globalPDPath
|
||||
device, _, err := extractDeviceAndPrefix(globalPDPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
bkpPortal, iqn, err := extractPortalAndIqn(device)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
iface, _ := extractIface(globalPDPath)
|
||||
iscsiVolume := &v1.Volume{
|
||||
Name: volumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
ISCSI: &v1.ISCSIVolumeSource{
|
||||
TargetPortal: volumeName,
|
||||
IQN: volumeName,
|
||||
TargetPortal: bkpPortal,
|
||||
IQN: iqn,
|
||||
ISCSIInterface: iface,
|
||||
},
|
||||
},
|
||||
}
|
||||
return volume.NewSpecFromVolume(iscsiVolume), nil
|
||||
}
|
||||
|
||||
func (plugin *iscsiPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, mapPath string) (*volume.Spec, error) {
|
||||
pluginDir := plugin.host.GetVolumeDevicePluginDir(iscsiPluginName)
|
||||
blkutil := volumepathhandler.NewBlockVolumePathHandler()
|
||||
globalMapPathUUID, err := blkutil.FindGlobalMapPathUUIDFromPod(pluginDir, mapPath, podUID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
glog.V(5).Infof("globalMapPathUUID: %v, err: %v", globalMapPathUUID, err)
|
||||
// Retrieve volume information from globalMapPathUUID
|
||||
// globalMapPathUUID example:
|
||||
// plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{pod uuid}
|
||||
// plugins/kubernetes.io/iscsi/volumeDevices/iface-default/192.168.0.10:3260-iqn.2017-05.com.example:test-lun-0/{pod uuid}
|
||||
globalMapPath := filepath.Dir(globalMapPathUUID)
|
||||
return getVolumeSpecFromGlobalMapPath(volumeName, globalMapPath)
|
||||
}
|
||||
|
||||
type iscsiDisk struct {
|
||||
VolName string
|
||||
podUID types.UID
|
||||
Portals []string
|
||||
Iqn string
|
||||
lun string
|
||||
Lun string
|
||||
Iface string
|
||||
chap_discovery bool
|
||||
chap_session bool
|
||||
@ -248,10 +273,25 @@ func (iscsi *iscsiDisk) GetPath() string {
|
||||
return iscsi.plugin.host.GetPodVolumeDir(iscsi.podUID, utilstrings.EscapeQualifiedNameForDisk(name), iscsi.VolName)
|
||||
}
|
||||
|
||||
func (iscsi *iscsiDisk) iscsiGlobalMapPath(spec *volume.Spec) (string, error) {
|
||||
mounter, err := volumeSpecToMounter(spec, iscsi.plugin.host, nil /* pod */)
|
||||
if err != nil {
|
||||
glog.Warningf("failed to get iscsi mounter: %v", err)
|
||||
return "", err
|
||||
}
|
||||
return iscsi.manager.MakeGlobalVDPDName(*mounter.iscsiDisk), nil
|
||||
}
|
||||
|
||||
func (iscsi *iscsiDisk) iscsiPodDeviceMapPath() (string, string) {
|
||||
name := iscsiPluginName
|
||||
return iscsi.plugin.host.GetPodVolumeDeviceDir(iscsi.podUID, utilstrings.EscapeQualifiedNameForDisk(name)), iscsi.VolName
|
||||
}
|
||||
|
||||
type iscsiDiskMounter struct {
|
||||
*iscsiDisk
|
||||
readOnly bool
|
||||
fsType string
|
||||
volumeMode v1.PersistentVolumeMode
|
||||
mounter *mount.SafeFormatAndMount
|
||||
exec mount.Exec
|
||||
deviceUtil ioutil.DeviceUtil
|
||||
@ -306,6 +346,58 @@ func (c *iscsiDiskUnmounter) TearDownAt(dir string) error {
|
||||
return ioutil.UnmountPath(dir, c.mounter)
|
||||
}
|
||||
|
||||
// Block Volumes Support
|
||||
type iscsiDiskMapper struct {
|
||||
*iscsiDisk
|
||||
readOnly bool
|
||||
exec mount.Exec
|
||||
deviceUtil ioutil.DeviceUtil
|
||||
}
|
||||
|
||||
var _ volume.BlockVolumeMapper = &iscsiDiskMapper{}
|
||||
|
||||
func (b *iscsiDiskMapper) SetUpDevice() (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
type iscsiDiskUnmapper struct {
|
||||
*iscsiDisk
|
||||
exec mount.Exec
|
||||
deviceUtil ioutil.DeviceUtil
|
||||
}
|
||||
|
||||
var _ volume.BlockVolumeUnmapper = &iscsiDiskUnmapper{}
|
||||
|
||||
// Even though iSCSI plugin has attacher/detacher implementation, iSCSI plugin
|
||||
// needs volume detach operation during TearDownDevice(). This method is only
|
||||
// chance that operations are done on kubelet node during volume teardown sequences.
|
||||
func (c *iscsiDiskUnmapper) TearDownDevice(mapPath, _ string) error {
|
||||
err := c.manager.DetachBlockISCSIDisk(*c, mapPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("iscsi: failed to detach disk: %s\nError: %v", mapPath, err)
|
||||
}
|
||||
glog.V(4).Infof("iscsi: %q is unmounted, deleting the directory", mapPath)
|
||||
err = os.RemoveAll(mapPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("iscsi: failed to delete the directory: %s\nError: %v", mapPath, err)
|
||||
}
|
||||
glog.V(4).Infof("iscsi: successfully detached disk: %s", mapPath)
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetGlobalMapPath returns global map path and error
|
||||
// path: plugins/kubernetes.io/{PluginName}/volumeDevices/{ifaceName}/{portal-some_iqn-lun-lun_id}
|
||||
func (iscsi *iscsiDisk) GetGlobalMapPath(spec *volume.Spec) (string, error) {
|
||||
return iscsi.iscsiGlobalMapPath(spec)
|
||||
}
|
||||
|
||||
// GetPodDeviceMapPath returns pod device map path and volume name
|
||||
// path: pods/{podUid}/volumeDevices/kubernetes.io~iscsi
|
||||
// volumeName: pv0001
|
||||
func (iscsi *iscsiDisk) GetPodDeviceMapPath() (string, string) {
|
||||
return iscsi.iscsiPodDeviceMapPath()
|
||||
}
|
||||
|
||||
func portalMounter(portal string) string {
|
||||
if !strings.Contains(portal, ":") {
|
||||
portal = portal + ":3260"
|
||||
@ -316,7 +408,7 @@ func portalMounter(portal string) string {
|
||||
// get iSCSI volume info: readOnly and fstype
|
||||
func getISCSIVolumeInfo(spec *volume.Spec) (bool, string, error) {
|
||||
// for volume source, readonly is in volume spec
|
||||
// for PV, readonly is in PV spec
|
||||
// for PV, readonly is in PV spec. PV gets the ReadOnly flag indirectly through the PVC source
|
||||
if spec.Volume != nil && spec.Volume.ISCSI != nil {
|
||||
return spec.Volume.ISCSI.ReadOnly, spec.Volume.ISCSI.FSType, nil
|
||||
} else if spec.PersistentVolume != nil &&
|
||||
@ -397,3 +489,155 @@ func getISCSISecretNameAndNamespace(spec *volume.Spec, defaultSecretNamespace st
|
||||
|
||||
return "", "", fmt.Errorf("Spec does not reference an ISCSI volume type")
|
||||
}
|
||||
|
||||
func createISCSIDisk(spec *volume.Spec, podUID types.UID, plugin *iscsiPlugin, manager diskManager, secret map[string]string) (*iscsiDisk, error) {
|
||||
tp, portals, iqn, lunStr, err := getISCSITargetInfo(spec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
lun := strconv.Itoa(int(lunStr))
|
||||
portal := portalMounter(tp)
|
||||
var bkportal []string
|
||||
bkportal = append(bkportal, portal)
|
||||
for _, p := range portals {
|
||||
bkportal = append(bkportal, portalMounter(string(p)))
|
||||
}
|
||||
|
||||
iface, initiatorNamePtr, err := getISCSIInitiatorInfo(spec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var initiatorName string
|
||||
if initiatorNamePtr != nil {
|
||||
initiatorName = *initiatorNamePtr
|
||||
}
|
||||
chapDiscovery, err := getISCSIDiscoveryCHAPInfo(spec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
chapSession, err := getISCSISessionCHAPInfo(spec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &iscsiDisk{
|
||||
podUID: podUID,
|
||||
VolName: spec.Name(),
|
||||
Portals: bkportal,
|
||||
Iqn: iqn,
|
||||
Lun: lun,
|
||||
Iface: iface,
|
||||
chap_discovery: chapDiscovery,
|
||||
chap_session: chapSession,
|
||||
secret: secret,
|
||||
InitiatorName: initiatorName,
|
||||
manager: manager,
|
||||
plugin: plugin}, nil
|
||||
}
|
||||
|
||||
func createSecretMap(spec *volume.Spec, plugin *iscsiPlugin, namespace string) (map[string]string, error) {
|
||||
var secret map[string]string
|
||||
chapDiscover, err := getISCSIDiscoveryCHAPInfo(spec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
chapSession, err := getISCSISessionCHAPInfo(spec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if chapDiscover || chapSession {
|
||||
secretName, secretNamespace, err := getISCSISecretNameAndNamespace(spec, namespace)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(secretName) > 0 && len(secretNamespace) > 0 {
|
||||
// if secret is provideded, retrieve it
|
||||
kubeClient := plugin.host.GetKubeClient()
|
||||
if kubeClient == nil {
|
||||
return nil, fmt.Errorf("Cannot get kube client")
|
||||
}
|
||||
secretObj, err := kubeClient.CoreV1().Secrets(secretNamespace).Get(secretName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
err = fmt.Errorf("Couldn't get secret %v/%v error: %v", secretNamespace, secretName, err)
|
||||
return nil, err
|
||||
}
|
||||
secret = make(map[string]string)
|
||||
for name, data := range secretObj.Data {
|
||||
glog.V(4).Infof("retrieving CHAP secret name: %s", name)
|
||||
secret[name] = string(data)
|
||||
}
|
||||
}
|
||||
}
|
||||
return secret, err
|
||||
}
|
||||
|
||||
func createVolumeFromISCSIVolumeSource(volumeName string, iscsi v1.ISCSIVolumeSource) *v1.Volume {
	return &v1.Volume{
		Name: volumeName,
		VolumeSource: v1.VolumeSource{
			ISCSI: &iscsi,
		},
	}
}

func createPersistentVolumeFromISCSIPVSource(volumeName string, iscsi v1.ISCSIPersistentVolumeSource) *v1.PersistentVolume {
	block := v1.PersistentVolumeBlock
	return &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name: volumeName,
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: v1.PersistentVolumeSource{
				ISCSI: &iscsi,
			},
			VolumeMode: &block,
		},
	}
}

func getVolumeSpecFromGlobalMapPath(volumeName, globalMapPath string) (*volume.Spec, error) {
	// Retrieve volume spec information from globalMapPath
	// globalMapPath example:
	//   plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}
	//   plugins/kubernetes.io/iscsi/volumeDevices/iface-default/192.168.0.10:3260-iqn.2017-05.com.example:test-lun-0

	// device: 192.168.0.10:3260-iqn.2017-05.com.example:test-lun-0
	device, _, err := extractDeviceAndPrefix(globalMapPath)
	if err != nil {
		return nil, err
	}
	bkpPortal, iqn, err := extractPortalAndIqn(device)
	if err != nil {
		return nil, err
	}
	arr := strings.Split(device, "-lun-")
	if len(arr) < 2 {
		return nil, fmt.Errorf("failed to retrieve lun from globalMapPath: %v", globalMapPath)
	}
	lun, err := strconv.Atoi(arr[1])
	if err != nil {
		return nil, err
	}
	iface, found := extractIface(globalMapPath)
	if !found {
		return nil, fmt.Errorf("failed to retrieve iface from globalMapPath: %v", globalMapPath)
	}
	iscsiPV := createPersistentVolumeFromISCSIPVSource(volumeName,
		v1.ISCSIPersistentVolumeSource{
			TargetPortal:   bkpPortal,
			IQN:            iqn,
			Lun:            int32(lun),
			ISCSIInterface: iface,
		},
	)
	glog.V(5).Infof("ConstructBlockVolumeSpec: TargetPortal: %v, IQN: %v, Lun: %v, ISCSIInterface: %v",
		iscsiPV.Spec.PersistentVolumeSource.ISCSI.TargetPortal,
		iscsiPV.Spec.PersistentVolumeSource.ISCSI.IQN,
		iscsiPV.Spec.PersistentVolumeSource.ISCSI.Lun,
		iscsiPV.Spec.PersistentVolumeSource.ISCSI.ISCSIInterface,
	)
	return volume.NewSpecFromPersistentVolume(iscsiPV, false), nil
}

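For orientation on the path format, an editor's sketch that is not part of the vendored code: the device segment is split on "-lun-", and the remainder divides into portal and IQN at the first dash. The real helpers extractPortalAndIqn and extractIface are more defensive; this traces only the happy path for the example path in the comment above.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Example device segment from the globalMapPath comment above.
	device := "192.168.0.10:3260-iqn.2017-05.com.example:test-lun-0"
	arr := strings.Split(device, "-lun-")
	portalAndIqn := strings.SplitN(arr[0], "-", 2)
	lun, _ := strconv.Atoi(arr[1])
	fmt.Println("portal:", portalAndIqn[0]) // 192.168.0.10:3260
	fmt.Println("iqn:", portalAndIqn[1])    // iqn.2017-05.com.example:test
	fmt.Println("lun:", lun)                // 0
}
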
151
vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi_test.go
generated
vendored
@ -19,6 +19,7 @@ package iscsi
import (
	"fmt"
	"os"
	"strings"
	"testing"

	"k8s.io/api/core/v1"
@ -51,6 +52,21 @@ func TestCanSupport(t *testing.T) {
	if plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
		t.Errorf("Expected false")
	}
	if plug.CanSupport(&volume.Spec{}) {
		t.Errorf("Expected false")
	}
	if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{ISCSI: &v1.ISCSIVolumeSource{}}}}) {
		t.Errorf("Expected true")
	}
	if plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{}}}) {
		t.Errorf("Expected false")
	}
	if plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{}}}}) {
		t.Errorf("Expected false")
	}
	if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{ISCSI: &v1.ISCSIPersistentVolumeSource{}}}}}) {
		t.Errorf("Expected true")
	}
}

func TestGetAccessModes(t *testing.T) {
@ -80,7 +96,7 @@ type fakeDiskManager struct {

func NewFakeDiskManager() *fakeDiskManager {
	return &fakeDiskManager{
		tmpDir: utiltesting.MkTmpdirOrDie("fc_test"),
		tmpDir: utiltesting.MkTmpdirOrDie("iscsi_test"),
	}
}

@ -91,6 +107,11 @@ func (fake *fakeDiskManager) Cleanup() {
func (fake *fakeDiskManager) MakeGlobalPDName(disk iscsiDisk) string {
	return fake.tmpDir
}

func (fake *fakeDiskManager) MakeGlobalVDPDName(disk iscsiDisk) string {
	return fake.tmpDir
}

func (fake *fakeDiskManager) AttachDisk(b iscsiDiskMounter) (string, error) {
	globalPath := b.manager.MakeGlobalPDName(*b.iscsiDisk)
	err := os.MkdirAll(globalPath, 0750)
@ -113,6 +134,15 @@ func (fake *fakeDiskManager) DetachDisk(c iscsiDiskUnmounter, mntPath string) er
	return nil
}

func (fake *fakeDiskManager) DetachBlockISCSIDisk(c iscsiDiskUnmapper, mntPath string) error {
	globalPath := c.manager.MakeGlobalVDPDName(*c.iscsiDisk)
	err := os.RemoveAll(globalPath)
	if err != nil {
		return err
	}
	return nil
}

func doTestPlugin(t *testing.T, spec *volume.Spec) {
	tmpDir, err := utiltesting.MkTmpdir("iscsi_test")
	if err != nil {
@ -155,13 +185,6 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
			t.Errorf("SetUp() failed: %v", err)
		}
	}
	if _, err := os.Stat(path); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, volume path not created: %s", path)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}

	fakeManager2 := NewFakeDiskManager()
	defer fakeManager2.Cleanup()
@ -289,10 +312,12 @@ type testcase struct {
	defaultNs string
	spec      *volume.Spec
	// Expected return of the test
	expectedName string
	expectedNs string
	expectedIface string
	expectedError error
	expectedName          string
	expectedNs            string
	expectedIface         string
	expectedError         error
	expectedDiscoveryCHAP bool
	expectedSessionCHAP   bool
}

func TestGetSecretNameAndNamespaceForPV(t *testing.T) {
@ -424,5 +449,105 @@ func TestGetISCSIInitiatorInfo(t *testing.T) {
				err, resultIface)
		}
	}

}

func TestGetISCSICHAP(t *testing.T) {
	tests := []testcase{
		{
			name: "persistent volume source",
			spec: &volume.Spec{
				PersistentVolume: &v1.PersistentVolume{
					Spec: v1.PersistentVolumeSpec{
						PersistentVolumeSource: v1.PersistentVolumeSource{
							ISCSI: &v1.ISCSIPersistentVolumeSource{
								DiscoveryCHAPAuth: true,
								SessionCHAPAuth:   true,
							},
						},
					},
				},
			},
			expectedDiscoveryCHAP: true,
			expectedSessionCHAP:   true,
			expectedError:         nil,
		},
		{
			name: "pod volume source",
			spec: &volume.Spec{
				Volume: &v1.Volume{
					VolumeSource: v1.VolumeSource{
						ISCSI: &v1.ISCSIVolumeSource{
							DiscoveryCHAPAuth: true,
							SessionCHAPAuth:   true,
						},
					},
				},
			},
			expectedDiscoveryCHAP: true,
			expectedSessionCHAP:   true,
			expectedError:         nil,
		},
		{
			name:                  "no volume",
			spec:                  &volume.Spec{},
			expectedDiscoveryCHAP: false,
			expectedSessionCHAP:   false,
			expectedError:         fmt.Errorf("Spec does not reference an ISCSI volume type"),
		},
	}
	for _, testcase := range tests {
		resultDiscoveryCHAP, err := getISCSIDiscoveryCHAPInfo(testcase.spec)
		resultSessionCHAP, err := getISCSISessionCHAPInfo(testcase.spec)
		switch testcase.name {
		case "no volume":
			if err.Error() != testcase.expectedError.Error() || resultDiscoveryCHAP != testcase.expectedDiscoveryCHAP || resultSessionCHAP != testcase.expectedSessionCHAP {
				t.Errorf("%s failed: expected err=%v DiscoveryCHAP=%v SessionCHAP=%v, got %v/%v/%v",
					testcase.name, testcase.expectedError, testcase.expectedDiscoveryCHAP, testcase.expectedSessionCHAP,
					err, resultDiscoveryCHAP, resultSessionCHAP)
			}
		default:
			if err != testcase.expectedError || resultDiscoveryCHAP != testcase.expectedDiscoveryCHAP || resultSessionCHAP != testcase.expectedSessionCHAP {
				t.Errorf("%s failed: expected err=%v DiscoveryCHAP=%v SessionCHAP=%v, got %v/%v/%v", testcase.name, testcase.expectedError, testcase.expectedDiscoveryCHAP, testcase.expectedSessionCHAP,
					err, resultDiscoveryCHAP, resultSessionCHAP)
			}
		}
	}
}

func TestGetVolumeSpec(t *testing.T) {
	path := "plugins/kubernetes.io/iscsi/volumeDevices/iface-default/127.0.0.1:3260-iqn.2014-12.server:storage.target01-lun-0"
	spec, _ := getVolumeSpecFromGlobalMapPath("test", path)

	portal := spec.PersistentVolume.Spec.PersistentVolumeSource.ISCSI.TargetPortal
	if portal != "127.0.0.1:3260" {
		t.Errorf("wrong portal: %v", portal)
	}
	iqn := spec.PersistentVolume.Spec.PersistentVolumeSource.ISCSI.IQN
	if iqn != "iqn.2014-12.server:storage.target01" {
		t.Errorf("wrong iqn: %v", iqn)
	}
	lun := spec.PersistentVolume.Spec.PersistentVolumeSource.ISCSI.Lun
	if lun != 0 {
		t.Errorf("wrong lun: %v", lun)
	}
	iface := spec.PersistentVolume.Spec.PersistentVolumeSource.ISCSI.ISCSIInterface
	if iface != "default" {
		t.Errorf("wrong ISCSIInterface: %v", iface)
	}
}

func TestGetVolumeSpec_no_lun(t *testing.T) {
	path := "plugins/kubernetes.io/iscsi/volumeDevices/iface-default/127.0.0.1:3260-iqn.2014-12.server:storage.target01"
	_, err := getVolumeSpecFromGlobalMapPath("test", path)
	if !strings.Contains(err.Error(), "malformatted mnt path") {
		t.Errorf("should get error: malformatted mnt path")
	}
}

func TestGetVolumeSpec_no_iface(t *testing.T) {
	path := "plugins/kubernetes.io/iscsi/volumeDevices/default/127.0.0.1:3260-iqn.2014-12.server:storage.target01-lun-0"
	_, err := getVolumeSpecFromGlobalMapPath("test", path)
	if !strings.Contains(err.Error(), "failed to retrieve iface") {
		t.Errorf("should get error: failed to retrieve iface")
	}
}

217
vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi_util.go
generated
vendored
@ -27,9 +27,13 @@ import (
	"time"

	"github.com/golang/glog"
	"k8s.io/api/core/v1"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/pkg/util/mount"
	"k8s.io/kubernetes/pkg/volume"
	volumeutil "k8s.io/kubernetes/pkg/volume/util"
	"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
)

var (
@ -163,10 +167,21 @@ func makePDNameInternal(host volume.VolumeHost, portal string, iqn string, lun s
	return path.Join(host.GetPluginDir(iscsiPluginName), "iface-"+iface, portal+"-"+iqn+"-lun-"+lun)
}

// make a directory like /var/lib/kubelet/plugins/kubernetes.io/iscsi/volumeDevices/iface_name/portal-some_iqn-lun-lun_id
func makeVDPDNameInternal(host volume.VolumeHost, portal string, iqn string, lun string, iface string) string {
	return path.Join(host.GetVolumeDevicePluginDir(iscsiPluginName), "iface-"+iface, portal+"-"+iqn+"-lun-"+lun)
}

type ISCSIUtil struct{}

// MakeGlobalPDName returns path of global plugin dir
func (util *ISCSIUtil) MakeGlobalPDName(iscsi iscsiDisk) string {
	return makePDNameInternal(iscsi.plugin.host, iscsi.Portals[0], iscsi.Iqn, iscsi.lun, iscsi.Iface)
	return makePDNameInternal(iscsi.plugin.host, iscsi.Portals[0], iscsi.Iqn, iscsi.Lun, iscsi.Iface)
}

// MakeGlobalVDPDName returns path of global volume device plugin dir
func (util *ISCSIUtil) MakeGlobalVDPDName(iscsi iscsiDisk) string {
	return makeVDPDNameInternal(iscsi.plugin.host, iscsi.Portals[0], iscsi.Iqn, iscsi.Lun, iscsi.Iface)
}

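To make the two naming helpers concrete, an editor's sketch; the kubelet base directories below are assumptions standing in for host.GetPluginDir and host.GetVolumeDevicePluginDir. The only difference is the base directory, so a filesystem-mode disk and a block-mode disk for the same target land under sibling trees.

package main

import (
	"fmt"
	"path"
)

func main() {
	// Assumed base dirs for illustration only.
	pluginDir := "/var/lib/kubelet/plugins/kubernetes.io/iscsi"
	deviceDir := "/var/lib/kubelet/plugins/kubernetes.io/iscsi/volumeDevices"
	leaf := path.Join("iface-default", "192.168.0.10:3260-iqn.2017-05.com.example:test-lun-0")
	fmt.Println(path.Join(pluginDir, leaf)) // filesystem mode (makePDNameInternal)
	fmt.Println(path.Join(deviceDir, leaf)) // block mode (makeVDPDNameInternal)
}
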
func (util *ISCSIUtil) persistISCSI(conf iscsiDisk, mnt string) error {
@ -184,7 +199,6 @@ func (util *ISCSIUtil) persistISCSI(conf iscsiDisk, mnt string) error {
}

func (util *ISCSIUtil) loadISCSI(conf *iscsiDisk, mnt string) error {
	// NOTE: The iscsi config json is not deleted after logging out from target portals.
	file := path.Join(mnt, "iscsi.json")
	fp, err := os.Open(file)
	if err != nil {
@ -198,6 +212,7 @@ func (util *ISCSIUtil) loadISCSI(conf *iscsiDisk, mnt string) error {
	return nil
}

// AttachDisk returns the devicePath of the volume if attach succeeded, otherwise it returns an error
func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) {
	var devicePath string
	var devicePaths []string
@ -240,9 +255,9 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) {
		return "", fmt.Errorf("Could not parse iface file for %s", b.Iface)
	}
	if iscsiTransport == "tcp" {
		devicePath = strings.Join([]string{"/dev/disk/by-path/ip", tp, "iscsi", b.Iqn, "lun", b.lun}, "-")
		devicePath = strings.Join([]string{"/dev/disk/by-path/ip", tp, "iscsi", b.Iqn, "lun", b.Lun}, "-")
	} else {
		devicePath = strings.Join([]string{"/dev/disk/by-path/pci", "*", "ip", tp, "iscsi", b.Iqn, "lun", b.lun}, "-")
		devicePath = strings.Join([]string{"/dev/disk/by-path/pci", "*", "ip", tp, "iscsi", b.Iqn, "lun", b.Lun}, "-")
	}

	if exist := waitForPathToExist(&devicePath, 1, iscsiTransport); exist {
@ -279,6 +294,12 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) {
			lastErr = fmt.Errorf("iscsi: failed to attach disk: Error: %s (%v)", string(out), err)
			continue
		}
		// in case of node failure/restart, explicitly set to manual login so it doesn't hang on boot
		out, err = b.exec.Run("iscsiadm", "-m", "node", "-p", tp, "-T", b.Iqn, "-o", "update", "node.startup", "-v", "manual")
		if err != nil {
			// don't fail if we can't set startup mode, but log a warning so there is a clue
			glog.Warningf("Warning: Failed to set iSCSI login mode to manual. Error: %v", err)
		}
		if exist := waitForPathToExist(&devicePath, 10, iscsiTransport); !exist {
			glog.Errorf("Could not attach disk: Timeout after 10s")
			// update last error
@ -301,26 +322,6 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) {

	// Make sure we use a valid devicepath to find the mpio device.
	devicePath = devicePaths[0]

	// mount it
	globalPDPath := b.manager.MakeGlobalPDName(*b.iscsiDisk)
	notMnt, err := b.mounter.IsLikelyNotMountPoint(globalPDPath)
	if err != nil && !os.IsNotExist(err) {
		return "", fmt.Errorf("Heuristic determination of mount point failed: %v", err)
	}
	if !notMnt {
		glog.Infof("iscsi: %s already mounted", globalPDPath)
		return "", nil
	}

	if err := os.MkdirAll(globalPDPath, 0750); err != nil {
		glog.Errorf("iscsi: failed to mkdir %s, error: %v", globalPDPath, err)
		return "", err
	}

	// Persist iscsi disk config to json file for DetachDisk path
	util.persistISCSI(*(b.iscsiDisk), globalPDPath)

	for _, path := range devicePaths {
		// There shouldn't be any empty device paths. However, adding this check
		// to be on the safer side and avoid the possibility of an empty entry.
@ -333,14 +334,67 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) {
			break
		}
	}
	err = b.mounter.FormatAndMount(devicePath, globalPDPath, b.fsType, nil)
	if err != nil {
		glog.Errorf("iscsi: failed to mount iscsi volume %s [%s] to %s, error %v", devicePath, b.fsType, globalPDPath, err)
	}

	return devicePath, err
	glog.V(5).Infof("iscsi: AttachDisk devicePath: %s", devicePath)
	// run global mount path related operations based on volumeMode
	return globalPDPathOperation(b)(b, devicePath, util)
}

// globalPDPathOperation returns the global mount path related operation based on volumeMode.
// If the volumeMode is 'Filesystem' or not defined, the plugin needs to create a dir, persist
// the iscsi configuration, and then format/mount the volume.
// If the volumeMode is 'Block', the plugin creates a dir and persists the iscsi configuration.
// Since the volume type is block, the plugin doesn't need to format/mount the volume.
func globalPDPathOperation(b iscsiDiskMounter) func(iscsiDiskMounter, string, *ISCSIUtil) (string, error) {
	// TODO: remove feature gate check after no longer needed
	if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
		glog.V(5).Infof("iscsi: AttachDisk volumeMode: %s", b.volumeMode)
		if b.volumeMode == v1.PersistentVolumeBlock {
			// If the volumeMode is 'Block', the plugin doesn't need to format the volume.
			return func(b iscsiDiskMounter, devicePath string, util *ISCSIUtil) (string, error) {
				globalPDPath := b.manager.MakeGlobalVDPDName(*b.iscsiDisk)
				// Create dir like /var/lib/kubelet/plugins/kubernetes.io/iscsi/volumeDevices/{ifaceName}/{portal-some_iqn-lun-lun_id}
				if err := os.MkdirAll(globalPDPath, 0750); err != nil {
					glog.Errorf("iscsi: failed to mkdir %s, error: %v", globalPDPath, err)
					return "", err
				}
				// Persist iscsi disk config to json file for DetachDisk path
				util.persistISCSI(*(b.iscsiDisk), globalPDPath)

				return devicePath, nil
			}
		}
	}
	// If the volumeMode is 'Filesystem', the plugin needs to format the volume
	// and mount it to globalPDPath.
	return func(b iscsiDiskMounter, devicePath string, util *ISCSIUtil) (string, error) {
		globalPDPath := b.manager.MakeGlobalPDName(*b.iscsiDisk)
		notMnt, err := b.mounter.IsLikelyNotMountPoint(globalPDPath)
		if err != nil && !os.IsNotExist(err) {
			return "", fmt.Errorf("Heuristic determination of mount point failed: %v", err)
		}
		// Return confirmed devicePath to caller
		if !notMnt {
			glog.Infof("iscsi: %s already mounted", globalPDPath)
			return devicePath, nil
		}
		// Create dir like /var/lib/kubelet/plugins/kubernetes.io/iscsi/{ifaceName}/{portal-some_iqn-lun-lun_id}
		if err := os.MkdirAll(globalPDPath, 0750); err != nil {
			glog.Errorf("iscsi: failed to mkdir %s, error: %v", globalPDPath, err)
			return "", err
		}
		// Persist iscsi disk config to json file for DetachDisk path
		util.persistISCSI(*(b.iscsiDisk), globalPDPath)

		err = b.mounter.FormatAndMount(devicePath, globalPDPath, b.fsType, nil)
		if err != nil {
			glog.Errorf("iscsi: failed to mount iscsi volume %s [%s] to %s, error %v", devicePath, b.fsType, globalPDPath, err)
		}

		return devicePath, nil
	}
}

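globalPDPathOperation decides once, up front, which closure to run instead of re-checking the volume mode at every step. A stripped-down sketch of that dispatch shape follows; the type and function names are illustrative and not part of the Kubernetes API.

package main

import "fmt"

type mode string

const (
	filesystem mode = "Filesystem"
	block      mode = "Block"
)

// pickOp mirrors the shape of globalPDPathOperation: choose the operation
// once, then hand the chosen closure back to the caller to run.
func pickOp(m mode) func(devicePath string) (string, error) {
	if m == block {
		return func(devicePath string) (string, error) {
			// block mode: prepare the map dir only, no format/mount
			return devicePath, nil
		}
	}
	return func(devicePath string) (string, error) {
		// filesystem mode: format and mount before returning
		return devicePath, nil
	}
}

func main() {
	op := pickOp(block)
	p, _ := op("/dev/sdx")
	fmt.Println(p)
}
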
// DetachDisk unmounts and detaches a volume from the node
func (util *ISCSIUtil) DetachDisk(c iscsiDiskUnmounter, mntPath string) error {
	_, cnt, err := mount.GetDeviceNameFromMount(c.mounter, mntPath)
	if err != nil {
@ -395,9 +449,100 @@ func (util *ISCSIUtil) DetachDisk(c iscsiDiskUnmounter, mntPath string) error {
	}
	portals := removeDuplicate(bkpPortal)
	if len(portals) == 0 {
		return fmt.Errorf("iscsi detach disk: failed to detach iscsi disk. Couldn't get connected portals from configurations.")
		return fmt.Errorf("iscsi detach disk: failed to detach iscsi disk. Couldn't get connected portals from configurations")
	}

	err = util.detachISCSIDisk(c.exec, portals, iqn, iface, volName, initiatorName, found)
	if err != nil {
		return fmt.Errorf("failed to finish detachISCSIDisk, err: %v", err)
	}
	return nil
}

// DetachBlockISCSIDisk removes the loopback device for a volume and detaches the volume from the node
func (util *ISCSIUtil) DetachBlockISCSIDisk(c iscsiDiskUnmapper, mapPath string) error {
	if pathExists, pathErr := volumeutil.PathExists(mapPath); pathErr != nil {
		return fmt.Errorf("Error checking if path exists: %v", pathErr)
	} else if !pathExists {
		glog.Warningf("Warning: Unmap skipped because path does not exist: %v", mapPath)
		return nil
	}
	// If we arrive here, the device is no longer used; see if we need to log out of the target
	// device: 192.168.0.10:3260-iqn.2017-05.com.example:test-lun-0
	device, _, err := extractDeviceAndPrefix(mapPath)
	if err != nil {
		return err
	}
	var bkpPortal []string
	var volName, iqn, lun, iface, initiatorName string
	found := true
	// load iscsi disk config from json file
	if err := util.loadISCSI(c.iscsiDisk, mapPath); err == nil {
		bkpPortal, iqn, lun, iface, volName = c.iscsiDisk.Portals, c.iscsiDisk.Iqn, c.iscsiDisk.Lun, c.iscsiDisk.Iface, c.iscsiDisk.VolName
		initiatorName = c.iscsiDisk.InitiatorName
	} else {
		// If the iscsi disk config is not found, fall back to the original behavior.
		// This portal/iqn/iface is no longer referenced, log out.
		// Extract the portal and iqn from device path.
		bkpPortal = make([]string, 1)
		bkpPortal[0], iqn, err = extractPortalAndIqn(device)
		if err != nil {
			return err
		}
		arr := strings.Split(device, "-lun-")
		if len(arr) < 2 {
			return fmt.Errorf("failed to retrieve lun from mapPath: %v", mapPath)
		}
		lun = arr[1]
		// Extract the iface from the mountPath and use it to log out. If the iface
		// is not found, maintain the previous behavior to facilitate kubelet upgrade.
		// Logout may fail as no session may exist for the portal/IQN on the specified interface.
		iface, found = extractIface(mapPath)
	}
	portals := removeDuplicate(bkpPortal)
	if len(portals) == 0 {
		return fmt.Errorf("iscsi detach disk: failed to detach iscsi disk. Couldn't get connected portals from configurations")
	}

	devicePath := getDevByPath(portals[0], iqn, lun)
	glog.V(5).Infof("iscsi: devicePath: %s", devicePath)
	if _, err = os.Stat(devicePath); err != nil {
		return fmt.Errorf("failed to validate devicePath: %s", devicePath)
	}
	// check if the dev is using mpio and if so mount it via the dm-XX device
	if mappedDevicePath := c.deviceUtil.FindMultipathDeviceForDevice(devicePath); mappedDevicePath != "" {
		devicePath = mappedDevicePath
	}
	// Get the loopback device, which takes an fd lock for devicePath, before detaching the volume from the node.
	// TODO: This is a workaround for issue #54108
	// Currently local attach plugins such as FC, iSCSI, RBD can't obtain the devicePath during
	// GenerateUnmapDeviceFunc() in operation_generator. As a result, these plugins fail to get
	// and remove the loopback device, which would then remain on the kubelet node. To avoid the problem,
	// local attach plugins need to remove the loopback device during TearDownDevice().
	blkUtil := volumepathhandler.NewBlockVolumePathHandler()
	loop, err := volumepathhandler.BlockVolumePathHandler.GetLoopDevice(blkUtil, devicePath)
	if err != nil {
		if err.Error() != volumepathhandler.ErrDeviceNotFound {
			return fmt.Errorf("failed to get loopback for device: %v, err: %v", devicePath, err)
		}
		glog.Warningf("iscsi: loopback for device: %s not found", device)
	}
	// Detach the volume from the kubelet node
	err = util.detachISCSIDisk(c.exec, portals, iqn, iface, volName, initiatorName, found)
	if err != nil {
		return fmt.Errorf("failed to finish detachISCSIDisk, err: %v", err)
	}
	if len(loop) != 0 {
		// The volume was successfully detached from the node. We can safely remove the loopback.
		err = volumepathhandler.BlockVolumePathHandler.RemoveLoopDevice(blkUtil, loop)
		if err != nil {
			return fmt.Errorf("failed to remove loopback: %v, err: %v", loop, err)
		}
	}
	return nil
}

func (util *ISCSIUtil) detachISCSIDisk(exec mount.Exec, portals []string, iqn, iface, volName, initiatorName string, found bool) error {
	for _, portal := range portals {
		logoutArgs := []string{"-m", "node", "-p", portal, "-T", iqn, "--logout"}
		deleteArgs := []string{"-m", "node", "-p", portal, "-T", iqn, "-o", "delete"}
@ -406,13 +551,13 @@ func (util *ISCSIUtil) DetachDisk(c iscsiDiskUnmounter, mntPath string) error {
			deleteArgs = append(deleteArgs, []string{"-I", iface}...)
		}
		glog.Infof("iscsi: log out target %s iqn %s iface %s", portal, iqn, iface)
		out, err := c.exec.Run("iscsiadm", logoutArgs...)
		out, err := exec.Run("iscsiadm", logoutArgs...)
		if err != nil {
			glog.Errorf("iscsi: failed to detach disk Error: %s", string(out))
		}
		// Delete the node record
		glog.Infof("iscsi: delete node record target %s iqn %s", portal, iqn)
		out, err = c.exec.Run("iscsiadm", deleteArgs...)
		out, err = exec.Run("iscsiadm", deleteArgs...)
		if err != nil {
			glog.Errorf("iscsi: failed to delete node record Error: %s", string(out))
		}
@ -421,7 +566,7 @@ func (util *ISCSIUtil) DetachDisk(c iscsiDiskUnmounter, mntPath string) error {
	// If the iface was not created via the iscsi plugin, skip deleting it
	if initiatorName != "" && found && iface == (portals[0]+":"+volName) {
		deleteArgs := []string{"-m", "iface", "-I", iface, "-o", "delete"}
		out, err := c.exec.Run("iscsiadm", deleteArgs...)
		out, err := exec.Run("iscsiadm", deleteArgs...)
		if err != nil {
			glog.Errorf("iscsi: failed to delete iface Error: %s", string(out))
		}
@ -430,6 +575,10 @@ func (util *ISCSIUtil) DetachDisk(c iscsiDiskUnmounter, mntPath string) error {
	return nil
}

func getDevByPath(portal, iqn, lun string) string {
	return "/dev/disk/by-path/ip-" + portal + "-iscsi-" + iqn + "-lun-" + lun
}

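A worked example of the by-path naming, an editor's sketch that reuses getDevByPath exactly as defined above: portal 192.168.0.10:3260, IQN iqn.2017-05.com.example:test, and LUN 0 resolve to the udev link that DetachBlockISCSIDisk stats before unmapping.

package main

import "fmt"

// Copied verbatim from the diff above.
func getDevByPath(portal, iqn, lun string) string {
	return "/dev/disk/by-path/ip-" + portal + "-iscsi-" + iqn + "-lun-" + lun
}

func main() {
	fmt.Println(getDevByPath("192.168.0.10:3260", "iqn.2017-05.com.example:test", "0"))
	// Output: /dev/disk/by-path/ip-192.168.0.10:3260-iscsi-iqn.2017-05.com.example:test-lun-0
}
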
func extractTransportname(ifaceOutput string) (iscsiTransport string) {
	rexOutput := ifaceTransportNameRe.FindStringSubmatch(ifaceOutput)
	if rexOutput == nil {
50
vendor/k8s.io/kubernetes/pkg/volume/local/BUILD
generated
vendored
@ -1,10 +1,4 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
@ -13,6 +7,7 @@ go_library(
        "local.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/volume/local",
    visibility = ["//visibility:public"],
    deps = [
        "//pkg/kubelet/events:go_default_library",
        "//pkg/util/keymutex:go_default_library",
@ -32,17 +27,35 @@ go_library(

go_test(
    name = "go_default_test",
    srcs = ["local_test.go"],
    importpath = "k8s.io/kubernetes/pkg/volume/local",
    library = ":go_default_library",
    deps = [
        "//pkg/volume:go_default_library",
        "//pkg/volume/testing:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/client-go/util/testing:go_default_library",
    ],
    srcs = select({
        "@io_bazel_rules_go//go/platform:darwin": [
            "local_test.go",
        ],
        "@io_bazel_rules_go//go/platform:linux": [
            "local_test.go",
        ],
        "//conditions:default": [],
    }),
    embed = [":go_default_library"],
    deps = select({
        "@io_bazel_rules_go//go/platform:darwin": [
            "//pkg/volume:go_default_library",
            "//pkg/volume/testing:go_default_library",
            "//vendor/k8s.io/api/core/v1:go_default_library",
            "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
            "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
            "//vendor/k8s.io/client-go/util/testing:go_default_library",
        ],
        "@io_bazel_rules_go//go/platform:linux": [
            "//pkg/volume:go_default_library",
            "//pkg/volume/testing:go_default_library",
            "//vendor/k8s.io/api/core/v1:go_default_library",
            "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
            "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
            "//vendor/k8s.io/client-go/util/testing:go_default_library",
        ],
        "//conditions:default": [],
    }),
)

filegroup(
@ -56,4 +69,5 @@ filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

95
vendor/k8s.io/kubernetes/pkg/volume/local/local.go
generated
vendored
@ -19,6 +19,7 @@ package local
import (
	"fmt"
	"os"
	"path"

	"github.com/golang/glog"

@ -49,6 +50,7 @@ type localVolumePlugin struct {

var _ volume.VolumePlugin = &localVolumePlugin{}
var _ volume.PersistentVolumePlugin = &localVolumePlugin{}
var _ volume.BlockVolumePlugin = &localVolumePlugin{}

const (
	localVolumePluginName = "kubernetes.io/local-volume"
@ -137,6 +139,36 @@ func (plugin *localVolumePlugin) NewUnmounter(volName string, podUID types.UID)
	}, nil
}

func (plugin *localVolumePlugin) NewBlockVolumeMapper(spec *volume.Spec, pod *v1.Pod,
	_ volume.VolumeOptions) (volume.BlockVolumeMapper, error) {
	volumeSource, readOnly, err := getVolumeSource(spec)
	if err != nil {
		return nil, err
	}

	return &localVolumeMapper{
		localVolume: &localVolume{
			podUID:     pod.UID,
			volName:    spec.Name(),
			globalPath: volumeSource.Path,
			plugin:     plugin,
		},
		readOnly: readOnly,
	}, nil

}

func (plugin *localVolumePlugin) NewBlockVolumeUnmapper(volName string,
	podUID types.UID) (volume.BlockVolumeUnmapper, error) {
	return &localVolumeUnmapper{
		localVolume: &localVolume{
			podUID:  podUID,
			volName: volName,
			plugin:  plugin,
		},
	}, nil
}

// TODO: check if no path and no topology constraints are ok
func (plugin *localVolumePlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
	localVolume := &v1.PersistentVolume{
@ -154,6 +186,27 @@ func (plugin *localVolumePlugin) ConstructVolumeSpec(volumeName, mountPath strin
	return volume.NewSpecFromPersistentVolume(localVolume, false), nil
}

func (plugin *localVolumePlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName,
	mapPath string) (*volume.Spec, error) {
	block := v1.PersistentVolumeBlock

	localVolume := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name: volumeName,
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: v1.PersistentVolumeSource{
				Local: &v1.LocalVolumeSource{
					Path: "",
				},
			},
			VolumeMode: &block,
		},
	}

	return volume.NewSpecFromPersistentVolume(localVolume, false), nil
}

// Local volumes represent a local directory on a node.
// The directory at the globalPath will be bind-mounted to the pod's directory
type localVolume struct {
@ -307,3 +360,45 @@ func (u *localVolumeUnmounter) TearDownAt(dir string) error {
	glog.V(4).Infof("Unmounting volume %q at path %q\n", u.volName, dir)
	return util.UnmountMountPoint(dir, u.mounter, true) /* extensiveMountPointCheck = true */
}

// localVolumeMapper implements the BlockVolumeMapper interface for local volumes.
type localVolumeMapper struct {
	*localVolume
	readOnly bool
}

var _ volume.BlockVolumeMapper = &localVolumeMapper{}

// SetUpDevice provides the physical device path for the local PV.
func (m *localVolumeMapper) SetUpDevice() (string, error) {
	glog.V(4).Infof("SetupDevice returning path %s", m.globalPath)
	return m.globalPath, nil
}

// localVolumeUnmapper implements the BlockVolumeUnmapper interface for local volumes.
type localVolumeUnmapper struct {
	*localVolume
}

var _ volume.BlockVolumeUnmapper = &localVolumeUnmapper{}

// TearDownDevice undoes the SetUpDevice procedure. For a local PV, all of this is already handled by operation_generator.
func (u *localVolumeUnmapper) TearDownDevice(mapPath, devicePath string) error {
	glog.V(4).Infof("local: TearDownDevice completed for: %s", mapPath)
	return nil
}

// GetGlobalMapPath returns the global map path and error.
// path: plugins/kubernetes.io/kubernetes.io/local-volume/volumeDevices/{volumeName}
func (lv *localVolume) GetGlobalMapPath(spec *volume.Spec) (string, error) {
	return path.Join(lv.plugin.host.GetVolumeDevicePluginDir(strings.EscapeQualifiedNameForDisk(localVolumePluginName)),
		lv.volName), nil
}

// GetPodDeviceMapPath returns the pod device map path and volume name.
// path: pods/{podUid}/volumeDevices/kubernetes.io~local-volume
// volName: local-pv-ff0d6d4
func (lv *localVolume) GetPodDeviceMapPath() (string, string) {
	return lv.plugin.host.GetPodVolumeDeviceDir(lv.podUID,
		strings.EscapeQualifiedNameForDisk(localVolumePluginName)), lv.volName
}

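Putting the two map-path helpers together, an editor's sketch: the kubelet root below is an assumption standing in for the volume host, and the leaf layout mirrors the doc comments above and the testGlobalPath/testPodPath constants in local_test.go below.

package main

import (
	"fmt"
	"path"
)

func main() {
	base := "/var/lib/kubelet" // stand-in for the root reported by the volume host
	// Shapes taken from the GetGlobalMapPath/GetPodDeviceMapPath comments
	// and the test constants; "pvA" and "poduid" are the test fixtures.
	globalMapPath := path.Join(base, "plugins/kubernetes.io~local-volume/volumeDevices", "pvA")
	podDeviceMapPath := path.Join(base, "pods/poduid/volumeDevices/kubernetes.io~local-volume")
	fmt.Println(globalMapPath)
	fmt.Println(podDeviceMapPath)
}
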
149
vendor/k8s.io/kubernetes/pkg/volume/local/local_test.go
generated
vendored
@ -1,3 +1,5 @@
// +build linux darwin

/*
Copyright 2017 The Kubernetes Authors.

@ -32,9 +34,11 @@ import (
)

const (
	testPVName = "pvA"
	testMountPath = "pods/poduid/volumes/kubernetes.io~local-volume/pvA"
	testNodeName = "fakeNodeName"
	testPVName     = "pvA"
	testMountPath  = "pods/poduid/volumes/kubernetes.io~local-volume/pvA"
	testGlobalPath = "plugins/kubernetes.io~local-volume/volumeDevices/pvA"
	testPodPath    = "pods/poduid/volumeDevices/kubernetes.io~local-volume"
	testNodeName   = "fakeNodeName"
)

func getPlugin(t *testing.T) (string, volume.VolumePlugin) {
@ -57,6 +61,25 @@ func getPlugin(t *testing.T) (string, volume.VolumePlugin) {
	return tmpDir, plug
}

func getBlockPlugin(t *testing.T) (string, volume.BlockVolumePlugin) {
	tmpDir, err := utiltesting.MkTmpdir("localVolumeTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
	plug, err := plugMgr.FindMapperPluginByName(localVolumePluginName)
	if err != nil {
		os.RemoveAll(tmpDir)
		t.Fatalf("Can't find the plugin by name: %q", localVolumePluginName)
	}
	if plug.GetPluginName() != localVolumePluginName {
		t.Errorf("Wrong name: %s", plug.GetPluginName())
	}
	return tmpDir, plug
}

func getPersistentPlugin(t *testing.T) (string, volume.PersistentVolumePlugin) {
	tmpDir, err := utiltesting.MkTmpdir("localVolumeTest")
	if err != nil {
@ -77,7 +100,7 @@ func getPersistentPlugin(t *testing.T) (string, volume.PersistentVolumePlugin) {
	return tmpDir, plug
}

func getTestVolume(readOnly bool, path string) *volume.Spec {
func getTestVolume(readOnly bool, path string, isBlock bool) *volume.Spec {
	pv := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name: testPVName,
@ -90,6 +113,11 @@ func getTestVolume(readOnly bool, path string) *volume.Spec {
			},
		},
	}

	if isBlock {
		blockMode := v1.PersistentVolumeBlock
		pv.Spec.VolumeMode = &blockMode
	}
	return volume.NewSpecFromPersistentVolume(pv, readOnly)
}

@ -97,7 +125,7 @@ func TestCanSupport(t *testing.T) {
	tmpDir, plug := getPlugin(t)
	defer os.RemoveAll(tmpDir)

	if !plug.CanSupport(getTestVolume(false, tmpDir)) {
	if !plug.CanSupport(getTestVolume(false, tmpDir, false)) {
		t.Errorf("Expected true")
	}
}
@ -123,7 +151,7 @@ func TestGetVolumeName(t *testing.T) {
	tmpDir, plug := getPersistentPlugin(t)
	defer os.RemoveAll(tmpDir)

	volName, err := plug.GetVolumeName(getTestVolume(false, tmpDir))
	volName, err := plug.GetVolumeName(getTestVolume(false, tmpDir, false))
	if err != nil {
		t.Errorf("Failed to get volume name: %v", err)
	}
@ -137,7 +165,7 @@ func TestInvalidLocalPath(t *testing.T) {
	defer os.RemoveAll(tmpDir)

	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
	mounter, err := plug.NewMounter(getTestVolume(false, "/no/backsteps/allowed/.."), pod, volume.VolumeOptions{})
	mounter, err := plug.NewMounter(getTestVolume(false, "/no/backsteps/allowed/..", false), pod, volume.VolumeOptions{})
	if err != nil {
		t.Fatal(err)
	}
@ -154,7 +182,7 @@ func TestMountUnmount(t *testing.T) {
	defer os.RemoveAll(tmpDir)

	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
	mounter, err := plug.NewMounter(getTestVolume(false, tmpDir), pod, volume.VolumeOptions{})
	mounter, err := plug.NewMounter(getTestVolume(false, tmpDir, false), pod, volume.VolumeOptions{})
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
@ -197,8 +225,64 @@ func TestMountUnmount(t *testing.T) {
	}
}

// TestMapUnmap tests block map and unmap interfaces.
func TestMapUnmap(t *testing.T) {
	tmpDir, plug := getBlockPlugin(t)
	defer os.RemoveAll(tmpDir)

	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
	volSpec := getTestVolume(false, tmpDir, true /*isBlock*/)
	mapper, err := plug.NewBlockVolumeMapper(volSpec, pod, volume.VolumeOptions{})
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
	if mapper == nil {
		t.Fatalf("Got a nil Mounter")
	}

	expectedGlobalPath := path.Join(tmpDir, testGlobalPath)
	globalPath, err := mapper.GetGlobalMapPath(volSpec)
	if err != nil {
		t.Errorf("Failed to get global path: %v", err)
	}
	if globalPath != expectedGlobalPath {
		t.Errorf("Got unexpected path: %s, expected %s", globalPath, expectedGlobalPath)
	}
	expectedPodPath := path.Join(tmpDir, testPodPath)
	podPath, volName := mapper.GetPodDeviceMapPath()
	if podPath != expectedPodPath {
		t.Errorf("Got unexpected pod path: %s, expected %s", podPath, expectedPodPath)
	}
	if volName != testPVName {
		t.Errorf("Got unexpected volName: %s, expected %s", volName, testPVName)
	}
	devPath, err := mapper.SetUpDevice()
	if err != nil {
		t.Errorf("Failed to SetUpDevice, err: %v", err)
	}
	if _, err := os.Stat(devPath); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUpDevice() failed, volume path not created: %s", devPath)
		} else {
			t.Errorf("SetUpDevice() failed: %v", err)
		}
	}

	unmapper, err := plug.NewBlockVolumeUnmapper(testPVName, pod.UID)
	if err != nil {
		t.Fatalf("Failed to make a new Unmapper: %v", err)
	}
	if unmapper == nil {
		t.Fatalf("Got a nil Unmapper")
	}

	if err := unmapper.TearDownDevice(globalPath, devPath); err != nil {
		t.Errorf("TearDownDevice failed, err: %v", err)
	}
}

func testFSGroupMount(plug volume.VolumePlugin, pod *v1.Pod, tmpDir string, fsGroup int64) error {
	mounter, err := plug.NewMounter(getTestVolume(false, tmpDir), pod, volume.VolumeOptions{})
	mounter, err := plug.NewMounter(getTestVolume(false, tmpDir, false), pod, volume.VolumeOptions{})
	if err != nil {
		return err
	}
@ -290,13 +374,54 @@ func TestConstructVolumeSpec(t *testing.T) {
	}
}

func TestConstructBlockVolumeSpec(t *testing.T) {
	tmpDir, plug := getBlockPlugin(t)
	defer os.RemoveAll(tmpDir)

	podPath := path.Join(tmpDir, testPodPath)
	spec, err := plug.ConstructBlockVolumeSpec(types.UID("poduid"), testPVName, podPath)
	if err != nil {
		t.Errorf("ConstructBlockVolumeSpec() failed: %v", err)
	}
	if spec == nil {
		t.Fatalf("ConstructBlockVolumeSpec() returned nil")
	}

	volName := spec.Name()
	if volName != testPVName {
		t.Errorf("Expected volume name %q, got %q", testPVName, volName)
	}

	if spec.Volume != nil {
		t.Errorf("Volume object returned, expected nil")
	}

	pv := spec.PersistentVolume
	if pv == nil {
		t.Fatalf("PersistentVolume object nil")
	}

	if spec.PersistentVolume.Spec.VolumeMode == nil {
		t.Fatalf("Volume mode has not been set.")
	}

	if *spec.PersistentVolume.Spec.VolumeMode != v1.PersistentVolumeBlock {
		t.Errorf("Unexpected volume mode %q", *spec.PersistentVolume.Spec.VolumeMode)
	}

	ls := pv.Spec.PersistentVolumeSource.Local
	if ls == nil {
		t.Fatalf("LocalVolumeSource object nil")
	}
}

func TestPersistentClaimReadOnlyFlag(t *testing.T) {
	tmpDir, plug := getPlugin(t)
	defer os.RemoveAll(tmpDir)

	// Read only == true
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
	mounter, err := plug.NewMounter(getTestVolume(true, tmpDir), pod, volume.VolumeOptions{})
	mounter, err := plug.NewMounter(getTestVolume(true, tmpDir, false), pod, volume.VolumeOptions{})
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
@ -308,7 +433,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
	}

	// Read only == false
	mounter, err = plug.NewMounter(getTestVolume(false, tmpDir), pod, volume.VolumeOptions{})
	mounter, err = plug.NewMounter(getTestVolume(false, tmpDir, false), pod, volume.VolumeOptions{})
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
@ -329,7 +454,7 @@ func TestUnsupportedPlugins(t *testing.T) {

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
	spec := getTestVolume(false, tmpDir)
	spec := getTestVolume(false, tmpDir, false)

	recyclePlug, err := plugMgr.FindRecyclablePluginBySpec(spec)
	if err == nil && recyclePlug != nil {
8
vendor/k8s.io/kubernetes/pkg/volume/metrics_du.go
generated
vendored
@ -19,7 +19,7 @@ package volume
import (
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/volume/util"
	"k8s.io/kubernetes/pkg/volume/util/fs"
)

var _ MetricsProvider = &metricsDu{}
@ -66,7 +66,7 @@ func (md *metricsDu) GetMetrics() (*Metrics, error) {

// runDu executes the "du" command and writes the results to metrics.Used
func (md *metricsDu) runDu(metrics *Metrics) error {
	used, err := util.Du(md.path)
	used, err := fs.Du(md.path)
	if err != nil {
		return err
	}
@ -76,7 +76,7 @@ func (md *metricsDu) runDu(metrics *Metrics) error {

// runFind executes the "find" command and writes the results to metrics.InodesUsed
func (md *metricsDu) runFind(metrics *Metrics) error {
	inodesUsed, err := util.Find(md.path)
	inodesUsed, err := fs.Find(md.path)
	if err != nil {
		return err
	}
@ -87,7 +87,7 @@ func (md *metricsDu) runFind(metrics *Metrics) error {
// getFsInfo writes metrics.Capacity and metrics.Available from the filesystem
// info
func (md *metricsDu) getFsInfo(metrics *Metrics) error {
	available, capacity, _, inodes, inodesFree, _, err := util.FsInfo(md.path)
	available, capacity, _, inodes, inodesFree, _, err := fs.FsInfo(md.path)
	if err != nil {
		return NewFsInfoFailedError(err)
	}

4
vendor/k8s.io/kubernetes/pkg/volume/metrics_statfs.go
generated
vendored
@ -19,7 +19,7 @@ package volume
import (
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/volume/util"
	"k8s.io/kubernetes/pkg/volume/util/fs"
)

var _ MetricsProvider = &metricsStatFS{}
@ -55,7 +55,7 @@ func (md *metricsStatFS) GetMetrics() (*Metrics, error) {

// getFsInfo writes metrics.Capacity, metrics.Used and metrics.Available from the filesystem info
func (md *metricsStatFS) getFsInfo(metrics *Metrics) error {
	available, capacity, usage, inodes, inodesFree, inodesUsed, err := util.FsInfo(md.path)
	available, capacity, usage, inodes, inodesFree, inodesUsed, err := fs.FsInfo(md.path)
	if err != nil {
		return NewFsInfoFailedError(err)
	}

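Both metrics implementations funnel through fs.FsInfo, whose seven results are unpacked in the getFsInfo methods above; metricsDu fills Used and InodesUsed by running du and find instead. A minimal consumer sketch follows (editor's illustration: the path is an assumption, and the snippet builds only inside a tree that vendors this package).

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/volume/util/fs"
)

func main() {
	// Same seven results unpacked by the getFsInfo methods above.
	available, capacity, usage, inodes, inodesFree, inodesUsed, err := fs.FsInfo("/var/lib/kubelet")
	if err != nil {
		fmt.Println("statfs failed:", err)
		return
	}
	fmt.Printf("bytes: capacity=%v used=%v available=%v\n", capacity, usage, available)
	fmt.Printf("inodes: total=%v free=%v used=%v\n", inodes, inodesFree, inodesUsed)
}
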