Fresh dep ensure

Mike Cronce
2018-11-26 13:23:56 -05:00
parent 93cb8a04d7
commit 407478ab9a
9016 changed files with 551394 additions and 279685 deletions


@ -9,93 +9,48 @@ go_library(
"metrics_errors.go",
"metrics_nil.go",
"metrics_statfs.go",
"noop_expandable_plugin.go",
"plugins.go",
"volume.go",
] + select({
"@io_bazel_rules_go//go/platform:android": [
"volume_unsupported.go",
],
"@io_bazel_rules_go//go/platform:darwin": [
"volume_unsupported.go",
],
"@io_bazel_rules_go//go/platform:dragonfly": [
"volume_unsupported.go",
],
"@io_bazel_rules_go//go/platform:freebsd": [
"volume_unsupported.go",
],
"@io_bazel_rules_go//go/platform:linux": [
"volume_linux.go",
],
"@io_bazel_rules_go//go/platform:nacl": [
"volume_unsupported.go",
],
"@io_bazel_rules_go//go/platform:netbsd": [
"volume_unsupported.go",
],
"@io_bazel_rules_go//go/platform:openbsd": [
"volume_unsupported.go",
],
"@io_bazel_rules_go//go/platform:plan9": [
"volume_unsupported.go",
],
"@io_bazel_rules_go//go/platform:solaris": [
"volume_unsupported.go",
],
"@io_bazel_rules_go//go/platform:windows": [
"volume_unsupported.go",
],
"//conditions:default": [],
}),
"volume_linux.go",
"volume_unsupported.go",
],
importpath = "k8s.io/kubernetes/pkg/volume",
visibility = ["//visibility:public"],
deps = [
"//pkg/cloudprovider:go_default_library",
"//pkg/util/io:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/volume/util/fs:go_default_library",
"//pkg/volume/util/recyclerclient:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/authentication/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/api/authentication/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/validation:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/cloud-provider:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"metrics_du_test.go",
"metrics_nil_test.go",
"metrics_statfs_test.go",
"plugins_test.go",
],
embed = [":go_default_library"],
deps = [
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
],
)
go_test(
name = "go_default_xtest",
srcs = [
"metrics_statfs_test.go",
] + select({
"@io_bazel_rules_go//go/platform:linux": [
"metrics_du_test.go",
],
"//conditions:default": [],
}),
deps = [
":go_default_library",
"//pkg/volume/testing:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
] + select({
"@io_bazel_rules_go//go/platform:linux": [
"//vendor/golang.org/x/sys/unix:go_default_library",
@ -115,7 +70,7 @@ filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/volume/aws_ebs:all-srcs",
"//pkg/volume/awsebs:all-srcs",
"//pkg/volume/azure_dd:all-srcs",
"//pkg/volume/azure_file:all-srcs",
"//pkg/volume/cephfs:all-srcs",
@ -123,11 +78,11 @@ filegroup(
"//pkg/volume/configmap:all-srcs",
"//pkg/volume/csi:all-srcs",
"//pkg/volume/downwardapi:all-srcs",
"//pkg/volume/empty_dir:all-srcs",
"//pkg/volume/emptydir:all-srcs",
"//pkg/volume/fc:all-srcs",
"//pkg/volume/flexvolume:all-srcs",
"//pkg/volume/flocker:all-srcs",
"//pkg/volume/gce_pd:all-srcs",
"//pkg/volume/gcepd:all-srcs",
"//pkg/volume/git_repo:all-srcs",
"//pkg/volume/glusterfs:all-srcs",
"//pkg/volume/host_path:all-srcs",


@ -15,3 +15,5 @@ reviewers:
- gnufied
- verult
- davidz627
labels:
- sig/storage


@ -15,9 +15,8 @@ go_library(
"aws_util.go",
"doc.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/aws_ebs",
importpath = "k8s.io/kubernetes/pkg/volume/awsebs",
deps = [
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/aws:go_default_library",
"//pkg/features:go_default_library",
"//pkg/util/mount:go_default_library",
@ -25,12 +24,14 @@ go_library(
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/volumepathhandler:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/cloud-provider:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
@ -47,13 +48,13 @@ go_test(
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)


@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package aws_ebs
package awsebs
import (
"fmt"
@ -23,7 +23,8 @@ import (
"strconv"
"time"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
@ -39,8 +40,12 @@ type awsElasticBlockStoreAttacher struct {
var _ volume.Attacher = &awsElasticBlockStoreAttacher{}
var _ volume.DeviceMounter = &awsElasticBlockStoreAttacher{}
var _ volume.AttachableVolumePlugin = &awsElasticBlockStorePlugin{}
var _ volume.DeviceMountableVolumePlugin = &awsElasticBlockStorePlugin{}
func (plugin *awsElasticBlockStorePlugin) NewAttacher() (volume.Attacher, error) {
awsCloud, err := getCloudProvider(plugin.host.GetCloudProvider())
if err != nil {
@ -53,9 +58,13 @@ func (plugin *awsElasticBlockStorePlugin) NewAttacher() (volume.Attacher, error)
}, nil
}
func (plugin *awsElasticBlockStorePlugin) NewDeviceMounter() (volume.DeviceMounter, error) {
return plugin.NewAttacher()
}
func (plugin *awsElasticBlockStorePlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
mounter := plugin.host.GetMounter(plugin.GetPluginName())
return mount.GetMountRefs(mounter, deviceMountPath)
return mounter.GetMountRefs(deviceMountPath)
}
func (attacher *awsElasticBlockStoreAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
@ -70,7 +79,7 @@ func (attacher *awsElasticBlockStoreAttacher) Attach(spec *volume.Spec, nodeName
// succeeds in that case, so no need to do that separately.
devicePath, err := attacher.awsVolumes.AttachDisk(volumeID, nodeName)
if err != nil {
glog.Errorf("Error attaching volume %q to node %q: %+v", volumeID, nodeName, err)
klog.Errorf("Error attaching volume %q to node %q: %+v", volumeID, nodeName, err)
return "", err
}
@ -79,14 +88,14 @@ func (attacher *awsElasticBlockStoreAttacher) Attach(spec *volume.Spec, nodeName
func (attacher *awsElasticBlockStoreAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
glog.Warningf("Attacher.VolumesAreAttached called for node %q - Please use BulkVerifyVolumes for AWS", nodeName)
klog.Warningf("Attacher.VolumesAreAttached called for node %q - Please use BulkVerifyVolumes for AWS", nodeName)
volumeNodeMap := map[types.NodeName][]*volume.Spec{
nodeName: specs,
}
nodeVolumesResult := make(map[*volume.Spec]bool)
nodesVerificationMap, err := attacher.BulkVerifyVolumes(volumeNodeMap)
if err != nil {
glog.Errorf("Attacher.VolumesAreAttached - error checking volumes for node %q with %v", nodeName, err)
klog.Errorf("Attacher.VolumesAreAttached - error checking volumes for node %q with %v", nodeName, err)
return nodeVolumesResult, err
}
@ -106,7 +115,7 @@ func (attacher *awsElasticBlockStoreAttacher) BulkVerifyVolumes(volumesByNode ma
volumeSource, _, err := getVolumeSource(volumeSpec)
if err != nil {
glog.Errorf("Error getting volume (%q) source : %v", volumeSpec.Name(), err)
klog.Errorf("Error getting volume (%q) source : %v", volumeSpec.Name(), err)
continue
}
@ -126,7 +135,7 @@ func (attacher *awsElasticBlockStoreAttacher) BulkVerifyVolumes(volumesByNode ma
attachedResult, err := attacher.awsVolumes.DisksAreAttached(diskNamesByNode)
if err != nil {
glog.Errorf("Error checking if volumes are attached to nodes err = %v", err)
klog.Errorf("Error checking if volumes are attached to nodes err = %v", err)
return volumesAttachedCheck, err
}
@ -155,7 +164,7 @@ func (attacher *awsElasticBlockStoreAttacher) WaitForAttach(spec *volume.Spec, d
}
if devicePath == "" {
return "", fmt.Errorf("WaitForAttach failed for AWS Volume %q: devicePath is empty.", volumeID)
return "", fmt.Errorf("waitForAttach failed for AWS Volume %q: devicePath is empty", volumeID)
}
ticker := time.NewTicker(checkSleepDuration)
@ -166,19 +175,19 @@ func (attacher *awsElasticBlockStoreAttacher) WaitForAttach(spec *volume.Spec, d
for {
select {
case <-ticker.C:
glog.V(5).Infof("Checking AWS Volume %q is attached.", volumeID)
devicePaths := getDiskByIdPaths(aws.KubernetesVolumeID(volumeSource.VolumeID), partition, devicePath)
klog.V(5).Infof("Checking AWS Volume %q is attached.", volumeID)
devicePaths := getDiskByIDPaths(aws.KubernetesVolumeID(volumeSource.VolumeID), partition, devicePath)
path, err := verifyDevicePath(devicePaths)
if err != nil {
// Log error, if any, and continue checking periodically. See issue #11321
glog.Errorf("Error verifying AWS Volume (%q) is attached: %v", volumeID, err)
klog.Errorf("Error verifying AWS Volume (%q) is attached: %v", volumeID, err)
} else if path != "" {
// A device path has successfully been created for the PD
glog.Infof("Successfully found attached AWS Volume %q.", volumeID)
klog.Infof("Successfully found attached AWS Volume %q.", volumeID)
return path, nil
}
case <-timer.C:
return "", fmt.Errorf("Could not find attached AWS Volume %q. Timeout waiting for mount paths to be created.", volumeID)
return "", fmt.Errorf("could not find attached AWS Volume %q. Timeout waiting for mount paths to be created", volumeID)
}
}
}
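// Illustrative timing (assumption, not part of this commit): with
// checkSleepDuration = time.Second, the select loop above re-probes the
// candidate by-id device paths once per second until one resolves or the
// caller-supplied timeout fires, e.g. a 2*time.Minute timeout allows
// roughly 120 probes before WaitForAttach gives up.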
@ -236,6 +245,8 @@ type awsElasticBlockStoreDetacher struct {
var _ volume.Detacher = &awsElasticBlockStoreDetacher{}
var _ volume.DeviceUnmounter = &awsElasticBlockStoreDetacher{}
func (plugin *awsElasticBlockStorePlugin) NewDetacher() (volume.Detacher, error) {
awsCloud, err := getCloudProvider(plugin.host.GetCloudProvider())
if err != nil {
@ -248,11 +259,15 @@ func (plugin *awsElasticBlockStorePlugin) NewDetacher() (volume.Detacher, error)
}, nil
}
func (plugin *awsElasticBlockStorePlugin) NewDeviceUnmounter() (volume.DeviceUnmounter, error) {
return plugin.NewDetacher()
}
func (detacher *awsElasticBlockStoreDetacher) Detach(volumeName string, nodeName types.NodeName) error {
volumeID := aws.KubernetesVolumeID(path.Base(volumeName))
if _, err := detacher.awsVolumes.DetachDisk(volumeID, nodeName); err != nil {
glog.Errorf("Error detaching volumeID %q: %v", volumeID, err)
klog.Errorf("Error detaching volumeID %q: %v", volumeID, err)
return err
}
return nil


@ -14,20 +14,20 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package aws_ebs
package awsebs
import (
"errors"
"testing"
"k8s.io/klog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
)
func TestGetVolumeName_Volume(t *testing.T) {
@ -219,7 +219,7 @@ func (testcase *testcase) AttachDisk(diskName aws.KubernetesVolumeID, nodeName t
// testcase.attach looks uninitialized, test did not expect to call
// AttachDisk
testcase.t.Errorf("Unexpected AttachDisk call!")
return "", errors.New("Unexpected AttachDisk call!")
return "", errors.New("unexpected AttachDisk call")
}
if expected.diskName != diskName {
@ -232,7 +232,7 @@ func (testcase *testcase) AttachDisk(diskName aws.KubernetesVolumeID, nodeName t
return "", errors.New("Unexpected AttachDisk call: wrong nodeName")
}
glog.V(4).Infof("AttachDisk call: %s, %s, returning %q, %v", diskName, nodeName, expected.retDeviceName, expected.ret)
klog.V(4).Infof("AttachDisk call: %s, %s, returning %q, %v", diskName, nodeName, expected.retDeviceName, expected.ret)
return expected.retDeviceName, expected.ret
}
@ -244,7 +244,7 @@ func (testcase *testcase) DetachDisk(diskName aws.KubernetesVolumeID, nodeName t
// testcase.detach looks uninitialized, test did not expect to call
// DetachDisk
testcase.t.Errorf("Unexpected DetachDisk call!")
return "", errors.New("Unexpected DetachDisk call!")
return "", errors.New("unexpected DetachDisk call")
}
if expected.diskName != diskName {
@ -257,7 +257,7 @@ func (testcase *testcase) DetachDisk(diskName aws.KubernetesVolumeID, nodeName t
return "", errors.New("Unexpected DetachDisk call: wrong nodeName")
}
glog.V(4).Infof("DetachDisk call: %s, %s, returning %q, %v", diskName, nodeName, expected.retDeviceName, expected.ret)
klog.V(4).Infof("DetachDisk call: %s, %s, returning %q, %v", diskName, nodeName, expected.retDeviceName, expected.ret)
return expected.retDeviceName, expected.ret
}


@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package aws_ebs
package awsebs
import (
"context"
@ -25,7 +25,8 @@ import (
"strconv"
"strings"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -39,7 +40,7 @@ import (
"k8s.io/kubernetes/pkg/volume/util"
)
// This is the primary entrypoint for volume plugins.
// ProbeVolumePlugins is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&awsElasticBlockStorePlugin{nil}}
}
@ -99,7 +100,7 @@ func (plugin *awsElasticBlockStorePlugin) SupportsBulkVolumeVerification() bool
func (plugin *awsElasticBlockStorePlugin) GetVolumeLimits() (map[string]int64, error) {
volumeLimits := map[string]int64{
util.EBSVolumeLimitKey: 39,
util.EBSVolumeLimitKey: util.DefaultMaxEBSVolumes,
}
cloud := plugin.host.GetCloudProvider()
@ -117,18 +118,18 @@ func (plugin *awsElasticBlockStorePlugin) GetVolumeLimits() (map[string]int64, e
instances, ok := cloud.Instances()
if !ok {
glog.V(3).Infof("Failed to get instances from cloud provider")
klog.V(3).Infof("Failed to get instances from cloud provider")
return volumeLimits, nil
}
instanceType, err := instances.InstanceType(context.TODO(), plugin.host.GetNodeName())
if err != nil {
glog.Errorf("Failed to get instance type from AWS cloud provider")
klog.Errorf("Failed to get instance type from AWS cloud provider")
return volumeLimits, nil
}
if ok, _ := regexp.MatchString("^[cm]5.*", instanceType); ok {
volumeLimits[util.EBSVolumeLimitKey] = 25
if ok, _ := regexp.MatchString(util.EBSNitroLimitRegex, instanceType); ok {
volumeLimits[util.EBSVolumeLimitKey] = util.DefaultMaxEBSNitroVolumeLimit
}
return volumeLimits, nil
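// Illustrative effect (assumption, not part of this commit): for instance
// types matching util.EBSNitroLimitRegex (e.g. m5.large), the advertised
// attach limit drops from util.DefaultMaxEBSVolumes (39, the literal this
// change replaces) to util.DefaultMaxEBSNitroVolumeLimit (25).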
@ -175,9 +176,11 @@ func (plugin *awsElasticBlockStorePlugin) newMounterInternal(spec *volume.Spec,
plugin: plugin,
MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, spec.Name(), plugin.host)),
},
fsType: fsType,
readOnly: readOnly,
diskMounter: util.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil
fsType: fsType,
readOnly: readOnly,
diskMounter: util.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host),
mountOptions: util.MountOptionFromSpec(spec),
}, nil
}
func (plugin *awsElasticBlockStorePlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
@ -202,7 +205,7 @@ func (plugin *awsElasticBlockStorePlugin) NewDeleter(spec *volume.Spec) (volume.
func (plugin *awsElasticBlockStorePlugin) newDeleterInternal(spec *volume.Spec, manager ebsManager) (volume.Deleter, error) {
if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AWSElasticBlockStore == nil {
glog.Errorf("spec.PersistentVolumeSource.AWSElasticBlockStore is nil")
klog.Errorf("spec.PersistentVolumeSource.AWSElasticBlockStore is nil")
return nil, fmt.Errorf("spec.PersistentVolumeSource.AWSElasticBlockStore is nil")
}
return &awsElasticBlockStoreDeleter{
@ -270,7 +273,7 @@ func (plugin *awsElasticBlockStorePlugin) ConstructVolumeSpec(volName, mountPath
if length == 3 {
sourceName = awsURLNamePrefix + names[1] + "/" + volName // names[1] is the zone label
}
glog.V(4).Infof("Convert aws volume name from %q to %q ", volumeID, sourceName)
klog.V(4).Infof("Convert aws volume name from %q to %q ", volumeID, sourceName)
}
awsVolume := &v1.Volume{
@ -309,12 +312,18 @@ func (plugin *awsElasticBlockStorePlugin) ExpandVolumeDevice(
return awsVolume.ResizeDisk(volumeID, oldSize, newSize)
}
func (plugin *awsElasticBlockStorePlugin) ExpandFS(spec *volume.Spec, devicePath, deviceMountPath string, _, _ resource.Quantity) error {
_, err := util.GenericResizeFS(plugin.host, plugin.GetPluginName(), devicePath, deviceMountPath)
return err
}
var _ volume.FSResizableVolumePlugin = &awsElasticBlockStorePlugin{}
var _ volume.ExpandableVolumePlugin = &awsElasticBlockStorePlugin{}
var _ volume.VolumePluginWithAttachLimits = &awsElasticBlockStorePlugin{}
// Abstract interface to PD operations.
type ebsManager interface {
CreateVolume(provisioner *awsElasticBlockStoreProvisioner) (volumeID aws.KubernetesVolumeID, volumeSizeGB int, labels map[string]string, fstype string, err error)
CreateVolume(provisioner *awsElasticBlockStoreProvisioner, node *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (volumeID aws.KubernetesVolumeID, volumeSizeGB int, labels map[string]string, fstype string, err error)
// Deletes a volume
DeleteVolume(deleter *awsElasticBlockStoreDeleter) error
}
@ -343,7 +352,8 @@ type awsElasticBlockStoreMounter struct {
// Specifies whether the disk will be attached as read-only.
readOnly bool
// diskMounter provides the interface that is used to mount the actual block device.
diskMounter *mount.SafeFormatAndMount
diskMounter *mount.SafeFormatAndMount
mountOptions []string
}
var _ volume.Mounter = &awsElasticBlockStoreMounter{}
@ -372,9 +382,9 @@ func (b *awsElasticBlockStoreMounter) SetUp(fsGroup *int64) error {
func (b *awsElasticBlockStoreMounter) SetUpAt(dir string, fsGroup *int64) error {
// TODO: handle failed mounts here.
notMnt, err := b.mounter.IsLikelyNotMountPoint(dir)
glog.V(4).Infof("PersistentDisk set up: %s %v %v", dir, !notMnt, err)
klog.V(4).Infof("PersistentDisk set up: %s %v %v", dir, !notMnt, err)
if err != nil && !os.IsNotExist(err) {
glog.Errorf("cannot validate mount point: %s %v", dir, err)
klog.Errorf("cannot validate mount point: %s %v", dir, err)
return err
}
if !notMnt {
@ -392,31 +402,32 @@ func (b *awsElasticBlockStoreMounter) SetUpAt(dir string, fsGroup *int64) error
if b.readOnly {
options = append(options, "ro")
}
err = b.mounter.Mount(globalPDPath, dir, "", options)
mountOptions := util.JoinMountOptions(options, b.mountOptions)
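// Illustrative merge (assumption, not part of this commit): plugin options
// such as ["bind", "ro"] joined with PV-supplied mountOptions ["_netdev"]
// yield the de-duplicated union {"_netdev", "bind", "ro"}.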
err = b.mounter.Mount(globalPDPath, dir, "", mountOptions)
if err != nil {
notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed for %s: %v", dir, mntErr)
klog.Errorf("IsLikelyNotMountPoint check failed for %s: %v", dir, mntErr)
return err
}
if !notMnt {
if mntErr = b.mounter.Unmount(dir); mntErr != nil {
glog.Errorf("failed to unmount %s: %v", dir, mntErr)
klog.Errorf("failed to unmount %s: %v", dir, mntErr)
return err
}
notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed for %s: %v", dir, mntErr)
klog.Errorf("IsLikelyNotMountPoint check failed for %s: %v", dir, mntErr)
return err
}
if !notMnt {
// This is very odd, we don't expect it. We'll try again next sync loop.
glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir)
klog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir)
return err
}
}
os.Remove(dir)
glog.Errorf("Mount of disk %s failed: %v", dir, err)
klog.Errorf("Mount of disk %s failed: %v", dir, err)
return err
}
@ -424,7 +435,7 @@ func (b *awsElasticBlockStoreMounter) SetUpAt(dir string, fsGroup *int64) error
volume.SetVolumeOwnership(b, fsGroup)
}
glog.V(4).Infof("Successfully mounted %s", dir)
klog.V(4).Infof("Successfully mounted %s", dir)
return nil
}
@ -440,11 +451,11 @@ func getVolumeIDFromGlobalMount(host volume.VolumeHost, globalPath string) (stri
basePath := filepath.Join(host.GetPluginDir(awsElasticBlockStorePluginName), mount.MountsInGlobalPDPath)
rel, err := filepath.Rel(basePath, globalPath)
if err != nil {
glog.Errorf("Failed to get volume id from global mount %s - %v", globalPath, err)
klog.Errorf("Failed to get volume id from global mount %s - %v", globalPath, err)
return "", err
}
if strings.Contains(rel, "../") {
glog.Errorf("Unexpected mount path: %s", globalPath)
klog.Errorf("Unexpected mount path: %s", globalPath)
return "", fmt.Errorf("unexpected mount path: " + globalPath)
}
// Reverse the :// replacement done in makeGlobalPDPath
@ -452,7 +463,7 @@ func getVolumeIDFromGlobalMount(host volume.VolumeHost, globalPath string) (stri
if strings.HasPrefix(volumeID, "aws/") {
volumeID = strings.Replace(volumeID, "aws/", "aws://", 1)
}
glog.V(2).Info("Mapping mount dir ", globalPath, " to volumeID ", volumeID)
klog.V(2).Info("Mapping mount dir ", globalPath, " to volumeID ", volumeID)
return volumeID, nil
}
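// Illustrative mapping (hypothetical path, not part of this commit): a
// global mount dir such as
//   <plugins-dir>/kubernetes.io/aws-ebs/mounts/aws/us-east-1d/vol-0abc123
// maps back to the volumeID "aws://us-east-1d/vol-0abc123", reversing the
// "://" substitution made when the path was created.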
@ -504,9 +515,9 @@ func (c *awsElasticBlockStoreProvisioner) Provision(selectedNode *v1.Node, allow
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
}
volumeID, sizeGB, labels, fstype, err := c.manager.CreateVolume(c)
volumeID, sizeGB, labels, fstype, err := c.manager.CreateVolume(c, selectedNode, allowedTopologies)
if err != nil {
glog.Errorf("Provision failed: %v", err)
klog.Errorf("Provision failed: %v", err)
return nil, err
}
@ -514,6 +525,15 @@ func (c *awsElasticBlockStoreProvisioner) Provision(selectedNode *v1.Node, allow
fstype = "ext4"
}
var volumeMode *v1.PersistentVolumeMode
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
volumeMode = c.options.PVC.Spec.VolumeMode
if volumeMode != nil && *volumeMode == v1.PersistentVolumeBlock {
// Block volumes should not have any FSType
fstype = ""
}
}
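// Illustrative effect (assumption, not part of this commit): with the
// BlockVolume feature gate enabled and PVC.Spec.VolumeMode ==
// v1.PersistentVolumeBlock, the provisioned PV carries VolumeMode=Block
// and an empty FSType, since a raw block device is handed to the pod
// unformatted.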
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: c.options.PVName,
@ -528,6 +548,7 @@ func (c *awsElasticBlockStoreProvisioner) Provision(selectedNode *v1.Node, allow
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
},
VolumeMode: volumeMode,
PersistentVolumeSource: v1.PersistentVolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
VolumeID: string(volumeID),
@ -544,17 +565,22 @@ func (c *awsElasticBlockStoreProvisioner) Provision(selectedNode *v1.Node, allow
pv.Spec.AccessModes = c.plugin.GetAccessModes()
}
requirements := make([]v1.NodeSelectorRequirement, 0)
if len(labels) != 0 {
if pv.Labels == nil {
pv.Labels = make(map[string]string)
}
for k, v := range labels {
pv.Labels[k] = v
requirements = append(requirements, v1.NodeSelectorRequirement{Key: k, Operator: v1.NodeSelectorOpIn, Values: []string{v}})
}
}
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
pv.Spec.VolumeMode = c.options.PVC.Spec.VolumeMode
if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) {
pv.Spec.NodeAffinity = new(v1.VolumeNodeAffinity)
pv.Spec.NodeAffinity.Required = new(v1.NodeSelector)
pv.Spec.NodeAffinity.Required.NodeSelectorTerms = make([]v1.NodeSelectorTerm, 1)
pv.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions = requirements
}
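// Illustrative result (assumption, not part of this commit): a cloud
// zone label such as {"failure-domain.beta.kubernetes.io/zone":
// "us-east-1a"} becomes the single match expression
// {Key: <zone label>, Operator: In, Values: ["us-east-1a"]} under
// pv.Spec.NodeAffinity.Required.NodeSelectorTerms[0].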
return pv, nil


@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package aws_ebs
package awsebs
import (
"fmt"
@ -22,7 +22,8 @@ import (
"strconv"
"strings"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
@ -33,11 +34,7 @@ import (
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
)
var _ volume.VolumePlugin = &awsElasticBlockStorePlugin{}
var _ volume.PersistentVolumePlugin = &awsElasticBlockStorePlugin{}
var _ volume.BlockVolumePlugin = &awsElasticBlockStorePlugin{}
var _ volume.DeletableVolumePlugin = &awsElasticBlockStorePlugin{}
var _ volume.ProvisionableVolumePlugin = &awsElasticBlockStorePlugin{}
func (plugin *awsElasticBlockStorePlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, mapPath string) (*volume.Spec, error) {
pluginDir := plugin.host.GetVolumeDevicePluginDir(awsElasticBlockStorePluginName)
@ -46,7 +43,7 @@ func (plugin *awsElasticBlockStorePlugin) ConstructBlockVolumeSpec(podUID types.
if err != nil {
return nil, err
}
glog.V(5).Infof("globalMapPathUUID: %s", globalMapPathUUID)
klog.V(5).Infof("globalMapPathUUID: %s", globalMapPathUUID)
globalMapPath := filepath.Dir(globalMapPathUUID)
if len(globalMapPath) <= 1 {


@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package aws_ebs
package awsebs
import (
"os"


@ -14,12 +14,13 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package aws_ebs
package awsebs
import (
"fmt"
"os"
"path/filepath"
"reflect"
"testing"
"k8s.io/api/core/v1"
@ -84,7 +85,7 @@ type fakePDManager struct {
// TODO(jonesdl) To fully test this, we could create a loopback device
// and mount that instead.
func (fake *fakePDManager) CreateVolume(c *awsElasticBlockStoreProvisioner) (volumeID aws.KubernetesVolumeID, volumeSizeGB int, labels map[string]string, fstype string, err error) {
func (fake *fakePDManager) CreateVolume(c *awsElasticBlockStoreProvisioner, node *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (volumeID aws.KubernetesVolumeID, volumeSizeGB int, labels map[string]string, fstype string, err error) {
labels = make(map[string]string)
labels["fakepdmanager"] = "yes"
return "test-aws-volume-name", 100, labels, "", nil
@ -166,13 +167,14 @@ func TestPlugin(t *testing.T) {
// Test Provisioner
options := volume.VolumeOptions{
PVC: volumetest.CreateTestPVC("100Mi", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}),
PVC: volumetest.CreateTestPVC("100Mi", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}),
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
}
provisioner, err := plug.(*awsElasticBlockStorePlugin).newProvisionerInternal(options, &fakePDManager{})
if err != nil {
t.Errorf("Error creating new provisioner:%v", err)
}
persistentSpec, err := provisioner.Provision(nil, nil)
if err != nil {
t.Errorf("Provision() failed: %v", err)
@ -191,6 +193,37 @@ func TestPlugin(t *testing.T) {
t.Errorf("Provision() returned unexpected labels: %v", persistentSpec.Labels)
}
// check nodeaffinity members
if persistentSpec.Spec.NodeAffinity == nil {
t.Errorf("Provision() returned unexpected nil NodeAffinity")
}
if persistentSpec.Spec.NodeAffinity.Required == nil {
t.Errorf("Provision() returned unexpected nil NodeAffinity.Required")
}
n := len(persistentSpec.Spec.NodeAffinity.Required.NodeSelectorTerms)
if n != 1 {
t.Errorf("Provision() returned unexpected number of NodeSelectorTerms %d. Expected %d", n, 1)
}
n = len(persistentSpec.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions)
if n != 1 {
t.Errorf("Provision() returned unexpected number of MatchExpressions %d. Expected %d", n, 1)
}
req := persistentSpec.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions[0]
if req.Key != "fakepdmanager" {
t.Errorf("Provision() returned unexpected requirement key in NodeAffinity %v", req.Key)
}
if req.Operator != v1.NodeSelectorOpIn {
t.Errorf("Provision() returned unexpected requirement operator in NodeAffinity %v", req.Operator)
}
if len(req.Values) != 1 || req.Values[0] != "yes" {
t.Errorf("Provision() returned unexpected requirement value in NodeAffinity %v", req.Values)
}
// Test Deleter
volSpec := &volume.Spec{
PersistentVolume: persistentSpec,
@ -293,3 +326,53 @@ func TestMounterAndUnmounterTypeAssert(t *testing.T) {
t.Errorf("Volume Unmounter can be type-assert to Mounter")
}
}
func TestMountOptions(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("aws-ebs")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/aws-ebs")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "pvA",
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{},
},
ClaimRef: &v1.ObjectReference{
Name: "claimA",
},
MountOptions: []string{"_netdev"},
},
}
fakeManager := &fakePDManager{}
fakeMounter := &mount.FakeMounter{}
mounter, err := plug.(*awsElasticBlockStorePlugin).newMounterInternal(volume.NewSpecFromPersistentVolume(pv, false), types.UID("poduid"), fakeManager, fakeMounter)
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Errorf("Got a nil Mounter")
}
if err := mounter.SetUp(nil); err != nil {
t.Errorf("Expected success, got: %v", err)
}
mountOptions := fakeMounter.MountPoints[0].Opts
expectedMountOptions := []string{"_netdev", "bind"}
if !reflect.DeepEqual(mountOptions, expectedMountOptions) {
t.Errorf("Expected mount options to be %v got %v", expectedMountOptions, mountOptions)
}
}


@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package aws_ebs
package awsebs
import (
"fmt"
@ -24,9 +24,12 @@ import (
"strings"
"time"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/sets"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
@ -40,10 +43,13 @@ const (
maxRetries = 10
checkSleepDuration = time.Second
errorSleepDuration = 5 * time.Second
ebsMaxReplicasInAZ = 1
)
// AWSDiskUtil provides operations for EBS volume.
type AWSDiskUtil struct{}
// DeleteVolume deletes an AWS EBS volume.
func (util *AWSDiskUtil) DeleteVolume(d *awsElasticBlockStoreDeleter) error {
cloud, err := getCloudProvider(d.awsElasticBlockStore.plugin.host.GetCloudProvider())
if err != nil {
@ -54,20 +60,20 @@ func (util *AWSDiskUtil) DeleteVolume(d *awsElasticBlockStoreDeleter) error {
if err != nil {
// AWS cloud provider returns volume.deletedVolumeInUseError when
// necessary, no handling needed here.
glog.V(2).Infof("Error deleting EBS Disk volume %s: %v", d.volumeID, err)
klog.V(2).Infof("Error deleting EBS Disk volume %s: %v", d.volumeID, err)
return err
}
if deleted {
glog.V(2).Infof("Successfully deleted EBS Disk volume %s", d.volumeID)
klog.V(2).Infof("Successfully deleted EBS Disk volume %s", d.volumeID)
} else {
glog.V(2).Infof("Successfully deleted EBS Disk volume %s (actually already deleted)", d.volumeID)
klog.V(2).Infof("Successfully deleted EBS Disk volume %s (actually already deleted)", d.volumeID)
}
return nil
}
// CreateVolume creates an AWS EBS volume.
// Returns: volumeID, volumeSizeGB, labels, fstype, error
func (util *AWSDiskUtil) CreateVolume(c *awsElasticBlockStoreProvisioner) (aws.KubernetesVolumeID, int, map[string]string, string, error) {
func (util *AWSDiskUtil) CreateVolume(c *awsElasticBlockStoreProvisioner, node *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (aws.KubernetesVolumeID, int, map[string]string, string, error) {
cloud, err := getCloudProvider(c.awsElasticBlockStore.plugin.host.GetCloudProvider())
if err != nil {
return "", 0, nil, "", err
@ -83,50 +89,16 @@ func (util *AWSDiskUtil) CreateVolume(c *awsElasticBlockStoreProvisioner) (aws.K
tags["Name"] = volumeutil.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 255) // AWS tags can have 255 characters
capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
requestBytes := capacity.Value()
// AWS works with gigabytes, convert to GiB with rounding up
requestGB := int(volumeutil.RoundUpSize(requestBytes, 1024*1024*1024))
volumeOptions := &aws.VolumeOptions{
CapacityGB: requestGB,
Tags: tags,
PVCName: c.options.PVC.Name,
}
fstype := ""
// Apply Parameters (case-insensitive). We leave validation of
// the values to the cloud provider.
volumeOptions.ZonePresent = false
volumeOptions.ZonesPresent = false
for k, v := range c.options.Parameters {
switch strings.ToLower(k) {
case "type":
volumeOptions.VolumeType = v
case "zone":
volumeOptions.ZonePresent = true
volumeOptions.AvailabilityZone = v
case "zones":
volumeOptions.ZonesPresent = true
volumeOptions.AvailabilityZones = v
case "iopspergb":
volumeOptions.IOPSPerGB, err = strconv.Atoi(v)
if err != nil {
return "", 0, nil, "", fmt.Errorf("invalid iopsPerGB value %q, must be integer between 1 and 30: %v", v, err)
}
case "encrypted":
volumeOptions.Encrypted, err = strconv.ParseBool(v)
if err != nil {
return "", 0, nil, "", fmt.Errorf("invalid encrypted boolean value %q, must be true or false: %v", v, err)
}
case "kmskeyid":
volumeOptions.KmsKeyId = v
case volume.VolumeParameterFSType:
fstype = v
default:
return "", 0, nil, "", fmt.Errorf("invalid option %q for volume plugin %s", k, c.plugin.GetPluginName())
}
zonesWithNodes, err := cloud.GetCandidateZonesForDynamicVolume()
if err != nil {
return "", 0, nil, "", fmt.Errorf("error querying for all zones: %v", err)
}
if volumeOptions.ZonePresent && volumeOptions.ZonesPresent {
return "", 0, nil, "", fmt.Errorf("both zone and zones StorageClass parameters must not be used at the same time")
volumeOptions, err := populateVolumeOptions(c.plugin.GetPluginName(), c.options.PVC.Name, capacity, tags, c.options.Parameters, node, allowedTopologies, zonesWithNodes)
if err != nil {
klog.V(2).Infof("Error populating EBS options: %v", err)
return "", 0, nil, "", err
}
// TODO: implement PVC.Selector parsing
@ -136,18 +108,80 @@ func (util *AWSDiskUtil) CreateVolume(c *awsElasticBlockStoreProvisioner) (aws.K
name, err := cloud.CreateDisk(volumeOptions)
if err != nil {
glog.V(2).Infof("Error creating EBS Disk volume: %v", err)
klog.V(2).Infof("Error creating EBS Disk volume: %v", err)
return "", 0, nil, "", err
}
glog.V(2).Infof("Successfully created EBS Disk volume %s", name)
klog.V(2).Infof("Successfully created EBS Disk volume %s", name)
labels, err := cloud.GetVolumeLabels(name)
if err != nil {
// We don't really want to leak the volume here...
glog.Errorf("error building labels for new EBS volume %q: %v", name, err)
klog.Errorf("error building labels for new EBS volume %q: %v", name, err)
}
return name, int(requestGB), labels, fstype, nil
fstype := ""
if v, ok := c.options.Parameters[volume.VolumeParameterFSType]; ok {
fstype = v
}
return name, volumeOptions.CapacityGB, labels, fstype, nil
}
// returns volumeOptions for EBS based on storageclass parameters and node configuration
func populateVolumeOptions(pluginName, pvcName string, capacityGB resource.Quantity, tags map[string]string, storageParams map[string]string, node *v1.Node, allowedTopologies []v1.TopologySelectorTerm, zonesWithNodes sets.String) (*aws.VolumeOptions, error) {
requestGiB, err := volumeutil.RoundUpToGiBInt(capacityGB)
if err != nil {
return nil, err
}
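// Illustrative rounding (assumption, not part of this commit): a PVC
// request of 100Mi rounds up to 1 GiB, while 4Gi stays 4; EBS volumes are
// sized in whole GiB.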
volumeOptions := &aws.VolumeOptions{
CapacityGB: requestGiB,
Tags: tags,
}
// Apply Parameters (case-insensitive). We leave validation of
// the values to the cloud provider.
zonePresent := false
zonesPresent := false
var zone string
var zones sets.String
for k, v := range storageParams {
switch strings.ToLower(k) {
case "type":
volumeOptions.VolumeType = v
case "zone":
zonePresent = true
zone = v
case "zones":
zonesPresent = true
zones, err = volumeutil.ZonesToSet(v)
if err != nil {
return nil, fmt.Errorf("error parsing zones %s, must be strings separated by commas: %v", zones, err)
}
case "iopspergb":
volumeOptions.IOPSPerGB, err = strconv.Atoi(v)
if err != nil {
return nil, fmt.Errorf("invalid iopsPerGB value %q, must be integer between 1 and 30: %v", v, err)
}
case "encrypted":
volumeOptions.Encrypted, err = strconv.ParseBool(v)
if err != nil {
return nil, fmt.Errorf("invalid encrypted boolean value %q, must be true or false: %v", v, err)
}
case "kmskeyid":
volumeOptions.KmsKeyID = v
case volume.VolumeParameterFSType:
// Do nothing but don't make this fail
default:
return nil, fmt.Errorf("invalid option %q for volume plugin %s", k, pluginName)
}
}
volumeOptions.AvailabilityZone, err = volumeutil.SelectZoneForVolume(zonePresent, zonesPresent, zone, zones, zonesWithNodes, node, allowedTopologies, pvcName)
if err != nil {
return nil, err
}
return volumeOptions, nil
}
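// Illustrative StorageClass parameters consumed above (hypothetical
// values, not part of this commit):
//   type: gp2, zones: "us-east-1a,us-east-1b", iopsPerGB: "10",
//   encrypted: "true", kmsKeyId: <key id>, fsType: ext4
// fsType is accepted but ignored here (handled by the caller); any
// unrecognized key fails provisioning with an "invalid option" error.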
// Returns the first path that exists, or empty string if none exist.
@ -167,11 +201,11 @@ func verifyDevicePath(devicePaths []string) (string, error) {
func verifyAllPathsRemoved(devicePaths []string) (bool, error) {
allPathsRemoved := true
for _, path := range devicePaths {
if exists, err := volumeutil.PathExists(path); err != nil {
exists, err := volumeutil.PathExists(path)
if err != nil {
return false, fmt.Errorf("Error checking if path exists: %v", err)
} else {
allPathsRemoved = allPathsRemoved && !exists
}
allPathsRemoved = allPathsRemoved && !exists
}
return allPathsRemoved, nil
@ -180,7 +214,7 @@ func verifyAllPathsRemoved(devicePaths []string) (bool, error) {
// Returns list of all paths for given EBS mount
// This is more interesting on GCE (where we are able to identify volumes under /dev/disk-by-id)
// Here it is mostly about applying the partition path
func getDiskByIdPaths(volumeID aws.KubernetesVolumeID, partition string, devicePath string) []string {
func getDiskByIDPaths(volumeID aws.KubernetesVolumeID, partition string, devicePath string) []string {
devicePaths := []string{}
if devicePath != "" {
devicePaths = append(devicePaths, devicePath)
@ -196,14 +230,14 @@ func getDiskByIdPaths(volumeID aws.KubernetesVolumeID, partition string, deviceP
// and we have to get the volume id from the nvme interface
awsVolumeID, err := volumeID.MapToAWSVolumeID()
if err != nil {
glog.Warningf("error mapping volume %q to AWS volume: %v", volumeID, err)
klog.Warningf("error mapping volume %q to AWS volume: %v", volumeID, err)
} else {
// This is the magic name on which AWS presents NVME devices under /dev/disk/by-id/
// For example, vol-0fab1d5e3f72a5e23 creates a symlink at /dev/disk/by-id/nvme-Amazon_Elastic_Block_Store_vol0fab1d5e3f72a5e23
nvmeName := "nvme-Amazon_Elastic_Block_Store_" + strings.Replace(string(awsVolumeID), "-", "", -1)
nvmePath, err := findNvmeVolume(nvmeName)
if err != nil {
glog.Warningf("error looking for nvme volume %q: %v", volumeID, err)
klog.Warningf("error looking for nvme volume %q: %v", volumeID, err)
} else if nvmePath != "" {
devicePaths = append(devicePaths, nvmePath)
}
@ -229,14 +263,14 @@ func findNvmeVolume(findName string) (device string, err error) {
stat, err := os.Lstat(p)
if err != nil {
if os.IsNotExist(err) {
glog.V(6).Infof("nvme path not found %q", p)
klog.V(6).Infof("nvme path not found %q", p)
return "", nil
}
return "", fmt.Errorf("error getting stat of %q: %v", p, err)
}
if stat.Mode()&os.ModeSymlink != os.ModeSymlink {
glog.Warningf("nvme file %q found, but was not a symlink", p)
klog.Warningf("nvme file %q found, but was not a symlink", p)
return "", nil
}


@ -14,6 +14,6 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Package aws_ebs contains the internal representation of AWS Elastic
// Package awsebs contains the internal representation of AWS Elastic
// Block Store volumes.
package aws_ebs // import "k8s.io/kubernetes/pkg/volume/aws_ebs"
package awsebs // import "k8s.io/kubernetes/pkg/volume/awsebs"


@ -11,68 +11,37 @@ go_library(
srcs = [
"attacher.go",
"azure_common.go",
"azure_common_linux.go",
"azure_common_unsupported.go",
"azure_common_windows.go",
"azure_dd.go",
"azure_dd_block.go",
"azure_mounter.go",
"azure_provision.go",
] + select({
"@io_bazel_rules_go//go/platform:android": [
"azure_common_unsupported.go",
],
"@io_bazel_rules_go//go/platform:darwin": [
"azure_common_unsupported.go",
],
"@io_bazel_rules_go//go/platform:dragonfly": [
"azure_common_unsupported.go",
],
"@io_bazel_rules_go//go/platform:freebsd": [
"azure_common_unsupported.go",
],
"@io_bazel_rules_go//go/platform:linux": [
"azure_common_linux.go",
],
"@io_bazel_rules_go//go/platform:nacl": [
"azure_common_unsupported.go",
],
"@io_bazel_rules_go//go/platform:netbsd": [
"azure_common_unsupported.go",
],
"@io_bazel_rules_go//go/platform:openbsd": [
"azure_common_unsupported.go",
],
"@io_bazel_rules_go//go/platform:plan9": [
"azure_common_unsupported.go",
],
"@io_bazel_rules_go//go/platform:solaris": [
"azure_common_unsupported.go",
],
"@io_bazel_rules_go//go/platform:windows": [
"azure_common_windows.go",
],
"//conditions:default": [],
}),
],
importpath = "k8s.io/kubernetes/pkg/volume/azure_dd",
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/azure:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/util/keymutex:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/volumepathhandler:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/cloud-provider:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
@ -95,15 +64,19 @@ go_test(
"azure_common_test.go",
"azure_dd_block_test.go",
"azure_dd_test.go",
"azure_provision_test.go",
],
embed = [":go_default_library"],
deps = [
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
],
)


@ -25,13 +25,13 @@ import (
"strconv"
"time"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
"github.com/golang/glog"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
"k8s.io/klog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/cloudprovider"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
"k8s.io/kubernetes/pkg/util/keymutex"
"k8s.io/kubernetes/pkg/util/mount"
@ -52,20 +52,23 @@ type azureDiskAttacher struct {
var _ volume.Attacher = &azureDiskAttacher{}
var _ volume.Detacher = &azureDiskDetacher{}
var _ volume.DeviceMounter = &azureDiskAttacher{}
var _ volume.DeviceUnmounter = &azureDiskDetacher{}
// acquire lock to get a LUN number
var getLunMutex = keymutex.NewKeyMutex()
var getLunMutex = keymutex.NewHashed(0)
// Attach attaches a volume.Spec to an Azure VM referenced by NodeName, returning the disk's LUN
func (a *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
glog.Warningf("failed to get azure disk spec (%v)", err)
klog.Warningf("failed to get azure disk spec (%v)", err)
return "", err
}
instanceid, err := a.cloud.InstanceID(context.TODO(), nodeName)
if err != nil {
glog.Warningf("failed to get azure instance id (%v)", err)
klog.Warningf("failed to get azure instance id (%v)", err)
return "", fmt.Errorf("failed to get azure instance id for node %q (%v)", nodeName, err)
}
@ -77,31 +80,31 @@ func (a *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (
lun, err := diskController.GetDiskLun(volumeSource.DiskName, volumeSource.DataDiskURI, nodeName)
if err == cloudprovider.InstanceNotFound {
// Log error and continue with attach
glog.Warningf(
klog.Warningf(
"Error checking if volume is already attached to current node (%q). Will continue and try attach anyway. err=%v",
instanceid, err)
}
if err == nil {
// Volume is already attached to node.
glog.V(4).Infof("Attach operation is successful. volume %q is already attached to node %q at lun %d.", volumeSource.DiskName, instanceid, lun)
klog.V(2).Infof("Attach operation is successful. volume %q is already attached to node %q at lun %d.", volumeSource.DiskName, instanceid, lun)
} else {
glog.V(4).Infof("GetDiskLun returned: %v. Initiating attaching volume %q to node %q.", err, volumeSource.DataDiskURI, nodeName)
klog.V(2).Infof("GetDiskLun returned: %v. Initiating attaching volume %q to node %q.", err, volumeSource.DataDiskURI, nodeName)
getLunMutex.LockKey(instanceid)
defer getLunMutex.UnlockKey(instanceid)
lun, err = diskController.GetNextDiskLun(nodeName)
if err != nil {
glog.Warningf("no LUN available for instance %q (%v)", nodeName, err)
klog.Warningf("no LUN available for instance %q (%v)", nodeName, err)
return "", fmt.Errorf("all LUNs are used, cannot attach volume %q to instance %q (%v)", volumeSource.DiskName, instanceid, err)
}
glog.V(4).Infof("Trying to attach volume %q lun %d to node %q.", volumeSource.DataDiskURI, lun, nodeName)
klog.V(2).Infof("Trying to attach volume %q lun %d to node %q.", volumeSource.DataDiskURI, lun, nodeName)
isManagedDisk := (*volumeSource.Kind == v1.AzureManagedDisk)
err = diskController.AttachDisk(isManagedDisk, volumeSource.DiskName, volumeSource.DataDiskURI, nodeName, lun, compute.CachingTypes(*volumeSource.CachingMode))
if err == nil {
glog.V(4).Infof("Attach operation successful: volume %q attached to node %q.", volumeSource.DataDiskURI, nodeName)
klog.V(2).Infof("Attach operation successful: volume %q attached to node %q.", volumeSource.DataDiskURI, nodeName)
} else {
glog.V(2).Infof("Attach volume %q to instance %q failed with %v", volumeSource.DataDiskURI, instanceid, err)
klog.V(2).Infof("Attach volume %q to instance %q failed with %v", volumeSource.DataDiskURI, instanceid, err)
return "", fmt.Errorf("Attach volume %q to instance %q failed with %v", volumeSource.DiskName, instanceid, err)
}
}
@ -116,7 +119,7 @@ func (a *azureDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName ty
for _, spec := range specs {
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
glog.Errorf("azureDisk - Error getting volume (%q) source : %v", spec.Name(), err)
klog.Errorf("azureDisk - Error getting volume (%q) source : %v", spec.Name(), err)
continue
}
@ -132,7 +135,7 @@ func (a *azureDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName ty
attachedResult, err := diskController.DisksAreAttached(volumeIDList, nodeName)
if err != nil {
// Log error and continue with attach
glog.Errorf(
klog.Errorf(
"azureDisk - Error checking if volumes (%v) are attached to current node (%q). err=%v",
volumeIDList, nodeName, err)
return volumesAttachedCheck, err
@ -142,15 +145,13 @@ func (a *azureDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName ty
if !attached {
spec := volumeSpecMap[volumeID]
volumesAttachedCheck[spec] = false
glog.V(2).Infof("azureDisk - VolumesAreAttached: check volume %q (specName: %q) is no longer attached", volumeID, spec.Name())
klog.V(2).Infof("azureDisk - VolumesAreAttached: check volume %q (specName: %q) is no longer attached", volumeID, spec.Name())
}
}
return volumesAttachedCheck, nil
}
func (a *azureDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) {
var err error
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return "", err
@ -164,13 +165,22 @@ func (a *azureDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string,
nodeName := types.NodeName(a.plugin.host.GetHostName())
diskName := volumeSource.DiskName
glog.V(5).Infof("azureDisk - WaitForAttach: begin to GetDiskLun by diskName(%s), DataDiskURI(%s), nodeName(%s), devicePath(%s)",
diskName, volumeSource.DataDiskURI, nodeName, devicePath)
lun, err := diskController.GetDiskLun(diskName, volumeSource.DataDiskURI, nodeName)
if err != nil {
return "", err
var lun int32
if runtime.GOOS == "windows" {
klog.V(2).Infof("azureDisk - WaitForAttach: begin to GetDiskLun by diskName(%s), DataDiskURI(%s), nodeName(%s), devicePath(%s)",
diskName, volumeSource.DataDiskURI, nodeName, devicePath)
lun, err = diskController.GetDiskLun(diskName, volumeSource.DataDiskURI, nodeName)
if err != nil {
return "", err
}
klog.V(2).Infof("azureDisk - WaitForAttach: GetDiskLun succeeded, got lun(%v)", lun)
} else {
lun, err = getDiskLUN(devicePath)
if err != nil {
return "", err
}
}
glog.V(5).Infof("azureDisk - WaitForAttach: GetDiskLun succeeded, got lun(%v)", lun)
exec := a.plugin.host.GetExec(a.plugin.GetPluginName())
io := &osIOHandler{}
@ -237,9 +247,9 @@ func (attacher *azureDiskAttacher) MountDevice(spec *volume.Spec, devicePath str
// testing original mount point, make sure the mount link is valid
if _, err := (&osIOHandler{}).ReadDir(deviceMountPath); err != nil {
// mount link is invalid, now unmount and remount later
glog.Warningf("azureDisk - ReadDir %s failed with %v, unmount this directory", deviceMountPath, err)
klog.Warningf("azureDisk - ReadDir %s failed with %v, unmount this directory", deviceMountPath, err)
if err := mounter.Unmount(deviceMountPath); err != nil {
glog.Errorf("azureDisk - Unmount deviceMountPath %s failed with %v", deviceMountPath, err)
klog.Errorf("azureDisk - Unmount deviceMountPath %s failed with %v", deviceMountPath, err)
return err
}
notMnt = true
@ -274,11 +284,11 @@ func (d *azureDiskDetacher) Detach(diskURI string, nodeName types.NodeName) erro
instanceid, err := d.cloud.InstanceID(context.TODO(), nodeName)
if err != nil {
glog.Warningf("no instance id for node %q, skip detaching (%v)", nodeName, err)
klog.Warningf("no instance id for node %q, skip detaching (%v)", nodeName, err)
return nil
}
glog.V(4).Infof("detach %v from node %q", diskURI, nodeName)
klog.V(2).Infof("detach %v from node %q", diskURI, nodeName)
diskController, err := getDiskController(d.plugin.host)
if err != nil {
@ -290,10 +300,10 @@ func (d *azureDiskDetacher) Detach(diskURI string, nodeName types.NodeName) erro
err = diskController.DetachDiskByName("", diskURI, nodeName)
if err != nil {
glog.Errorf("failed to detach azure disk %q, err %v", diskURI, err)
klog.Errorf("failed to detach azure disk %q, err %v", diskURI, err)
}
glog.V(2).Infof("azureDisk - disk:%s was detached from node:%v", diskURI, nodeName)
klog.V(2).Infof("azureDisk - disk:%s was detached from node:%v", diskURI, nodeName)
return err
}
@ -301,9 +311,9 @@ func (d *azureDiskDetacher) Detach(diskURI string, nodeName types.NodeName) erro
func (detacher *azureDiskDetacher) UnmountDevice(deviceMountPath string) error {
err := util.UnmountPath(deviceMountPath, detacher.plugin.host.GetMounter(detacher.plugin.GetPluginName()))
if err == nil {
glog.V(4).Infof("azureDisk - Device %s was unmounted", deviceMountPath)
klog.V(2).Infof("azureDisk - Device %s was unmounted", deviceMountPath)
} else {
glog.Infof("azureDisk - Device %s failed to unmount with error: %s", deviceMountPath, err.Error())
klog.Warningf("azureDisk - Device %s failed to unmount with error: %s", deviceMountPath, err.Error())
}
return err
}


@ -21,9 +21,11 @@ import (
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strconv"
libstrings "strings"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
@ -36,8 +38,8 @@ import (
)
const (
defaultStorageAccountType = storage.StandardLRS
defaultAzureDiskKind = v1.AzureSharedBlobDisk
defaultStorageAccountType = compute.StandardLRS
defaultAzureDiskKind = v1.AzureManagedDisk
defaultAzureDataDiskCachingMode = v1.AzureDataDiskCachingNone
)
@ -60,7 +62,7 @@ var (
string(api.AzureDedicatedBlobDisk),
string(api.AzureManagedDisk))
supportedStorageAccountTypes = sets.NewString("Premium_LRS", "Standard_LRS", "Standard_GRS", "Standard_RAGRS")
lunPathRE = regexp.MustCompile(`/dev/disk/azure/scsi(?:.*)/lun(.+)`)
)
func getPath(uid types.UID, volName string, host volume.VolumeHost) string {
@ -122,16 +124,20 @@ func normalizeKind(kind string) (v1.AzureDataDiskKind, error) {
return v1.AzureDataDiskKind(kind), nil
}
func normalizeStorageAccountType(storageAccountType string) (storage.SkuName, error) {
func normalizeStorageAccountType(storageAccountType string) (compute.DiskStorageAccountTypes, error) {
if storageAccountType == "" {
return defaultStorageAccountType, nil
}
if !supportedStorageAccountTypes.Has(storageAccountType) {
return "", fmt.Errorf("azureDisk - %s is not supported sku/storageaccounttype. Supported values are %s", storageAccountType, supportedStorageAccountTypes.List())
sku := compute.DiskStorageAccountTypes(storageAccountType)
supportedSkuNames := compute.PossibleDiskStorageAccountTypesValues()
for _, s := range supportedSkuNames {
if sku == s {
return sku, nil
}
}
return storage.SkuName(storageAccountType), nil
return "", fmt.Errorf("azureDisk - %s is not supported sku/storageaccounttype. Supported values are %s", storageAccountType, supportedSkuNames)
}
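The switch from storage.SkuName to compute.DiskStorageAccountTypes means the set of valid SKUs now comes from the SDK's own enumeration instead of a hard-coded list. A minimal standalone sketch of that membership check, assuming the vendored 2018-10-01 compute SDK (validateSKU is a hypothetical mirror of normalizeStorageAccountType, not part of the diff):

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
)

// validateSKU mirrors normalizeStorageAccountType: a SKU string is valid only
// if the compute SDK enumerates it.
func validateSKU(s string) (compute.DiskStorageAccountTypes, error) {
	for _, v := range compute.PossibleDiskStorageAccountTypesValues() {
		if compute.DiskStorageAccountTypes(s) == v {
			return v, nil
		}
	}
	return "", fmt.Errorf("azureDisk - %s is not a supported sku", s)
}

func main() {
	fmt.Println(validateSKU("StandardSSD_LRS")) // StandardSSD_LRS <nil>
	fmt.Println(validateSKU("Standard_XYZ"))    // "" plus an error
}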
func normalizeCachingMode(cachingMode v1.AzureDataDiskCachingMode) (v1.AzureDataDiskCachingMode, error) {
@ -199,3 +205,25 @@ func strFirstLetterToUpper(str string) string {
}
return libstrings.ToUpper(string(str[0])) + str[1:]
}
// getDiskLUN: deviceInfo could be a LUN number or a device path, e.g. /dev/disk/azure/scsi1/lun2
func getDiskLUN(deviceInfo string) (int32, error) {
var diskLUN string
if len(deviceInfo) <= 2 {
diskLUN = deviceInfo
} else {
// extract the LUN num from a device path
matches := lunPathRE.FindStringSubmatch(deviceInfo)
if len(matches) == 2 {
diskLUN = matches[1]
} else {
return -1, fmt.Errorf("cannot parse deviceInfo: %s", deviceInfo)
}
}
lun, err := strconv.Atoi(diskLUN)
if err != nil {
return -1, err
}
return int32(lun), nil
}
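getDiskLUN accepts either a bare LUN ("2") or a udev path matched by lunPathRE. A runnable sketch of both branches, with lunPathRE copied from the var block above:

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

var lunPathRE = regexp.MustCompile(`/dev/disk/azure/scsi(?:.*)/lun(.+)`)

func main() {
	for _, d := range []string{"2", "/dev/disk/azure/scsi1/lun2", "/dev/sdc"} {
		diskLUN := d
		if len(d) > 2 {
			// extract the LUN number from a device path
			m := lunPathRE.FindStringSubmatch(d)
			if len(m) != 2 {
				fmt.Printf("%-28s -> cannot parse\n", d)
				continue
			}
			diskLUN = m[1]
		}
		lun, err := strconv.Atoi(diskLUN)
		fmt.Printf("%-28s -> lun %d, err %v\n", d, lun, err)
	}
}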

View File

@ -24,7 +24,7 @@ import (
"strconv"
libstrings "strings"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/util/mount"
)
@ -42,21 +42,21 @@ func listAzureDiskPath(io ioHandler) []string {
}
}
}
glog.V(12).Infof("Azure sys disks paths: %v", azureDiskList)
klog.V(12).Infof("Azure sys disks paths: %v", azureDiskList)
return azureDiskList
}
// getDiskLinkByDevName get disk link by device name from devLinkPath, e.g. /dev/disk/azure/, /dev/disk/by-id/
func getDiskLinkByDevName(io ioHandler, devLinkPath, devName string) (string, error) {
dirs, err := io.ReadDir(devLinkPath)
glog.V(12).Infof("azureDisk - begin to find %s from %s", devName, devLinkPath)
klog.V(12).Infof("azureDisk - begin to find %s from %s", devName, devLinkPath)
if err == nil {
for _, f := range dirs {
diskPath := devLinkPath + f.Name()
glog.V(12).Infof("azureDisk - begin to Readlink: %s", diskPath)
klog.V(12).Infof("azureDisk - begin to Readlink: %s", diskPath)
link, linkErr := io.Readlink(diskPath)
if linkErr != nil {
glog.Warningf("azureDisk - read link (%s) error: %v", diskPath, linkErr)
klog.Warningf("azureDisk - read link (%s) error: %v", diskPath, linkErr)
continue
}
if libstrings.HasSuffix(link, devName) {
@ -75,11 +75,11 @@ func scsiHostRescan(io ioHandler, exec mount.Exec) {
name := scsi_path + f.Name() + "/scan"
data := []byte("- - -")
if err = io.WriteFile(name, data, 0666); err != nil {
glog.Warningf("failed to rescan scsi host %s", name)
klog.Warningf("failed to rescan scsi host %s", name)
}
}
} else {
glog.Warningf("failed to read %s, err %v", scsi_path, err)
klog.Warningf("failed to read %s, err %v", scsi_path, err)
}
}
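The "- - -" written to each scan file is the kernel's wildcard triple (channel, target, LUN), asking the SCSI host to rediscover everything attached to it. A standalone sketch of the same sysfs write, assuming Linux and the usual /sys/class/scsi_host layout:

package main

import (
	"fmt"
	"io/ioutil"
	"path/filepath"
)

func main() {
	// "- - -" means: rescan all channels, all targets, all LUNs on this host.
	hosts, err := filepath.Glob("/sys/class/scsi_host/host*")
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, h := range hosts {
		scan := filepath.Join(h, "scan")
		if err := ioutil.WriteFile(scan, []byte("- - -"), 0666); err != nil {
			fmt.Printf("failed to rescan %s: %v\n", scan, err)
		}
	}
}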
@ -101,10 +101,10 @@ func findDiskByLunWithConstraint(lun int, io ioHandler, azureDisks []string) (st
continue
}
if len(azureDisks) == 0 {
glog.V(4).Infof("/dev/disk/azure is not populated, now try to parse %v directly", name)
klog.V(4).Infof("/dev/disk/azure is not populated, now try to parse %v directly", name)
target, err := strconv.Atoi(arr[0])
if err != nil {
glog.Errorf("failed to parse target from %v (%v), err %v", arr[0], name, err)
klog.Errorf("failed to parse target from %v (%v), err %v", arr[0], name, err)
continue
}
// as observed, targets 0-3 are used by OS disks. Skip them
@ -118,7 +118,7 @@ func findDiskByLunWithConstraint(lun int, io ioHandler, azureDisks []string) (st
l, err := strconv.Atoi(arr[3])
if err != nil {
// unknown path format, continue to read the next one
glog.V(4).Infof("azure disk - failed to parse lun from %v (%v), err %v", arr[3], name, err)
klog.V(4).Infof("azure disk - failed to parse lun from %v (%v), err %v", arr[3], name, err)
continue
}
if lun == l {
@ -127,24 +127,24 @@ func findDiskByLunWithConstraint(lun int, io ioHandler, azureDisks []string) (st
vendorPath := filepath.Join(sys_path, name, "vendor")
vendorBytes, err := io.ReadFile(vendorPath)
if err != nil {
glog.Errorf("failed to read device vendor, err: %v", err)
klog.Errorf("failed to read device vendor, err: %v", err)
continue
}
vendor := libstrings.TrimSpace(string(vendorBytes))
if libstrings.ToUpper(vendor) != "MSFT" {
glog.V(4).Infof("vendor doesn't match VHD, got %s", vendor)
klog.V(4).Infof("vendor doesn't match VHD, got %s", vendor)
continue
}
modelPath := filepath.Join(sys_path, name, "model")
modelBytes, err := io.ReadFile(modelPath)
if err != nil {
glog.Errorf("failed to read device model, err: %v", err)
klog.Errorf("failed to read device model, err: %v", err)
continue
}
model := libstrings.TrimSpace(string(modelBytes))
if libstrings.ToUpper(model) != "VIRTUAL DISK" {
glog.V(4).Infof("model doesn't match VHD, got %s", model)
klog.V(4).Infof("model doesn't match VHD, got %s", model)
continue
}
@ -154,7 +154,7 @@ func findDiskByLunWithConstraint(lun int, io ioHandler, azureDisks []string) (st
found := false
devName := dev[0].Name()
for _, diskName := range azureDisks {
glog.V(12).Infof("azureDisk - validating disk %q with sys disk %q", devName, diskName)
klog.V(12).Infof("azureDisk - validating disk %q with sys disk %q", devName, diskName)
if devName == diskName {
found = true
break
@ -165,10 +165,10 @@ func findDiskByLunWithConstraint(lun int, io ioHandler, azureDisks []string) (st
for _, devLinkPath := range devLinkPaths {
diskPath, err := getDiskLinkByDevName(io, devLinkPath, devName)
if err == nil {
glog.V(4).Infof("azureDisk - found %s by %s under %s", diskPath, devName, devLinkPath)
klog.V(4).Infof("azureDisk - found %s by %s under %s", diskPath, devName, devLinkPath)
return diskPath, nil
}
glog.Warningf("azureDisk - getDiskLinkByDevName by %s under %s failed, error: %v", devName, devLinkPath, err)
klog.Warningf("azureDisk - getDiskLinkByDevName by %s under %s failed, error: %v", devName, devLinkPath, err)
}
return "/dev/" + devName, nil
}
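findDiskByLunWithConstraint keys off sysfs SCSI device names of the form host:channel:target:lun. A small sketch of that split (parseHCTL is a hypothetical helper named here only for illustration):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseHCTL splits a sysfs SCSI device name such as "5:0:0:2" into its
// host and LUN fields; channel and target are ignored here.
func parseHCTL(name string) (host, lun int, err error) {
	arr := strings.Split(name, ":")
	if len(arr) < 4 {
		return -1, -1, fmt.Errorf("unexpected device name %q", name)
	}
	if host, err = strconv.Atoi(arr[0]); err != nil {
		return -1, -1, err
	}
	if lun, err = strconv.Atoi(arr[3]); err != nil {
		return -1, -1, err
	}
	return host, lun, nil
}

func main() {
	fmt.Println(parseHCTL("5:0:0:2")) // 5 2 <nil>
}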

View File

@ -24,6 +24,9 @@ import (
"testing"
"time"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
"github.com/stretchr/testify/assert"
"k8s.io/kubernetes/pkg/util/mount"
)
@ -134,3 +137,103 @@ func TestIoHandler(t *testing.T) {
}
}
}
func TestNormalizeStorageAccountType(t *testing.T) {
tests := []struct {
storageAccountType string
expectedAccountType compute.DiskStorageAccountTypes
expectError bool
}{
{
storageAccountType: "",
expectedAccountType: compute.StandardLRS,
expectError: false,
},
{
storageAccountType: "NOT_EXISTING",
expectedAccountType: "",
expectError: true,
},
{
storageAccountType: "Standard_LRS",
expectedAccountType: compute.StandardLRS,
expectError: false,
},
{
storageAccountType: "Premium_LRS",
expectedAccountType: compute.PremiumLRS,
expectError: false,
},
{
storageAccountType: "StandardSSD_LRS",
expectedAccountType: compute.StandardSSDLRS,
expectError: false,
},
{
storageAccountType: "UltraSSD_LRS",
expectedAccountType: compute.UltraSSDLRS,
expectError: false,
},
}
for _, test := range tests {
result, err := normalizeStorageAccountType(test.storageAccountType)
assert.Equal(t, result, test.expectedAccountType)
assert.Equal(t, err != nil, test.expectError, fmt.Sprintf("error msg: %v", err))
}
}
func TestGetDiskLUN(t *testing.T) {
tests := []struct {
deviceInfo string
expectedLUN int32
expectError bool
}{
{
deviceInfo: "0",
expectedLUN: 0,
expectError: false,
},
{
deviceInfo: "10",
expectedLUN: 10,
expectError: false,
},
{
deviceInfo: "11d",
expectedLUN: -1,
expectError: true,
},
{
deviceInfo: "999",
expectedLUN: -1,
expectError: true,
},
{
deviceInfo: "",
expectedLUN: -1,
expectError: true,
},
{
deviceInfo: "/dev/disk/azure/scsi1/lun2",
expectedLUN: 2,
expectError: false,
},
{
deviceInfo: "/dev/disk/azure/scsi0/lun12",
expectedLUN: 12,
expectError: false,
},
{
deviceInfo: "/dev/disk/by-id/scsi1/lun2",
expectedLUN: -1,
expectError: true,
},
}
for _, test := range tests {
result, err := getDiskLUN(test.deviceInfo)
assert.Equal(t, result, test.expectedLUN)
assert.Equal(t, err != nil, test.expectError, fmt.Sprintf("error msg: %v", err))
}
}

View File

@ -24,7 +24,7 @@ import (
"strconv"
"strings"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/util/mount"
)
@ -33,7 +33,7 @@ func scsiHostRescan(io ioHandler, exec mount.Exec) {
cmd := "Update-HostStorageCache"
output, err := exec.Run("powershell", "/c", cmd)
if err != nil {
glog.Errorf("Update-HostStorageCache failed in scsiHostRescan, error: %v, output: %q", err, string(output))
klog.Errorf("Update-HostStorageCache failed in scsiHostRescan, error: %v, output: %q", err, string(output))
}
}
@ -42,7 +42,7 @@ func findDiskByLun(lun int, iohandler ioHandler, exec mount.Exec) (string, error
cmd := `Get-Disk | select number, location | ConvertTo-Json`
output, err := exec.Run("powershell", "/c", cmd)
if err != nil {
glog.Errorf("Get-Disk failed in findDiskByLun, error: %v, output: %q", err, string(output))
klog.Errorf("Get-Disk failed in findDiskByLun, error: %v, output: %q", err, string(output))
return "", err
}
@ -52,7 +52,7 @@ func findDiskByLun(lun int, iohandler ioHandler, exec mount.Exec) (string, error
var data []map[string]interface{}
if err = json.Unmarshal(output, &data); err != nil {
glog.Errorf("Get-Disk output is not a json array, output: %q", string(output))
klog.Errorf("Get-Disk output is not a json array, output: %q", string(output))
return "", err
}
@ -66,27 +66,27 @@ func findDiskByLun(lun int, iohandler ioHandler, exec mount.Exec) (string, error
arr := strings.Split(location, " ")
arrLen := len(arr)
if arrLen < 3 {
glog.Warningf("unexpected json structure from Get-Disk, location: %q", jsonLocation)
klog.Warningf("unexpected json structure from Get-Disk, location: %q", jsonLocation)
continue
}
glog.V(4).Infof("found a disk, locatin: %q, lun: %q", location, arr[arrLen-1])
klog.V(4).Infof("found a disk, locatin: %q, lun: %q", location, arr[arrLen-1])
//last element of location field is LUN number, e.g.
// "location": "Integrated : Adapter 3 : Port 0 : Target 0 : LUN 1"
l, err := strconv.Atoi(arr[arrLen-1])
if err != nil {
glog.Warningf("cannot parse element from data structure, location: %q, element: %q", location, arr[arrLen-1])
klog.Warningf("cannot parse element from data structure, location: %q, element: %q", location, arr[arrLen-1])
continue
}
if l == lun {
glog.V(4).Infof("found a disk and lun, locatin: %q, lun: %d", location, lun)
klog.V(4).Infof("found a disk and lun, locatin: %q, lun: %d", location, lun)
if d, ok := v["number"]; ok {
if diskNum, ok := d.(float64); ok {
glog.V(2).Infof("azureDisk Mount: got disk number(%d) by LUN(%d)", int(diskNum), lun)
klog.V(2).Infof("azureDisk Mount: got disk number(%d) by LUN(%d)", int(diskNum), lun)
return strconv.Itoa(int(diskNum)), nil
}
glog.Warningf("LUN(%d) found, but could not get disk number(%q), location: %q", lun, d, location)
klog.Warningf("LUN(%d) found, but could not get disk number(%q), location: %q", lun, d, location)
}
return "", fmt.Errorf("LUN(%d) found, but could not get disk number, location: %q", lun, location)
}
@ -99,7 +99,7 @@ func findDiskByLun(lun int, iohandler ioHandler, exec mount.Exec) (string, error
func formatIfNotFormatted(disk string, fstype string, exec mount.Exec) {
if err := mount.ValidateDiskNumber(disk); err != nil {
glog.Errorf("azureDisk Mount: formatIfNotFormatted failed, err: %v\n", err)
klog.Errorf("azureDisk Mount: formatIfNotFormatted failed, err: %v\n", err)
return
}
@ -111,8 +111,8 @@ func formatIfNotFormatted(disk string, fstype string, exec mount.Exec) {
cmd += fmt.Sprintf(" | New-Partition -AssignDriveLetter -UseMaximumSize | Format-Volume -FileSystem %s -Confirm:$false", fstype)
output, err := exec.Run("powershell", "/c", cmd)
if err != nil {
glog.Errorf("azureDisk Mount: Get-Disk failed, error: %v, output: %q", err, string(output))
klog.Errorf("azureDisk Mount: Get-Disk failed, error: %v, output: %q", err, string(output))
} else {
glog.Infof("azureDisk Mount: Disk successfully formatted, disk: %q, fstype: %q\n", disk, fstype)
klog.Infof("azureDisk Mount: Disk successfully formatted, disk: %q, fstype: %q\n", disk, fstype)
}
}
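On Windows the LUN is recovered from the trailing token of Get-Disk's location field, e.g. "Integrated : Adapter 3 : Port 0 : Target 0 : LUN 1". A sketch of that parse (lunFromLocation is illustrative, not part of the diff):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// lunFromLocation extracts the last space-separated token of Get-Disk's
// "location" field, which is the LUN number.
func lunFromLocation(location string) (int, error) {
	arr := strings.Split(location, " ")
	if len(arr) < 3 {
		return -1, fmt.Errorf("unexpected location %q", location)
	}
	return strconv.Atoi(arr[len(arr)-1])
}

func main() {
	fmt.Println(lunFromLocation("Integrated : Adapter 3 : Port 0 : Target 0 : LUN 1")) // 1 <nil>
}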

View File

@ -17,16 +17,19 @@ limitations under the License.
package azure_dd
import (
"context"
"fmt"
"strings"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage"
"k8s.io/klog"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
)
@ -36,7 +39,7 @@ type DiskController interface {
CreateBlobDisk(dataDiskName string, storageAccountType storage.SkuName, sizeGB int) (string, error)
DeleteBlobDisk(diskUri string) error
CreateManagedDisk(diskName string, storageAccountType storage.SkuName, resourceGroup string, sizeGB int, tags map[string]string) (string, error)
CreateManagedDisk(options *azure.ManagedDiskOptions) (string, error)
DeleteManagedDisk(diskURI string) error
// Attaches the disk to the host machine.
@ -59,6 +62,15 @@ type DiskController interface {
// Expand the disk to new size
ResizeDisk(diskURI string, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error)
// GetAzureDiskLabels gets availability zone labels for AzureDisk.
GetAzureDiskLabels(diskURI string) (map[string]string, error)
// GetActiveZones returns all the zones in which k8s nodes are currently running.
GetActiveZones() (sets.String, error)
// GetLocation returns the location in which k8s cluster is currently running.
GetLocation() string
}
type azureDataDiskPlugin struct {
@ -72,9 +84,14 @@ var _ volume.ProvisionableVolumePlugin = &azureDataDiskPlugin{}
var _ volume.AttachableVolumePlugin = &azureDataDiskPlugin{}
var _ volume.VolumePluginWithAttachLimits = &azureDataDiskPlugin{}
var _ volume.ExpandableVolumePlugin = &azureDataDiskPlugin{}
var _ volume.DeviceMountableVolumePlugin = &azureDataDiskPlugin{}
// store vm size list in current region
var vmSizeList *[]compute.VirtualMachineSize
const (
azureDataDiskPluginName = "kubernetes.io/azure-disk"
defaultAzureVolumeLimit = 16
)
func ProbeVolumePlugins() []volume.VolumePlugin {
@ -118,26 +135,65 @@ func (plugin *azureDataDiskPlugin) SupportsBulkVolumeVerification() bool {
func (plugin *azureDataDiskPlugin) GetVolumeLimits() (map[string]int64, error) {
volumeLimits := map[string]int64{
util.AzureVolumeLimitKey: 16,
util.AzureVolumeLimitKey: defaultAzureVolumeLimit,
}
cloud := plugin.host.GetCloudProvider()
// if we can't fetch cloudprovider we return an error
// hoping external CCM or admin can set it. Returning
// default values from here will mean, no one can
// override them.
if cloud == nil {
return nil, fmt.Errorf("No cloudprovider present")
az, err := getCloud(plugin.host)
if err != nil {
// if we can't fetch the cloud provider, we return an error in the hope
// that an external CCM or admin can set the limit; returning default
// values from here would mean no one can override them.
return nil, fmt.Errorf("failed to get azure cloud in GetVolumeLimits, plugin.host: %s", plugin.host.GetHostName())
}
if cloud.ProviderName() != azure.CloudProviderName {
return nil, fmt.Errorf("Expected Azure cloudprovider, got %s", cloud.ProviderName())
instances, ok := az.Instances()
if !ok {
klog.Warningf("Failed to get instances from cloud provider")
return volumeLimits, nil
}
instanceType, err := instances.InstanceType(context.TODO(), plugin.host.GetNodeName())
if err != nil {
klog.Errorf("Failed to get instance type from Azure cloud provider, nodeName: %s", plugin.host.GetNodeName())
return volumeLimits, nil
}
if vmSizeList == nil {
result, err := az.VirtualMachineSizesClient.List(context.TODO(), az.Location)
if err != nil || result.Value == nil {
klog.Errorf("failed to list vm sizes in GetVolumeLimits, plugin.host: %s, location: %s", plugin.host.GetHostName(), az.Location)
return volumeLimits, nil
}
vmSizeList = result.Value
}
volumeLimits = map[string]int64{
util.AzureVolumeLimitKey: getMaxDataDiskCount(instanceType, vmSizeList),
}
return volumeLimits, nil
}
func getMaxDataDiskCount(instanceType string, sizeList *[]compute.VirtualMachineSize) int64 {
if sizeList == nil {
return defaultAzureVolumeLimit
}
vmsize := strings.ToUpper(instanceType)
for _, size := range *sizeList {
if size.Name == nil || size.MaxDataDiskCount == nil {
klog.Errorf("failed to get vm size in getMaxDataDiskCount")
continue
}
if strings.ToUpper(*size.Name) == vmsize {
klog.V(2).Infof("got a matching size in getMaxDataDiskCount, Name: %s, MaxDataDiskCount: %d", *size.Name, *size.MaxDataDiskCount)
return int64(*size.MaxDataDiskCount)
}
}
return defaultAzureVolumeLimit
}
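A sketch of that lookup in isolation, assuming the vendored compute SDK and go-autorest's to helpers; maxDataDisks is a hypothetical mirror of getMaxDataDiskCount with the default limit of 16 inlined:

package main

import (
	"fmt"
	"strings"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

// maxDataDisks mirrors getMaxDataDiskCount: case-insensitive name match,
// falling back to the default of 16 for unknown sizes.
func maxDataDisks(instanceType string, sizes []compute.VirtualMachineSize) int64 {
	for _, s := range sizes {
		if s.Name != nil && s.MaxDataDiskCount != nil && strings.EqualFold(*s.Name, instanceType) {
			return int64(*s.MaxDataDiskCount)
		}
	}
	return 16
}

func main() {
	sizes := []compute.VirtualMachineSize{
		{Name: to.StringPtr("Standard_D2_V2"), MaxDataDiskCount: to.Int32Ptr(8)},
	}
	fmt.Println(maxDataDisks("standard_d2_v2", sizes)) // 8
	fmt.Println(maxDataDisks("Standard_ZZ", sizes))    // 16
}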
func (plugin *azureDataDiskPlugin) VolumeLimitKey(spec *volume.Spec) string {
return util.AzureVolumeLimitKey
}
@ -152,7 +208,7 @@ func (plugin *azureDataDiskPlugin) GetAccessModes() []v1.PersistentVolumeAccessM
func (plugin *azureDataDiskPlugin) NewAttacher() (volume.Attacher, error) {
azure, err := getCloud(plugin.host)
if err != nil {
glog.Errorf("failed to get azure cloud in NewAttacher, plugin.host : %s, err:%v", plugin.host.GetHostName(), err)
klog.Errorf("failed to get azure cloud in NewAttacher, plugin.host : %s, err:%v", plugin.host.GetHostName(), err)
return nil, err
}
@ -165,7 +221,7 @@ func (plugin *azureDataDiskPlugin) NewAttacher() (volume.Attacher, error) {
func (plugin *azureDataDiskPlugin) NewDetacher() (volume.Detacher, error) {
azure, err := getCloud(plugin.host)
if err != nil {
glog.V(4).Infof("failed to get azure cloud in NewDetacher, plugin.host : %s", plugin.host.GetHostName())
klog.V(4).Infof("failed to get azure cloud in NewDetacher, plugin.host : %s", plugin.host.GetHostName())
return nil, err
}
@ -245,6 +301,13 @@ func (plugin *azureDataDiskPlugin) ExpandVolumeDevice(
return diskController.ResizeDisk(spec.PersistentVolume.Spec.AzureDisk.DataDiskURI, oldSize, newSize)
}
func (plugin *azureDataDiskPlugin) ExpandFS(spec *volume.Spec, devicePath, deviceMountPath string, _, _ resource.Quantity) error {
_, err := util.GenericResizeFS(plugin.host, plugin.GetPluginName(), devicePath, deviceMountPath)
return err
}
var _ volume.FSResizableVolumePlugin = &azureDataDiskPlugin{}
func (plugin *azureDataDiskPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
mounter := plugin.host.GetMounter(plugin.GetPluginName())
pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName())
@ -267,5 +330,13 @@ func (plugin *azureDataDiskPlugin) ConstructVolumeSpec(volumeName, mountPath str
func (plugin *azureDataDiskPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
m := plugin.host.GetMounter(plugin.GetPluginName())
return mount.GetMountRefs(m, deviceMountPath)
return m.GetMountRefs(deviceMountPath)
}
func (plugin *azureDataDiskPlugin) NewDeviceMounter() (volume.DeviceMounter, error) {
return plugin.NewAttacher()
}
func (plugin *azureDataDiskPlugin) NewDeviceUnmounter() (volume.DeviceUnmounter, error) {
return plugin.NewDetacher()
}

View File

@ -20,10 +20,10 @@ import (
"fmt"
"path/filepath"
"github.com/golang/glog"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/util/mount"
kstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
@ -44,7 +44,7 @@ func (plugin *azureDataDiskPlugin) ConstructBlockVolumeSpec(podUID types.UID, vo
if err != nil {
return nil, err
}
glog.V(5).Infof("constructing block volume spec from globalMapPathUUID: %s", globalMapPathUUID)
klog.V(5).Infof("constructing block volume spec from globalMapPathUUID: %s", globalMapPathUUID)
globalMapPath := filepath.Dir(globalMapPathUUID)
if len(globalMapPath) <= 1 {
@ -63,7 +63,7 @@ func getVolumeSpecFromGlobalMapPath(globalMapPath, volumeName string) (*volume.S
if len(diskName) <= 1 {
return nil, fmt.Errorf("failed to get diskName from global path=%s", globalMapPath)
}
glog.V(5).Infof("got diskName(%s) from globalMapPath: %s", globalMapPath, diskName)
klog.V(5).Infof("got diskName(%s) from globalMapPath: %s", globalMapPath, diskName)
block := v1.PersistentVolumeBlock
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{

View File

@ -20,6 +20,10 @@ import (
"os"
"testing"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
"github.com/Azure/go-autorest/autorest/to"
"github.com/stretchr/testify/assert"
"k8s.io/api/core/v1"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/volume"
@ -53,3 +57,37 @@ func TestCanSupport(t *testing.T) {
// fakeAzureProvider type was removed because none of its functions were used
// Testing mounting will require path calculation which depends on the cloud provider, which is faked in the above test.
func TestGetMaxDataDiskCount(t *testing.T) {
tests := []struct {
instanceType string
sizeList *[]compute.VirtualMachineSize
expectResult int64
}{
{
instanceType: "standard_d2_v2",
sizeList: &[]compute.VirtualMachineSize{
{Name: to.StringPtr("Standard_D2_V2"), MaxDataDiskCount: to.Int32Ptr(8)},
{Name: to.StringPtr("Standard_D3_V2"), MaxDataDiskCount: to.Int32Ptr(16)},
},
expectResult: 8,
},
{
instanceType: "NOT_EXISTING",
sizeList: &[]compute.VirtualMachineSize{
{Name: to.StringPtr("Standard_D2_V2"), MaxDataDiskCount: to.Int32Ptr(8)},
},
expectResult: defaultAzureVolumeLimit,
},
{
instanceType: "",
sizeList: &[]compute.VirtualMachineSize{},
expectResult: defaultAzureVolumeLimit,
},
}
for _, test := range tests {
result := getMaxDataDiskCount(test.instanceType, test.sizeList)
assert.Equal(t, test.expectResult, result)
}
}

View File

@ -21,8 +21,8 @@ import (
"os"
"runtime"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
)
@ -46,7 +46,7 @@ func (m *azureDiskMounter) GetAttributes() volume.Attributes {
readOnly := false
volumeSource, _, err := getVolumeSource(m.spec)
if err != nil {
glog.Infof("azureDisk - mounter failed to get volume source for spec %s %v", m.spec.Name(), err)
klog.Infof("azureDisk - mounter failed to get volume source for spec %s %v", m.spec.Name(), err)
} else if volumeSource.ReadOnly != nil {
readOnly = *volumeSource.ReadOnly
}
@ -74,7 +74,7 @@ func (m *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
volumeSource, _, err := getVolumeSource(m.spec)
if err != nil {
glog.Infof("azureDisk - mounter failed to get volume source for spec %s", m.spec.Name())
klog.Infof("azureDisk - mounter failed to get volume source for spec %s", m.spec.Name())
return err
}
@ -82,20 +82,20 @@ func (m *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
mountPoint, err := mounter.IsLikelyNotMountPoint(dir)
if err != nil && !os.IsNotExist(err) {
glog.Infof("azureDisk - cannot validate mount point for disk %s on %s %v", diskName, dir, err)
klog.Infof("azureDisk - cannot validate mount point for disk %s on %s %v", diskName, dir, err)
return err
}
if !mountPoint {
// testing original mount point, make sure the mount link is valid
_, err := (&osIOHandler{}).ReadDir(dir)
if err == nil {
glog.V(4).Infof("azureDisk - already mounted to target %s", dir)
klog.V(4).Infof("azureDisk - already mounted to target %s", dir)
return nil
}
// mount link is invalid, now unmount and remount later
glog.Warningf("azureDisk - ReadDir %s failed with %v, unmount this directory", dir, err)
klog.Warningf("azureDisk - ReadDir %s failed with %v, unmount this directory", dir, err)
if err := mounter.Unmount(dir); err != nil {
glog.Errorf("azureDisk - Unmount directory %s failed with %v", dir, err)
klog.Errorf("azureDisk - Unmount directory %s failed with %v", dir, err)
return err
}
mountPoint = true
@ -104,7 +104,7 @@ func (m *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
if runtime.GOOS != "windows" {
// on Windows we use mklink to mount and will MkdirAll inside the Mount func
if err := os.MkdirAll(dir, 0750); err != nil {
glog.Errorf("azureDisk - mkdir failed on disk %s on dir: %s (%v)", diskName, dir, err)
klog.Errorf("azureDisk - mkdir failed on disk %s on dir: %s (%v)", diskName, dir, err)
return err
}
}
@ -119,7 +119,7 @@ func (m *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
options = util.JoinMountOptions(m.options.MountOptions, options)
}
glog.V(4).Infof("azureDisk - Attempting to mount %s on %s", diskName, dir)
klog.V(4).Infof("azureDisk - Attempting to mount %s on %s", diskName, dir)
isManagedDisk := (*volumeSource.Kind == v1.AzureManagedDisk)
globalPDPath, err := makeGlobalPDPath(m.plugin.host, volumeSource.DataDiskURI, isManagedDisk)
@ -131,7 +131,7 @@ func (m *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
// Everything in the following control flow is meant as an
// attempt to clean up a failed SetUpAt (bind mount)
if mountErr != nil {
glog.Infof("azureDisk - SetupAt:Mount disk:%s at dir:%s failed during mounting with error:%v, will attempt to clean up", diskName, dir, mountErr)
klog.Infof("azureDisk - SetupAt:Mount disk:%s at dir:%s failed during mounting with error:%v, will attempt to clean up", diskName, dir, mountErr)
mountPoint, err := mounter.IsLikelyNotMountPoint(dir)
if err != nil {
return fmt.Errorf("azureDisk - SetupAt:Mount:Failure:cleanup IsLikelyNotMountPoint check failed for disk:%s on dir:%s with error %v original-mountErr:%v", diskName, dir, err, mountErr)
@ -155,7 +155,7 @@ func (m *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
return fmt.Errorf("azureDisk - SetupAt:Mount:Failure error cleaning up (removing dir:%s) with error:%v original-mountErr:%v", dir, err, mountErr)
}
glog.V(2).Infof("azureDisk - Mount of disk:%s on dir:%s failed with mount error:%v post failure clean up was completed", diskName, dir, mountErr)
klog.V(2).Infof("azureDisk - Mount of disk:%s on dir:%s failed with mount error:%v post failure clean up was completed", diskName, dir, mountErr)
return mountErr
}
@ -163,7 +163,7 @@ func (m *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
volume.SetVolumeOwnership(m, fsGroup)
}
glog.V(2).Infof("azureDisk - successfully mounted disk %s on %s", diskName, dir)
klog.V(2).Infof("azureDisk - successfully mounted disk %s on %s", diskName, dir)
return nil
}
@ -175,11 +175,11 @@ func (u *azureDiskUnmounter) TearDownAt(dir string) error {
if pathExists, pathErr := util.PathExists(dir); pathErr != nil {
return fmt.Errorf("Error checking if path exists: %v", pathErr)
} else if !pathExists {
glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir)
klog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir)
return nil
}
glog.V(4).Infof("azureDisk - TearDownAt: %s", dir)
klog.V(4).Infof("azureDisk - TearDownAt: %s", dir)
mounter := u.plugin.host.GetMounter(u.plugin.GetPluginName())
mountPoint, err := mounter.IsLikelyNotMountPoint(dir)
if err != nil {

View File

@ -19,13 +19,18 @@ package azure_dd
import (
"errors"
"fmt"
"strconv"
"strings"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
"k8s.io/kubernetes/pkg/features"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
)
@ -68,6 +73,25 @@ func (d *azureDiskDeleter) Delete() error {
return diskController.DeleteBlobDisk(volumeSource.DataDiskURI)
}
// parseZoned parses 'zoned' for a storage class. If zoned is not specified (empty string),
// then it defaults to true for managed disks.
func parseZoned(zonedString string, kind v1.AzureDataDiskKind) (bool, error) {
if zonedString == "" {
return kind == v1.AzureManagedDisk, nil
}
zoned, err := strconv.ParseBool(zonedString)
if err != nil {
return false, fmt.Errorf("failed to parse 'zoned': %v", err)
}
if zoned && kind != v1.AzureManagedDisk {
return false, fmt.Errorf("zoned is only supported by managed disks")
}
return zoned, nil
}
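The defaulting rules are easiest to see in isolation; parseZonedSketch below is a hypothetical mirror of parseZoned with the disk kind reduced to a managed/unmanaged flag:

package main

import (
	"fmt"
	"strconv"
)

// parseZonedSketch: empty input defaults to zoned for managed disks, and
// zoned=true is rejected for blob-based (unmanaged) disks.
func parseZonedSketch(zoned string, managed bool) (bool, error) {
	if zoned == "" {
		return managed, nil
	}
	z, err := strconv.ParseBool(zoned)
	if err != nil {
		return false, fmt.Errorf("failed to parse 'zoned': %v", err)
	}
	if z && !managed {
		return false, fmt.Errorf("zoned is only supported by managed disks")
	}
	return z, nil
}

func main() {
	fmt.Println(parseZonedSketch("", true))      // true <nil>
	fmt.Println(parseZonedSketch("true", false)) // false plus an error
}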
func (p *azureDiskProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
if !util.AccessModesContainedInAll(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) {
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", p.options.PVC.Spec.AccessModes, p.plugin.GetAccessModes())
@ -85,7 +109,7 @@ func (p *azureDiskProvisioner) Provision(selectedNode *v1.Node, allowedTopologie
if len(p.options.PVC.Spec.AccessModes) == 1 {
if p.options.PVC.Spec.AccessModes[0] != supportedModes[0] {
return nil, fmt.Errorf("AzureDisk - mode %s is not supporetd by AzureDisk plugin supported mode is %s", p.options.PVC.Spec.AccessModes[0], supportedModes)
return nil, fmt.Errorf("AzureDisk - mode %s is not supported by AzureDisk plugin (supported mode is %s)", p.options.PVC.Spec.AccessModes[0], supportedModes)
}
}
@ -96,12 +120,25 @@ func (p *azureDiskProvisioner) Provision(selectedNode *v1.Node, allowedTopologie
strKind string
err error
resourceGroup string
zoned bool
zonePresent bool
zonesPresent bool
strZoned string
availabilityZone string
availabilityZones sets.String
selectedAvailabilityZone string
diskIopsReadWrite string
diskMbpsReadWrite string
)
// maxLength = 79 - (4 for ".vhd") = 75
name := util.GenerateVolumeName(p.options.ClusterName, p.options.PVName, 75)
capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
requestBytes := capacity.Value()
requestGB := int(util.RoundUpSize(requestBytes, 1024*1024*1024))
requestGiB, err := util.RoundUpToGiBInt(capacity)
if err != nil {
return nil, err
}
for k, v := range p.options.Parameters {
switch strings.ToLower(k) {
@ -121,6 +158,21 @@ func (p *azureDiskProvisioner) Provision(selectedNode *v1.Node, allowedTopologie
fsType = strings.ToLower(v)
case "resourcegroup":
resourceGroup = v
case "zone":
zonePresent = true
availabilityZone = v
case "zones":
zonesPresent = true
availabilityZones, err = util.ZonesToSet(v)
if err != nil {
return nil, fmt.Errorf("error parsing zones %s, must be strings separated by commas: %v", v, err)
}
case "zoned":
strZoned = v
case "diskiopsreadwrite":
diskIopsReadWrite = v
case "diskmbpsreadwrite":
diskMbpsReadWrite = v
default:
return nil, fmt.Errorf("AzureDisk - invalid option %s in storage class", k)
}
@ -137,6 +189,25 @@ func (p *azureDiskProvisioner) Provision(selectedNode *v1.Node, allowedTopologie
return nil, err
}
zoned, err = parseZoned(strZoned, kind)
if err != nil {
return nil, err
}
if kind != v1.AzureManagedDisk {
if resourceGroup != "" {
return nil, errors.New("StorageClass option 'resourceGroup' can be used only for managed disks")
}
if zoned {
return nil, errors.New("StorageClass option 'zoned' parameter is only supported for managed disks")
}
}
if !zoned && (zonePresent || zonesPresent || len(allowedTopologies) > 0) {
return nil, fmt.Errorf("zone, zones and allowedTopologies StorageClass parameters must be used together with zoned parameter")
}
if cachingMode, err = normalizeCachingMode(cachingMode); err != nil {
return nil, err
}
@ -146,39 +217,76 @@ func (p *azureDiskProvisioner) Provision(selectedNode *v1.Node, allowedTopologie
return nil, err
}
if resourceGroup != "" && kind != v1.AzureManagedDisk {
return nil, errors.New("StorageClass option 'resourceGroup' can be used only for managed disks")
// Select zone for managed disks based on zone, zones and allowedTopologies.
if zoned {
activeZones, err := diskController.GetActiveZones()
if err != nil {
return nil, fmt.Errorf("error querying active zones: %v", err)
}
if availabilityZone != "" || availabilityZones.Len() != 0 || activeZones.Len() != 0 || len(allowedTopologies) != 0 {
selectedAvailabilityZone, err = util.SelectZoneForVolume(zonePresent, zonesPresent, availabilityZone, availabilityZones, activeZones, selectedNode, allowedTopologies, p.options.PVC.Name)
if err != nil {
return nil, err
}
}
}
// create disk
diskURI := ""
labels := map[string]string{}
if kind == v1.AzureManagedDisk {
tags := make(map[string]string)
if p.options.CloudTags != nil {
tags = *(p.options.CloudTags)
}
diskURI, err = diskController.CreateManagedDisk(name, skuName, resourceGroup, requestGB, tags)
volumeOptions := &azure.ManagedDiskOptions{
DiskName: name,
StorageAccountType: skuName,
ResourceGroup: resourceGroup,
PVCName: p.options.PVC.Name,
SizeGB: requestGiB,
Tags: tags,
AvailabilityZone: selectedAvailabilityZone,
DiskIOPSReadWrite: diskIopsReadWrite,
DiskMBpsReadWrite: diskMbpsReadWrite,
}
diskURI, err = diskController.CreateManagedDisk(volumeOptions)
if err != nil {
return nil, err
}
labels, err = diskController.GetAzureDiskLabels(diskURI)
if err != nil {
return nil, err
}
} else {
if kind == v1.AzureDedicatedBlobDisk {
_, diskURI, _, err = diskController.CreateVolume(name, account, storageAccountType, location, requestGB)
_, diskURI, _, err = diskController.CreateVolume(name, account, storageAccountType, location, requestGiB)
if err != nil {
return nil, err
}
} else {
diskURI, err = diskController.CreateBlobDisk(name, skuName, requestGB)
diskURI, err = diskController.CreateBlobDisk(name, storage.SkuName(storageAccountType), requestGiB)
if err != nil {
return nil, err
}
}
}
var volumeMode *v1.PersistentVolumeMode
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
volumeMode = p.options.PVC.Spec.VolumeMode
if volumeMode != nil && *volumeMode == v1.PersistentVolumeBlock {
// Block volumes should not have any FSType
fsType = ""
}
}
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: p.options.PVName,
Labels: map[string]string{},
Labels: labels,
Annotations: map[string]string{
"volumehelper.VolumeDynamicallyCreatedByKey": "azure-disk-dynamic-provisioner",
},
@ -187,8 +295,9 @@ func (p *azureDiskProvisioner) Provision(selectedNode *v1.Node, allowedTopologie
PersistentVolumeReclaimPolicy: p.options.PersistentVolumeReclaimPolicy,
AccessModes: supportedModes,
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", requestGB)),
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", requestGiB)),
},
VolumeMode: volumeMode,
PersistentVolumeSource: v1.PersistentVolumeSource{
AzureDisk: &v1.AzureDiskVolumeSource{
CachingMode: &cachingMode,
@ -202,8 +311,52 @@ func (p *azureDiskProvisioner) Provision(selectedNode *v1.Node, allowedTopologie
},
}
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
pv.Spec.VolumeMode = p.options.PVC.Spec.VolumeMode
if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) {
nodeSelectorTerms := make([]v1.NodeSelectorTerm, 0)
if zoned {
// Set node affinity labels based on availability zone labels.
if len(labels) > 0 {
requirements := make([]v1.NodeSelectorRequirement, 0)
for k, v := range labels {
requirements = append(requirements, v1.NodeSelectorRequirement{Key: k, Operator: v1.NodeSelectorOpIn, Values: []string{v}})
}
nodeSelectorTerms = append(nodeSelectorTerms, v1.NodeSelectorTerm{
MatchExpressions: requirements,
})
}
} else {
// Set node affinity labels based on fault domains.
// This is required because unzoned AzureDisk can't be attached to zoned nodes.
// There are at most 3 fault domains available in each region.
// Refer to https://docs.microsoft.com/en-us/azure/virtual-machines/windows/manage-availability.
for i := 0; i < 3; i++ {
requirements := []v1.NodeSelectorRequirement{
{
Key: kubeletapis.LabelZoneRegion,
Operator: v1.NodeSelectorOpIn,
Values: []string{diskController.GetLocation()},
},
{
Key: kubeletapis.LabelZoneFailureDomain,
Operator: v1.NodeSelectorOpIn,
Values: []string{strconv.Itoa(i)},
},
}
nodeSelectorTerms = append(nodeSelectorTerms, v1.NodeSelectorTerm{
MatchExpressions: requirements,
})
}
}
if len(nodeSelectorTerms) > 0 {
pv.Spec.NodeAffinity = &v1.VolumeNodeAffinity{
Required: &v1.NodeSelector{
NodeSelectorTerms: nodeSelectorTerms,
},
}
}
}
return pv, nil
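For an unzoned disk the loop above yields one NodeSelectorTerm per fault domain, each pinning both the region and zone labels. A standalone sketch (the literal keys stand in for kubeletapis.LabelZoneRegion and kubeletapis.LabelZoneFailureDomain):

package main

import (
	"fmt"
	"strconv"

	"k8s.io/api/core/v1"
)

// faultDomainTerms builds one term per fault domain (at most 3 per region),
// so an unzoned PV can be scheduled to nodes in any of them.
func faultDomainTerms(region string) []v1.NodeSelectorTerm {
	terms := make([]v1.NodeSelectorTerm, 0, 3)
	for i := 0; i < 3; i++ {
		terms = append(terms, v1.NodeSelectorTerm{
			MatchExpressions: []v1.NodeSelectorRequirement{
				{Key: "failure-domain.beta.kubernetes.io/region", Operator: v1.NodeSelectorOpIn, Values: []string{region}},
				{Key: "failure-domain.beta.kubernetes.io/zone", Operator: v1.NodeSelectorOpIn, Values: []string{strconv.Itoa(i)}},
			},
		})
	}
	return terms
}

func main() {
	fmt.Println(len(faultDomainTerms("eastus2"))) // 3
}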

View File

@ -0,0 +1,96 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure_dd
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
"k8s.io/api/core/v1"
)
func TestParseZoned(t *testing.T) {
tests := []struct {
msg string
zoneString string
diskKind v1.AzureDataDiskKind
expected bool
expectError bool
}{
{
msg: "managed disk should default to zoned",
diskKind: v1.AzureManagedDisk,
expected: true,
},
{
msg: "shared blob disk should default to un-zoned",
diskKind: v1.AzureSharedBlobDisk,
expected: false,
},
{
msg: "shared dedicated disk should default to un-zoned",
diskKind: v1.AzureDedicatedBlobDisk,
expected: false,
},
{
msg: "managed disk should support zoned=true",
diskKind: v1.AzureManagedDisk,
zoneString: "true",
expected: true,
},
{
msg: "managed disk should support zoned=false",
diskKind: v1.AzureManagedDisk,
zoneString: "false",
expected: false,
},
{
msg: "shared blob disk should support zoned=false",
diskKind: v1.AzureSharedBlobDisk,
zoneString: "false",
expected: false,
},
{
msg: "shared blob disk shouldn't support zoned=true",
diskKind: v1.AzureSharedBlobDisk,
zoneString: "true",
expectError: true,
},
{
msg: "shared dedicated disk should support zoned=false",
diskKind: v1.AzureDedicatedBlobDisk,
zoneString: "false",
expected: false,
},
{
msg: "dedicated blob disk shouldn't support zoned=true",
diskKind: v1.AzureDedicatedBlobDisk,
zoneString: "true",
expectError: true,
},
}
for i, test := range tests {
real, err := parseZoned(test.zoneString, test.diskKind)
if test.expectError {
assert.Error(t, err, fmt.Sprintf("TestCase[%d]: %s", i, test.msg))
} else {
assert.Equal(t, test.expected, real, fmt.Sprintf("TestCase[%d]: %s", i, test.msg))
}
}
}

View File

@ -16,18 +16,19 @@ go_library(
],
importpath = "k8s.io/kubernetes/pkg/volume/azure_file",
deps = [
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/azure:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/cloud-provider:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
@ -41,11 +42,11 @@ go_test(
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
],
)

View File

@ -22,12 +22,12 @@ import (
"os"
"runtime"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/cloudprovider"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
"k8s.io/kubernetes/pkg/util/mount"
kstrings "k8s.io/kubernetes/pkg/util/strings"
@ -237,20 +237,20 @@ func (b *azureFileMounter) SetUp(fsGroup *int64) error {
func (b *azureFileMounter) SetUpAt(dir string, fsGroup *int64) error {
notMnt, err := b.mounter.IsLikelyNotMountPoint(dir)
glog.V(4).Infof("AzureFile mount set up: %s %v %v", dir, !notMnt, err)
klog.V(4).Infof("AzureFile mount set up: %s %v %v", dir, !notMnt, err)
if err != nil && !os.IsNotExist(err) {
return err
}
if !notMnt {
// testing original mount point, make sure the mount link is valid
if _, err := ioutil.ReadDir(dir); err == nil {
glog.V(4).Infof("azureFile - already mounted to target %s", dir)
klog.V(4).Infof("azureFile - already mounted to target %s", dir)
return nil
}
// mount link is invalid, now unmount and remount later
glog.Warningf("azureFile - ReadDir %s failed with %v, unmount this directory", dir, err)
klog.Warningf("azureFile - ReadDir %s failed with %v, unmount this directory", dir, err)
if err := b.mounter.Unmount(dir); err != nil {
glog.Errorf("azureFile - Unmount directory %s failed with %v", dir, err)
klog.Errorf("azureFile - Unmount directory %s failed with %v", dir, err)
return err
}
notMnt = true
@ -269,7 +269,9 @@ func (b *azureFileMounter) SetUpAt(dir string, fsGroup *int64) error {
if runtime.GOOS == "windows" {
mountOptions = []string{fmt.Sprintf("AZURE\\%s", accountName), accountKey}
} else {
os.MkdirAll(dir, 0700)
if err := os.MkdirAll(dir, 0700); err != nil {
return err
}
// parameters suggested by https://azure.microsoft.com/en-us/documentation/articles/storage-how-to-use-files-linux/
options := []string{fmt.Sprintf("username=%s,password=%s", accountName, accountKey)}
if b.readOnly {
@ -283,22 +285,22 @@ func (b *azureFileMounter) SetUpAt(dir string, fsGroup *int64) error {
if err != nil {
notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return err
}
if !notMnt {
if mntErr = b.mounter.Unmount(dir); mntErr != nil {
glog.Errorf("Failed to unmount: %v", mntErr)
klog.Errorf("Failed to unmount: %v", mntErr)
return err
}
notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return err
}
if !notMnt {
// This is very odd, we don't expect it. We'll try again next sync loop.
glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir)
klog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir)
return err
}
}
@ -374,7 +376,7 @@ func getStorageEndpointSuffix(cloudprovider cloudprovider.Interface) string {
const publicCloudStorageEndpointSuffix = "core.windows.net"
azure, err := getAzureCloud(cloudprovider)
if err != nil {
glog.Warningf("No Azure cloud provider found. Using the Azure public cloud endpoint: %s", publicCloudStorageEndpointSuffix)
klog.Warningf("No Azure cloud provider found. Using the Azure public cloud endpoint: %s", publicCloudStorageEndpointSuffix)
return publicCloudStorageEndpointSuffix
}
return azure.Environment.StorageEndpointSuffix

View File

@ -20,11 +20,13 @@ import (
"fmt"
"strings"
"github.com/golang/glog"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage"
"k8s.io/klog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/cloudprovider"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
utilstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
@ -38,7 +40,7 @@ var _ volume.ProvisionableVolumePlugin = &azureFilePlugin{}
// azure cloud provider should implement it
type azureCloudProvider interface {
// create a file share
CreateFileShare(shareName, accountName, accountType, location string, requestGiB int) (string, string, error)
CreateFileShare(shareName, accountName, accountType, accountKind, resourceGroup, location string, requestGiB int) (string, string, error)
// delete a file share
DeleteFileShare(accountName, accountKey, shareName string) error
// resize a file share
@ -54,7 +56,7 @@ type azureFileDeleter struct {
func (plugin *azureFilePlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
azure, err := getAzureCloudProvider(plugin.host.GetCloudProvider())
if err != nil {
glog.V(4).Infof("failed to get azure provider")
klog.V(4).Infof("failed to get azure provider")
return nil, err
}
@ -90,7 +92,7 @@ func (plugin *azureFilePlugin) newDeleterInternal(spec *volume.Spec, util azureU
func (plugin *azureFilePlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
azure, err := getAzureCloudProvider(plugin.host.GetCloudProvider())
if err != nil {
glog.V(4).Infof("failed to get azure provider")
klog.V(4).Infof("failed to get azure provider")
return nil, err
}
if len(options.PVC.Spec.AccessModes) == 0 {
@ -118,7 +120,7 @@ func (f *azureFileDeleter) GetPath() string {
}
func (f *azureFileDeleter) Delete() error {
glog.V(4).Infof("deleting volume %s", f.shareName)
klog.V(4).Infof("deleting volume %s", f.shareName)
return f.azureProvider.DeleteFileShare(f.accountName, f.accountKey, f.shareName)
}
@ -139,7 +141,7 @@ func (a *azureFileProvisioner) Provision(selectedNode *v1.Node, allowedTopologie
return nil, fmt.Errorf("%s does not support block volume provisioning", a.plugin.GetPluginName())
}
var sku, location, account string
var sku, resourceGroup, location, account string
// File share name has a length limit of 63, and it cannot contain two consecutive '-'s.
name := util.GenerateVolumeName(a.options.ClusterName, a.options.PVName, 63)
@ -160,6 +162,8 @@ func (a *azureFileProvisioner) Provision(selectedNode *v1.Node, allowedTopologie
account = v
case "secretnamespace":
secretNamespace = v
case "resourcegroup":
resourceGroup = v
default:
return nil, fmt.Errorf("invalid option %q for volume plugin %s", k, a.plugin.GetPluginName())
}
@ -169,7 +173,12 @@ func (a *azureFileProvisioner) Provision(selectedNode *v1.Node, allowedTopologie
return nil, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on Azure file")
}
account, key, err := a.azureProvider.CreateFileShare(name, account, sku, location, requestGiB)
// when using Azure Files premium, the account kind should be specified as FileStorage
accountKind := string(storage.StorageV2)
if strings.HasPrefix(strings.ToLower(sku), "premium") {
accountKind = string(storage.FileStorage)
}
account, key, err := a.azureProvider.CreateFileShare(name, account, sku, accountKind, resourceGroup, location, requestGiB)
if err != nil {
return nil, err
}
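The SKU-to-kind rule is simple prefix matching; accountKindForSKU below is a hypothetical mirror using plain strings in place of the storage SDK's Kind constants:

package main

import (
	"fmt"
	"strings"
)

// accountKindForSKU: premium file shares require a FileStorage account;
// everything else uses the general-purpose StorageV2 kind.
func accountKindForSKU(sku string) string {
	if strings.HasPrefix(strings.ToLower(sku), "premium") {
		return "FileStorage"
	}
	return "StorageV2"
}

func main() {
	fmt.Println(accountKindForSKU("Premium_LRS"))  // FileStorage
	fmt.Println(accountKindForSKU("Standard_LRS")) // StorageV2
}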

View File

@ -31,8 +31,8 @@ const (
dirMode = "dir_mode"
gid = "gid"
vers = "vers"
defaultFileMode = "0755"
defaultDirMode = "0755"
defaultFileMode = "0777"
defaultDirMode = "0777"
defaultVers = "3.0"
)

View File

@ -18,10 +18,10 @@ go_library(
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
@ -33,9 +33,9 @@ go_test(
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
],
)

View File

@ -1,5 +1,6 @@
approvers:
- rootfs
- jsafrane
- saad-ali
reviewers:
- rootfs
@ -7,3 +8,4 @@ reviewers:
- jsafrane
- jingxu97
- msau42
- cofyc

View File

@ -24,17 +24,17 @@ import (
"runtime"
"strings"
"github.com/golang/glog"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/util/mount"
utilstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
)
// This is the primary entrypoint for volume plugins.
// ProbeVolumePlugins is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&cephfsPlugin{nil}}
}
@ -110,7 +110,7 @@ func (plugin *cephfsPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.
}
for name, data := range secrets.Data {
secret = string(data)
glog.V(4).Infof("found ceph secret info: %s", name)
klog.V(4).Infof("found ceph secret info: %s", name)
}
}
return plugin.newMounterInternal(spec, pod.UID, plugin.host.GetMounter(plugin.GetPluginName()), secret)
@ -144,7 +144,7 @@ func (plugin *cephfsPlugin) newMounterInternal(spec *volume.Spec, podUID types.U
path: path,
secret: secret,
id: id,
secret_file: secretFile,
secretFile: secretFile,
readonly: readOnly,
mounter: mounter,
plugin: plugin,
@ -182,16 +182,16 @@ func (plugin *cephfsPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*
// CephFS volumes represent a bare host file or directory mount of an CephFS export.
type cephfs struct {
volName string
podUID types.UID
mon []string
path string
id string
secret string
secret_file string
readonly bool
mounter mount.Interface
plugin *cephfsPlugin
volName string
podUID types.UID
mon []string
path string
id string
secret string
secretFile string
readonly bool
mounter mount.Interface
plugin *cephfsPlugin
volume.MetricsNil
mountOptions []string
}
@ -213,7 +213,7 @@ func (cephfsVolume *cephfsMounter) GetAttributes() volume.Attributes {
// Checks prior to mount operations to verify that the required components (binaries, etc.)
// to mount the volume are available on the underlying node.
// If not, it returns an error
func (cephfsMounter *cephfsMounter) CanMount() error {
func (cephfsVolume *cephfsMounter) CanMount() error {
return nil
}
@ -225,7 +225,7 @@ func (cephfsVolume *cephfsMounter) SetUp(fsGroup *int64) error {
// SetUpAt attaches the disk and bind mounts to the volume path.
func (cephfsVolume *cephfsMounter) SetUpAt(dir string, fsGroup *int64) error {
notMnt, err := cephfsVolume.mounter.IsLikelyNotMountPoint(dir)
glog.V(4).Infof("CephFS mount set up: %s %v %v", dir, !notMnt, err)
klog.V(4).Infof("CephFS mount set up: %s %v %v", dir, !notMnt, err)
if err != nil && !os.IsNotExist(err) {
return err
}
@ -239,7 +239,7 @@ func (cephfsVolume *cephfsMounter) SetUpAt(dir string, fsGroup *int64) error {
// check whether it belongs to fuse, if not, default to use kernel mount.
if cephfsVolume.checkFuseMount() {
glog.V(4).Info("CephFS fuse mount.")
klog.V(4).Info("CephFS fuse mount.")
err = cephfsVolume.execFuseMount(dir)
// clean up regardless of whether the fuse mount fails.
keyringPath := cephfsVolume.GetKeyringPath()
@ -250,12 +250,12 @@ func (cephfsVolume *cephfsMounter) SetUpAt(dir string, fsGroup *int64) error {
if err == nil {
// cephfs fuse mount succeeded.
return nil
} else {
// if cephfs fuse mount failed, fallback to kernel mount.
glog.V(4).Infof("CephFS fuse mount failed: %v, fallback to kernel mount.", err)
}
// if cephfs fuse mount failed, fallback to kernel mount.
klog.V(2).Infof("CephFS fuse mount failed: %v, fallback to kernel mount.", err)
}
glog.V(4).Info("CephFS kernel mount.")
klog.V(4).Info("CephFS kernel mount.")
err = cephfsVolume.execMount(dir)
if err != nil {
@ -298,19 +298,19 @@ func (cephfsVolume *cephfs) GetKeyringPath() string {
func (cephfsVolume *cephfs) execMount(mountpoint string) error {
// cephfs mount option
ceph_opt := ""
cephOpt := ""
// override secretfile if secret is provided
if cephfsVolume.secret != "" {
ceph_opt = "name=" + cephfsVolume.id + ",secret=" + cephfsVolume.secret
cephOpt = "name=" + cephfsVolume.id + ",secret=" + cephfsVolume.secret
} else {
ceph_opt = "name=" + cephfsVolume.id + ",secretfile=" + cephfsVolume.secret_file
cephOpt = "name=" + cephfsVolume.id + ",secretfile=" + cephfsVolume.secretFile
}
// build option array
opt := []string{}
if cephfsVolume.readonly {
opt = append(opt, "ro")
}
opt = append(opt, ceph_opt)
opt = append(opt, cephOpt)
// build src like mon1:6789,mon2:6789,mon3:6789:/
hosts := cephfsVolume.mon
@ -323,20 +323,20 @@ func (cephfsVolume *cephfs) execMount(mountpoint string) error {
}
src += hosts[i] + ":" + cephfsVolume.path
mountOptions := util.JoinMountOptions(cephfsVolume.mountOptions, opt)
if err := cephfsVolume.mounter.Mount(src, mountpoint, "ceph", mountOptions); err != nil {
opt = util.JoinMountOptions(cephfsVolume.mountOptions, opt)
if err := cephfsVolume.mounter.Mount(src, mountpoint, "ceph", opt); err != nil {
return fmt.Errorf("CephFS: mount failed: %v", err)
}
return nil
}
func (cephfsMounter *cephfsMounter) checkFuseMount() bool {
execute := cephfsMounter.plugin.host.GetExec(cephfsMounter.plugin.GetPluginName())
func (cephfsVolume *cephfsMounter) checkFuseMount() bool {
execute := cephfsVolume.plugin.host.GetExec(cephfsVolume.plugin.GetPluginName())
switch runtime.GOOS {
case "linux":
if _, err := execute.Run("/usr/bin/test", "-x", "/sbin/mount.fuse.ceph"); err == nil {
glog.V(4).Info("/sbin/mount.fuse.ceph exists, it should be fuse mount.")
klog.V(4).Info("/sbin/mount.fuse.ceph exists, it should be fuse mount.")
return true
}
return false
@ -346,12 +346,12 @@ func (cephfsMounter *cephfsMounter) checkFuseMount() bool {
func (cephfsVolume *cephfs) execFuseMount(mountpoint string) error {
// cephfs keyring file
keyring_file := ""
keyringFile := ""
// override secretfile if secret is provided
if cephfsVolume.secret != "" {
// TODO: cephfs fuse currently doesn't support secret option,
// remove keyring file create once secret option is supported.
glog.V(4).Info("cephfs mount begin using fuse.")
klog.V(4).Info("cephfs mount begin using fuse.")
keyringPath := cephfsVolume.GetKeyringPath()
os.MkdirAll(keyringPath, 0750)
@ -370,20 +370,20 @@ func (cephfsVolume *cephfs) execFuseMount(mountpoint string) error {
writerContext := fmt.Sprintf("cephfuse:%v.keyring", cephfsVolume.id)
writer, err := util.NewAtomicWriter(keyringPath, writerContext)
if err != nil {
glog.Errorf("failed to create atomic writer: %v", err)
klog.Errorf("failed to create atomic writer: %v", err)
return err
}
err = writer.Write(payload)
if err != nil {
glog.Errorf("failed to write payload to dir: %v", err)
klog.Errorf("failed to write payload to dir: %v", err)
return err
}
keyring_file = path.Join(keyringPath, fileName)
keyringFile = path.Join(keyringPath, fileName)
} else {
keyring_file = cephfsVolume.secret_file
keyringFile = cephfsVolume.secretFile
}
// build src like mon1:6789,mon2:6789,mon3:6789:/
@ -399,7 +399,7 @@ func (cephfsVolume *cephfs) execFuseMount(mountpoint string) error {
mountArgs := []string{}
mountArgs = append(mountArgs, "-k")
mountArgs = append(mountArgs, keyring_file)
mountArgs = append(mountArgs, keyringFile)
mountArgs = append(mountArgs, "-m")
mountArgs = append(mountArgs, src)
mountArgs = append(mountArgs, mountpoint)
@ -408,11 +408,22 @@ func (cephfsVolume *cephfs) execFuseMount(mountpoint string) error {
mountArgs = append(mountArgs, "--id")
mountArgs = append(mountArgs, cephfsVolume.id)
glog.V(4).Infof("Mounting cmd ceph-fuse with arguments (%s)", mountArgs)
// build option array
opt := []string{}
if cephfsVolume.readonly {
opt = append(opt, "ro")
}
opt = util.JoinMountOptions(cephfsVolume.mountOptions, opt)
if len(opt) > 0 {
mountArgs = append(mountArgs, "-o")
mountArgs = append(mountArgs, strings.Join(opt, ","))
}
klog.V(4).Infof("Mounting cmd ceph-fuse with arguments (%s)", mountArgs)
command := exec.Command("ceph-fuse", mountArgs...)
output, err := command.CombinedOutput()
if err != nil || !(strings.Contains(string(output), "starting fuse")) {
return fmt.Errorf("Ceph-fuse failed: %v\narguments: %s\nOutput: %s\n", err, mountArgs, string(output))
return fmt.Errorf("Ceph-fuse failed: %v\narguments: %s\nOutput: %s", err, mountArgs, string(output))
}
return nil
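
The fuse path shells out to ceph-fuse and checks both the exit code and the combined output, since the helper prints "starting fuse" on success. A self-contained sketch of that pattern; the paths and monitor address are illustrative:

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// runFuseMount mirrors the ceph-fuse invocation above: build the argument
// list, run the command, and treat a missing "starting fuse" marker in the
// output as a failure even if the exit code was zero.
func runFuseMount(keyring, monitors, mountpoint string) error {
	args := []string{"-k", keyring, "-m", monitors, mountpoint}
	out, err := exec.Command("ceph-fuse", args...).CombinedOutput()
	if err != nil || !strings.Contains(string(out), "starting fuse") {
		return fmt.Errorf("ceph-fuse failed: %v, output: %s", err, out)
	}
	return nil
}

func main() {
	fmt.Println(runFuseMount("/etc/ceph/keyring", "mon1:6789", "/mnt/cephfs"))
}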


@ -75,7 +75,6 @@ func TestPlugin(t *testing.T) {
},
},
}
mounter, err := plug.(*cephfsPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &mount.FakeMounter{}, "secrets")
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
@ -223,5 +222,26 @@ func TestGetSecretNameAndNamespaceForPV(t *testing.T) {
err, resultNs, resultName)
}
}
}
func TestGetAccessModes(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("cephfs_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/cephfs")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
modes := plug.GetAccessModes()
for _, v := range modes {
if !volumetest.ContainsAccessMode(modes, v) {
t.Errorf("Expected AccessModeTypes: %s", v)
}
}
}
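
For reference, the volumetest.ContainsAccessMode helper this test relies on amounts to a linear scan over the advertised modes; a minimal assumed equivalent, not taken from this commit:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// containsAccessMode reports whether mode is among the plugin's
// advertised access modes.
func containsAccessMode(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool {
	for _, m := range modes {
		if m == mode {
			return true
		}
	}
	return false
}

func main() {
	modes := []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce, v1.ReadWriteMany}
	fmt.Println(containsAccessMode(modes, v1.ReadOnlyMany)) // false
}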


@ -14,6 +14,6 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Package nfs contains the internal representation of Ceph file system
// Package cephfs contains the internal representation of Ceph file system
// (CephFS) volumes.
package cephfs // import "k8s.io/kubernetes/pkg/volume/cephfs"


@ -11,27 +11,31 @@ go_library(
srcs = [
"attacher.go",
"cinder.go",
"cinder_block.go",
"cinder_util.go",
"doc.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/cinder",
deps = [
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/openstack:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/util/keymutex:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//pkg/volume/util/volumepathhandler:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/cloud-provider:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],
)
@ -40,19 +44,22 @@ go_test(
name = "go_default_test",
srcs = [
"attacher_test.go",
"cinder_block_test.go",
"cinder_test.go",
],
embed = [":go_default_library"],
deps = [
"//pkg/cloudprovider:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
"//staging/src/k8s.io/cloud-provider:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)


@ -24,10 +24,10 @@ import (
"strings"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
@ -40,8 +40,12 @@ type cinderDiskAttacher struct {
var _ volume.Attacher = &cinderDiskAttacher{}
var _ volume.DeviceMounter = &cinderDiskAttacher{}
var _ volume.AttachableVolumePlugin = &cinderPlugin{}
var _ volume.DeviceMountableVolumePlugin = &cinderPlugin{}
const (
probeVolumeInitDelay = 1 * time.Second
probeVolumeFactor = 2.0
@ -67,9 +71,13 @@ func (plugin *cinderPlugin) NewAttacher() (volume.Attacher, error) {
}, nil
}
func (plugin *cinderPlugin) NewDeviceMounter() (volume.DeviceMounter, error) {
return plugin.NewAttacher()
}
func (plugin *cinderPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
mounter := plugin.host.GetMounter(plugin.GetPluginName())
return mount.GetMountRefs(mounter, deviceMountPath)
return mounter.GetMountRefs(deviceMountPath)
}
func (attacher *cinderDiskAttacher) waitOperationFinished(volumeID string) error {
@ -137,31 +145,31 @@ func (attacher *cinderDiskAttacher) Attach(spec *volume.Spec, nodeName types.Nod
attached, err := attacher.cinderProvider.DiskIsAttached(instanceID, volumeID)
if err != nil {
// Log error and continue with attach
glog.Warningf(
klog.Warningf(
"Error checking if volume (%q) is already attached to current instance (%q). Will continue and try attach anyway. err=%v",
volumeID, instanceID, err)
}
if err == nil && attached {
// Volume is already attached to instance.
glog.Infof("Attach operation is successful. volume %q is already attached to instance %q.", volumeID, instanceID)
klog.Infof("Attach operation is successful. volume %q is already attached to instance %q.", volumeID, instanceID)
} else {
_, err = attacher.cinderProvider.AttachDisk(instanceID, volumeID)
if err == nil {
if err = attacher.waitDiskAttached(instanceID, volumeID); err != nil {
glog.Errorf("Error waiting for volume %q to be attached from node %q: %v", volumeID, nodeName, err)
klog.Errorf("Error waiting for volume %q to be attached from node %q: %v", volumeID, nodeName, err)
return "", err
}
glog.Infof("Attach operation successful: volume %q attached to instance %q.", volumeID, instanceID)
klog.Infof("Attach operation successful: volume %q attached to instance %q.", volumeID, instanceID)
} else {
glog.Infof("Attach volume %q to instance %q failed with: %v", volumeID, instanceID, err)
klog.Infof("Attach volume %q to instance %q failed with: %v", volumeID, instanceID, err)
return "", err
}
}
devicePath, err := attacher.cinderProvider.GetAttachmentDiskPath(instanceID, volumeID)
if err != nil {
glog.Infof("Can not get device path of volume %q which be attached to instance %q, failed with: %v", volumeID, instanceID, err)
klog.Infof("Can not get device path of volume %q which be attached to instance %q, failed with: %v", volumeID, instanceID, err)
return "", err
}
@ -175,7 +183,7 @@ func (attacher *cinderDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nod
for _, spec := range specs {
volumeID, _, _, err := getVolumeInfo(spec)
if err != nil {
glog.Errorf("Error getting volume (%q) source : %v", spec.Name(), err)
klog.Errorf("Error getting volume (%q) source : %v", spec.Name(), err)
continue
}
@ -187,7 +195,7 @@ func (attacher *cinderDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nod
attachedResult, err := attacher.cinderProvider.DisksAreAttachedByName(nodeName, volumeIDList)
if err != nil {
// Log error and continue with attach
glog.Errorf(
klog.Errorf(
"Error checking if Volumes (%v) are already attached to current node (%q). Will continue and try attach anyway. err=%v",
volumeIDList, nodeName, err)
return volumesAttachedCheck, err
@ -197,7 +205,7 @@ func (attacher *cinderDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nod
if !attached {
spec := volumeSpecMap[volumeID]
volumesAttachedCheck[spec] = false
glog.V(2).Infof("VolumesAreAttached: check volume %q (specName: %q) is no longer attached", volumeID, spec.Name())
klog.V(2).Infof("VolumesAreAttached: check volume %q (specName: %q) is no longer attached", volumeID, spec.Name())
}
}
return volumesAttachedCheck, nil
@ -223,7 +231,7 @@ func (attacher *cinderDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath
for {
select {
case <-ticker.C:
glog.V(5).Infof("Checking Cinder disk %q is attached.", volumeID)
klog.V(5).Infof("Checking Cinder disk %q is attached.", volumeID)
probeAttachedVolume()
if !attacher.cinderProvider.ShouldTrustDevicePath() {
// Using the Cinder volume ID, find the real device path (See Issue #33128)
@ -231,11 +239,11 @@ func (attacher *cinderDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath
}
exists, err := volumeutil.PathExists(devicePath)
if exists && err == nil {
glog.Infof("Successfully found attached Cinder disk %q at %v.", volumeID, devicePath)
klog.Infof("Successfully found attached Cinder disk %q at %v.", volumeID, devicePath)
return devicePath, nil
}
// Log an error, and continue checking periodically
glog.Errorf("Error: could not find attached Cinder disk %q (path: %q): %v", volumeID, devicePath, err)
klog.Errorf("Error: could not find attached Cinder disk %q (path: %q): %v", volumeID, devicePath, err)
// Using exponential backoff instead of linear
ticker.Stop()
duration = time.Duration(float64(duration) * probeVolumeFactor)
@ -299,6 +307,8 @@ type cinderDiskDetacher struct {
var _ volume.Detacher = &cinderDiskDetacher{}
var _ volume.DeviceUnmounter = &cinderDiskDetacher{}
func (plugin *cinderPlugin) NewDetacher() (volume.Detacher, error) {
cinder, err := plugin.getCloudProvider()
if err != nil {
@ -310,6 +320,10 @@ func (plugin *cinderPlugin) NewDetacher() (volume.Detacher, error) {
}, nil
}
func (plugin *cinderPlugin) NewDeviceUnmounter() (volume.DeviceUnmounter, error) {
return plugin.NewDetacher()
}
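
The attach and detach paths above poll the cloud provider using an exponential backoff built from wait.Backoff. A minimal sketch of that polling pattern; the delay, factor, and step values here are illustrative, not the constants used by this plugin:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitUntilDone retries a status probe with exponential backoff until it
// reports completion or the backoff budget is exhausted.
func waitUntilDone(probe func() (bool, error)) error {
	backoff := wait.Backoff{
		Duration: 1 * time.Second, // initial delay (illustrative)
		Factor:   1.2,             // growth factor per step
		Steps:    10,              // give up after this many probes
	}
	return wait.ExponentialBackoff(backoff, func() (bool, error) {
		return probe()
	})
}

func main() {
	n := 0
	err := waitUntilDone(func() (bool, error) {
		n++
		return n >= 3, nil // pretend the operation finishes on the third probe
	})
	fmt.Println(err)
}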
func (detacher *cinderDiskDetacher) waitOperationFinished(volumeID string) error {
backoff := wait.Backoff{
Duration: operationFinishInitDelay,
@ -365,26 +379,26 @@ func (detacher *cinderDiskDetacher) Detach(volumeName string, nodeName types.Nod
attached, instanceID, err := detacher.cinderProvider.DiskIsAttachedByName(nodeName, volumeID)
if err != nil {
// Log error and continue with detach
glog.Errorf(
klog.Errorf(
"Error checking if volume (%q) is already attached to current node (%q). Will continue and try detach anyway. err=%v",
volumeID, nodeName, err)
}
if err == nil && !attached {
// Volume is already detached from node.
glog.Infof("detach operation was successful. volume %q is already detached from node %q.", volumeID, nodeName)
klog.Infof("detach operation was successful. volume %q is already detached from node %q.", volumeID, nodeName)
return nil
}
if err = detacher.cinderProvider.DetachDisk(instanceID, volumeID); err != nil {
glog.Errorf("Error detaching volume %q from node %q: %v", volumeID, nodeName, err)
klog.Errorf("Error detaching volume %q from node %q: %v", volumeID, nodeName, err)
return err
}
if err = detacher.waitDiskDetached(instanceID, volumeID); err != nil {
glog.Errorf("Error waiting for volume %q to detach from node %q: %v", volumeID, nodeName, err)
klog.Errorf("Error waiting for volume %q to detach from node %q: %v", volumeID, nodeName, err)
return err
}
glog.Infof("detached volume %q from node %q", volumeID, nodeName)
klog.Infof("detached volume %q from node %q", volumeID, nodeName)
return nil
}


@ -24,15 +24,15 @@ import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/kubernetes/pkg/cloudprovider"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
"fmt"
"sort"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog"
)
const (
@ -468,7 +468,7 @@ func (testcase *testcase) AttachDisk(instanceID, volumeID string) (string, error
return "", errors.New("unexpected AttachDisk call: wrong instanceID")
}
glog.V(4).Infof("AttachDisk call: %s, %s, returning %q, %v", volumeID, instanceID, expected.retDeviceName, expected.ret)
klog.V(4).Infof("AttachDisk call: %s, %s, returning %q, %v", volumeID, instanceID, expected.retDeviceName, expected.ret)
testcase.attachOrDetach = &attachStatus
return expected.retDeviceName, expected.ret
@ -494,7 +494,7 @@ func (testcase *testcase) DetachDisk(instanceID, volumeID string) error {
return errors.New("unexpected DetachDisk call: wrong instanceID")
}
glog.V(4).Infof("DetachDisk call: %s, %s, returning %v", volumeID, instanceID, expected.ret)
klog.V(4).Infof("DetachDisk call: %s, %s, returning %v", volumeID, instanceID, expected.ret)
testcase.attachOrDetach = &detachStatus
return expected.ret
@ -504,11 +504,11 @@ func (testcase *testcase) OperationPending(diskName string) (bool, string, error
expected := &testcase.operationPending
if expected.volumeStatus == VolumeStatusPending {
glog.V(4).Infof("OperationPending call: %s, returning %v, %v, %v", diskName, expected.pending, expected.volumeStatus, expected.ret)
klog.V(4).Infof("OperationPending call: %s, returning %v, %v, %v", diskName, expected.pending, expected.volumeStatus, expected.ret)
return true, expected.volumeStatus, expected.ret
}
glog.V(4).Infof("OperationPending call: %s, returning %v, %v, %v", diskName, expected.pending, expected.volumeStatus, expected.ret)
klog.V(4).Infof("OperationPending call: %s, returning %v, %v, %v", diskName, expected.pending, expected.volumeStatus, expected.ret)
return false, expected.volumeStatus, expected.ret
}
@ -542,7 +542,7 @@ func (testcase *testcase) DiskIsAttached(instanceID, volumeID string) (bool, err
return false, errors.New("unexpected DiskIsAttached call: wrong instanceID")
}
glog.V(4).Infof("DiskIsAttached call: %s, %s, returning %v, %v", volumeID, instanceID, expected.isAttached, expected.ret)
klog.V(4).Infof("DiskIsAttached call: %s, %s, returning %v, %v", volumeID, instanceID, expected.isAttached, expected.ret)
return expected.isAttached, expected.ret
}
@ -566,7 +566,7 @@ func (testcase *testcase) GetAttachmentDiskPath(instanceID, volumeID string) (st
return "", errors.New("unexpected GetAttachmentDiskPath call: wrong instanceID")
}
glog.V(4).Infof("GetAttachmentDiskPath call: %s, %s, returning %v, %v", volumeID, instanceID, expected.retPath, expected.ret)
klog.V(4).Infof("GetAttachmentDiskPath call: %s, %s, returning %v, %v", volumeID, instanceID, expected.retPath, expected.ret)
return expected.retPath, expected.ret
}
@ -610,13 +610,13 @@ func (testcase *testcase) DiskIsAttachedByName(nodeName types.NodeName, volumeID
return false, instanceID, errors.New("unexpected DiskIsAttachedByName call: wrong instanceID")
}
glog.V(4).Infof("DiskIsAttachedByName call: %s, %s, returning %v, %v", volumeID, nodeName, expected.isAttached, expected.instanceID, expected.ret)
klog.V(4).Infof("DiskIsAttachedByName call: %s, %s, returning %v, %v, %v", volumeID, nodeName, expected.isAttached, expected.instanceID, expected.ret)
return expected.isAttached, expected.instanceID, expected.ret
}
func (testcase *testcase) CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, bool, error) {
return "", "", false, errors.New("Not implemented")
func (testcase *testcase) CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, string, bool, error) {
return "", "", "", false, errors.New("Not implemented")
}
func (testcase *testcase) GetDevicePath(volumeID string) string {
@ -664,7 +664,7 @@ func (testcase *testcase) DisksAreAttached(instanceID string, volumeIDs []string
return areAttached, errors.New("Unexpected DisksAreAttached call: wrong instanceID")
}
glog.V(4).Infof("DisksAreAttached call: %v, %s, returning %v, %v", volumeIDs, instanceID, expected.areAttached, expected.ret)
klog.V(4).Infof("DisksAreAttached call: %v, %s, returning %v, %v", volumeIDs, instanceID, expected.areAttached, expected.ret)
return expected.areAttached, expected.ret
}
@ -694,7 +694,7 @@ func (testcase *testcase) DisksAreAttachedByName(nodeName types.NodeName, volume
return areAttached, errors.New("Unexpected DisksAreAttachedByName call: wrong instanceID")
}
glog.V(4).Infof("DisksAreAttachedByName call: %v, %s, returning %v, %v", volumeIDs, nodeName, expected.areAttached, expected.ret)
klog.V(4).Infof("DisksAreAttachedByName call: %v, %s, returning %v, %v", volumeIDs, nodeName, expected.areAttached, expected.ret)
return expected.areAttached, expected.ret
}


@ -22,13 +22,15 @@ import (
"os"
"path"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/cloudprovider"
utilfeature "k8s.io/apiserver/pkg/util/feature"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/cloudprovider/providers/openstack"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/util/keymutex"
"k8s.io/kubernetes/pkg/util/mount"
kstrings "k8s.io/kubernetes/pkg/util/strings"
@ -51,7 +53,7 @@ type BlockStorageProvider interface {
AttachDisk(instanceID, volumeID string) (string, error)
DetachDisk(instanceID, volumeID string) error
DeleteVolume(volumeID string) error
CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, bool, error)
CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, string, bool, error)
GetDevicePath(volumeID string) string
InstanceID() (string, error)
GetAttachmentDiskPath(instanceID, volumeID string) (string, error)
@ -85,7 +87,7 @@ func getPath(uid types.UID, volName string, host volume.VolumeHost) string {
func (plugin *cinderPlugin) Init(host volume.VolumeHost) error {
plugin.host = host
plugin.volumeLocks = keymutex.NewKeyMutex()
plugin.volumeLocks = keymutex.NewHashed(0)
return nil
}
@ -146,7 +148,9 @@ func (plugin *cinderPlugin) newMounterInternal(spec *volume.Spec, podUID types.U
},
fsType: fsType,
readOnly: readOnly,
blockDeviceMounter: util.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil
blockDeviceMounter: util.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host),
mountOptions: util.MountOptionFromSpec(spec),
}, nil
}
func (plugin *cinderPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
@ -230,7 +234,7 @@ func (plugin *cinderPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*
if err != nil {
return nil, err
}
glog.V(4).Infof("Found volume %s mounted to %s", sourceName, mountPath)
klog.V(4).Infof("Found volume %s mounted to %s", sourceName, mountPath)
cinderVolume := &v1.Volume{
Name: volumeName,
VolumeSource: v1.VolumeSource{
@ -259,10 +263,17 @@ func (plugin *cinderPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize resour
return oldSize, err
}
glog.V(2).Infof("volume %s expanded to new size %d successfully", volumeID, int(newSize.Value()))
klog.V(2).Infof("volume %s expanded to new size %d successfully", volumeID, int(newSize.Value()))
return expandedSize, nil
}
func (plugin *cinderPlugin) ExpandFS(spec *volume.Spec, devicePath, deviceMountPath string, _, _ resource.Quantity) error {
_, err := util.GenericResizeFS(plugin.host, plugin.GetPluginName(), devicePath, deviceMountPath)
return err
}
var _ volume.FSResizableVolumePlugin = &cinderPlugin{}
func (plugin *cinderPlugin) RequiresFSResize() bool {
return true
}
@ -274,7 +285,7 @@ type cdManager interface {
// Detaches the disk from the kubelet's host machine.
DetachDisk(unmounter *cinderVolumeUnmounter) error
// Creates a volume
CreateVolume(provisioner *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, labels map[string]string, fstype string, err error)
CreateVolume(provisioner *cinderVolumeProvisioner, node *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (volumeID string, volumeSizeGB int, labels map[string]string, fstype string, err error)
// Deletes a volume
DeleteVolume(deleter *cinderVolumeDeleter) error
}
@ -286,6 +297,7 @@ type cinderVolumeMounter struct {
fsType string
readOnly bool
blockDeviceMounter *mount.SafeFormatAndMount
mountOptions []string
}
// cinderPersistentDisk volumes are disk resources provided by Cinder
@ -330,18 +342,18 @@ func (b *cinderVolumeMounter) SetUp(fsGroup *int64) error {
// SetUp bind mounts to the volume path.
func (b *cinderVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
glog.V(5).Infof("Cinder SetUp %s to %s", b.pdName, dir)
klog.V(5).Infof("Cinder SetUp %s to %s", b.pdName, dir)
b.plugin.volumeLocks.LockKey(b.pdName)
defer b.plugin.volumeLocks.UnlockKey(b.pdName)
notmnt, err := b.mounter.IsLikelyNotMountPoint(dir)
if err != nil && !os.IsNotExist(err) {
glog.Errorf("Cannot validate mount point: %s %v", dir, err)
klog.Errorf("Cannot validate mount point: %s %v", dir, err)
return err
}
if !notmnt {
glog.V(4).Infof("Something is already mounted to target %s", dir)
klog.V(4).Infof("Something is already mounted to target %s", dir)
return nil
}
globalPDPath := makeGlobalPDName(b.plugin.host, b.pdName)
@ -352,45 +364,46 @@ func (b *cinderVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
}
if err := os.MkdirAll(dir, 0750); err != nil {
glog.V(4).Infof("Could not create directory %s: %v", dir, err)
klog.V(4).Infof("Could not create directory %s: %v", dir, err)
return err
}
mountOptions := util.JoinMountOptions(options, b.mountOptions)
// Perform a bind mount to the full path to allow duplicate mounts of the same PD.
glog.V(4).Infof("Attempting to mount cinder volume %s to %s with options %v", b.pdName, dir, options)
klog.V(4).Infof("Attempting to mount cinder volume %s to %s with options %v", b.pdName, dir, mountOptions)
err = b.mounter.Mount(globalPDPath, dir, "", options)
if err != nil {
glog.V(4).Infof("Mount failed: %v", err)
klog.V(4).Infof("Mount failed: %v", err)
notmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return err
}
if !notmnt {
if mntErr = b.mounter.Unmount(dir); mntErr != nil {
glog.Errorf("Failed to unmount: %v", mntErr)
klog.Errorf("Failed to unmount: %v", mntErr)
return err
}
notmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return err
}
if !notmnt {
// This is very odd, we don't expect it. We'll try again next sync loop.
glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", b.GetPath())
klog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", b.GetPath())
return err
}
}
os.Remove(dir)
glog.Errorf("Failed to mount %s: %v", dir, err)
klog.Errorf("Failed to mount %s: %v", dir, err)
return err
}
if !b.readOnly {
volume.SetVolumeOwnership(b, fsGroup)
}
glog.V(3).Infof("Cinder volume %s mounted to %s", b.pdName, dir)
klog.V(3).Infof("Cinder volume %s mounted to %s", b.pdName, dir)
return nil
}
@ -419,60 +432,60 @@ func (c *cinderVolumeUnmounter) TearDownAt(dir string) error {
if pathExists, pathErr := util.PathExists(dir); pathErr != nil {
return fmt.Errorf("Error checking if path exists: %v", pathErr)
} else if !pathExists {
glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir)
klog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir)
return nil
}
glog.V(5).Infof("Cinder TearDown of %s", dir)
klog.V(5).Infof("Cinder TearDown of %s", dir)
notmnt, err := c.mounter.IsLikelyNotMountPoint(dir)
if err != nil {
glog.V(4).Infof("IsLikelyNotMountPoint check failed: %v", err)
klog.V(4).Infof("IsLikelyNotMountPoint check failed: %v", err)
return err
}
if notmnt {
glog.V(4).Infof("Nothing is mounted to %s, ignoring", dir)
klog.V(4).Infof("Nothing is mounted to %s, ignoring", dir)
return os.Remove(dir)
}
// Find Cinder volumeID to lock the right volume
// TODO: refactor VolumePlugin.NewUnmounter to get full volume.Spec just like
// NewMounter. We could then find volumeID there without probing MountRefs.
refs, err := mount.GetMountRefs(c.mounter, dir)
refs, err := c.mounter.GetMountRefs(dir)
if err != nil {
glog.V(4).Infof("GetMountRefs failed: %v", err)
klog.V(4).Infof("GetMountRefs failed: %v", err)
return err
}
if len(refs) == 0 {
glog.V(4).Infof("Directory %s is not mounted", dir)
klog.V(4).Infof("Directory %s is not mounted", dir)
return fmt.Errorf("directory %s is not mounted", dir)
}
c.pdName = path.Base(refs[0])
glog.V(4).Infof("Found volume %s mounted to %s", c.pdName, dir)
klog.V(4).Infof("Found volume %s mounted to %s", c.pdName, dir)
// lock the volume (and thus wait for any concurrent SetUpAt to finish)
c.plugin.volumeLocks.LockKey(c.pdName)
defer c.plugin.volumeLocks.UnlockKey(c.pdName)
// Reload list of references, there might be SetUpAt finished in the meantime
refs, err = mount.GetMountRefs(c.mounter, dir)
refs, err = c.mounter.GetMountRefs(dir)
if err != nil {
glog.V(4).Infof("GetMountRefs failed: %v", err)
klog.V(4).Infof("GetMountRefs failed: %v", err)
return err
}
if err := c.mounter.Unmount(dir); err != nil {
glog.V(4).Infof("Unmount failed: %v", err)
klog.V(4).Infof("Unmount failed: %v", err)
return err
}
glog.V(3).Infof("Successfully unmounted: %s\n", dir)
klog.V(3).Infof("Successfully unmounted: %s\n", dir)
notmnt, mntErr := c.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return err
}
if notmnt {
if err := os.Remove(dir); err != nil {
glog.V(4).Infof("Failed to remove directory after unmount: %v", err)
klog.V(4).Infof("Failed to remove directory after unmount: %v", err)
return err
}
}
@ -505,15 +518,20 @@ func (c *cinderVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopolo
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
}
if util.CheckPersistentVolumeClaimModeBlock(c.options.PVC) {
return nil, fmt.Errorf("%s does not support block volume provisioning", c.plugin.GetPluginName())
}
volumeID, sizeGB, labels, fstype, err := c.manager.CreateVolume(c)
volumeID, sizeGB, labels, fstype, err := c.manager.CreateVolume(c, selectedNode, allowedTopologies)
if err != nil {
return nil, err
}
var volumeMode *v1.PersistentVolumeMode
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
volumeMode = c.options.PVC.Spec.VolumeMode
if volumeMode != nil && *volumeMode == v1.PersistentVolumeBlock {
// Block volumes should not have any FSType
fstype = ""
}
}
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: c.options.PVName,
@ -528,6 +546,7 @@ func (c *cinderVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopolo
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
},
VolumeMode: volumeMode,
PersistentVolumeSource: v1.PersistentVolumeSource{
Cinder: &v1.CinderPersistentVolumeSource{
VolumeID: volumeID,
@ -542,6 +561,21 @@ func (c *cinderVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopolo
pv.Spec.AccessModes = c.plugin.GetAccessModes()
}
if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) {
requirements := make([]v1.NodeSelectorRequirement, 0)
for k, v := range labels {
if v != "" {
requirements = append(requirements, v1.NodeSelectorRequirement{Key: k, Operator: v1.NodeSelectorOpIn, Values: []string{v}})
}
}
if len(requirements) > 0 {
pv.Spec.NodeAffinity = new(v1.VolumeNodeAffinity)
pv.Spec.NodeAffinity.Required = new(v1.NodeSelector)
pv.Spec.NodeAffinity.Required.NodeSelectorTerms = make([]v1.NodeSelectorTerm, 1)
pv.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions = requirements
}
}
return pv, nil
}
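
The provisioner above turns every non-empty topology label on the created volume into an In-requirement inside a single NodeSelectorTerm, so the scheduler keeps consuming pods in the volume's zone. A standalone sketch of that construction; the label key shown is illustrative:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// nodeAffinityFromLabels builds a VolumeNodeAffinity with one
// NodeSelectorTerm whose MatchExpressions require each non-empty label.
func nodeAffinityFromLabels(labels map[string]string) *v1.VolumeNodeAffinity {
	requirements := make([]v1.NodeSelectorRequirement, 0, len(labels))
	for k, v := range labels {
		if v != "" {
			requirements = append(requirements, v1.NodeSelectorRequirement{
				Key: k, Operator: v1.NodeSelectorOpIn, Values: []string{v},
			})
		}
	}
	if len(requirements) == 0 {
		return nil
	}
	return &v1.VolumeNodeAffinity{
		Required: &v1.NodeSelector{
			NodeSelectorTerms: []v1.NodeSelectorTerm{{MatchExpressions: requirements}},
		},
	}
}

func main() {
	aff := nodeAffinityFromLabels(map[string]string{"failure-domain.beta.kubernetes.io/zone": "nova"})
	fmt.Println(aff.Required.NodeSelectorTerms[0].MatchExpressions[0].Values) // [nova]
}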


@ -0,0 +1,167 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cinder
import (
"fmt"
"path/filepath"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/util/mount"
kstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
)
var _ volume.VolumePlugin = &cinderPlugin{}
var _ volume.PersistentVolumePlugin = &cinderPlugin{}
var _ volume.BlockVolumePlugin = &cinderPlugin{}
var _ volume.DeletableVolumePlugin = &cinderPlugin{}
var _ volume.ProvisionableVolumePlugin = &cinderPlugin{}
var _ volume.ExpandableVolumePlugin = &cinderPlugin{}
func (plugin *cinderPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, mapPath string) (*volume.Spec, error) {
pluginDir := plugin.host.GetVolumeDevicePluginDir(cinderVolumePluginName)
blkutil := volumepathhandler.NewBlockVolumePathHandler()
globalMapPathUUID, err := blkutil.FindGlobalMapPathUUIDFromPod(pluginDir, mapPath, podUID)
if err != nil {
return nil, err
}
klog.V(5).Infof("globalMapPathUUID: %v, err: %v", globalMapPathUUID, err)
globalMapPath := filepath.Dir(globalMapPathUUID)
if len(globalMapPath) <= 1 {
return nil, fmt.Errorf("failed to get volume plugin information from globalMapPathUUID: %v", globalMapPathUUID)
}
return getVolumeSpecFromGlobalMapPath(globalMapPath)
}
func getVolumeSpecFromGlobalMapPath(globalMapPath string) (*volume.Spec, error) {
// Get volume spec information from globalMapPath
// globalMapPath example:
// plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumeID}
// plugins/kubernetes.io/cinder/volumeDevices/vol-XXXXXX
vID := filepath.Base(globalMapPath)
if len(vID) <= 1 {
return nil, fmt.Errorf("failed to get volumeID from global path=%s", globalMapPath)
}
block := v1.PersistentVolumeBlock
cinderVolume := &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
Cinder: &v1.CinderPersistentVolumeSource{
VolumeID: vID,
},
},
VolumeMode: &block,
},
}
return volume.NewSpecFromPersistentVolume(cinderVolume, true), nil
}
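
As the comment block above notes, the volume ID is recovered as the last path element of the global map path. A minimal sketch of just that parsing step:

package main

import (
	"fmt"
	"path/filepath"
)

// volumeIDFromGlobalMapPath extracts the trailing path element, e.g.
// plugins/kubernetes.io/cinder/volumeDevices/vol-XXXXXX -> vol-XXXXXX.
func volumeIDFromGlobalMapPath(globalMapPath string) (string, error) {
	vID := filepath.Base(globalMapPath)
	if len(vID) <= 1 { // Base returns "." or "/" for degenerate inputs
		return "", fmt.Errorf("failed to get volumeID from global path=%s", globalMapPath)
	}
	return vID, nil
}

func main() {
	fmt.Println(volumeIDFromGlobalMapPath("plugins/kubernetes.io/cinder/volumeDevices/vol-1234"))
}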
// NewBlockVolumeMapper creates a new volume.BlockVolumeMapper from an API specification.
func (plugin *cinderPlugin) NewBlockVolumeMapper(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.BlockVolumeMapper, error) {
// If this is called via GenerateUnmapDeviceFunc(), pod is nil.
// Pass empty string as dummy uid since uid isn't used in the case.
var uid types.UID
if pod != nil {
uid = pod.UID
}
return plugin.newBlockVolumeMapperInternal(spec, uid, &DiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *cinderPlugin) newBlockVolumeMapperInternal(spec *volume.Spec, podUID types.UID, manager cdManager, mounter mount.Interface) (volume.BlockVolumeMapper, error) {
pdName, fsType, readOnly, err := getVolumeInfo(spec)
if err != nil {
return nil, err
}
return &cinderVolumeMapper{
cinderVolume: &cinderVolume{
podUID: podUID,
volName: spec.Name(),
pdName: pdName,
fsType: fsType,
manager: manager,
mounter: mounter,
plugin: plugin,
},
readOnly: readOnly}, nil
}
func (plugin *cinderPlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (volume.BlockVolumeUnmapper, error) {
return plugin.newUnmapperInternal(volName, podUID, &DiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *cinderPlugin) newUnmapperInternal(volName string, podUID types.UID, manager cdManager, mounter mount.Interface) (volume.BlockVolumeUnmapper, error) {
return &cinderPluginUnmapper{
cinderVolume: &cinderVolume{
podUID: podUID,
volName: volName,
manager: manager,
mounter: mounter,
plugin: plugin,
}}, nil
}
func (c *cinderPluginUnmapper) TearDownDevice(mapPath, devicePath string) error {
return nil
}
type cinderPluginUnmapper struct {
*cinderVolume
}
var _ volume.BlockVolumeUnmapper = &cinderPluginUnmapper{}
type cinderVolumeMapper struct {
*cinderVolume
readOnly bool
}
var _ volume.BlockVolumeMapper = &cinderVolumeMapper{}
func (b *cinderVolumeMapper) SetUpDevice() (string, error) {
return "", nil
}
func (b *cinderVolumeMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, volumeMapName string, podUID types.UID) error {
return util.MapBlockVolume(devicePath, globalMapPath, volumeMapPath, volumeMapName, podUID)
}
// GetGlobalMapPath returns global map path and error
// path: plugins/kubernetes.io/{PluginName}/volumeDevices/volumeID
// plugins/kubernetes.io/cinder/volumeDevices/vol-XXXXXX
func (cd *cinderVolume) GetGlobalMapPath(spec *volume.Spec) (string, error) {
pdName, _, _, err := getVolumeInfo(spec)
if err != nil {
return "", err
}
return filepath.Join(cd.plugin.host.GetVolumeDevicePluginDir(cinderVolumePluginName), pdName), nil
}
// GetPodDeviceMapPath returns pod device map path and volume name
// path: pods/{podUid}/volumeDevices/kubernetes.io~cinder
func (cd *cinderVolume) GetPodDeviceMapPath() (string, string) {
name := cinderVolumePluginName
return cd.plugin.host.GetPodVolumeDeviceDir(cd.podUID, kstrings.EscapeQualifiedNameForDisk(name)), cd.volName
}
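
GetPodDeviceMapPath relies on kstrings.EscapeQualifiedNameForDisk to turn a qualified plugin name into a directory-safe one; the helper boils down to replacing "/" with "~" (a sketch assuming that behavior, which matches the kubernetes.io~cinder path shown above):

package main

import (
	"fmt"
	"strings"
)

// escapeQualifiedNameForDisk makes a qualified plugin name usable as an
// on-disk directory name, where "/" is not allowed.
func escapeQualifiedNameForDisk(in string) string {
	return strings.Replace(in, "/", "~", -1)
}

func main() {
	fmt.Println(escapeQualifiedNameForDisk("kubernetes.io/cinder")) // kubernetes.io~cinder
}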


@ -0,0 +1,145 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cinder
import (
"os"
"path"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
const (
testVolName = "vol-1234"
testPVName = "pv1"
testGlobalPath = "plugins/kubernetes.io/cinder/volumeDevices/vol-1234"
testPodPath = "pods/poduid/volumeDevices/kubernetes.io~cinder"
)
func TestGetVolumeSpecFromGlobalMapPath(t *testing.T) {
// make our test path for fake GlobalMapPath
// /tmp symbolized our pluginDir
// /tmp/testGlobalPathXXXXX/plugins/kubernetes.io/cinder/volumeDevices/pdVol1
tmpVDir, err := utiltesting.MkTmpdir("cinderBlockTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
//deferred clean up
defer os.RemoveAll(tmpVDir)
expectedGlobalPath := path.Join(tmpVDir, testGlobalPath)
//Bad Path
badspec, err := getVolumeSpecFromGlobalMapPath("")
if badspec != nil || err == nil {
t.Errorf("Expected not to get spec from GlobalMapPath but did")
}
// Good Path
spec, err := getVolumeSpecFromGlobalMapPath(expectedGlobalPath)
if spec == nil || err != nil {
t.Fatalf("Failed to get spec from GlobalMapPath: %v", err)
}
if spec.PersistentVolume.Spec.Cinder.VolumeID != testVolName {
t.Errorf("Invalid volumeID from GlobalMapPath spec: %s", spec.PersistentVolume.Spec.Cinder.VolumeID)
}
block := v1.PersistentVolumeBlock
specMode := spec.PersistentVolume.Spec.VolumeMode
if &specMode == nil {
t.Errorf("Invalid volumeMode from GlobalMapPath spec: %v expected: %v", &specMode, block)
}
if *specMode != block {
t.Errorf("Invalid volumeMode from GlobalMapPath spec: %v expected: %v", *specMode, block)
}
}
func getTestVolume(readOnly bool, isBlock bool) *volume.Spec {
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: testPVName,
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
Cinder: &v1.CinderPersistentVolumeSource{
VolumeID: testVolName,
},
},
},
}
if isBlock {
blockMode := v1.PersistentVolumeBlock
pv.Spec.VolumeMode = &blockMode
}
return volume.NewSpecFromPersistentVolume(pv, readOnly)
}
func TestGetPodAndPluginMapPaths(t *testing.T) {
tmpVDir, err := utiltesting.MkTmpdir("cinderBlockTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
//deferred clean up
defer os.RemoveAll(tmpVDir)
expectedGlobalPath := path.Join(tmpVDir, testGlobalPath)
expectedPodPath := path.Join(tmpVDir, testPodPath)
spec := getTestVolume(false, true /*isBlock*/)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpVDir, nil, nil))
plug, err := plugMgr.FindMapperPluginByName(cinderVolumePluginName)
if err != nil {
os.RemoveAll(tmpVDir)
t.Fatalf("Can't find the plugin by name: %q", cinderVolumePluginName)
}
if plug.GetPluginName() != cinderVolumePluginName {
t.Fatalf("Wrong name: %s", plug.GetPluginName())
}
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
mapper, err := plug.NewBlockVolumeMapper(spec, pod, volume.VolumeOptions{})
if err != nil {
t.Fatalf("Failed to make a new Mounter: %v", err)
}
if mapper == nil {
t.Fatalf("Got a nil Mounter")
}
//GetGlobalMapPath
gMapPath, err := mapper.GetGlobalMapPath(spec)
if err != nil || len(gMapPath) == 0 {
t.Fatalf("Invalid GlobalMapPath from spec: %s", spec.PersistentVolume.Spec.Cinder.VolumeID)
}
if gMapPath != expectedGlobalPath {
t.Errorf("Failed to get GlobalMapPath: %s %s", gMapPath, expectedGlobalPath)
}
//GetPodDeviceMapPath
gDevicePath, gVolName := mapper.GetPodDeviceMapPath()
if gDevicePath != expectedPodPath {
t.Errorf("Got unexpected pod path: %s, expected %s", gDevicePath, expectedPodPath)
}
if gVolName != testPVName {
t.Errorf("Got unexpected volNamne: %s, expected %s", gVolName, testPVName)
}
}


@ -26,6 +26,7 @@ import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
utiltesting "k8s.io/client-go/util/testing"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
@ -116,8 +117,10 @@ func (fake *fakePDManager) DetachDisk(c *cinderVolumeUnmounter) error {
return nil
}
func (fake *fakePDManager) CreateVolume(c *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, labels map[string]string, fstype string, err error) {
return "test-volume-name", 1, nil, "", nil
func (fake *fakePDManager) CreateVolume(c *cinderVolumeProvisioner, node *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (volumeID string, volumeSizeGB int, labels map[string]string, fstype string, err error) {
labels = make(map[string]string)
labels[kubeletapis.LabelZoneFailureDomain] = "nova"
return "test-volume-name", 1, labels, "", nil
}
func (fake *fakePDManager) DeleteVolume(cd *cinderVolumeDeleter) error {
@ -192,7 +195,7 @@ func TestPlugin(t *testing.T) {
// Test Provisioner
options := volume.VolumeOptions{
PVC: volumetest.CreateTestPVC("100Mi", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}),
PVC: volumetest.CreateTestPVC("100Mi", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}),
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
}
provisioner, err := plug.(*cinderPlugin).newProvisionerInternal(options, &fakePDManager{0})
@ -210,6 +213,39 @@ func TestPlugin(t *testing.T) {
t.Errorf("Provision() returned unexpected volume size: %v", size)
}
// check nodeaffinity members
if persistentSpec.Spec.NodeAffinity == nil {
t.Errorf("Provision() returned unexpected nil NodeAffinity")
}
if persistentSpec.Spec.NodeAffinity.Required == nil {
t.Errorf("Provision() returned unexpected nil NodeAffinity.Required")
}
n := len(persistentSpec.Spec.NodeAffinity.Required.NodeSelectorTerms)
if n != 1 {
t.Errorf("Provision() returned unexpected number of NodeSelectorTerms %d. Expected %d", n, 1)
}
n = len(persistentSpec.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions)
if n != 1 {
t.Errorf("Provision() returned unexpected number of MatchExpressions %d. Expected %d", n, 1)
}
req := persistentSpec.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions[0]
if req.Key != kubeletapis.LabelZoneFailureDomain {
t.Errorf("Provision() returned unexpected requirement key in NodeAffinity %v", req.Key)
}
if req.Operator != v1.NodeSelectorOpIn {
t.Errorf("Provision() returned unexpected requirement operator in NodeAffinity %v", req.Operator)
}
if len(req.Values) != 1 || req.Values[0] != "nova" {
t.Errorf("Provision() returned unexpected requirement value in NodeAffinity %v", req.Values)
}
// Test Deleter
volSpec := &volume.Spec{
PersistentVolume: persistentSpec,


@ -24,8 +24,8 @@ import (
"strings"
"time"
"github.com/golang/glog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
@ -95,7 +95,7 @@ func (util *DiskUtil) AttachDisk(b *cinderVolumeMounter, globalPDPath string) er
os.Remove(globalPDPath)
return err
}
glog.V(2).Infof("Safe mount successful: %q\n", devicePath)
klog.V(2).Infof("Safe mount successful: %q\n", devicePath)
}
return nil
}
@ -109,7 +109,7 @@ func (util *DiskUtil) DetachDisk(cd *cinderVolumeUnmounter) error {
if err := os.Remove(globalPDPath); err != nil {
return err
}
glog.V(2).Infof("Successfully unmounted main device: %s\n", globalPDPath)
klog.V(2).Infof("Successfully unmounted main device: %s\n", globalPDPath)
cloud, err := cd.plugin.getCloudProvider()
if err != nil {
@ -122,7 +122,7 @@ func (util *DiskUtil) DetachDisk(cd *cinderVolumeUnmounter) error {
if err = cloud.DetachDisk(instanceid, cd.pdName); err != nil {
return err
}
glog.V(2).Infof("Successfully detached cinder volume %s", cd.pdName)
klog.V(2).Infof("Successfully detached cinder volume %s", cd.pdName)
return nil
}
@ -136,10 +136,10 @@ func (util *DiskUtil) DeleteVolume(cd *cinderVolumeDeleter) error {
if err = cloud.DeleteVolume(cd.pdName); err != nil {
// OpenStack cloud provider returns volume.tryAgainError when necessary,
// no handling needed here.
glog.V(2).Infof("Error deleting cinder volume %s: %v", cd.pdName, err)
klog.V(2).Infof("Error deleting cinder volume %s: %v", cd.pdName, err)
return err
}
glog.V(2).Infof("Successfully deleted cinder volume %s", cd.pdName)
klog.V(2).Infof("Successfully deleted cinder volume %s", cd.pdName)
return nil
}
@ -149,7 +149,7 @@ func getZonesFromNodes(kubeClient clientset.Interface) (sets.String, error) {
zones := make(sets.String)
nodes, err := kubeClient.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
glog.V(2).Infof("Error listing nodes")
klog.V(2).Infof("Error listing nodes")
return zones, err
}
for _, node := range nodes.Items {
@ -157,21 +157,24 @@ func getZonesFromNodes(kubeClient clientset.Interface) (sets.String, error) {
zones.Insert(zone)
}
}
glog.V(4).Infof("zones found: %v", zones)
klog.V(4).Infof("zones found: %v", zones)
return zones, nil
}
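
getZonesFromNodes collects the distinct zone labels across the cluster's nodes into a string set. A simplified sketch using the same sets.String type; the label key matches kubeletapis.LabelZoneFailureDomain as used at the time of this commit:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

// zonesFromNodeLabels returns the set of distinct failure-domain zones
// found in the given per-node label maps.
func zonesFromNodeLabels(nodeLabels []map[string]string) sets.String {
	zones := sets.NewString()
	for _, labels := range nodeLabels {
		if zone, ok := labels["failure-domain.beta.kubernetes.io/zone"]; ok {
			zones.Insert(zone)
		}
	}
	return zones
}

func main() {
	zones := zonesFromNodeLabels([]map[string]string{
		{"failure-domain.beta.kubernetes.io/zone": "nova"},
		{"failure-domain.beta.kubernetes.io/zone": "nova-2"},
	})
	fmt.Println(zones.List()) // [nova nova-2]
}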
// CreateVolume uses the cloud provider entrypoint for creating a volume
func (util *DiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, volumeLabels map[string]string, fstype string, err error) {
func (util *DiskUtil) CreateVolume(c *cinderVolumeProvisioner, node *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (volumeID string, volumeSizeGB int, volumeLabels map[string]string, fstype string, err error) {
cloud, err := c.plugin.getCloudProvider()
if err != nil {
return "", 0, nil, "", err
}
capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
volSizeBytes := capacity.Value()
// Cinder works with gigabytes, convert to GiB with rounding up
volSizeGB := int(volutil.RoundUpSize(volSizeBytes, 1024*1024*1024))
volSizeGiB, err := volutil.RoundUpToGiBInt(capacity)
if err != nil {
return "", 0, nil, "", err
}
name := volutil.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 255) // Cinder volume name can have up to 255 characters
vtype := ""
availability := ""
@ -198,29 +201,38 @@ func (util *DiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID string,
// No zone specified, choose one randomly in the same region
zones, err := getZonesFromNodes(c.plugin.host.GetKubeClient())
if err != nil {
glog.V(2).Infof("error getting zone information: %v", err)
klog.V(2).Infof("error getting zone information: %v", err)
return "", 0, nil, "", err
}
// if we did not get any zones, let's leave it blank and gophercloud will
// use zone "nova" as default
if len(zones) > 0 {
availability = volutil.ChooseZoneForVolume(zones, c.options.PVC.Name)
availability, err = volutil.SelectZoneForVolume(false, false, "", nil, zones, node, allowedTopologies, c.options.PVC.Name)
if err != nil {
klog.V(2).Infof("error selecting zone for volume: %v", err)
return "", 0, nil, "", err
}
}
}
volumeID, volumeAZ, IgnoreVolumeAZ, errr := cloud.CreateVolume(name, volSizeGB, vtype, availability, c.options.CloudTags)
if errr != nil {
glog.V(2).Infof("Error creating cinder volume: %v", errr)
return "", 0, nil, "", errr
volumeID, volumeAZ, volumeRegion, IgnoreVolumeAZ, err := cloud.CreateVolume(name, volSizeGiB, vtype, availability, c.options.CloudTags)
if err != nil {
klog.V(2).Infof("Error creating cinder volume: %v", err)
return "", 0, nil, "", err
}
glog.V(2).Infof("Successfully created cinder volume %s", volumeID)
klog.V(2).Infof("Successfully created cinder volume %s", volumeID)
// these labels are needed so that the pod is scheduled to the same AZ as the volume
volumeLabels = make(map[string]string)
if !IgnoreVolumeAZ {
volumeLabels[kubeletapis.LabelZoneFailureDomain] = volumeAZ
if volumeAZ != "" {
volumeLabels[kubeletapis.LabelZoneFailureDomain] = volumeAZ
}
if volumeRegion != "" {
volumeLabels[kubeletapis.LabelZoneRegion] = volumeRegion
}
}
return volumeID, volSizeGB, volumeLabels, fstype, nil
return volumeID, volSizeGiB, volumeLabels, fstype, nil
}
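
The switch to volutil.RoundUpToGiBInt reflects that Cinder sizes volumes in whole GiB, so a request is rounded up to the next GiB boundary (a 100Mi claim becomes a 1 GiB volume). A sketch of the arithmetic, assumed equivalent rather than the exact library code:

package main

import "fmt"

const giB = 1024 * 1024 * 1024

// roundUpToGiB converts a byte count to GiB, rounding up to the next
// whole GiB boundary.
func roundUpToGiB(sizeBytes int64) int {
	return int((sizeBytes + giB - 1) / giB)
}

func main() {
	fmt.Println(roundUpToGiB(100 * 1024 * 1024)) // 1
	fmt.Println(roundUpToGiB(5 * giB))           // 5
	fmt.Println(roundUpToGiB(5*giB + 1))         // 6
}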
func probeAttachedVolume() error {
@ -236,17 +248,17 @@ func probeAttachedVolume() error {
cmdSettle := executor.Command("udevadm", argsSettle...)
_, errSettle := cmdSettle.CombinedOutput()
if errSettle != nil {
glog.Errorf("error running udevadm settle %v\n", errSettle)
klog.Errorf("error running udevadm settle %v\n", errSettle)
}
args := []string{"trigger"}
cmd := executor.Command("udevadm", args...)
_, err := cmd.CombinedOutput()
if err != nil {
glog.Errorf("error running udevadm trigger %v\n", err)
klog.Errorf("error running udevadm trigger %v\n", err)
return err
}
glog.V(4).Infof("Successfully probed all attachments")
klog.V(4).Infof("Successfully probed all attachments")
return nil
}


@ -14,16 +14,15 @@ go_library(
],
importpath = "k8s.io/kubernetes/pkg/volume/configmap",
deps = [
"//pkg/util/io:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
@ -33,14 +32,14 @@ go_test(
embed = [":go_default_library"],
deps = [
"//pkg/volume:go_default_library",
"//pkg/volume/empty_dir:go_default_library",
"//pkg/volume/emptydir:go_default_library",
"//pkg/volume/testing:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
],
)


@ -19,19 +19,18 @@ package configmap
import (
"fmt"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
ioutil "k8s.io/kubernetes/pkg/util/io"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)
// ProbeVolumePlugin is the entry point for plugin detection in a package.
// ProbeVolumePlugins is the entry point for plugin detection in a package.
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&configMapPlugin{}}
}
@ -93,7 +92,6 @@ func (plugin *configMapPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, opts v
pod.UID,
plugin,
plugin.host.GetMounter(plugin.GetPluginName()),
plugin.host.GetWriter(),
volume.MetricsNil{},
},
source: *spec.Volume.ConfigMap,
@ -110,7 +108,6 @@ func (plugin *configMapPlugin) NewUnmounter(volName string, podUID types.UID) (v
podUID,
plugin,
plugin.host.GetMounter(plugin.GetPluginName()),
plugin.host.GetWriter(),
volume.MetricsNil{},
},
}, nil
@ -131,7 +128,6 @@ type configMapVolume struct {
podUID types.UID
plugin *configMapPlugin
mounter mount.Interface
writer ioutil.Writer
volume.MetricsNil
}
@ -184,7 +180,7 @@ func (b *configMapVolumeMounter) SetUp(fsGroup *int64) error {
}
func (b *configMapVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
glog.V(3).Infof("Setting up volume %v for pod %v at %v", b.volName, b.pod.UID, dir)
klog.V(3).Infof("Setting up volume %v for pod %v at %v", b.volName, b.pod.UID, dir)
// Wrap EmptyDir, let it do the setup.
wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec(), &b.pod, *b.opts)
@ -196,7 +192,7 @@ func (b *configMapVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
configMap, err := b.getConfigMap(b.pod.Namespace, b.source.Name)
if err != nil {
if !(errors.IsNotFound(err) && optional) {
glog.Errorf("Couldn't get configMap %v/%v: %v", b.pod.Namespace, b.source.Name, err)
klog.Errorf("Couldn't get configMap %v/%v: %v", b.pod.Namespace, b.source.Name, err)
return err
}
configMap = &v1.ConfigMap{
@ -207,15 +203,8 @@ func (b *configMapVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
}
}
if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
return err
}
if err := volumeutil.MakeNestedMountpoints(b.volName, dir, b.pod); err != nil {
return err
}
totalBytes := totalBytes(configMap)
glog.V(3).Infof("Received configMap %v/%v containing (%v) pieces of data, %v total bytes",
klog.V(3).Infof("Received configMap %v/%v containing (%v) pieces of data, %v total bytes",
b.pod.Namespace,
b.source.Name,
len(configMap.Data)+len(configMap.BinaryData),
@ -226,28 +215,52 @@ func (b *configMapVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
return err
}
setupSuccess := false
if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
return err
}
if err := volumeutil.MakeNestedMountpoints(b.volName, dir, b.pod); err != nil {
return err
}
defer func() {
// Clean up directories if setup fails
if !setupSuccess {
unmounter, unmountCreateErr := b.plugin.NewUnmounter(b.volName, b.podUID)
if unmountCreateErr != nil {
klog.Errorf("error cleaning up mount %s after failure. Create unmounter failed with %v", b.volName, unmountCreateErr)
return
}
tearDownErr := unmounter.TearDown()
if tearDownErr != nil {
klog.Errorf("Error tearing down volume %s with : %v", b.volName, tearDownErr)
}
}
}()
writerContext := fmt.Sprintf("pod %v/%v volume %v", b.pod.Namespace, b.pod.Name, b.volName)
writer, err := volumeutil.NewAtomicWriter(dir, writerContext)
if err != nil {
glog.Errorf("Error creating atomic writer: %v", err)
klog.Errorf("Error creating atomic writer: %v", err)
return err
}
err = writer.Write(payload)
if err != nil {
glog.Errorf("Error writing payload to dir: %v", err)
klog.Errorf("Error writing payload to dir: %v", err)
return err
}
err = volume.SetVolumeOwnership(b, fsGroup)
if err != nil {
glog.Errorf("Error applying volume ownership settings for group: %v", fsGroup)
klog.Errorf("Error applying volume ownership settings for group: %v", fsGroup)
return err
}
setupSuccess = true
return nil
}
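
The setupSuccess flag plus deferred teardown introduced above ensures that a SetUpAt failing partway through leaves no half-populated volume directory behind: the flag is flipped only at the very end, and the deferred handler cleans up on any earlier return. A distilled, self-contained sketch of that pattern:

package main

import "fmt"

// setUp runs the setup steps in order; if any step fails, the deferred
// handler invokes tearDown because setupSuccess was never set.
func setUp(steps []func() error, tearDown func()) error {
	setupSuccess := false
	defer func() {
		if !setupSuccess {
			tearDown()
		}
	}()
	for _, step := range steps {
		if err := step(); err != nil {
			return err // deferred tearDown runs
		}
	}
	setupSuccess = true
	return nil
}

func main() {
	err := setUp(
		[]func() error{
			func() error { return nil },
			func() error { return fmt.Errorf("write payload failed") },
		},
		func() { fmt.Println("tearing down half-finished volume") },
	)
	fmt.Println(err)
}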
// Note: this function is exported so that it can be called from the projection volume driver
// MakePayload function is exported so that it can be called from the projection volume driver
func MakePayload(mappings []v1.KeyToPath, configMap *v1.ConfigMap, defaultMode *int32, optional bool) (map[string]volumeutil.FileProjection, error) {
if defaultMode == nil {
return nil, fmt.Errorf("No defaultMode used, not even the default value for it")


@ -31,7 +31,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/empty_dir"
"k8s.io/kubernetes/pkg/volume/emptydir"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/kubernetes/pkg/volume/util"
)
@ -297,7 +297,7 @@ func newTestHost(t *testing.T, clientset clientset.Interface) (string, volume.Vo
t.Fatalf("can't make a temp rootdir: %v", err)
}
return tempDir, volumetest.NewFakeVolumeHost(tempDir, clientset, empty_dir.ProbeVolumePlugins())
return tempDir, volumetest.NewFakeVolumeHost(tempDir, clientset, emptydir.ProbeVolumePlugins())
}
func TestCanSupport(t *testing.T) {
@ -613,6 +613,66 @@ func volumeSpec(volumeName, configMapName string, defaultMode int32) *v1.Volume
}
}
func TestInvalidConfigMapSetup(t *testing.T) {
var (
testPodUID = types.UID("test_pod_uid")
testVolumeName = "test_volume_name"
testNamespace = "test_configmap_namespace"
testName = "test_configmap_name"
volumeSpec = volumeSpec(testVolumeName, testName, 0644)
configMap = configMap(testNamespace, testName)
client = fake.NewSimpleClientset(&configMap)
pluginMgr = volume.VolumePluginMgr{}
tempDir, host = newTestHost(t, client)
)
volumeSpec.VolumeSource.ConfigMap.Items = []v1.KeyToPath{
{Key: "missing", Path: "missing"},
}
defer os.RemoveAll(tempDir)
pluginMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host)
plugin, err := pluginMgr.FindPluginByName(configMapPluginName)
if err != nil {
t.Errorf("Can't find the plugin by name")
}
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, UID: testPodUID}}
mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Errorf("Got a nil Mounter")
}
vName, err := plugin.GetVolumeName(volume.NewSpecFromVolume(volumeSpec))
if err != nil {
t.Errorf("Failed to GetVolumeName: %v", err)
}
if vName != "test_volume_name/test_configmap_name" {
t.Errorf("Got unexpect VolumeName %v", vName)
}
volumePath := mounter.GetPath()
if !strings.HasSuffix(volumePath, fmt.Sprintf("pods/test_pod_uid/volumes/kubernetes.io~configmap/test_volume_name")) {
t.Errorf("Got unexpected path: %s", volumePath)
}
fsGroup := int64(1001)
err = mounter.SetUp(&fsGroup)
if err == nil {
t.Errorf("Expected setup to fail")
}
_, err = os.Stat(volumePath)
if err == nil {
t.Errorf("Expected %s to not exist", volumePath)
}
doTestCleanAndTeardown(plugin, testPodUID, testVolumeName, volumePath, t)
}
func configMap(namespace, name string) v1.ConfigMap {
return v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{

View File

@@ -14,22 +14,27 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//pkg/features:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/csi/labelmanager:go_default_library",
"//pkg/volume/csi/csiv0:go_default_library",
"//pkg/volume/csi/nodeinfomanager:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/github.com/container-storage-interface/spec/lib/go/csi/v0:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/client/informers/externalversions:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/client/informers/externalversions/csi/v1alpha1:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/client/listers/csi/v1alpha1:go_default_library",
"//vendor/github.com/container-storage-interface/spec/lib/go/csi:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/storage/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
@@ -41,26 +46,34 @@ go_test(
"csi_client_test.go",
"csi_mounter_test.go",
"csi_plugin_test.go",
"main_test.go",
],
embed = [":go_default_library"],
deps = [
"//pkg/features:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/csi/fake:go_default_library",
"//pkg/volume/testing:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/github.com/container-storage-interface/spec/lib/go/csi/v0:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/storage/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/testing:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/apis/csi/v1alpha1:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned/fake:go_default_library",
"//vendor/github.com/container-storage-interface/spec/lib/go/csi:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
@@ -75,8 +88,9 @@ filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/volume/csi/csiv0:all-srcs",
"//pkg/volume/csi/fake:all-srcs",
"//pkg/volume/csi/labelmanager:all-srcs",
"//pkg/volume/csi/nodeinfomanager:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],

View File

@@ -27,9 +27,8 @@ import (
"strings"
"time"
"github.com/golang/glog"
"k8s.io/klog"
csipb "github.com/container-storage-interface/spec/lib/go/csi/v0"
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1beta1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
@@ -56,18 +55,30 @@ type csiAttacher struct {
// volume.Attacher methods
var _ volume.Attacher = &csiAttacher{}
var _ volume.DeviceMounter = &csiAttacher{}
func (c *csiAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
if spec == nil {
glog.Error(log("attacher.Attach missing volume.Spec"))
klog.Error(log("attacher.Attach missing volume.Spec"))
return "", errors.New("missing spec")
}
csiSource, err := getCSISourceFromSpec(spec)
if err != nil {
glog.Error(log("attacher.Attach failed to get CSI persistent source: %v", err))
klog.Error(log("attacher.Attach failed to get CSI persistent source: %v", err))
return "", err
}
skip, err := c.plugin.skipAttach(csiSource.Driver)
if err != nil {
klog.Error(log("attacher.Attach failed to find if driver is attachable: %v", err))
return "", err
}
if skip {
klog.V(4).Infof(log("skipping attach for driver %s", csiSource.Driver))
return "", nil
}
node := string(nodeName)
pvName := spec.PersistentVolume.GetName()
attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, node)
@@ -83,46 +94,58 @@ func (c *csiAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string
PersistentVolumeName: &pvName,
},
},
Status: storage.VolumeAttachmentStatus{Attached: false},
}
_, err = c.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
alreadyExist := false
if err != nil {
if !apierrs.IsAlreadyExists(err) {
glog.Error(log("attacher.Attach failed: %v", err))
klog.Error(log("attacher.Attach failed: %v", err))
return "", err
}
alreadyExist = true
}
if alreadyExist {
glog.V(4).Info(log("attachment [%v] for volume [%v] already exists (will not be recreated)", attachID, csiSource.VolumeHandle))
klog.V(4).Info(log("attachment [%v] for volume [%v] already exists (will not be recreated)", attachID, csiSource.VolumeHandle))
} else {
glog.V(4).Info(log("attachment [%v] for volume [%v] created successfully", attachID, csiSource.VolumeHandle))
klog.V(4).Info(log("attachment [%v] for volume [%v] created successfully", attachID, csiSource.VolumeHandle))
}
if _, err := c.waitForVolumeAttachment(csiSource.VolumeHandle, attachID, csiTimeout); err != nil {
return "", err
}
glog.V(4).Info(log("attacher.Attach finished OK with VolumeAttachment object [%s]", attachID))
klog.V(4).Info(log("attacher.Attach finished OK with VolumeAttachment object [%s]", attachID))
// TODO(71164): In 1.15, return empty devicePath
return attachID, nil
}
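The skipAttach helper used above is defined elsewhere in this commit; judging from its call sites and the CSIDriver-driven tests further down, it reports true only when the driver's CSIDriver object says attachment is not required. A rough sketch of that assumed contract:

	// Assumed shape of skipAttach; the real implementation is not in this hunk,
	// and the csiDriverLister field name is an assumption.
	func (p *csiPlugin) skipAttach(driver string) (bool, error) {
		if !utilfeature.DefaultFeatureGate.Enabled(features.CSIDriverRegistry) {
			return false, nil
		}
		csiDriver, err := p.csiDriverLister.Get(driver)
		if apierrs.IsNotFound(err) {
			// No CSIDriver object: be conservative and require attachment.
			return false, nil
		}
		if err != nil {
			return false, err
		}
		// Only an explicit AttachRequired=false skips the VolumeAttachment flow.
		return csiDriver.Spec.AttachRequired != nil && !*csiDriver.Spec.AttachRequired, nil
	}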
func (c *csiAttacher) WaitForAttach(spec *volume.Spec, attachID string, pod *v1.Pod, timeout time.Duration) (string, error) {
func (c *csiAttacher) WaitForAttach(spec *volume.Spec, _ string, pod *v1.Pod, timeout time.Duration) (string, error) {
source, err := getCSISourceFromSpec(spec)
if err != nil {
glog.Error(log("attacher.WaitForAttach failed to extract CSI volume source: %v", err))
klog.Error(log("attacher.WaitForAttach failed to extract CSI volume source: %v", err))
return "", err
}
attachID := getAttachmentName(source.VolumeHandle, source.Driver, string(c.plugin.host.GetNodeName()))
skip, err := c.plugin.skipAttach(source.Driver)
if err != nil {
klog.Error(log("attacher.Attach failed to find if driver is attachable: %v", err))
return "", err
}
if skip {
klog.V(4).Infof(log("Driver is not attachable, skip waiting for attach"))
return "", nil
}
return c.waitForVolumeAttachment(source.VolumeHandle, attachID, timeout)
}
func (c *csiAttacher) waitForVolumeAttachment(volumeHandle, attachID string, timeout time.Duration) (string, error) {
glog.V(4).Info(log("probing for updates from CSI driver for [attachment.ID=%v]", attachID))
klog.V(4).Info(log("probing for updates from CSI driver for [attachment.ID=%v]", attachID))
timer := time.NewTimer(timeout) // TODO (vladimirvivien) investigate making this configurable
defer timer.Stop()
@@ -131,27 +154,19 @@ func (c *csiAttacher) waitForVolumeAttachment(volumeHandle, attachID string, tim
}
func (c *csiAttacher) waitForVolumeAttachmentInternal(volumeHandle, attachID string, timer *time.Timer, timeout time.Duration) (string, error) {
glog.V(4).Info(log("probing VolumeAttachment [id=%v]", attachID))
klog.V(4).Info(log("probing VolumeAttachment [id=%v]", attachID))
attach, err := c.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
if err != nil {
glog.Error(log("attacher.WaitForAttach failed for volume [%s] (will continue to try): %v", volumeHandle, err))
klog.Error(log("attacher.WaitForAttach failed for volume [%s] (will continue to try): %v", volumeHandle, err))
return "", fmt.Errorf("volume %v has GET error for volume attachment %v: %v", volumeHandle, attachID, err)
}
successful, err := verifyAttachmentStatus(attach, volumeHandle)
if err != nil {
return "", err
}
// if being deleted, fail fast
if attach.GetDeletionTimestamp() != nil {
glog.Error(log("VolumeAttachment [%s] has deletion timestamp, will not continue to wait for attachment", attachID))
return "", errors.New("volume attachment is being deleted")
}
// attachment OK
if attach.Status.Attached {
if successful {
return attachID, nil
}
// driver reports attach error
attachErr := attach.Status.AttachError
if attachErr != nil {
glog.Error(log("attachment for %v failed: %v", volumeHandle, attachErr.Message))
return "", errors.New(attachErr.Message)
}
watcher, err := c.k8s.StorageV1beta1().VolumeAttachments().Watch(meta.SingleObject(meta.ObjectMeta{Name: attachID, ResourceVersion: attach.ResourceVersion}))
if err != nil {
@@ -165,31 +180,23 @@ func (c *csiAttacher) waitForVolumeAttachmentInternal(volumeHandle, attachID str
select {
case event, ok := <-ch:
if !ok {
glog.Errorf("[attachment.ID=%v] watch channel had been closed", attachID)
klog.Errorf("[attachment.ID=%v] watch channel had been closed", attachID)
return "", errors.New("volume attachment watch channel had been closed")
}
switch event.Type {
case watch.Added, watch.Modified:
attach, _ := event.Object.(*storage.VolumeAttachment)
// if being deleted, fail fast
if attach.GetDeletionTimestamp() != nil {
glog.Error(log("VolumeAttachment [%s] has deletion timestamp, will not continue to wait for attachment", attachID))
return "", errors.New("volume attachment is being deleted")
successful, err := verifyAttachmentStatus(attach, volumeHandle)
if err != nil {
return "", err
}
// attachment OK
if attach.Status.Attached {
if successful {
return attachID, nil
}
// driver reports attach error
attachErr := attach.Status.AttachError
if attachErr != nil {
glog.Error(log("attachment for %v failed: %v", volumeHandle, attachErr.Message))
return "", errors.New(attachErr.Message)
}
case watch.Deleted:
// if deleted, fail fast
glog.Error(log("VolumeAttachment [%s] has been deleted, will not continue to wait for attachment", attachID))
klog.Error(log("VolumeAttachment [%s] has been deleted, will not continue to wait for attachment", attachID))
return "", errors.New("volume attachment has been deleted")
case watch.Error:
@@ -198,36 +205,66 @@ func (c *csiAttacher) waitForVolumeAttachmentInternal(volumeHandle, attachID str
}
case <-timer.C:
glog.Error(log("attacher.WaitForAttach timeout after %v [volume=%v; attachment.ID=%v]", timeout, volumeHandle, attachID))
klog.Error(log("attacher.WaitForAttach timeout after %v [volume=%v; attachment.ID=%v]", timeout, volumeHandle, attachID))
return "", fmt.Errorf("attachment timeout for volume %v", volumeHandle)
}
}
}
func verifyAttachmentStatus(attachment *storage.VolumeAttachment, volumeHandle string) (bool, error) {
// if being deleted, fail fast
if attachment.GetDeletionTimestamp() != nil {
klog.Error(log("VolumeAttachment [%s] has deletion timestamp, will not continue to wait for attachment", attachment.Name))
return false, errors.New("volume attachment is being deleted")
}
// attachment OK
if attachment.Status.Attached {
return true, nil
}
// driver reports attach error
attachErr := attachment.Status.AttachError
if attachErr != nil {
klog.Error(log("attachment for %v failed: %v", volumeHandle, attachErr.Message))
return false, errors.New(attachErr.Message)
}
return false, nil
}
func (c *csiAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
glog.V(4).Info(log("probing attachment status for %d volume(s) ", len(specs)))
klog.V(4).Info(log("probing attachment status for %d volume(s) ", len(specs)))
attached := make(map[*volume.Spec]bool)
for _, spec := range specs {
if spec == nil {
glog.Error(log("attacher.VolumesAreAttached missing volume.Spec"))
klog.Error(log("attacher.VolumesAreAttached missing volume.Spec"))
return nil, errors.New("missing spec")
}
source, err := getCSISourceFromSpec(spec)
if err != nil {
glog.Error(log("attacher.VolumesAreAttached failed: %v", err))
klog.Error(log("attacher.VolumesAreAttached failed: %v", err))
continue
}
skip, err := c.plugin.skipAttach(source.Driver)
if err != nil {
klog.Error(log("Failed to check CSIDriver for %s: %s", source.Driver, err))
} else {
if skip {
// This volume is not attachable, pretend it's attached
attached[spec] = true
continue
}
}
attachID := getAttachmentName(source.VolumeHandle, source.Driver, string(nodeName))
glog.V(4).Info(log("probing attachment status for VolumeAttachment %v", attachID))
klog.V(4).Info(log("probing attachment status for VolumeAttachment %v", attachID))
attach, err := c.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
if err != nil {
glog.Error(log("attacher.VolumesAreAttached failed for attach.ID=%v: %v", attachID, err))
attached[spec] = false
klog.Error(log("attacher.VolumesAreAttached failed for attach.ID=%v: %v", attachID, err))
continue
}
glog.V(4).Info(log("attacher.VolumesAreAttached attachment [%v] has status.attached=%t", attachID, attach.Status.Attached))
klog.V(4).Info(log("attacher.VolumesAreAttached attachment [%v] has status.attached=%t", attachID, attach.Status.Attached))
attached[spec] = attach.Status.Attached
}
@@ -235,27 +272,32 @@ func (c *csiAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.No
}
func (c *csiAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) {
glog.V(4).Info(log("attacher.GetDeviceMountPath(%v)", spec))
klog.V(4).Info(log("attacher.GetDeviceMountPath(%v)", spec))
deviceMountPath, err := makeDeviceMountPath(c.plugin, spec)
if err != nil {
glog.Error(log("attacher.GetDeviceMountPath failed to make device mount path: %v", err))
klog.Error(log("attacher.GetDeviceMountPath failed to make device mount path: %v", err))
return "", err
}
glog.V(4).Infof("attacher.GetDeviceMountPath succeeded, deviceMountPath: %s", deviceMountPath)
klog.V(4).Infof("attacher.GetDeviceMountPath succeeded, deviceMountPath: %s", deviceMountPath)
return deviceMountPath, nil
}
func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) (err error) {
glog.V(4).Infof(log("attacher.MountDevice(%s, %s)", devicePath, deviceMountPath))
klog.V(4).Infof(log("attacher.MountDevice(%s, %s)", devicePath, deviceMountPath))
if deviceMountPath == "" {
err = fmt.Errorf("attacher.MountDevice failed, deviceMountPath is empty")
return err
}
mounted, err := isDirMounted(c.plugin, deviceMountPath)
if err != nil {
glog.Error(log("attacher.MountDevice failed while checking mount status for dir [%s]", deviceMountPath))
klog.Error(log("attacher.MountDevice failed while checking mount status for dir [%s]", deviceMountPath))
return err
}
if mounted {
glog.V(4).Info(log("attacher.MountDevice skipping mount, dir already mounted [%s]", deviceMountPath))
klog.V(4).Info(log("attacher.MountDevice skipping mount, dir already mounted [%s]", deviceMountPath))
return nil
}
@@ -265,77 +307,64 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo
}
csiSource, err := getCSISourceFromSpec(spec)
if err != nil {
glog.Error(log("attacher.MountDevice failed to get CSI persistent source: %v", err))
klog.Error(log("attacher.MountDevice failed to get CSI persistent source: %v", err))
return err
}
// Store volume metadata for UnmountDevice. Keep it around even if the
// driver does not support NodeStage, UnmountDevice still needs it.
if err = os.MkdirAll(deviceMountPath, 0750); err != nil {
glog.Error(log("attacher.MountDevice failed to create dir %#v: %v", deviceMountPath, err))
klog.Error(log("attacher.MountDevice failed to create dir %#v: %v", deviceMountPath, err))
return err
}
glog.V(4).Info(log("created target path successfully [%s]", deviceMountPath))
klog.V(4).Info(log("created target path successfully [%s]", deviceMountPath))
dataDir := filepath.Dir(deviceMountPath)
data := map[string]string{
volDataKey.volHandle: csiSource.VolumeHandle,
volDataKey.driverName: csiSource.Driver,
}
if err = saveVolumeData(dataDir, volDataFileName, data); err != nil {
glog.Error(log("failed to save volume info data: %v", err))
klog.Error(log("failed to save volume info data: %v", err))
if cleanerr := os.RemoveAll(dataDir); err != nil {
glog.Error(log("failed to remove dir after error [%s]: %v", dataDir, cleanerr))
klog.Error(log("failed to remove dir after error [%s]: %v", dataDir, cleanerr))
}
return err
}
defer func() {
if err != nil {
// clean up metadata
glog.Errorf(log("attacher.MountDevice failed: %v", err))
klog.Errorf(log("attacher.MountDevice failed: %v", err))
if err := removeMountDir(c.plugin, deviceMountPath); err != nil {
glog.Error(log("attacher.MountDevice failed to remove mount dir after errir [%s]: %v", deviceMountPath, err))
klog.Error(log("attacher.MountDevice failed to remove mount dir after errir [%s]: %v", deviceMountPath, err))
}
}
}()
if c.csiClient == nil {
c.csiClient = newCsiDriverClient(csiSource.Driver)
c.csiClient, err = newCsiDriverClient(csiDriverName(csiSource.Driver))
if err != nil {
klog.Errorf(log("attacher.MountDevice failed to create newCsiDriverClient: %v", err))
return err
}
}
csi := c.csiClient
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
defer cancel()
// Check whether "STAGE_UNSTAGE_VOLUME" is set
stageUnstageSet, err := hasStageUnstageCapability(ctx, csi)
stageUnstageSet, err := csi.NodeSupportsStageUnstage(ctx)
if err != nil {
return err
}
if !stageUnstageSet {
glog.Infof(log("attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..."))
klog.Infof(log("attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..."))
// defer does *not* remove the metadata file and it's correct - UnmountDevice needs it there.
return nil
}
// Start MountDevice
if deviceMountPath == "" {
err = fmt.Errorf("attacher.MountDevice failed, deviceMountPath is empty")
return err
}
nodeName := string(c.plugin.host.GetNodeName())
attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, nodeName)
// search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
attachment, err := c.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
if err != nil {
return err // This err already has enough context ("VolumeAttachment xyz not found")
}
if attachment == nil {
err = errors.New("no existing VolumeAttachment found")
return err
}
publishVolumeInfo := attachment.Status.AttachmentMetadata
publishContext, err := c.plugin.getPublishContext(c.k8s, csiSource.VolumeHandle, csiSource.Driver, nodeName)
nodeStageSecrets := map[string]string{}
if csiSource.NodeStageSecretRef != nil {
@@ -356,7 +385,7 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo
fsType := csiSource.FSType
err = csi.NodeStageVolume(ctx,
csiSource.VolumeHandle,
publishVolumeInfo,
publishContext,
deviceMountPath,
fsType,
accessMode,
@@ -367,21 +396,23 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo
return err
}
glog.V(4).Infof(log("attacher.MountDevice successfully requested NodeStageVolume [%s]", deviceMountPath))
klog.V(4).Infof(log("attacher.MountDevice successfully requested NodeStageVolume [%s]", deviceMountPath))
return nil
}
var _ volume.Detacher = &csiAttacher{}
var _ volume.DeviceUnmounter = &csiAttacher{}
func (c *csiAttacher) Detach(volumeName string, nodeName types.NodeName) error {
// volumeName in format driverName<SEP>volumeHandle generated by plugin.GetVolumeName()
if volumeName == "" {
glog.Error(log("detacher.Detach missing value for parameter volumeName"))
klog.Error(log("detacher.Detach missing value for parameter volumeName"))
return errors.New("missing expected parameter volumeName")
}
parts := strings.Split(volumeName, volNameSep)
if len(parts) != 2 {
glog.Error(log("detacher.Detach insufficient info encoded in volumeName"))
klog.Error(log("detacher.Detach insufficient info encoded in volumeName"))
return errors.New("volumeName missing expected data")
}
@@ -391,19 +422,19 @@ func (c *csiAttacher) Detach(volumeName string, nodeName types.NodeName) error {
if err := c.k8s.StorageV1beta1().VolumeAttachments().Delete(attachID, nil); err != nil {
if apierrs.IsNotFound(err) {
// object deleted or never existed, done
glog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] not found, object deleted", attachID, volID))
klog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] not found, object deleted", attachID, volID))
return nil
}
glog.Error(log("detacher.Detach failed to delete VolumeAttachment [%s]: %v", attachID, err))
klog.Error(log("detacher.Detach failed to delete VolumeAttachment [%s]: %v", attachID, err))
return err
}
glog.V(4).Info(log("detacher deleted ok VolumeAttachment.ID=%s", attachID))
klog.V(4).Info(log("detacher deleted ok VolumeAttachment.ID=%s", attachID))
return c.waitForVolumeDetachment(volID, attachID)
}
func (c *csiAttacher) waitForVolumeDetachment(volumeHandle, attachID string) error {
glog.V(4).Info(log("probing for updates from CSI driver for [attachment.ID=%v]", attachID))
klog.V(4).Info(log("probing for updates from CSI driver for [attachment.ID=%v]", attachID))
timeout := c.waitSleepTime * 10
timer := time.NewTimer(timeout) // TODO (vladimirvivien) investigate making this configurable
@@ -413,21 +444,21 @@ func (c *csiAttacher) waitForVolumeDetachment(volumeHandle, attachID string) err
}
func (c *csiAttacher) waitForVolumeDetachmentInternal(volumeHandle, attachID string, timer *time.Timer, timeout time.Duration) error {
glog.V(4).Info(log("probing VolumeAttachment [id=%v]", attachID))
klog.V(4).Info(log("probing VolumeAttachment [id=%v]", attachID))
attach, err := c.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
if err != nil {
if apierrs.IsNotFound(err) {
//object deleted or never existed, done
glog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] not found, object deleted", attachID, volumeHandle))
klog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] not found, object deleted", attachID, volumeHandle))
return nil
}
glog.Error(log("detacher.WaitForDetach failed for volume [%s] (will continue to try): %v", volumeHandle, err))
klog.Error(log("detacher.WaitForDetach failed for volume [%s] (will continue to try): %v", volumeHandle, err))
return err
}
// driver reports detach error
detachErr := attach.Status.DetachError
if detachErr != nil {
glog.Error(log("detachment for VolumeAttachment [%v] for volume [%s] failed: %v", attachID, volumeHandle, detachErr.Message))
klog.Error(log("detachment for VolumeAttachment [%v] for volume [%s] failed: %v", attachID, volumeHandle, detachErr.Message))
return errors.New(detachErr.Message)
}
@@ -442,7 +473,7 @@ func (c *csiAttacher) waitForVolumeDetachmentInternal(volumeHandle, attachID str
select {
case event, ok := <-ch:
if !ok {
glog.Errorf("[attachment.ID=%v] watch channel had been closed", attachID)
klog.Errorf("[attachment.ID=%v] watch channel had been closed", attachID)
return errors.New("volume attachment watch channel had been closed")
}
@@ -452,12 +483,12 @@ func (c *csiAttacher) waitForVolumeDetachmentInternal(volumeHandle, attachID str
// driver reports detach error
detachErr := attach.Status.DetachError
if detachErr != nil {
glog.Error(log("detachment for VolumeAttachment [%v] for volume [%s] failed: %v", attachID, volumeHandle, detachErr.Message))
klog.Error(log("detachment for VolumeAttachment [%v] for volume [%s] failed: %v", attachID, volumeHandle, detachErr.Message))
return errors.New(detachErr.Message)
}
case watch.Deleted:
//object deleted
glog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] has been deleted", attachID, volumeHandle))
klog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] has been deleted", attachID, volumeHandle))
return nil
case watch.Error:
@@ -466,14 +497,14 @@ func (c *csiAttacher) waitForVolumeDetachmentInternal(volumeHandle, attachID str
}
case <-timer.C:
glog.Error(log("detacher.WaitForDetach timeout after %v [volume=%v; attachment.ID=%v]", timeout, volumeHandle, attachID))
klog.Error(log("detacher.WaitForDetach timeout after %v [volume=%v; attachment.ID=%v]", timeout, volumeHandle, attachID))
return fmt.Errorf("detachment timeout for volume %v", volumeHandle)
}
}
}
func (c *csiAttacher) UnmountDevice(deviceMountPath string) error {
glog.V(4).Info(log("attacher.UnmountDevice(%s)", deviceMountPath))
klog.V(4).Info(log("attacher.UnmountDevice(%s)", deviceMountPath))
// Setup
var driverName, volID string
@@ -483,31 +514,35 @@ func (c *csiAttacher) UnmountDevice(deviceMountPath string) error {
driverName = data[volDataKey.driverName]
volID = data[volDataKey.volHandle]
} else {
glog.Error(log("UnmountDevice failed to load volume data file [%s]: %v", dataDir, err))
klog.Error(log("UnmountDevice failed to load volume data file [%s]: %v", dataDir, err))
// The volume might have been mounted by old CSI volume plugin. Fall back to the old behavior: read PV from API server
driverName, volID, err = getDriverAndVolNameFromDeviceMountPath(c.k8s, deviceMountPath)
if err != nil {
glog.Errorf(log("attacher.UnmountDevice failed to get driver and volume name from device mount path: %v", err))
klog.Errorf(log("attacher.UnmountDevice failed to get driver and volume name from device mount path: %v", err))
return err
}
}
if c.csiClient == nil {
c.csiClient = newCsiDriverClient(driverName)
c.csiClient, err = newCsiDriverClient(csiDriverName(driverName))
if err != nil {
klog.Errorf(log("attacher.UnmountDevice failed to create newCsiDriverClient: %v", err))
return err
}
}
csi := c.csiClient
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
defer cancel()
// Check whether "STAGE_UNSTAGE_VOLUME" is set
stageUnstageSet, err := hasStageUnstageCapability(ctx, csi)
stageUnstageSet, err := csi.NodeSupportsStageUnstage(ctx)
if err != nil {
glog.Errorf(log("attacher.UnmountDevice failed to check whether STAGE_UNSTAGE_VOLUME set: %v", err))
klog.Errorf(log("attacher.UnmountDevice failed to check whether STAGE_UNSTAGE_VOLUME set: %v", err))
return err
}
if !stageUnstageSet {
glog.Infof(log("attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice..."))
klog.Infof(log("attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice..."))
// Just delete the global directory + json file
if err := removeMountDir(c.plugin, deviceMountPath); err != nil {
return fmt.Errorf("failed to clean up gloubal mount %s: %s", dataDir, err)
@@ -522,7 +557,7 @@ func (c *csiAttacher) UnmountDevice(deviceMountPath string) error {
deviceMountPath)
if err != nil {
glog.Errorf(log("attacher.UnmountDevice failed: %v", err))
klog.Errorf(log("attacher.UnmountDevice failed: %v", err))
return err
}
@@ -531,28 +566,10 @@ func (c *csiAttacher) UnmountDevice(deviceMountPath string) error {
return fmt.Errorf("failed to clean up gloubal mount %s: %s", dataDir, err)
}
glog.V(4).Infof(log("attacher.UnmountDevice successfully requested NodeStageVolume [%s]", deviceMountPath))
klog.V(4).Infof(log("attacher.UnmountDevice successfully requested NodeStageVolume [%s]", deviceMountPath))
return nil
}
func hasStageUnstageCapability(ctx context.Context, csi csiClient) (bool, error) {
capabilities, err := csi.NodeGetCapabilities(ctx)
if err != nil {
return false, err
}
stageUnstageSet := false
if capabilities == nil {
return false, nil
}
for _, capability := range capabilities {
if capability.GetRpc().GetType() == csipb.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME {
stageUnstageSet = true
}
}
return stageUnstageSet, nil
}
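Its replacement, csi.NodeSupportsStageUnstage(ctx), moves the same capability scan behind the CSI client interface. A hedged sketch of what that method presumably looks like, mirroring the removed helper above:

	// Assumed: the client issues NodeGetCapabilities itself and scans the result;
	// nodeGetCapabilities is a hypothetical internal call, not shown in this diff.
	func (c *csiDriverClient) NodeSupportsStageUnstage(ctx context.Context) (bool, error) {
		capabilities, err := c.nodeGetCapabilities(ctx)
		if err != nil {
			return false, err
		}
		for _, capability := range capabilities {
			if capability.GetRpc().GetType() == csipb.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME {
				return true, nil
			}
		}
		return false, nil
	}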
// getAttachmentName returns csi-<sha252(volName,csiDriverName,NodeName>
func getAttachmentName(volName, csiDriverName, nodeName string) string {
result := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", volName, csiDriverName, nodeName)))
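The hunk is truncated here; per the comment above, the digest is presumably rendered as a hex string with a csi- prefix, which is what makes the attachment name deterministic for a given (volume, driver, node) triple:

	// Assumed continuation (not shown in this diff):
	// return fmt.Sprintf("csi-%x", result)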

View File

@@ -29,14 +29,26 @@ import (
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
clientset "k8s.io/client-go/kubernetes"
fakeclient "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
utiltesting "k8s.io/client-go/util/testing"
fakecsi "k8s.io/csi-api/pkg/client/clientset/versioned/fake"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
var (
bFalse = false
bTrue = true
)
func makeTestAttachment(attachID, nodeName, pvName string) *storage.VolumeAttachment {
return &storage.VolumeAttachment{
ObjectMeta: meta.ObjectMeta{
@@ -57,6 +69,40 @@ func makeTestAttachment(attachID, nodeName, pvName string) *storage.VolumeAttach
}
}
func markVolumeAttached(t *testing.T, client clientset.Interface, watch *watch.RaceFreeFakeWatcher, attachID string, status storage.VolumeAttachmentStatus) {
ticker := time.NewTicker(10 * time.Millisecond)
var attach *storage.VolumeAttachment
var err error
defer ticker.Stop()
// wait for attachment to be saved
for i := 0; i < 100; i++ {
attach, err = client.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
if err != nil {
if apierrs.IsNotFound(err) {
<-ticker.C
continue
}
t.Error(err)
}
if attach != nil {
klog.Infof("stopping wait")
break
}
}
klog.Infof("stopped wait")
if attach == nil {
t.Logf("attachment not found for id:%v", attachID)
} else {
attach.Status = status
_, err := client.StorageV1beta1().VolumeAttachments().Update(attach)
if err != nil {
t.Error(err)
}
watch.Modify(attach)
}
}
func TestAttacherAttach(t *testing.T) {
testCases := []struct {
@@ -120,8 +166,7 @@ func TestAttacherAttach(t *testing.T) {
// attacher loop
for i, tc := range testCases {
t.Logf("test case: %s", tc.name)
plug, fakeWatcher, tmpDir, _ := newTestWatchPlugin(t)
plug, fakeWatcher, tmpDir, _ := newTestWatchPlugin(t, nil)
defer os.RemoveAll(tmpDir)
attacher, err := plug.NewAttacher()
@@ -146,42 +191,211 @@ func TestAttacherAttach(t *testing.T) {
}
}(tc.attachID, tc.nodeName, tc.shouldFail)
// update attachment to avoid long waitForAttachment
ticker := time.NewTicker(10 * time.Millisecond)
defer ticker.Stop()
// wait for attachment to be saved
var attach *storage.VolumeAttachment
for i := 0; i < 100; i++ {
attach, err = csiAttacher.k8s.StorageV1beta1().VolumeAttachments().Get(tc.attachID, meta.GetOptions{})
if err != nil {
if apierrs.IsNotFound(err) {
<-ticker.C
continue
}
t.Error(err)
var status storage.VolumeAttachmentStatus
if tc.injectAttacherError {
status.Attached = false
status.AttachError = &storage.VolumeError{
Message: "attacher error",
}
if attach != nil {
break
}
}
if attach == nil {
t.Logf("attachment not found for id:%v", tc.attachID)
} else {
if tc.injectAttacherError {
attach.Status.Attached = false
attach.Status.AttachError = &storage.VolumeError{
Message: "attacher error",
}
} else {
attach.Status.Attached = true
}
_, err = csiAttacher.k8s.StorageV1beta1().VolumeAttachments().Update(attach)
if err != nil {
t.Error(err)
}
fakeWatcher.Modify(attach)
status.Attached = true
}
markVolumeAttached(t, csiAttacher.k8s, fakeWatcher, tc.attachID, status)
}
}
func TestAttacherWithCSIDriver(t *testing.T) {
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIDriverRegistry, true)()
tests := []struct {
name string
driver string
expectVolumeAttachment bool
}{
{
name: "CSIDriver not attachable",
driver: "not-attachable",
expectVolumeAttachment: false,
},
{
name: "CSIDriver is attachable",
driver: "attachable",
expectVolumeAttachment: true,
},
{
name: "CSIDriver.AttachRequired not set -> failure",
driver: "nil",
expectVolumeAttachment: true,
},
{
name: "CSIDriver does not exist not set -> failure",
driver: "unknown",
expectVolumeAttachment: true,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fakeCSIClient := fakecsi.NewSimpleClientset(
getCSIDriver("not-attachable", nil, &bFalse),
getCSIDriver("attachable", nil, &bTrue),
getCSIDriver("nil", nil, nil),
)
plug, fakeWatcher, tmpDir, _ := newTestWatchPlugin(t, fakeCSIClient)
defer os.RemoveAll(tmpDir)
attacher, err := plug.NewAttacher()
if err != nil {
t.Fatalf("failed to create new attacher: %v", err)
}
csiAttacher := attacher.(*csiAttacher)
spec := volume.NewSpecFromPersistentVolume(makeTestPV("test-pv", 10, test.driver, "test-vol"), false)
expectedAttachID := getAttachmentName("test-vol", test.driver, "node")
status := storage.VolumeAttachmentStatus{
Attached: true,
}
if test.expectVolumeAttachment {
go markVolumeAttached(t, csiAttacher.k8s, fakeWatcher, expectedAttachID, status)
}
attachID, err := csiAttacher.Attach(spec, types.NodeName("node"))
if err != nil {
t.Errorf("Attach() failed: %s", err)
}
if test.expectVolumeAttachment && attachID == "" {
t.Errorf("Epected attachID, got nothing")
}
if !test.expectVolumeAttachment && attachID != "" {
t.Errorf("Epected empty attachID, got %q", attachID)
}
})
}
}
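The getCSIDriver fixture is not part of this hunk; from its call sites it presumably builds a csi.storage.k8s.io/v1alpha1 CSIDriver whose AttachRequired field feeds skipAttach. An assumed reconstruction (csiapi standing for the k8s.io/csi-api v1alpha1 package listed in the BUILD deps above):

	// Hypothetical test fixture; field names follow the csi-api v1alpha1 types.
	func getCSIDriver(name string, podInfoMountVersion *string, attachable *bool) *csiapi.CSIDriver {
		return &csiapi.CSIDriver{
			ObjectMeta: meta.ObjectMeta{Name: name},
			Spec: csiapi.CSIDriverSpec{
				PodInfoOnMountVersion: podInfoMountVersion,
				AttachRequired:        attachable,
			},
		}
	}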
func TestAttacherWaitForVolumeAttachmentWithCSIDriver(t *testing.T) {
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIDriverRegistry, true)()
// In order to detect if the volume plugin would skip WaitForAttach for non-attachable drivers,
// we do not instantiate any VolumeAttachment. So if the plugin does not skip attach, WaitForVolumeAttachment
// will return an error that volume attachment was not found.
tests := []struct {
name string
driver string
expectError bool
}{
{
name: "CSIDriver not attachable -> success",
driver: "not-attachable",
expectError: false,
},
{
name: "CSIDriver is attachable -> failure",
driver: "attachable",
expectError: true,
},
{
name: "CSIDriver.AttachRequired not set -> failure",
driver: "nil",
expectError: true,
},
{
name: "CSIDriver does not exist not set -> failure",
driver: "unknown",
expectError: true,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fakeCSIClient := fakecsi.NewSimpleClientset(
getCSIDriver("not-attachable", nil, &bFalse),
getCSIDriver("attachable", nil, &bTrue),
getCSIDriver("nil", nil, nil),
)
plug, tmpDir := newTestPlugin(t, nil, fakeCSIClient)
defer os.RemoveAll(tmpDir)
attacher, err := plug.NewAttacher()
if err != nil {
t.Fatalf("failed to create new attacher: %v", err)
}
csiAttacher := attacher.(*csiAttacher)
spec := volume.NewSpecFromPersistentVolume(makeTestPV("test-pv", 10, test.driver, "test-vol"), false)
_, err = csiAttacher.WaitForAttach(spec, "", nil, time.Second)
if err != nil && !test.expectError {
t.Errorf("Unexpected error: %s", err)
}
if err == nil && test.expectError {
t.Errorf("Expected error, got none")
}
})
}
}
func TestAttacherWaitForAttach(t *testing.T) {
tests := []struct {
name string
driver string
makeAttachment func() *storage.VolumeAttachment
expectedAttachID string
expectError bool
}{
{
name: "successful attach",
driver: "attachable",
makeAttachment: func() *storage.VolumeAttachment {
testAttachID := getAttachmentName("test-vol", "attachable", "node")
successfulAttachment := makeTestAttachment(testAttachID, "node", "test-pv")
successfulAttachment.Status.Attached = true
return successfulAttachment
},
expectedAttachID: getAttachmentName("test-vol", "attachable", "node"),
expectError: false,
},
{
name: "failed attach",
driver: "attachable",
expectError: true,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
plug, _, tmpDir, _ := newTestWatchPlugin(t, nil)
defer os.RemoveAll(tmpDir)
attacher, err := plug.NewAttacher()
if err != nil {
t.Fatalf("failed to create new attacher: %v", err)
}
csiAttacher := attacher.(*csiAttacher)
spec := volume.NewSpecFromPersistentVolume(makeTestPV("test-pv", 10, test.driver, "test-vol"), false)
if test.makeAttachment != nil {
attachment := test.makeAttachment()
_, err = csiAttacher.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
if err != nil {
t.Fatalf("failed to create VolumeAttachment: %v", err)
}
gotAttachment, err := csiAttacher.k8s.StorageV1beta1().VolumeAttachments().Get(attachment.Name, meta.GetOptions{})
if err != nil {
t.Fatalf("failed to get created VolumeAttachment: %v", err)
}
t.Logf("created test VolumeAttachment %+v", gotAttachment)
}
attachID, err := csiAttacher.WaitForAttach(spec, "", nil, time.Second)
if err != nil && !test.expectError {
t.Errorf("Unexpected error: %s", err)
}
if err == nil && test.expectError {
t.Errorf("Expected error, got none")
}
if attachID != test.expectedAttachID {
t.Errorf("Expected attachID %q, got %q", test.expectedAttachID, attachID)
}
})
}
}
@@ -237,7 +451,7 @@ func TestAttacherWaitForVolumeAttachment(t *testing.T) {
}
for i, tc := range testCases {
plug, fakeWatcher, tmpDir, _ := newTestWatchPlugin(t)
plug, fakeWatcher, tmpDir, _ := newTestWatchPlugin(t, nil)
defer os.RemoveAll(tmpDir)
attacher, err := plug.NewAttacher()
@@ -287,7 +501,7 @@ func TestAttacherWaitForVolumeAttachment(t *testing.T) {
}
func TestAttacherVolumesAreAttached(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
attacher, err := plug.NewAttacher()
@@ -374,7 +588,7 @@ func TestAttacherDetach(t *testing.T) {
for _, tc := range testCases {
t.Logf("running test: %v", tc.name)
plug, fakeWatcher, tmpDir, client := newTestWatchPlugin(t)
plug, fakeWatcher, tmpDir, client := newTestWatchPlugin(t, nil)
defer os.RemoveAll(tmpDir)
if tc.reactor != nil {
client.PrependReactor("*", "*", tc.reactor)
@@ -423,7 +637,7 @@ func TestAttacherDetach(t *testing.T) {
func TestAttacherGetDeviceMountPath(t *testing.T) {
// Setup
// Create a new attacher
plug, _, tmpDir, _ := newTestWatchPlugin(t)
plug, _, tmpDir, _ := newTestWatchPlugin(t, nil)
defer os.RemoveAll(tmpDir)
attacher, err0 := plug.NewAttacher()
if err0 != nil {
@@ -532,7 +746,7 @@ func TestAttacherMountDevice(t *testing.T) {
// Setup
// Create a new attacher
plug, fakeWatcher, tmpDir, _ := newTestWatchPlugin(t)
plug, fakeWatcher, tmpDir, _ := newTestWatchPlugin(t, nil)
defer os.RemoveAll(tmpDir)
attacher, err0 := plug.NewAttacher()
if err0 != nil {
@@ -541,6 +755,10 @@ func TestAttacherMountDevice(t *testing.T) {
csiAttacher := attacher.(*csiAttacher)
csiAttacher.csiClient = setupClient(t, tc.stageUnstageSet)
if tc.deviceMountPath != "" {
tc.deviceMountPath = filepath.Join(tmpDir, tc.deviceMountPath)
}
nodeName := string(csiAttacher.plugin.host.GetNodeName())
// Create spec
@@ -585,12 +803,12 @@ func TestAttacherMountDevice(t *testing.T) {
t.Errorf("got wrong number of staged volumes, expecting %v got: %v", numStaged, len(staged))
}
if tc.stageUnstageSet {
gotPath, ok := staged[tc.volName]
vol, ok := staged[tc.volName]
if !ok {
t.Errorf("could not find staged volume: %s", tc.volName)
}
if gotPath != tc.deviceMountPath {
t.Errorf("expected mount path: %s. got: %s", tc.deviceMountPath, gotPath)
if vol.Path != tc.deviceMountPath {
t.Errorf("expected mount path: %s. got: %s", tc.deviceMountPath, vol.Path)
}
}
}
@@ -649,6 +867,8 @@ func TestAttacherUnmountDevice(t *testing.T) {
},
{
testName: "stage_unstage not set no vars should not fail",
deviceMountPath: "plugins/csi/pv/test-pv-name/globalmount",
jsonFile: `{"driverName":"test-driver","volumeHandle":"test-vol1"}`,
stageUnstageSet: false,
},
}
@@ -657,7 +877,7 @@ func TestAttacherUnmountDevice(t *testing.T) {
t.Logf("Running test case: %s", tc.testName)
// Setup
// Create a new attacher
plug, _, tmpDir, _ := newTestWatchPlugin(t)
plug, _, tmpDir, _ := newTestWatchPlugin(t, nil)
defer os.RemoveAll(tmpDir)
attacher, err0 := plug.NewAttacher()
if err0 != nil {
@@ -672,7 +892,7 @@ func TestAttacherUnmountDevice(t *testing.T) {
// Add the volume to NodeStagedVolumes
cdc := csiAttacher.csiClient.(*fakeCsiDriverClient)
cdc.nodeClient.AddNodeStagedVolume(tc.volID, tc.deviceMountPath)
cdc.nodeClient.AddNodeStagedVolume(tc.volID, tc.deviceMountPath, nil)
// Make JSON for this object
if tc.deviceMountPath != "" {
@@ -743,7 +963,7 @@ func TestAttacherUnmountDevice(t *testing.T) {
}
// create a plugin mgr to load plugins and setup a fake client
func newTestWatchPlugin(t *testing.T) (*csiPlugin, *watch.RaceFreeFakeWatcher, string, *fakeclient.Clientset) {
func newTestWatchPlugin(t *testing.T, csiClient *fakecsi.Clientset) (*csiPlugin, *watch.RaceFreeFakeWatcher, string, *fakeclient.Clientset) {
tmpDir, err := utiltesting.MkTmpdir("csi-test")
if err != nil {
t.Fatalf("can't create temp dir: %v", err)
@@ -753,10 +973,15 @@ func newTestWatchPlugin(t *testing.T) (*csiPlugin, *watch.RaceFreeFakeWatcher, s
fakeWatcher := watch.NewRaceFreeFake()
fakeClient.Fake.PrependWatchReactor("*", core.DefaultWatchReactor(fakeWatcher, nil))
fakeClient.Fake.WatchReactionChain = fakeClient.Fake.WatchReactionChain[:1]
host := volumetest.NewFakeVolumeHost(
if csiClient == nil {
csiClient = fakecsi.NewSimpleClientset()
}
host := volumetest.NewFakeVolumeHostWithCSINodeName(
tmpDir,
fakeClient,
csiClient,
nil,
"node",
)
plugMgr := &volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host)
@@ -771,5 +996,12 @@ func newTestWatchPlugin(t *testing.T) (*csiPlugin, *watch.RaceFreeFakeWatcher, s
t.Fatalf("cannot assert plugin to be type csiPlugin")
}
if utilfeature.DefaultFeatureGate.Enabled(features.CSIDriverRegistry) {
// Wait until the informer in CSI volume plugin has all CSIDrivers.
wait.PollImmediate(testInformerSyncPeriod, testInformerSyncTimeout, func() (bool, error) {
return csiPlug.csiDriverInformer.Informer().HasSynced(), nil
})
}
return csiPlug, fakeWatcher, tmpDir, fakeClient
}

View File

@@ -21,22 +21,26 @@ import (
"errors"
"fmt"
"os"
"path"
"path/filepath"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1beta1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
kstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
ioutil "k8s.io/kubernetes/pkg/volume/util"
)
type csiBlockMapper struct {
k8s kubernetes.Interface
csiClient csiClient
plugin *csiPlugin
driverName string
driverName csiDriverName
specName string
volumeID string
readOnly bool
@@ -47,76 +51,61 @@ type csiBlockMapper struct {
var _ volume.BlockVolumeMapper = &csiBlockMapper{}
// GetGlobalMapPath returns a path (on the node) where the devicePath will be symlinked to
// Example: plugins/kubernetes.io/csi/volumeDevices/{volumeID}
// GetGlobalMapPath returns a global map path (on the node) to a device file which will be symlinked to
// Example: plugins/kubernetes.io/csi/volumeDevices/{pvname}/dev
func (m *csiBlockMapper) GetGlobalMapPath(spec *volume.Spec) (string, error) {
dir := getVolumeDevicePluginDir(spec.Name(), m.plugin.host)
glog.V(4).Infof(log("blockMapper.GetGlobalMapPath = %s", dir))
klog.V(4).Infof(log("blockMapper.GetGlobalMapPath = %s", dir))
return dir, nil
}
// GetPodDeviceMapPath returns pod's device map path and volume name
// path: pods/{podUid}/volumeDevices/kubernetes.io~csi/, {volumeID}
// getStagingPath returns a staging path for a directory (on the node) that should be used on NodeStageVolume/NodeUnstageVolume
// Example: plugins/kubernetes.io/csi/volumeDevices/staging/{pvname}
func (m *csiBlockMapper) getStagingPath() string {
sanitizedSpecVolID := kstrings.EscapeQualifiedNameForDisk(m.specName)
return path.Join(m.plugin.host.GetVolumeDevicePluginDir(csiPluginName), "staging", sanitizedSpecVolID)
}
// getPublishPath returns a publish path for a file (on the node) that should be used on NodePublishVolume/NodeUnpublishVolume
// Example: plugins/kubernetes.io/csi/volumeDevices/publish/{pvname}
func (m *csiBlockMapper) getPublishPath() string {
sanitizedSpecVolID := kstrings.EscapeQualifiedNameForDisk(m.specName)
return path.Join(m.plugin.host.GetVolumeDevicePluginDir(csiPluginName), "publish", sanitizedSpecVolID)
}
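Concretely, for a PV named pv-0001 under an illustrative kubelet root of /var/lib/kubelet (the root is an assumption; only the path layout comes from the comments above), the two helpers resolve to paths like:

	/var/lib/kubelet/plugins/kubernetes.io/csi/volumeDevices/staging/pv-0001   // NodeStageVolume target
	/var/lib/kubelet/plugins/kubernetes.io/csi/volumeDevices/publish/pv-0001   // NodePublishVolume target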
// GetPodDeviceMapPath returns pod's device file which will be mapped to a volume
// returns: pods/{podUid}/volumeDevices/kubernetes.io~csi, {pvname}
func (m *csiBlockMapper) GetPodDeviceMapPath() (string, string) {
path, specName := m.plugin.host.GetPodVolumeDeviceDir(m.podUID, csiPluginName), m.specName
glog.V(4).Infof(log("blockMapper.GetPodDeviceMapPath = %s", path))
path := m.plugin.host.GetPodVolumeDeviceDir(m.podUID, kstrings.EscapeQualifiedNameForDisk(csiPluginName))
specName := m.specName
klog.V(4).Infof(log("blockMapper.GetPodDeviceMapPath [path=%s; name=%s]", path, specName))
return path, specName
}
// SetUpDevice ensures the device is attached and returns the path where the device is located.
func (m *csiBlockMapper) SetUpDevice() (string, error) {
if !m.plugin.blockEnabled {
return "", errors.New("CSIBlockVolume feature not enabled")
}
// stageVolumeForBlock stages a block volume to stagingPath
func (m *csiBlockMapper) stageVolumeForBlock(
ctx context.Context,
csi csiClient,
accessMode v1.PersistentVolumeAccessMode,
csiSource *v1.CSIPersistentVolumeSource,
attachment *storage.VolumeAttachment,
) (string, error) {
klog.V(4).Infof(log("blockMapper.stageVolumeForBlock called"))
glog.V(4).Infof(log("blockMapper.SetupDevice called"))
if m.spec == nil {
glog.Error(log("blockMapper.Map spec is nil"))
return "", fmt.Errorf("spec is nil")
}
csiSource, err := getCSISourceFromSpec(m.spec)
if err != nil {
glog.Error(log("blockMapper.SetupDevice failed to get CSI persistent source: %v", err))
return "", err
}
globalMapPath, err := m.GetGlobalMapPath(m.spec)
if err != nil {
glog.Error(log("blockMapper.SetupDevice failed to get global map path: %v", err))
return "", err
}
csi := m.csiClient
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
defer cancel()
stagingPath := m.getStagingPath()
klog.V(4).Infof(log("blockMapper.stageVolumeForBlock stagingPath set [%s]", stagingPath))
// Check whether "STAGE_UNSTAGE_VOLUME" is set
stageUnstageSet, err := hasStageUnstageCapability(ctx, csi)
stageUnstageSet, err := csi.NodeSupportsStageUnstage(ctx)
if err != nil {
glog.Error(log("blockMapper.SetupDevice failed to check STAGE_UNSTAGE_VOLUME capability: %v", err))
klog.Error(log("blockMapper.stageVolumeForBlock failed to check STAGE_UNSTAGE_VOLUME capability: %v", err))
return "", err
}
if !stageUnstageSet {
glog.Infof(log("blockMapper.SetupDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..."))
klog.Infof(log("blockMapper.stageVolumeForBlock STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..."))
return "", nil
}
// Start MountDevice
nodeName := string(m.plugin.host.GetNodeName())
attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, nodeName)
// search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
attachment, err := m.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
if err != nil {
glog.Error(log("blockMapper.SetupDevice failed to get volume attachment [id=%v]: %v", attachID, err))
return "", err
}
if attachment == nil {
glog.Error(log("blockMapper.SetupDevice unable to find VolumeAttachment [id=%s]", attachID))
return "", errors.New("no existing VolumeAttachment found")
}
publishVolumeInfo := attachment.Status.AttachmentMetadata
nodeStageSecrets := map[string]string{}
@@ -128,95 +117,126 @@ func (m *csiBlockMapper) SetUpDevice() (string, error) {
}
}
// create globalMapPath before call to NodeStageVolume
if err := os.MkdirAll(globalMapPath, 0750); err != nil {
glog.Error(log("blockMapper.SetupDevice failed to create dir %s: %v", globalMapPath, err))
// Creating a stagingPath directory before call to NodeStageVolume
if err := os.MkdirAll(stagingPath, 0750); err != nil {
klog.Error(log("blockMapper.stageVolumeForBlock failed to create dir %s: %v", stagingPath, err))
return "", err
}
glog.V(4).Info(log("blockMapper.SetupDevice created global device map path successfully [%s]", globalMapPath))
//TODO (vladimirvivien) implement better AccessModes mapping between k8s and CSI
accessMode := v1.ReadWriteOnce
if m.spec.PersistentVolume.Spec.AccessModes != nil {
accessMode = m.spec.PersistentVolume.Spec.AccessModes[0]
}
klog.V(4).Info(log("blockMapper.stageVolumeForBlock created stagingPath directory successfully [%s]", stagingPath))
// Request to stage a block volume to stagingPath.
// Expected implementation for driver is creating driver specific resource on stagingPath and
// attaching the block volume to the node.
err = csi.NodeStageVolume(ctx,
csiSource.VolumeHandle,
publishVolumeInfo,
globalMapPath,
stagingPath,
fsTypeBlockName,
accessMode,
nodeStageSecrets,
csiSource.VolumeAttributes)
if err != nil {
glog.Error(log("blockMapper.SetupDevice failed: %v", err))
if err := os.RemoveAll(globalMapPath); err != nil {
glog.Error(log("blockMapper.SetupDevice failed to remove dir after a NodeStageVolume() error [%s]: %v", globalMapPath, err))
}
klog.Error(log("blockMapper.stageVolumeForBlock failed: %v", err))
return "", err
}
glog.V(4).Infof(log("blockMapper.SetupDevice successfully requested NodeStageVolume [%s]", globalMapPath))
return globalMapPath, nil
klog.V(4).Infof(log("blockMapper.stageVolumeForBlock successfully requested NodeStageVolume [%s]", stagingPath))
return stagingPath, nil
}
func (m *csiBlockMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, volumeMapName string, podUID types.UID) error {
if !m.plugin.blockEnabled {
return errors.New("CSIBlockVolume feature not enabled")
// publishVolumeForBlock publishes a block volume to publishPath
func (m *csiBlockMapper) publishVolumeForBlock(
ctx context.Context,
csi csiClient,
accessMode v1.PersistentVolumeAccessMode,
csiSource *v1.CSIPersistentVolumeSource,
attachment *storage.VolumeAttachment,
stagingPath string,
) (string, error) {
klog.V(4).Infof(log("blockMapper.publishVolumeForBlock called"))
publishVolumeInfo := attachment.Status.AttachmentMetadata
nodePublishSecrets := map[string]string{}
var err error
if csiSource.NodePublishSecretRef != nil {
nodePublishSecrets, err = getCredentialsFromSecret(m.k8s, csiSource.NodePublishSecretRef)
if err != nil {
klog.Errorf("blockMapper.publishVolumeForBlock failed to get NodePublishSecretRef %s/%s: %v",
csiSource.NodePublishSecretRef.Namespace, csiSource.NodePublishSecretRef.Name, err)
return "", err
}
}
glog.V(4).Infof(log("blockMapper.MapDevice mapping block device %s", devicePath))
publishPath := m.getPublishPath()
// Setup a parent directory for publishPath before call to NodePublishVolume
publishDir := filepath.Dir(publishPath)
if err := os.MkdirAll(publishDir, 0750); err != nil {
klog.Error(log("blockMapper.publishVolumeForBlock failed to create dir %s: %v", publishDir, err))
return "", err
}
klog.V(4).Info(log("blockMapper.publishVolumeForBlock created directory for publishPath successfully [%s]", publishDir))
// Request to publish a block volume to publishPath.
// Expectation for driver is to place a block volume on the publishPath, by bind-mounting the device file on the publishPath or
// creating device file on the publishPath.
// Parent directory for publishPath is created by k8s, but driver is responsible for creating publishPath itself.
// If driver doesn't implement NodeStageVolume, attaching the block volume to the node may be done here.
err = csi.NodePublishVolume(
ctx,
m.volumeID,
m.readOnly,
stagingPath,
publishPath,
accessMode,
publishVolumeInfo,
csiSource.VolumeAttributes,
nodePublishSecrets,
fsTypeBlockName,
[]string{},
)
if err != nil {
klog.Errorf(log("blockMapper.publishVolumeForBlock failed: %v", err))
return "", err
}
return publishPath, nil
}
// SetUpDevice ensures the device is attached and returns the path where the device is located.
func (m *csiBlockMapper) SetUpDevice() (string, error) {
if !m.plugin.blockEnabled {
return "", errors.New("CSIBlockVolume feature not enabled")
}
klog.V(4).Infof(log("blockMapper.SetUpDevice called"))
// Get csiSource from spec
if m.spec == nil {
glog.Error(log("blockMapper.MapDevice spec is nil"))
return fmt.Errorf("spec is nil")
klog.Error(log("blockMapper.SetUpDevice spec is nil"))
return "", fmt.Errorf("spec is nil")
}
csiSource, err := getCSISourceFromSpec(m.spec)
if err != nil {
glog.Error(log("blockMapper.Map failed to get CSI persistent source: %v", err))
return err
klog.Error(log("blockMapper.SetUpDevice failed to get CSI persistent source: %v", err))
return "", err
}
dir := filepath.Join(volumeMapPath, volumeMapName)
csi := m.csiClient
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
defer cancel()
// Search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
nodeName := string(m.plugin.host.GetNodeName())
attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, nodeName)
// search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
attachment, err := m.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
if err != nil {
glog.Error(log("blockMapper.MapDevice failed to get volume attachment [id=%v]: %v", attachID, err))
return err
klog.Error(log("blockMapper.SetupDevice failed to get volume attachment [id=%v]: %v", attachID, err))
return "", err
}
if attachment == nil {
glog.Error(log("blockMapper.MapDevice unable to find VolumeAttachment [id=%s]", attachID))
return errors.New("no existing VolumeAttachment found")
klog.Error(log("blockMapper.SetupDevice unable to find VolumeAttachment [id=%s]", attachID))
return "", errors.New("no existing VolumeAttachment found")
}
publishVolumeInfo := attachment.Status.AttachmentMetadata
nodePublishSecrets := map[string]string{}
if csiSource.NodePublishSecretRef != nil {
nodePublishSecrets, err = getCredentialsFromSecret(m.k8s, csiSource.NodePublishSecretRef)
if err != nil {
glog.Errorf("blockMapper.MapDevice failed to get NodePublishSecretRef %s/%s: %v",
csiSource.NodePublishSecretRef.Namespace, csiSource.NodePublishSecretRef.Name, err)
return err
}
}
if err := os.MkdirAll(dir, 0750); err != nil {
glog.Error(log("blockMapper.MapDevice failed to create dir %#v: %v", dir, err))
return err
}
glog.V(4).Info(log("blockMapper.MapDevice created NodePublish path [%s]", dir))
//TODO (vladimirvivien) implement better AccessModes mapping between k8s and CSI
accessMode := v1.ReadWriteOnce
@@ -224,60 +244,117 @@ func (m *csiBlockMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, vol
accessMode = m.spec.PersistentVolume.Spec.AccessModes[0]
}
err = csi.NodePublishVolume(
ctx,
m.volumeID,
m.readOnly,
globalMapPath,
dir,
accessMode,
publishVolumeInfo,
csiSource.VolumeAttributes,
nodePublishSecrets,
fsTypeBlockName,
)
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
defer cancel()
// Call NodeStageVolume
stagingPath, err := m.stageVolumeForBlock(ctx, m.csiClient, accessMode, csiSource, attachment)
if err != nil {
glog.Errorf(log("blockMapper.MapDevice failed: %v", err))
if err := os.RemoveAll(dir); err != nil {
glog.Error(log("blockMapper.MapDevice failed to remove mount dir after a NodePublish() error [%s]: %v", dir, err))
}
return "", err
}
// Call NodePublishVolume
publishPath, err := m.publishVolumeForBlock(ctx, m.csiClient, accessMode, csiSource, attachment, stagingPath)
if err != nil {
return "", err
}
return publishPath, nil
}
func (m *csiBlockMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, volumeMapName string, podUID types.UID) error {
return ioutil.MapBlockVolume(devicePath, globalMapPath, volumeMapPath, volumeMapName, podUID)
}
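MapDevice delegates to the shared block-volume utility. As a rough sketch of the behavior the tests below verify (simplified; the real helper also handles cleanup on failure), the device is exposed through two symlinks:
// Simplified sketch of what ioutil.MapBlockVolume is expected to do;
// names and error handling are reduced for illustration.
func mapBlockVolumeSketch(devicePath, globalMapPath, volumeMapPath, volumeMapName string, podUID types.UID) error {
	// {globalMapPath}/{podUID} -> devicePath
	if err := os.Symlink(devicePath, filepath.Join(globalMapPath, string(podUID))); err != nil {
		return err
	}
	// {volumeMapPath}/{volumeMapName} -> devicePath
	return os.Symlink(devicePath, filepath.Join(volumeMapPath, volumeMapName))
}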
var _ volume.BlockVolumeUnmapper = &csiBlockMapper{}
// unpublishVolumeForBlock unpublishes a block volume from publishPath
func (m *csiBlockMapper) unpublishVolumeForBlock(ctx context.Context, csi csiClient, publishPath string) error {
// Request to unpublish a block volume from publishPath.
// The driver is expected to remove the block volume from publishPath,
// either by unmounting the bind-mounted device file or by deleting the device file.
// The driver is also responsible for deleting publishPath itself.
// If the driver doesn't implement NodeUnstageVolume, detaching the block volume from the node may also be done here.
if err := csi.NodeUnpublishVolume(ctx, m.volumeID, publishPath); err != nil {
klog.Error(log("blockMapper.unpublishVolumeForBlock failed: %v", err))
return err
}
klog.V(4).Infof(log("blockMapper.unpublishVolumeForBlock NodeUnpublished successfully [%s]", publishPath))
return nil
}
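The contract above is implemented on the driver side; a hypothetical driver's handler for a block volume might look roughly like this (illustrative sketch only, not part of this change):
// Hypothetical driver-side NodeUnpublishVolume for a block volume:
// unmount the bind-mounted device file, then delete the file itself.
func driverUnpublishBlock(publishPath string) error {
	// EINVAL means publishPath was not a mount point (e.g. a plain
	// device file), which is fine; anything else is a real failure.
	if err := syscall.Unmount(publishPath, 0); err != nil && err != syscall.EINVAL {
		return fmt.Errorf("unmount %s: %v", publishPath, err)
	}
	if err := os.Remove(publishPath); err != nil && !os.IsNotExist(err) {
		return err
	}
	return nil
}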
// unstageVolumeForBlock unstages a block volume from stagingPath
func (m *csiBlockMapper) unstageVolumeForBlock(ctx context.Context, csi csiClient, stagingPath string) error {
// Check whether "STAGE_UNSTAGE_VOLUME" is set
stageUnstageSet, err := csi.NodeSupportsStageUnstage(ctx)
if err != nil {
klog.Error(log("blockMapper.unstageVolumeForBlock failed to check STAGE_UNSTAGE_VOLUME capability: %v", err))
return err
}
if !stageUnstageSet {
klog.Infof(log("blockMapper.unstageVolumeForBlock STAGE_UNSTAGE_VOLUME capability not set. Skipping unstageVolumeForBlock ..."))
return nil
}
// Request to unstage a block volume from stagingPath.
// The driver is expected to remove any driver-specific resources in stagingPath
// and detach the block volume from the node.
if err := csi.NodeUnstageVolume(ctx, m.volumeID, stagingPath); err != nil {
klog.Errorf(log("blockMapper.unstageVolumeForBlock failed: %v", err))
return err
}
klog.V(4).Infof(log("blockMapper.unstageVolumeForBlock NodeUnstageVolume successfully [%s]", stagingPath))
// Remove stagingPath directory and its contents
if err := os.RemoveAll(stagingPath); err != nil {
klog.Error(log("blockMapper.unstageVolumeForBlock failed to remove staging path after NodeUnstageVolume() error [%s]: %v", stagingPath, err))
return err
}
return nil
}
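Likewise for unstaging, a driver-side counterpart could plausibly look like this (hypothetical layout and helper; the detach step is storage specific):
// Hypothetical driver-side NodeUnstageVolume for a block volume.
func driverUnstageBlock(stagingPath, volID string) error {
	// Assumed layout: the staged device file sits under stagingPath.
	if err := os.Remove(filepath.Join(stagingPath, volID)); err != nil && !os.IsNotExist(err) {
		return err
	}
	return detachFromNode(volID)
}

// detachFromNode is a stand-in for the storage-backend specific detach.
func detachFromNode(volID string) error { return nil }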
var _ volume.BlockVolumeUnmapper = &csiBlockMapper{}
// TearDownDevice removes traces of the SetUpDevice.
func (m *csiBlockMapper) TearDownDevice(globalMapPath, devicePath string) error {
if !m.plugin.blockEnabled {
return errors.New("CSIBlockVolume feature not enabled")
}
glog.V(4).Infof(log("unmapper.TearDownDevice(globalMapPath=%s; devicePath=%s)", globalMapPath, devicePath))
klog.V(4).Infof(log("unmapper.TearDownDevice(globalMapPath=%s; devicePath=%s)", globalMapPath, devicePath))
csi := m.csiClient
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
defer cancel()
// unmap global device map path
if err := csi.NodeUnstageVolume(ctx, m.volumeID, globalMapPath); err != nil {
glog.Errorf(log("blockMapper.TearDownDevice failed: %v", err))
return err
}
glog.V(4).Infof(log("blockMapper.TearDownDevice NodeUnstageVolume successfully [%s]", globalMapPath))
// request to remove pod volume map path also
podVolumePath, volumeName := m.GetPodDeviceMapPath()
podVolumeMapPath := filepath.Join(podVolumePath, volumeName)
if err := csi.NodeUnpublishVolume(ctx, m.volumeID, podVolumeMapPath); err != nil {
glog.Error(log("blockMapper.TearDownDevice failed: %v", err))
return err
// Call NodeUnpublishVolume
publishPath := m.getPublishPath()
if _, err := os.Stat(publishPath); err != nil {
if os.IsNotExist(err) {
klog.V(4).Infof(log("blockMapper.TearDownDevice publishPath(%s) has already been deleted, skip calling NodeUnpublishVolume", publishPath))
} else {
return err
}
} else {
err := m.unpublishVolumeForBlock(ctx, m.csiClient, publishPath)
if err != nil {
return err
}
}
glog.V(4).Infof(log("blockMapper.TearDownDevice NodeUnpublished successfully [%s]", podVolumeMapPath))
// Call NodeUnstageVolume
stagingPath := m.getStagingPath()
if _, err := os.Stat(stagingPath); err != nil {
if os.IsNotExist(err) {
klog.V(4).Infof(log("blockMapper.TearDownDevice stagingPath(%s) has already been deleted, skip calling NodeUnstageVolume", stagingPath))
} else {
return err
}
} else {
err := m.unstageVolumeForBlock(ctx, m.csiClient, stagingPath)
if err != nil {
return err
}
}
return nil
}
@ -25,13 +25,34 @@ import (
api "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
fakeclient "k8s.io/client-go/kubernetes/fake"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
func prepareBlockMapperTest(plug *csiPlugin, specVolumeName string, t *testing.T) (*csiBlockMapper, *volume.Spec, *api.PersistentVolume, error) {
registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t)
pv := makeTestPV(specVolumeName, 10, testDriver, testVol)
spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
mapper, err := plug.NewBlockVolumeMapper(
spec,
&api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
volume.VolumeOptions{},
)
if err != nil {
return nil, nil, nil, fmt.Errorf("Failed to make a new Mapper: %v", err)
}
csiMapper := mapper.(*csiBlockMapper)
return csiMapper, spec, pv, nil
}
func TestBlockMapperGetGlobalMapPath(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
// TODO (vladimirvivien) specName with slashes will not work
@ -53,17 +74,10 @@ func TestBlockMapperGetGlobalMapPath(t *testing.T) {
}
for _, tc := range testCases {
t.Logf("test case: %s", tc.name)
pv := makeTestPV(tc.specVolumeName, 10, testDriver, testVol)
spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
mapper, err := plug.NewBlockVolumeMapper(
spec,
&api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
volume.VolumeOptions{},
)
csiMapper, spec, _, err := prepareBlockMapperTest(plug, tc.specVolumeName, t)
if err != nil {
t.Fatalf("Failed to make a new Mapper: %v", err)
}
csiMapper := mapper.(*csiBlockMapper)
path, err := csiMapper.GetGlobalMapPath(spec)
if err != nil {
@ -76,35 +90,147 @@ func TestBlockMapperGetGlobalMapPath(t *testing.T) {
}
}
func TestBlockMapperGetStagingPath(t *testing.T) {
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
testCases := []struct {
name string
specVolumeName string
path string
}{
{
name: "simple specName",
specVolumeName: "spec-0",
path: path.Join(tmpDir, fmt.Sprintf("plugins/kubernetes.io/csi/volumeDevices/staging/%s", "spec-0")),
},
{
name: "specName with dots",
specVolumeName: "test.spec.1",
path: path.Join(tmpDir, fmt.Sprintf("plugins/kubernetes.io/csi/volumeDevices/staging/%s", "test.spec.1")),
},
}
for _, tc := range testCases {
t.Logf("test case: %s", tc.name)
csiMapper, _, _, err := prepareBlockMapperTest(plug, tc.specVolumeName, t)
if err != nil {
t.Fatalf("Failed to make a new Mapper: %v", err)
}
path := csiMapper.getStagingPath()
if tc.path != path {
t.Errorf("expecting path %s, got %s", tc.path, path)
}
}
}
func TestBlockMapperGetPublishPath(t *testing.T) {
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
testCases := []struct {
name string
specVolumeName string
path string
}{
{
name: "simple specName",
specVolumeName: "spec-0",
path: path.Join(tmpDir, fmt.Sprintf("plugins/kubernetes.io/csi/volumeDevices/publish/%s", "spec-0")),
},
{
name: "specName with dots",
specVolumeName: "test.spec.1",
path: path.Join(tmpDir, fmt.Sprintf("plugins/kubernetes.io/csi/volumeDevices/publish/%s", "test.spec.1")),
},
}
for _, tc := range testCases {
t.Logf("test case: %s", tc.name)
csiMapper, _, _, err := prepareBlockMapperTest(plug, tc.specVolumeName, t)
if err != nil {
t.Fatalf("Failed to make a new Mapper: %v", err)
}
path := csiMapper.getPublishPath()
if tc.path != path {
t.Errorf("expecting path %s, got %s", tc.path, path)
}
}
}
func TestBlockMapperGetDeviceMapPath(t *testing.T) {
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
testCases := []struct {
name string
specVolumeName string
path string
}{
{
name: "simple specName",
specVolumeName: "spec-0",
path: path.Join(tmpDir, fmt.Sprintf("pods/%s/volumeDevices/kubernetes.io~csi", testPodUID)),
},
{
name: "specName with dots",
specVolumeName: "test.spec.1",
path: path.Join(tmpDir, fmt.Sprintf("pods/%s/volumeDevices/kubernetes.io~csi", testPodUID)),
},
}
for _, tc := range testCases {
t.Logf("test case: %s", tc.name)
csiMapper, _, _, err := prepareBlockMapperTest(plug, tc.specVolumeName, t)
if err != nil {
t.Fatalf("Failed to make a new Mapper: %v", err)
}
path, volName := csiMapper.GetPodDeviceMapPath()
if tc.path != path {
t.Errorf("expecting path %s, got %s", tc.path, path)
}
if tc.specVolumeName != volName {
t.Errorf("expecting volName %s, got %s", tc.specVolumeName, volName)
}
}
}
func TestBlockMapperSetupDevice(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
fakeClient := fakeclient.NewSimpleClientset()
host := volumetest.NewFakeVolumeHostWithNodeName(
host := volumetest.NewFakeVolumeHostWithCSINodeName(
tmpDir,
fakeClient,
nil,
nil,
"fakeNode",
)
plug.host = host
pv := makeTestPV("test-pv", 10, testDriver, testVol)
csiMapper, _, pv, err := prepareBlockMapperTest(plug, "test-pv", t)
if err != nil {
t.Fatalf("Failed to make a new Mapper: %v", err)
}
pvName := pv.GetName()
nodeName := string(plug.host.GetNodeName())
spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
// MapDevice
mapper, err := plug.NewBlockVolumeMapper(
spec,
&api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
volume.VolumeOptions{},
)
if err != nil {
t.Fatalf("failed to create new mapper: %v", err)
}
csiMapper := mapper.(*csiBlockMapper)
csiMapper.csiClient = setupClient(t, true)
attachID := getAttachmentName(csiMapper.volumeID, csiMapper.driverName, string(nodeName))
attachID := getAttachmentName(csiMapper.volumeID, string(csiMapper.driverName), string(nodeName))
attachment := makeTestAttachment(attachID, nodeName, pvName)
attachment.Status.Attached = true
_, err = csiMapper.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
@ -118,50 +244,60 @@ func TestBlockMapperSetupDevice(t *testing.T) {
t.Fatalf("mapper failed to SetupDevice: %v", err)
}
globalMapPath, err := csiMapper.GetGlobalMapPath(spec)
if err != nil {
t.Fatalf("mapper failed to GetGlobalMapPath: %v", err)
// Check if SetUpDevice returns the right path
publishPath := csiMapper.getPublishPath()
if devicePath != publishPath {
t.Fatalf("mapper.SetupDevice returned unexpected path %s instead of %v", devicePath, publishPath)
}
if devicePath != globalMapPath {
t.Fatalf("mapper.SetupDevice returned unexpected path %s instead of %v", devicePath, globalMapPath)
// Check if NodeStageVolume staged to the right path
stagingPath := csiMapper.getStagingPath()
svols := csiMapper.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodeStagedVolumes()
svol, ok := svols[csiMapper.volumeID]
if !ok {
t.Error("csi server may not have received NodeStageVolume call")
}
if svol.Path != stagingPath {
t.Errorf("csi server expected device path %s, got %s", stagingPath, svol.Path)
}
vols := csiMapper.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodeStagedVolumes()
if vols[csiMapper.volumeID] != devicePath {
// Check if NodePublishVolume published to the right path
pvols := csiMapper.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodePublishedVolumes()
pvol, ok := pvols[csiMapper.volumeID]
if !ok {
t.Error("csi server may not have received NodePublishVolume call")
}
if pvol.Path != publishPath {
t.Errorf("csi server expected path %s, got %s", publishPath, pvol.Path)
}
}
func TestBlockMapperMapDevice(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
fakeClient := fakeclient.NewSimpleClientset()
host := volumetest.NewFakeVolumeHostWithNodeName(
host := volumetest.NewFakeVolumeHostWithCSINodeName(
tmpDir,
fakeClient,
nil,
nil,
"fakeNode",
)
plug.host = host
pv := makeTestPV("test-pv", 10, testDriver, testVol)
csiMapper, _, pv, err := prepareBlockMapperTest(plug, "test-pv", t)
if err != nil {
t.Fatalf("Failed to make a new Mapper: %v", err)
}
pvName := pv.GetName()
nodeName := string(plug.host.GetNodeName())
spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
// MapDevice
mapper, err := plug.NewBlockVolumeMapper(
spec,
&api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
volume.VolumeOptions{},
)
if err != nil {
t.Fatalf("failed to create new mapper: %v", err)
}
csiMapper := mapper.(*csiBlockMapper)
csiMapper.csiClient = setupClient(t, true)
attachID := getAttachmentName(csiMapper.volumeID, csiMapper.driverName, string(nodeName))
attachID := getAttachmentName(csiMapper.volumeID, string(csiMapper.driverName), string(nodeName))
attachment := makeTestAttachment(attachID, nodeName, pvName)
attachment.Status.Attached = true
_, err = csiMapper.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
@ -179,6 +315,16 @@ func TestBlockMapperMapDevice(t *testing.T) {
t.Fatalf("mapper failed to GetGlobalMapPath: %v", err)
}
// The actual SetUpDevice should create a symlink to, or a bind mount of, the device in devicePath.
// Create dummy file there before calling MapDevice to test it properly.
fd, err := os.Create(devicePath)
if err != nil {
t.Fatalf("mapper failed to create dummy file in devicePath: %v", err)
}
if err := fd.Close(); err != nil {
t.Fatalf("mapper failed to close dummy file in devicePath: %v", err)
}
// Map device to global and pod device map path
volumeMapPath, volName := csiMapper.GetPodDeviceMapPath()
err = csiMapper.MapDevice(devicePath, globalMapPath, volumeMapPath, volName, csiMapper.podUID)
@ -186,33 +332,48 @@ func TestBlockMapperMapDevice(t *testing.T) {
t.Fatalf("mapper failed to GetGlobalMapPath: %v", err)
}
if _, err := os.Stat(filepath.Join(volumeMapPath, volName)); err != nil {
// Check if symlink {globalMapPath}/{podUID} exists
globalMapFilePath := filepath.Join(globalMapPath, string(csiMapper.podUID))
if _, err := os.Stat(globalMapFilePath); err != nil {
if os.IsNotExist(err) {
t.Errorf("mapper.MapDevice failed, volume path not created: %s", volumeMapPath)
t.Errorf("mapper.MapDevice failed, symlink in globalMapPath not created: %v", err)
t.Errorf("mapper.MapDevice devicePath:%v, globalMapPath: %v, globalMapFilePath: %v",
devicePath, globalMapPath, globalMapFilePath)
} else {
t.Errorf("mapper.MapDevice failed: %v", err)
}
}
pubs := csiMapper.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodePublishedVolumes()
if pubs[csiMapper.volumeID] != volumeMapPath {
t.Error("csi server may not have received NodePublishVolume call")
// Check if symlink {volumeMapPath}/{volName} exists
volumeMapFilePath := filepath.Join(volumeMapPath, volName)
if _, err := os.Stat(volumeMapFilePath); err != nil {
if os.IsNotExist(err) {
t.Errorf("mapper.MapDevice failed, symlink in volumeMapPath not created: %v", err)
} else {
t.Errorf("mapper.MapDevice failed: %v", err)
}
}
}
func TestBlockMapperTearDownDevice(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
fakeClient := fakeclient.NewSimpleClientset()
host := volumetest.NewFakeVolumeHostWithNodeName(
host := volumetest.NewFakeVolumeHostWithCSINodeName(
tmpDir,
fakeClient,
nil,
nil,
"fakeNode",
)
plug.host = host
pv := makeTestPV("test-pv", 10, testDriver, testVol)
spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
_, spec, pv, err := prepareBlockMapperTest(plug, "test-pv", t)
if err != nil {
t.Fatalf("Failed to make a new Mapper: %v", err)
}
// save volume data
dir := getVolumeDeviceDataDir(pv.ObjectMeta.Name, plug.host)
@ -250,15 +411,15 @@ func TestBlockMapperTearDownDevice(t *testing.T) {
t.Fatal(err)
}
// ensure csi client call and node unstaged
vols := csiUnmapper.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodeStagedVolumes()
if _, ok := vols[csiUnmapper.volumeID]; ok {
t.Error("csi server may not have received NodeUnstageVolume call")
}
// ensure csi client call and node unpublished
pubs := csiUnmapper.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodePublishedVolumes()
if _, ok := pubs[csiUnmapper.volumeID]; ok {
t.Error("csi server may not have received NodeUnpublishVolume call")
}
// ensure csi client call and node unstaged
vols := csiUnmapper.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodeStagedVolumes()
if _, ok := vols[csiUnmapper.volumeID]; ok {
t.Error("csi server may not have received NodeUnstageVolume call")
}
}
@ -20,18 +20,26 @@ import (
"context"
"errors"
"fmt"
"io"
"net"
"time"
csipb "github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/golang/glog"
csipbv1 "github.com/container-storage-interface/spec/lib/go/csi"
"google.golang.org/grpc"
api "k8s.io/api/core/v1"
utilversion "k8s.io/apimachinery/pkg/util/version"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/features"
csipbv0 "k8s.io/kubernetes/pkg/volume/csi/csiv0"
)
type csiClient interface {
NodeGetInfo(ctx context.Context) (
nodeID string,
maxVolumePerNode int64,
accessibleTopology map[string]string,
err error)
NodePublishVolume(
ctx context.Context,
volumeid string,
@ -39,10 +47,11 @@ type csiClient interface {
stagingTargetPath string,
targetPath string,
accessMode api.PersistentVolumeAccessMode,
volumeInfo map[string]string,
volumeAttribs map[string]string,
nodePublishSecrets map[string]string,
publishContext map[string]string,
volumeContext map[string]string,
secrets map[string]string,
fsType string,
mountOptions []string,
) error
NodeUnpublishVolume(
ctx context.Context,
@ -55,24 +64,176 @@ type csiClient interface {
stagingTargetPath string,
fsType string,
accessMode api.PersistentVolumeAccessMode,
nodeStageSecrets map[string]string,
volumeAttribs map[string]string,
secrets map[string]string,
volumeContext map[string]string,
) error
NodeUnstageVolume(ctx context.Context, volID, stagingTargetPath string) error
NodeGetCapabilities(ctx context.Context) ([]*csipb.NodeServiceCapability, error)
NodeSupportsStageUnstage(ctx context.Context) (bool, error)
}
// Strongly typed address
type csiAddr string
// Strongly typed driver name
type csiDriverName string
// csiClient encapsulates all csi-plugin methods
type csiDriverClient struct {
driverName string
nodeClient csipb.NodeClient
driverName csiDriverName
addr csiAddr
nodeV1ClientCreator nodeV1ClientCreator
nodeV0ClientCreator nodeV0ClientCreator
}
var _ csiClient = &csiDriverClient{}
func newCsiDriverClient(driverName string) *csiDriverClient {
c := &csiDriverClient{driverName: driverName}
return c
type nodeV1ClientCreator func(addr csiAddr) (
nodeClient csipbv1.NodeClient,
closer io.Closer,
err error,
)
type nodeV0ClientCreator func(addr csiAddr) (
nodeClient csipbv0.NodeClient,
closer io.Closer,
err error,
)
// newV1NodeClient creates a new NodeClient with the internally used gRPC
// connection set up. It also returns a closer which must be called to close
// the gRPC connection when the NodeClient is no longer needed.
// This is the default implementation for the nodeV1ClientCreator, used in
// newCsiDriverClient.
func newV1NodeClient(addr csiAddr) (nodeClient csipbv1.NodeClient, closer io.Closer, err error) {
var conn *grpc.ClientConn
conn, err = newGrpcConn(addr)
if err != nil {
return nil, nil, err
}
nodeClient = csipbv1.NewNodeClient(conn)
return nodeClient, conn, nil
}
// newV0NodeClient creates a new NodeClient with the internally used gRPC
// connection set up. It also returns a closer which must be called to close
// the gRPC connection when the NodeClient is no longer needed.
// This is the default implementation for the nodeV0ClientCreator, used in
// newCsiDriverClient.
func newV0NodeClient(addr csiAddr) (nodeClient csipbv0.NodeClient, closer io.Closer, err error) {
var conn *grpc.ClientConn
conn, err = newGrpcConn(addr)
if err != nil {
return nil, nil, err
}
nodeClient = csipbv0.NewNodeClient(conn)
return nodeClient, conn, nil
}
func newCsiDriverClient(driverName csiDriverName) (*csiDriverClient, error) {
if driverName == "" {
return nil, fmt.Errorf("driver name is empty")
}
addr := fmt.Sprintf(csiAddrTemplate, driverName)
requiresV0Client := true
if utilfeature.DefaultFeatureGate.Enabled(features.KubeletPluginsWatcher) {
var existingDriver csiDriver
driverExists := false
func() {
csiDrivers.RLock()
defer csiDrivers.RUnlock()
existingDriver, driverExists = csiDrivers.driversMap[string(driverName)]
}()
if !driverExists {
return nil, fmt.Errorf("driver name %s not found in the list of registered CSI drivers", driverName)
}
addr = existingDriver.driverEndpoint
requiresV0Client = versionRequiresV0Client(existingDriver.highestSupportedVersion)
}
nodeV1ClientCreator := newV1NodeClient
nodeV0ClientCreator := newV0NodeClient
if requiresV0Client {
nodeV1ClientCreator = nil
} else {
nodeV0ClientCreator = nil
}
return &csiDriverClient{
driverName: driverName,
addr: csiAddr(addr),
nodeV1ClientCreator: nodeV1ClientCreator,
nodeV0ClientCreator: nodeV0ClientCreator,
}, nil
}
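A usage sketch (hypothetical caller; only newCsiDriverClient and csiDriverName come from this file): the v0/v1 split is decided once in the constructor and hidden from callers.
// Hypothetical caller of the constructor above.
func nodeIDForDriver(ctx context.Context, name csiDriverName) (string, error) {
	client, err := newCsiDriverClient(name)
	if err != nil {
		// Empty name, or driver never registered via the plugin watcher.
		return "", err
	}
	// Dispatches to nodeGetInfoV1 or nodeGetInfoV0 as selected above.
	nodeID, _, _, err := client.NodeGetInfo(ctx)
	return nodeID, err
}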
func (c *csiDriverClient) NodeGetInfo(ctx context.Context) (
nodeID string,
maxVolumePerNode int64,
accessibleTopology map[string]string,
err error) {
klog.V(4).Info(log("calling NodeGetInfo rpc"))
if c.nodeV1ClientCreator != nil {
return c.nodeGetInfoV1(ctx)
} else if c.nodeV0ClientCreator != nil {
return c.nodeGetInfoV0(ctx)
}
err = fmt.Errorf("failed to call NodeGetInfo. Both nodeV1ClientCreator and nodeV0ClientCreator are nil")
return nodeID, maxVolumePerNode, accessibleTopology, err
}
func (c *csiDriverClient) nodeGetInfoV1(ctx context.Context) (
nodeID string,
maxVolumePerNode int64,
accessibleTopology map[string]string,
err error) {
nodeClient, closer, err := c.nodeV1ClientCreator(c.addr)
if err != nil {
return "", 0, nil, err
}
defer closer.Close()
res, err := nodeClient.NodeGetInfo(ctx, &csipbv1.NodeGetInfoRequest{})
if err != nil {
return "", 0, nil, err
}
topology := res.GetAccessibleTopology()
if topology != nil {
accessibleTopology = topology.Segments
}
return res.GetNodeId(), res.GetMaxVolumesPerNode(), accessibleTopology, nil
}
func (c *csiDriverClient) nodeGetInfoV0(ctx context.Context) (
nodeID string,
maxVolumePerNode int64,
accessibleTopology map[string]string,
err error) {
nodeClient, closer, err := c.nodeV0ClientCreator(c.addr)
if err != nil {
return "", 0, nil, err
}
defer closer.Close()
res, err := nodeClient.NodeGetInfo(ctx, &csipbv0.NodeGetInfoRequest{})
if err != nil {
return "", 0, nil, err
}
topology := res.GetAccessibleTopology()
if topology != nil {
accessibleTopology = topology.Segments
}
return res.GetNodeId(), res.GetMaxVolumesPerNode(), accessibleTopology, nil
}
func (c *csiDriverClient) NodePublishVolume(
@ -82,36 +243,82 @@ func (c *csiDriverClient) NodePublishVolume(
stagingTargetPath string,
targetPath string,
accessMode api.PersistentVolumeAccessMode,
volumeInfo map[string]string,
volumeAttribs map[string]string,
nodePublishSecrets map[string]string,
publishContext map[string]string,
volumeContext map[string]string,
secrets map[string]string,
fsType string,
mountOptions []string,
) error {
glog.V(4).Info(log("calling NodePublishVolume rpc [volid=%s,target_path=%s]", volID, targetPath))
klog.V(4).Info(log("calling NodePublishVolume rpc [volid=%s,target_path=%s]", volID, targetPath))
if volID == "" {
return errors.New("missing volume id")
}
if targetPath == "" {
return errors.New("missing target path")
}
if c.nodeV1ClientCreator != nil {
return c.nodePublishVolumeV1(
ctx,
volID,
readOnly,
stagingTargetPath,
targetPath,
accessMode,
publishContext,
volumeContext,
secrets,
fsType,
mountOptions,
)
} else if c.nodeV0ClientCreator != nil {
return c.nodePublishVolumeV0(
ctx,
volID,
readOnly,
stagingTargetPath,
targetPath,
accessMode,
publishContext,
volumeContext,
secrets,
fsType,
mountOptions,
)
}
conn, err := newGrpcConn(c.driverName)
return fmt.Errorf("failed to call NodePublishVolume. Both nodeV1ClientCreator and nodeV0ClientCreator are nil")
}
func (c *csiDriverClient) nodePublishVolumeV1(
ctx context.Context,
volID string,
readOnly bool,
stagingTargetPath string,
targetPath string,
accessMode api.PersistentVolumeAccessMode,
publishContext map[string]string,
volumeContext map[string]string,
secrets map[string]string,
fsType string,
mountOptions []string,
) error {
nodeClient, closer, err := c.nodeV1ClientCreator(c.addr)
if err != nil {
return err
}
defer conn.Close()
nodeClient := csipb.NewNodeClient(conn)
defer closer.Close()
req := &csipb.NodePublishVolumeRequest{
VolumeId: volID,
TargetPath: targetPath,
Readonly: readOnly,
PublishInfo: volumeInfo,
VolumeAttributes: volumeAttribs,
NodePublishSecrets: nodePublishSecrets,
VolumeCapability: &csipb.VolumeCapability{
AccessMode: &csipb.VolumeCapability_AccessMode{
Mode: asCSIAccessMode(accessMode),
req := &csipbv1.NodePublishVolumeRequest{
VolumeId: volID,
TargetPath: targetPath,
Readonly: readOnly,
PublishContext: publishContext,
VolumeContext: volumeContext,
Secrets: secrets,
VolumeCapability: &csipbv1.VolumeCapability{
AccessMode: &csipbv1.VolumeCapability_AccessMode{
Mode: asCSIAccessModeV1(accessMode),
},
},
}
@ -120,13 +327,67 @@ func (c *csiDriverClient) NodePublishVolume(
}
if fsType == fsTypeBlockName {
req.VolumeCapability.AccessType = &csipb.VolumeCapability_Block{
Block: &csipb.VolumeCapability_BlockVolume{},
req.VolumeCapability.AccessType = &csipbv1.VolumeCapability_Block{
Block: &csipbv1.VolumeCapability_BlockVolume{},
}
} else {
req.VolumeCapability.AccessType = &csipb.VolumeCapability_Mount{
Mount: &csipb.VolumeCapability_MountVolume{
FsType: fsType,
req.VolumeCapability.AccessType = &csipbv1.VolumeCapability_Mount{
Mount: &csipbv1.VolumeCapability_MountVolume{
FsType: fsType,
MountFlags: mountOptions,
},
}
}
_, err = nodeClient.NodePublishVolume(ctx, req)
return err
}
func (c *csiDriverClient) nodePublishVolumeV0(
ctx context.Context,
volID string,
readOnly bool,
stagingTargetPath string,
targetPath string,
accessMode api.PersistentVolumeAccessMode,
publishContext map[string]string,
volumeContext map[string]string,
secrets map[string]string,
fsType string,
mountOptions []string,
) error {
nodeClient, closer, err := c.nodeV0ClientCreator(c.addr)
if err != nil {
return err
}
defer closer.Close()
req := &csipbv0.NodePublishVolumeRequest{
VolumeId: volID,
TargetPath: targetPath,
Readonly: readOnly,
PublishInfo: publishContext,
VolumeAttributes: volumeContext,
NodePublishSecrets: secrets,
VolumeCapability: &csipbv0.VolumeCapability{
AccessMode: &csipbv0.VolumeCapability_AccessMode{
Mode: asCSIAccessModeV0(accessMode),
},
},
}
if stagingTargetPath != "" {
req.StagingTargetPath = stagingTargetPath
}
if fsType == fsTypeBlockName {
req.VolumeCapability.AccessType = &csipbv0.VolumeCapability_Block{
Block: &csipbv0.VolumeCapability_BlockVolume{},
}
} else {
req.VolumeCapability.AccessType = &csipbv0.VolumeCapability_Mount{
Mount: &csipbv0.VolumeCapability_MountVolume{
FsType: fsType,
MountFlags: mountOptions,
},
}
}
@ -136,7 +397,7 @@ func (c *csiDriverClient) NodePublishVolume(
}
func (c *csiDriverClient) NodeUnpublishVolume(ctx context.Context, volID string, targetPath string) error {
glog.V(4).Info(log("calling NodeUnpublishVolume rpc: [volid=%s, target_path=%s", volID, targetPath))
klog.V(4).Info(log("calling NodeUnpublishVolume rpc: [volid=%s, target_path=%s", volID, targetPath))
if volID == "" {
return errors.New("missing volume id")
}
@ -144,14 +405,39 @@ func (c *csiDriverClient) NodeUnpublishVolume(ctx context.Context, volID string,
return errors.New("missing target path")
}
conn, err := newGrpcConn(c.driverName)
if c.nodeV1ClientCreator != nil {
return c.nodeUnpublishVolumeV1(ctx, volID, targetPath)
} else if c.nodeV0ClientCreator != nil {
return c.nodeUnpublishVolumeV0(ctx, volID, targetPath)
}
return fmt.Errorf("failed to call NodeUnpublishVolume. Both nodeV1ClientCreator and nodeV0ClientCreator are nil")
}
func (c *csiDriverClient) nodeUnpublishVolumeV1(ctx context.Context, volID string, targetPath string) error {
nodeClient, closer, err := c.nodeV1ClientCreator(c.addr)
if err != nil {
return err
}
defer conn.Close()
nodeClient := csipb.NewNodeClient(conn)
defer closer.Close()
req := &csipb.NodeUnpublishVolumeRequest{
req := &csipbv1.NodeUnpublishVolumeRequest{
VolumeId: volID,
TargetPath: targetPath,
}
_, err = nodeClient.NodeUnpublishVolume(ctx, req)
return err
}
func (c *csiDriverClient) nodeUnpublishVolumeV0(ctx context.Context, volID string, targetPath string) error {
nodeClient, closer, err := c.nodeV0ClientCreator(c.addr)
if err != nil {
return err
}
defer closer.Close()
req := &csipbv0.NodeUnpublishVolumeRequest{
VolumeId: volID,
TargetPath: targetPath,
}
@ -162,14 +448,14 @@ func (c *csiDriverClient) NodeUnpublishVolume(ctx context.Context, volID string,
func (c *csiDriverClient) NodeStageVolume(ctx context.Context,
volID string,
publishInfo map[string]string,
publishContext map[string]string,
stagingTargetPath string,
fsType string,
accessMode api.PersistentVolumeAccessMode,
nodeStageSecrets map[string]string,
volumeAttribs map[string]string,
secrets map[string]string,
volumeContext map[string]string,
) error {
glog.V(4).Info(log("calling NodeStageVolume rpc [volid=%s,staging_target_path=%s]", volID, stagingTargetPath))
klog.V(4).Info(log("calling NodeStageVolume rpc [volid=%s,staging_target_path=%s]", volID, stagingTargetPath))
if volID == "" {
return errors.New("missing volume id")
}
@ -177,33 +463,96 @@ func (c *csiDriverClient) NodeStageVolume(ctx context.Context,
return errors.New("missing staging target path")
}
conn, err := newGrpcConn(c.driverName)
if c.nodeV1ClientCreator != nil {
return c.nodeStageVolumeV1(ctx, volID, publishContext, stagingTargetPath, fsType, accessMode, secrets, volumeContext)
} else if c.nodeV0ClientCreator != nil {
return c.nodeStageVolumeV0(ctx, volID, publishContext, stagingTargetPath, fsType, accessMode, secrets, volumeContext)
}
return fmt.Errorf("failed to call NodeStageVolume. Both nodeV1ClientCreator and nodeV0ClientCreator are nil")
}
func (c *csiDriverClient) nodeStageVolumeV1(
ctx context.Context,
volID string,
publishContext map[string]string,
stagingTargetPath string,
fsType string,
accessMode api.PersistentVolumeAccessMode,
secrets map[string]string,
volumeContext map[string]string,
) error {
nodeClient, closer, err := c.nodeV1ClientCreator(c.addr)
if err != nil {
return err
}
defer conn.Close()
nodeClient := csipb.NewNodeClient(conn)
defer closer.Close()
req := &csipb.NodeStageVolumeRequest{
req := &csipbv1.NodeStageVolumeRequest{
VolumeId: volID,
PublishInfo: publishInfo,
PublishContext: publishContext,
StagingTargetPath: stagingTargetPath,
VolumeCapability: &csipb.VolumeCapability{
AccessMode: &csipb.VolumeCapability_AccessMode{
Mode: asCSIAccessMode(accessMode),
VolumeCapability: &csipbv1.VolumeCapability{
AccessMode: &csipbv1.VolumeCapability_AccessMode{
Mode: asCSIAccessModeV1(accessMode),
},
},
NodeStageSecrets: nodeStageSecrets,
VolumeAttributes: volumeAttribs,
Secrets: secrets,
VolumeContext: volumeContext,
}
if fsType == fsTypeBlockName {
req.VolumeCapability.AccessType = &csipb.VolumeCapability_Block{
Block: &csipb.VolumeCapability_BlockVolume{},
req.VolumeCapability.AccessType = &csipbv1.VolumeCapability_Block{
Block: &csipbv1.VolumeCapability_BlockVolume{},
}
} else {
req.VolumeCapability.AccessType = &csipb.VolumeCapability_Mount{
Mount: &csipb.VolumeCapability_MountVolume{
req.VolumeCapability.AccessType = &csipbv1.VolumeCapability_Mount{
Mount: &csipbv1.VolumeCapability_MountVolume{
FsType: fsType,
},
}
}
_, err = nodeClient.NodeStageVolume(ctx, req)
return err
}
func (c *csiDriverClient) nodeStageVolumeV0(
ctx context.Context,
volID string,
publishContext map[string]string,
stagingTargetPath string,
fsType string,
accessMode api.PersistentVolumeAccessMode,
secrets map[string]string,
volumeContext map[string]string,
) error {
nodeClient, closer, err := c.nodeV0ClientCreator(c.addr)
if err != nil {
return err
}
defer closer.Close()
req := &csipbv0.NodeStageVolumeRequest{
VolumeId: volID,
PublishInfo: publishContext,
StagingTargetPath: stagingTargetPath,
VolumeCapability: &csipbv0.VolumeCapability{
AccessMode: &csipbv0.VolumeCapability_AccessMode{
Mode: asCSIAccessModeV0(accessMode),
},
},
NodeStageSecrets: secrets,
VolumeAttributes: volumeContext,
}
if fsType == fsTypeBlockName {
req.VolumeCapability.AccessType = &csipbv0.VolumeCapability_Block{
Block: &csipbv0.VolumeCapability_BlockVolume{},
}
} else {
req.VolumeCapability.AccessType = &csipbv0.VolumeCapability_Mount{
Mount: &csipbv0.VolumeCapability_MountVolume{
FsType: fsType,
},
}
@ -214,7 +563,7 @@ func (c *csiDriverClient) NodeStageVolume(ctx context.Context,
}
func (c *csiDriverClient) NodeUnstageVolume(ctx context.Context, volID, stagingTargetPath string) error {
glog.V(4).Info(log("calling NodeUnstageVolume rpc [volid=%s,staging_target_path=%s]", volID, stagingTargetPath))
klog.V(4).Info(log("calling NodeUnstageVolume rpc [volid=%s,staging_target_path=%s]", volID, stagingTargetPath))
if volID == "" {
return errors.New("missing volume id")
}
@ -222,14 +571,23 @@ func (c *csiDriverClient) NodeUnstageVolume(ctx context.Context, volID, stagingT
return errors.New("missing staging target path")
}
conn, err := newGrpcConn(c.driverName)
if c.nodeV1ClientCreator != nil {
return c.nodeUnstageVolumeV1(ctx, volID, stagingTargetPath)
} else if c.nodeV0ClientCreator != nil {
return c.nodeUnstageVolumeV0(ctx, volID, stagingTargetPath)
}
return fmt.Errorf("failed to call NodeUnstageVolume. Both nodeV1ClientCreator and nodeV0ClientCreator are nil")
}
func (c *csiDriverClient) nodeUnstageVolumeV1(ctx context.Context, volID, stagingTargetPath string) error {
nodeClient, closer, err := c.nodeV1ClientCreator(c.addr)
if err != nil {
return err
}
defer conn.Close()
nodeClient := csipb.NewNodeClient(conn)
defer closer.Close()
req := &csipb.NodeUnstageVolumeRequest{
req := &csipbv1.NodeUnstageVolumeRequest{
VolumeId: volID,
StagingTargetPath: stagingTargetPath,
}
@ -237,57 +595,128 @@ func (c *csiDriverClient) NodeUnstageVolume(ctx context.Context, volID, stagingT
return err
}
func (c *csiDriverClient) NodeGetCapabilities(ctx context.Context) ([]*csipb.NodeServiceCapability, error) {
glog.V(4).Info(log("calling NodeGetCapabilities rpc"))
conn, err := newGrpcConn(c.driverName)
func (c *csiDriverClient) nodeUnstageVolumeV0(ctx context.Context, volID, stagingTargetPath string) error {
nodeClient, closer, err := c.nodeV0ClientCreator(c.addr)
if err != nil {
return nil, err
return err
}
defer conn.Close()
nodeClient := csipb.NewNodeClient(conn)
defer closer.Close()
req := &csipb.NodeGetCapabilitiesRequest{}
req := &csipbv0.NodeUnstageVolumeRequest{
VolumeId: volID,
StagingTargetPath: stagingTargetPath,
}
_, err = nodeClient.NodeUnstageVolume(ctx, req)
return err
}
func (c *csiDriverClient) NodeSupportsStageUnstage(ctx context.Context) (bool, error) {
klog.V(4).Info(log("calling NodeGetCapabilities rpc to determine if NodeSupportsStageUnstage"))
if c.nodeV1ClientCreator != nil {
return c.nodeSupportsStageUnstageV1(ctx)
} else if c.nodeV0ClientCreator != nil {
return c.nodeSupportsStageUnstageV0(ctx)
}
return false, fmt.Errorf("failed to call NodeSupportsStageUnstage. Both nodeV1ClientCreator and nodeV0ClientCreator are nil")
}
func (c *csiDriverClient) nodeSupportsStageUnstageV1(ctx context.Context) (bool, error) {
nodeClient, closer, err := c.nodeV1ClientCreator(c.addr)
if err != nil {
return false, err
}
defer closer.Close()
req := &csipbv1.NodeGetCapabilitiesRequest{}
resp, err := nodeClient.NodeGetCapabilities(ctx, req)
if err != nil {
return nil, err
return false, err
}
return resp.GetCapabilities(), nil
capabilities := resp.GetCapabilities()
stageUnstageSet := false
if capabilities == nil {
return false, nil
}
for _, capability := range capabilities {
if capability.GetRpc().GetType() == csipbv1.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME {
stageUnstageSet = true
}
}
return stageUnstageSet, nil
}
func asCSIAccessMode(am api.PersistentVolumeAccessMode) csipb.VolumeCapability_AccessMode_Mode {
func (c *csiDriverClient) nodeSupportsStageUnstageV0(ctx context.Context) (bool, error) {
nodeClient, closer, err := c.nodeV0ClientCreator(c.addr)
if err != nil {
return false, err
}
defer closer.Close()
req := &csipbv0.NodeGetCapabilitiesRequest{}
resp, err := nodeClient.NodeGetCapabilities(ctx, req)
if err != nil {
return false, err
}
capabilities := resp.GetCapabilities()
stageUnstageSet := false
if capabilities == nil {
return false, nil
}
for _, capability := range capabilities {
if capability.GetRpc().GetType() == csipbv0.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME {
stageUnstageSet = true
}
}
return stageUnstageSet, nil
}
func asCSIAccessModeV1(am api.PersistentVolumeAccessMode) csipbv1.VolumeCapability_AccessMode_Mode {
switch am {
case api.ReadWriteOnce:
return csipb.VolumeCapability_AccessMode_SINGLE_NODE_WRITER
return csipbv1.VolumeCapability_AccessMode_SINGLE_NODE_WRITER
case api.ReadOnlyMany:
return csipb.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY
return csipbv1.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY
case api.ReadWriteMany:
return csipb.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER
return csipbv1.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER
}
return csipb.VolumeCapability_AccessMode_UNKNOWN
return csipbv1.VolumeCapability_AccessMode_UNKNOWN
}
func newGrpcConn(driverName string) (*grpc.ClientConn, error) {
if driverName == "" {
return nil, fmt.Errorf("driver name is empty")
}
addr := fmt.Sprintf(csiAddrTemplate, driverName)
// TODO once KubeletPluginsWatcher graduates to beta, remove FeatureGate check
if utilfeature.DefaultFeatureGate.Enabled(features.KubeletPluginsWatcher) {
driver, ok := csiDrivers.driversMap[driverName]
if !ok {
return nil, fmt.Errorf("driver name %s not found in the list of registered CSI drivers", driverName)
}
addr = driver.driverEndpoint
func asCSIAccessModeV0(am api.PersistentVolumeAccessMode) csipbv0.VolumeCapability_AccessMode_Mode {
switch am {
case api.ReadWriteOnce:
return csipbv0.VolumeCapability_AccessMode_SINGLE_NODE_WRITER
case api.ReadOnlyMany:
return csipbv0.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY
case api.ReadWriteMany:
return csipbv0.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER
}
return csipbv0.VolumeCapability_AccessMode_UNKNOWN
}
func newGrpcConn(addr csiAddr) (*grpc.ClientConn, error) {
network := "unix"
glog.V(4).Infof(log("creating new gRPC connection for [%s://%s]", network, addr))
klog.V(4).Infof(log("creating new gRPC connection for [%s://%s]", network, addr))
return grpc.Dial(
addr,
string(addr),
grpc.WithInsecure(),
grpc.WithDialer(func(target string, timeout time.Duration) (net.Conn, error) {
return net.Dial(network, target)
}),
)
}
func versionRequiresV0Client(version *utilversion.Version) bool {
if version != nil && version.Major() == 0 {
return true
}
return false
}
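For illustration (values assumed): a driver that registered with a 0.x highest supported version is served by the v0 client, while 1.x, or a nil version from the legacy registration path, gets the v1 client.
// Illustrative check of the predicate above.
func exampleVersionGate() {
	v0, _ := utilversion.ParseSemantic("0.3.0")
	v1, _ := utilversion.ParseSemantic("1.0.0")
	fmt.Println(versionRequiresV0Client(v0))  // true -> v0 client
	fmt.Println(versionRequiresV0Client(v1))  // false -> v1 client
	fmt.Println(versionRequiresV0Client(nil)) // false -> v1 client
}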
@ -19,9 +19,11 @@ package csi
import (
"context"
"errors"
"io"
"reflect"
"testing"
csipb "github.com/container-storage-interface/spec/lib/go/csi/v0"
csipbv1 "github.com/container-storage-interface/spec/lib/go/csi"
api "k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/volume/csi/fake"
)
@ -38,6 +40,19 @@ func newFakeCsiDriverClient(t *testing.T, stagingCapable bool) *fakeCsiDriverCli
}
}
func (c *fakeCsiDriverClient) NodeGetInfo(ctx context.Context) (
nodeID string,
maxVolumePerNode int64,
accessibleTopology map[string]string,
err error) {
resp, err := c.nodeClient.NodeGetInfo(ctx, &csipbv1.NodeGetInfoRequest{})
topology := resp.GetAccessibleTopology()
if topology != nil {
accessibleTopology = topology.Segments
}
return resp.GetNodeId(), resp.GetMaxVolumesPerNode(), accessibleTopology, err
}
func (c *fakeCsiDriverClient) NodePublishVolume(
ctx context.Context,
volID string,
@ -45,26 +60,28 @@ func (c *fakeCsiDriverClient) NodePublishVolume(
stagingTargetPath string,
targetPath string,
accessMode api.PersistentVolumeAccessMode,
volumeInfo map[string]string,
volumeAttribs map[string]string,
nodePublishSecrets map[string]string,
publishContext map[string]string,
volumeContext map[string]string,
secrets map[string]string,
fsType string,
mountOptions []string,
) error {
c.t.Log("calling fake.NodePublishVolume...")
req := &csipb.NodePublishVolumeRequest{
VolumeId: volID,
TargetPath: targetPath,
Readonly: readOnly,
PublishInfo: volumeInfo,
VolumeAttributes: volumeAttribs,
NodePublishSecrets: nodePublishSecrets,
VolumeCapability: &csipb.VolumeCapability{
AccessMode: &csipb.VolumeCapability_AccessMode{
Mode: asCSIAccessMode(accessMode),
req := &csipbv1.NodePublishVolumeRequest{
VolumeId: volID,
TargetPath: targetPath,
Readonly: readOnly,
PublishContext: publishContext,
VolumeContext: volumeContext,
Secrets: secrets,
VolumeCapability: &csipbv1.VolumeCapability{
AccessMode: &csipbv1.VolumeCapability_AccessMode{
Mode: asCSIAccessModeV1(accessMode),
},
AccessType: &csipb.VolumeCapability_Mount{
Mount: &csipb.VolumeCapability_MountVolume{
FsType: fsType,
AccessType: &csipbv1.VolumeCapability_Mount{
Mount: &csipbv1.VolumeCapability_MountVolume{
FsType: fsType,
MountFlags: mountOptions,
},
},
},
@ -76,7 +93,7 @@ func (c *fakeCsiDriverClient) NodePublishVolume(
func (c *fakeCsiDriverClient) NodeUnpublishVolume(ctx context.Context, volID string, targetPath string) error {
c.t.Log("calling fake.NodeUnpublishVolume...")
req := &csipb.NodeUnpublishVolumeRequest{
req := &csipbv1.NodeUnpublishVolumeRequest{
VolumeId: volID,
TargetPath: targetPath,
}
@ -87,30 +104,30 @@ func (c *fakeCsiDriverClient) NodeUnpublishVolume(ctx context.Context, volID str
func (c *fakeCsiDriverClient) NodeStageVolume(ctx context.Context,
volID string,
publishInfo map[string]string,
publishContext map[string]string,
stagingTargetPath string,
fsType string,
accessMode api.PersistentVolumeAccessMode,
nodeStageSecrets map[string]string,
volumeAttribs map[string]string,
secrets map[string]string,
volumeContext map[string]string,
) error {
c.t.Log("calling fake.NodeStageVolume...")
req := &csipb.NodeStageVolumeRequest{
req := &csipbv1.NodeStageVolumeRequest{
VolumeId: volID,
PublishInfo: publishInfo,
PublishContext: publishContext,
StagingTargetPath: stagingTargetPath,
VolumeCapability: &csipb.VolumeCapability{
AccessMode: &csipb.VolumeCapability_AccessMode{
Mode: asCSIAccessMode(accessMode),
VolumeCapability: &csipbv1.VolumeCapability{
AccessMode: &csipbv1.VolumeCapability_AccessMode{
Mode: asCSIAccessModeV1(accessMode),
},
AccessType: &csipb.VolumeCapability_Mount{
Mount: &csipb.VolumeCapability_MountVolume{
AccessType: &csipbv1.VolumeCapability_Mount{
Mount: &csipbv1.VolumeCapability_MountVolume{
FsType: fsType,
},
},
},
NodeStageSecrets: nodeStageSecrets,
VolumeAttributes: volumeAttribs,
Secrets: secrets,
VolumeContext: volumeContext,
}
_, err := c.nodeClient.NodeStageVolume(ctx, req)
@ -119,7 +136,7 @@ func (c *fakeCsiDriverClient) NodeStageVolume(ctx context.Context,
func (c *fakeCsiDriverClient) NodeUnstageVolume(ctx context.Context, volID, stagingTargetPath string) error {
c.t.Log("calling fake.NodeUnstageVolume...")
req := &csipb.NodeUnstageVolumeRequest{
req := &csipbv1.NodeUnstageVolumeRequest{
VolumeId: volID,
StagingTargetPath: stagingTargetPath,
}
@ -127,20 +144,109 @@ func (c *fakeCsiDriverClient) NodeUnstageVolume(ctx context.Context, volID, stag
return err
}
func (c *fakeCsiDriverClient) NodeGetCapabilities(ctx context.Context) ([]*csipb.NodeServiceCapability, error) {
c.t.Log("calling fake.NodeGetCapabilities...")
req := &csipb.NodeGetCapabilitiesRequest{}
func (c *fakeCsiDriverClient) NodeSupportsStageUnstage(ctx context.Context) (bool, error) {
c.t.Log("calling fake.NodeGetCapabilities for NodeSupportsStageUnstage...")
req := &csipbv1.NodeGetCapabilitiesRequest{}
resp, err := c.nodeClient.NodeGetCapabilities(ctx, req)
if err != nil {
return nil, err
return false, err
}
return resp.GetCapabilities(), nil
capabilities := resp.GetCapabilities()
stageUnstageSet := false
if capabilities == nil {
return false, nil
}
for _, capability := range capabilities {
if capability.GetRpc().GetType() == csipbv1.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME {
stageUnstageSet = true
}
}
return stageUnstageSet, nil
}
func setupClient(t *testing.T, stageUnstageSet bool) csiClient {
return newFakeCsiDriverClient(t, stageUnstageSet)
}
func checkErr(t *testing.T, expectedAnError bool, actualError error) {
t.Helper()
errOccurred := actualError != nil
if expectedAnError && !errOccurred {
t.Error("expected an error")
}
if !expectedAnError && errOccurred {
t.Errorf("expected no error, got: %v", actualError)
}
}
func TestClientNodeGetInfo(t *testing.T) {
testCases := []struct {
name string
expectedNodeID string
expectedMaxVolumePerNode int64
expectedAccessibleTopology map[string]string
mustFail bool
err error
}{
{
name: "test ok",
expectedNodeID: "node1",
expectedMaxVolumePerNode: 16,
expectedAccessibleTopology: map[string]string{"com.example.csi-topology/zone": "zone1"},
},
{
name: "grpc error",
mustFail: true,
err: errors.New("grpc error"),
},
}
for _, tc := range testCases {
t.Logf("test case: %s", tc.name)
fakeCloser := fake.NewCloser(t)
client := &csiDriverClient{
driverName: "Fake Driver Name",
nodeV1ClientCreator: func(addr csiAddr) (csipbv1.NodeClient, io.Closer, error) {
nodeClient := fake.NewNodeClient(false /* stagingCapable */)
nodeClient.SetNextError(tc.err)
nodeClient.SetNodeGetInfoResp(&csipbv1.NodeGetInfoResponse{
NodeId: tc.expectedNodeID,
MaxVolumesPerNode: tc.expectedMaxVolumePerNode,
AccessibleTopology: &csipbv1.Topology{
Segments: tc.expectedAccessibleTopology,
},
})
return nodeClient, fakeCloser, nil
},
}
nodeID, maxVolumePerNode, accessibleTopology, err := client.NodeGetInfo(context.Background())
checkErr(t, tc.mustFail, err)
if nodeID != tc.expectedNodeID {
t.Errorf("expected nodeID: %v; got: %v", tc.expectedNodeID, nodeID)
}
if maxVolumePerNode != tc.expectedMaxVolumePerNode {
t.Errorf("expected maxVolumePerNode: %v; got: %v", tc.expectedMaxVolumePerNode, maxVolumePerNode)
}
if !reflect.DeepEqual(accessibleTopology, tc.expectedAccessibleTopology) {
t.Errorf("expected accessibleTopology: %v; got: %v", tc.expectedAccessibleTopology, accessibleTopology)
}
if !tc.mustFail {
fakeCloser.Check()
}
}
}
func TestClientNodePublishVolume(t *testing.T) {
testCases := []struct {
name string
@ -157,11 +263,18 @@ func TestClientNodePublishVolume(t *testing.T) {
{name: "grpc error", volID: "vol-test", targetPath: "/test/path", mustFail: true, err: errors.New("grpc error")},
}
client := setupClient(t, false)
for _, tc := range testCases {
t.Logf("test case: %s", tc.name)
client.(*fakeCsiDriverClient).nodeClient.SetNextError(tc.err)
fakeCloser := fake.NewCloser(t)
client := &csiDriverClient{
driverName: "Fake Driver Name",
nodeV1ClientCreator: func(addr csiAddr) (csipbv1.NodeClient, io.Closer, error) {
nodeClient := fake.NewNodeClient(false /* stagingCapable */)
nodeClient.SetNextError(tc.err)
return nodeClient, fakeCloser, nil
},
}
err := client.NodePublishVolume(
context.Background(),
tc.volID,
@ -173,10 +286,12 @@ func TestClientNodePublishVolume(t *testing.T) {
map[string]string{"attr0": "val0"},
map[string]string{},
tc.fsType,
[]string{},
)
checkErr(t, tc.mustFail, err)
if tc.mustFail && err == nil {
t.Error("test must fail, but err is nil")
if !tc.mustFail {
fakeCloser.Check()
}
}
}
@ -195,14 +310,23 @@ func TestClientNodeUnpublishVolume(t *testing.T) {
{name: "grpc error", volID: "vol-test", targetPath: "/test/path", mustFail: true, err: errors.New("grpc error")},
}
client := setupClient(t, false)
for _, tc := range testCases {
t.Logf("test case: %s", tc.name)
client.(*fakeCsiDriverClient).nodeClient.SetNextError(tc.err)
fakeCloser := fake.NewCloser(t)
client := &csiDriverClient{
driverName: "Fake Driver Name",
nodeV1ClientCreator: func(addr csiAddr) (csipbv1.NodeClient, io.Closer, error) {
nodeClient := fake.NewNodeClient(false /* stagingCapable */)
nodeClient.SetNextError(tc.err)
return nodeClient, fakeCloser, nil
},
}
err := client.NodeUnpublishVolume(context.Background(), tc.volID, tc.targetPath)
if tc.mustFail && err == nil {
t.Error("test must fail, but err is nil")
checkErr(t, tc.mustFail, err)
if !tc.mustFail {
fakeCloser.Check()
}
}
}
@ -213,7 +337,7 @@ func TestClientNodeStageVolume(t *testing.T) {
volID string
stagingTargetPath string
fsType string
secret map[string]string
secrets map[string]string
mustFail bool
err error
}{
@ -224,11 +348,18 @@ func TestClientNodeStageVolume(t *testing.T) {
{name: "grpc error", volID: "vol-test", stagingTargetPath: "/test/path", mustFail: true, err: errors.New("grpc error")},
}
client := setupClient(t, false)
for _, tc := range testCases {
t.Logf("Running test case: %s", tc.name)
client.(*fakeCsiDriverClient).nodeClient.SetNextError(tc.err)
fakeCloser := fake.NewCloser(t)
client := &csiDriverClient{
driverName: "Fake Driver Name",
nodeV1ClientCreator: func(addr csiAddr) (csipbv1.NodeClient, io.Closer, error) {
nodeClient := fake.NewNodeClient(false /* stagingCapable */)
nodeClient.SetNextError(tc.err)
return nodeClient, fakeCloser, nil
},
}
err := client.NodeStageVolume(
context.Background(),
tc.volID,
@ -236,12 +367,13 @@ func TestClientNodeStageVolume(t *testing.T) {
tc.stagingTargetPath,
tc.fsType,
api.ReadWriteOnce,
tc.secret,
tc.secrets,
map[string]string{"attr0": "val0"},
)
checkErr(t, tc.mustFail, err)
if tc.mustFail && err == nil {
t.Error("test must fail, but err is nil")
if !tc.mustFail {
fakeCloser.Check()
}
}
}
@ -260,17 +392,26 @@ func TestClientNodeUnstageVolume(t *testing.T) {
{name: "grpc error", volID: "vol-test", stagingTargetPath: "/test/path", mustFail: true, err: errors.New("grpc error")},
}
client := setupClient(t, false)
for _, tc := range testCases {
t.Logf("Running test case: %s", tc.name)
client.(*fakeCsiDriverClient).nodeClient.SetNextError(tc.err)
fakeCloser := fake.NewCloser(t)
client := &csiDriverClient{
driverName: "Fake Driver Name",
nodeV1ClientCreator: func(addr csiAddr) (csipbv1.NodeClient, io.Closer, error) {
nodeClient := fake.NewNodeClient(false /* stagingCapable */)
nodeClient.SetNextError(tc.err)
return nodeClient, fakeCloser, nil
},
}
err := client.NodeUnstageVolume(
context.Background(),
tc.volID, tc.stagingTargetPath,
)
if tc.mustFail && err == nil {
t.Error("test must fail, but err is nil")
checkErr(t, tc.mustFail, err)
if !tc.mustFail {
fakeCloser.Check()
}
}
}
@ -23,12 +23,14 @@ import (
"os"
"path"
"github.com/golang/glog"
"k8s.io/klog"
api "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/features"
kstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
@ -49,21 +51,22 @@ var (
"nodeName",
"attachmentID",
}
currentPodInfoMountVersion = "v1"
)
type csiMountMgr struct {
csiClient csiClient
k8s kubernetes.Interface
plugin *csiPlugin
driverName string
volumeID string
specVolumeID string
readOnly bool
spec *volume.Spec
pod *api.Pod
podUID types.UID
options volume.VolumeOptions
volumeInfo map[string]string
csiClient csiClient
k8s kubernetes.Interface
plugin *csiPlugin
driverName csiDriverName
volumeID string
specVolumeID string
readOnly bool
spec *volume.Spec
pod *api.Pod
podUID types.UID
options volume.VolumeOptions
publishContext map[string]string
volume.MetricsNil
}
@ -72,7 +75,7 @@ var _ volume.Volume = &csiMountMgr{}
func (c *csiMountMgr) GetPath() string {
dir := path.Join(getTargetPath(c.podUID, c.specVolumeID, c.plugin.host), "/mount")
glog.V(4).Info(log("mounter.GetPath generated [%s]", dir))
klog.V(4).Info(log("mounter.GetPath generated [%s]", dir))
return dir
}
@ -93,61 +96,51 @@ func (c *csiMountMgr) SetUp(fsGroup *int64) error {
}
func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error {
glog.V(4).Infof(log("Mounter.SetUpAt(%s)", dir))
klog.V(4).Infof(log("Mounter.SetUpAt(%s)", dir))
mounted, err := isDirMounted(c.plugin, dir)
if err != nil {
glog.Error(log("mounter.SetUpAt failed while checking mount status for dir [%s]", dir))
klog.Error(log("mounter.SetUpAt failed while checking mount status for dir [%s]", dir))
return err
}
if mounted {
glog.V(4).Info(log("mounter.SetUpAt skipping mount, dir already mounted [%s]", dir))
klog.V(4).Info(log("mounter.SetUpAt skipping mount, dir already mounted [%s]", dir))
return nil
}
csiSource, err := getCSISourceFromSpec(c.spec)
if err != nil {
glog.Error(log("mounter.SetupAt failed to get CSI persistent source: %v", err))
klog.Error(log("mounter.SetupAt failed to get CSI persistent source: %v", err))
return err
}
csi := c.csiClient
nodeName := string(c.plugin.host.GetNodeName())
attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, nodeName)
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
defer cancel()
// Check for STAGE_UNSTAGE_VOLUME set and populate deviceMountPath if so
deviceMountPath := ""
stageUnstageSet, err := hasStageUnstageCapability(ctx, csi)
stageUnstageSet, err := csi.NodeSupportsStageUnstage(ctx)
if err != nil {
glog.Error(log("mounter.SetUpAt failed to check for STAGE_UNSTAGE_VOLUME capabilty: %v", err))
klog.Error(log("mounter.SetUpAt failed to check for STAGE_UNSTAGE_VOLUME capabilty: %v", err))
return err
}
if stageUnstageSet {
deviceMountPath, err = makeDeviceMountPath(c.plugin, c.spec)
if err != nil {
glog.Error(log("mounter.SetUpAt failed to make device mount path: %v", err))
klog.Error(log("mounter.SetUpAt failed to make device mount path: %v", err))
return err
}
}
// search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
if c.volumeInfo == nil {
attachment, err := c.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
if c.publishContext == nil {
nodeName := string(c.plugin.host.GetNodeName())
c.publishContext, err = c.plugin.getPublishContext(c.k8s, c.volumeID, string(c.driverName), nodeName)
if err != nil {
glog.Error(log("mounter.SetupAt failed while getting volume attachment [id=%v]: %v", attachID, err))
return err
}
if attachment == nil {
glog.Error(log("unable to find VolumeAttachment [id=%s]", attachID))
return errors.New("no existing VolumeAttachment found")
}
c.volumeInfo = attachment.Status.AttachmentMetadata
}
attribs := csiSource.VolumeAttributes
@ -163,10 +156,10 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error {
// create target_dir before call to NodePublish
if err := os.MkdirAll(dir, 0750); err != nil {
glog.Error(log("mouter.SetUpAt failed to create dir %#v: %v", dir, err))
klog.Error(log("mouter.SetUpAt failed to create dir %#v: %v", dir, err))
return err
}
glog.V(4).Info(log("created target path successfully [%s]", dir))
klog.V(4).Info(log("created target path successfully [%s]", dir))
//TODO (vladimirvivien) implement better AccessModes mapping between k8s and CSI
accessMode := api.ReadWriteOnce
@ -174,6 +167,22 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error {
accessMode = c.spec.PersistentVolume.Spec.AccessModes[0]
}
// Inject pod information into volume_attributes
podAttrs, err := c.podAttributes()
if err != nil {
klog.Error(log("mouter.SetUpAt failed to assemble volume attributes: %v", err))
return err
}
if podAttrs != nil {
if attribs == nil {
attribs = podAttrs
} else {
for k, v := range podAttrs {
attribs[k] = v
}
}
}
fsType := csiSource.FSType
err = csi.NodePublishVolume(
ctx,
@ -182,58 +191,85 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error {
deviceMountPath,
dir,
accessMode,
c.publishContext,
attribs,
nodePublishSecrets,
fsType,
c.spec.PersistentVolume.Spec.MountOptions,
)
if err != nil {
glog.Errorf(log("mounter.SetupAt failed: %v", err))
klog.Errorf(log("mounter.SetupAt failed: %v", err))
if removeMountDirErr := removeMountDir(c.plugin, dir); removeMountDirErr != nil {
glog.Error(log("mounter.SetupAt failed to remove mount dir after a NodePublish() error [%s]: %v", dir, removeMountDirErr))
klog.Error(log("mounter.SetupAt failed to remove mount dir after a NodePublish() error [%s]: %v", dir, removeMountDirErr))
}
return err
}
// apply volume ownership
// The following logic is derived from https://github.com/kubernetes/kubernetes/issues/66323
// if fstype is "", then skip fsgroup (could be indication of non-block filesystem)
// if fstype is provided and pv.AccessMode == ReadWriteOnce, then apply fsgroup
err = c.applyFSGroup(fsType, fsGroup)
if err != nil {
// attempt to rollback mount.
fsGrpErr := fmt.Errorf("applyFSGroup failed for vol %s: %v", c.volumeID, err)
if unpubErr := csi.NodeUnpublishVolume(ctx, c.volumeID, dir); unpubErr != nil {
klog.Error(log("NodeUnpublishVolume failed for [%s]: %v", c.volumeID, unpubErr))
return fsGrpErr
}
if unmountErr := removeMountDir(c.plugin, dir); unmountErr != nil {
klog.Error(log("removeMountDir failed for [%s]: %v", dir, unmountErr))
return fsGrpErr
}
return fsGrpErr
}
klog.V(4).Infof(log("mounter.SetUp successfully requested NodePublish [%s]", dir))
return nil
}
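The rollback sequence above is easy to get wrong: unpublish the volume first, then remove the mount dir, and in every branch surface the original fsGroup failure rather than the cleanup error. A minimal standalone sketch of that ordering, with illustrative function names that are not part of the plugin's API:

package main

import (
	"errors"
	"fmt"
)

// cleanupAfterSetupFailure mirrors the rollback order used in SetUpAt when
// applyFSGroup fails: unpublish, then remove the mount dir, and always
// return the root cause so the caller sees the original failure.
func cleanupAfterSetupFailure(unpublish, removeDir func() error, cause error) error {
	if err := unpublish(); err != nil {
		return cause // cleanup failed; the root cause is still what matters
	}
	if err := removeDir(); err != nil {
		return cause
	}
	return cause
}

func main() {
	ok := func() error { return nil }
	fmt.Println(cleanupAfterSetupFailure(ok, ok, errors.New("applyFSGroup failed for vol vol-123")))
}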
func (c *csiMountMgr) podAttributes() (map[string]string, error) {
if !utilfeature.DefaultFeatureGate.Enabled(features.CSIDriverRegistry) {
return nil, nil
}
if c.plugin.csiDriverLister == nil {
return nil, errors.New("CSIDriver lister does not exist")
}
csiDriver, err := c.plugin.csiDriverLister.Get(string(c.driverName))
if err != nil {
if apierrs.IsNotFound(err) {
klog.V(4).Infof(log("CSIDriver %q not found, not adding pod information", c.driverName))
return nil, nil
}
return nil, err
}
// if PodInfoOnMountVersion is not set or not v1 we do not set pod attributes
if csiDriver.Spec.PodInfoOnMountVersion == nil || *csiDriver.Spec.PodInfoOnMountVersion != currentPodInfoMountVersion {
klog.V(4).Infof(log("CSIDriver %q does not require pod information", c.driverName))
return nil, nil
}
attrs := map[string]string{
"csi.storage.k8s.io/pod.name": c.pod.Name,
"csi.storage.k8s.io/pod.namespace": c.pod.Namespace,
"csi.storage.k8s.io/pod.uid": string(c.pod.UID),
"csi.storage.k8s.io/serviceAccount.name": c.pod.Spec.ServiceAccountName,
}
klog.V(4).Infof(log("CSIDriver %q requires pod information", c.driverName))
return attrs, nil
}
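For reference, a self-contained sketch of the attribute map podAttributes produces when a CSIDriver object opts in to pod info on mount; the podInfo struct is a hypothetical stand-in for the fields read from api.Pod:

package main

import "fmt"

// podInfo stands in for the api.Pod fields that podAttributes reads.
type podInfo struct {
	Name, Namespace, UID, ServiceAccountName string
}

// buildPodAttrs reproduces the key layout used by podAttributes above.
func buildPodAttrs(p podInfo) map[string]string {
	return map[string]string{
		"csi.storage.k8s.io/pod.name":            p.Name,
		"csi.storage.k8s.io/pod.namespace":       p.Namespace,
		"csi.storage.k8s.io/pod.uid":             p.UID,
		"csi.storage.k8s.io/serviceAccount.name": p.ServiceAccountName,
	}
}

func main() {
	fmt.Println(buildPodAttrs(podInfo{"test-pod", "test-ns", "uid-123", "test-sa"}))
}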
func (c *csiMountMgr) GetAttributes() volume.Attributes {
mounter := c.plugin.host.GetMounter(c.plugin.GetPluginName())
path := c.GetPath()
supportSelinux, err := mounter.GetSELinuxSupport(path)
if err != nil {
glog.V(2).Info(log("error checking for SELinux support: %s", err))
klog.V(2).Info(log("error checking for SELinux support: %s", err))
// Best guess
supportSelinux = false
}
@ -251,19 +287,19 @@ func (c *csiMountMgr) TearDown() error {
return c.TearDownAt(c.GetPath())
}
func (c *csiMountMgr) TearDownAt(dir string) error {
glog.V(4).Infof(log("Unmounter.TearDown(%s)", dir))
klog.V(4).Infof(log("Unmounter.TearDown(%s)", dir))
// is dir even mounted ?
// TODO (vladimirvivien) this check may not work for an emptyDir or local storage
// see https://github.com/kubernetes/kubernetes/pull/56836#discussion_r155834524
mounted, err := isDirMounted(c.plugin, dir)
if err != nil {
glog.Error(log("unmounter.Teardown failed while checking mount status for dir [%s]: %v", dir, err))
klog.Error(log("unmounter.Teardown failed while checking mount status for dir [%s]: %v", dir, err))
return err
}
if !mounted {
glog.V(4).Info(log("unmounter.Teardown skipping unmount, dir not mounted [%s]", dir))
klog.V(4).Info(log("unmounter.Teardown skipping unmount, dir not mounted [%s]", dir))
return nil
}
@ -274,16 +310,53 @@ func (c *csiMountMgr) TearDownAt(dir string) error {
defer cancel()
if err := csi.NodeUnpublishVolume(ctx, volID, dir); err != nil {
glog.Errorf(log("mounter.TearDownAt failed: %v", err))
klog.Errorf(log("mounter.TearDownAt failed: %v", err))
return err
}
// clean mount point dir
if err := removeMountDir(c.plugin, dir); err != nil {
glog.Error(log("mounter.TearDownAt failed to clean mount dir [%s]: %v", dir, err))
klog.Error(log("mounter.TearDownAt failed to clean mount dir [%s]: %v", dir, err))
return err
}
glog.V(4).Infof(log("mounte.TearDownAt successfully unmounted dir [%s]", dir))
klog.V(4).Infof(log("mounte.TearDownAt successfully unmounted dir [%s]", dir))
return nil
}
// applyFSGroup applies volume ownership; it derives its logic
// from https://github.com/kubernetes/kubernetes/issues/66323
// 1) if fstype is "", then skip fsgroup (could be indication of non-block filesystem)
// 2) if fstype is provided and pv.AccessMode == ReadWriteOnce and !c.spec.ReadOnly then apply fsgroup
func (c *csiMountMgr) applyFSGroup(fsType string, fsGroup *int64) error {
if fsGroup != nil {
if fsType == "" {
klog.V(4).Info(log("mounter.SetupAt WARNING: skipping fsGroup, fsType not provided"))
return nil
}
accessModes := c.spec.PersistentVolume.Spec.AccessModes
if c.spec.PersistentVolume.Spec.AccessModes == nil {
klog.V(4).Info(log("mounter.SetupAt WARNING: skipping fsGroup, access modes not provided"))
return nil
}
if !hasReadWriteOnce(accessModes) {
klog.V(4).Info(log("mounter.SetupAt WARNING: skipping fsGroup, only support ReadWriteOnce access mode"))
return nil
}
if c.readOnly {
klog.V(4).Info(log("mounter.SetupAt WARNING: skipping fsGroup, volume is readOnly"))
return nil
}
err := volume.SetVolumeOwnership(c, fsGroup)
if err != nil {
return err
}
klog.V(4).Info(log("mounter.SetupAt fsGroup [%d] applied successfully to %s", *fsGroup, c.volumeID))
}
return nil
}
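The four guards in applyFSGroup can be restated as a pure predicate. This is a sketch for experimentation, using plain strings for access modes instead of the api types:

package main

import "fmt"

// shouldApplyFSGroup restates applyFSGroup's guards: a non-nil fsGroup,
// a non-empty fsType, a ReadWriteOnce access mode, and a writable mount.
func shouldApplyFSGroup(fsType string, fsGroup *int64, accessModes []string, readOnly bool) bool {
	if fsGroup == nil || fsType == "" || len(accessModes) == 0 || readOnly {
		return false
	}
	for _, m := range accessModes {
		if m == "ReadWriteOnce" {
			return true
		}
	}
	return false
}

func main() {
	gid := int64(3000)
	fmt.Println(shouldApplyFSGroup("ext4", &gid, []string{"ReadWriteOnce"}, false)) // true
	fmt.Println(shouldApplyFSGroup("", &gid, []string{"ReadWriteOnce"}, false))     // false: no fsType
	fmt.Println(shouldApplyFSGroup("ext4", &gid, []string{"ReadWriteMany"}, false)) // false: no RWO mode
}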
@ -293,7 +366,7 @@ func isDirMounted(plug *csiPlugin, dir string) (bool, error) {
mounter := plug.host.GetMounter(plug.GetPluginName())
notMnt, err := mounter.IsLikelyNotMountPoint(dir)
if err != nil && !os.IsNotExist(err) {
glog.Error(log("isDirMounted IsLikelyNotMountPoint test failed for dir [%v]", dir))
klog.Error(log("isDirMounted IsLikelyNotMountPoint test failed for dir [%v]", dir))
return false, err
}
return !notMnt, nil
@ -301,39 +374,39 @@ func isDirMounted(plug *csiPlugin, dir string) (bool, error) {
// removeMountDir cleans the mount dir when dir is not mounted and removes the volume data file in dir
func removeMountDir(plug *csiPlugin, mountPath string) error {
glog.V(4).Info(log("removing mount path [%s]", mountPath))
klog.V(4).Info(log("removing mount path [%s]", mountPath))
if pathExists, pathErr := util.PathExists(mountPath); pathErr != nil {
glog.Error(log("failed while checking mount path stat [%s]", pathErr))
klog.Error(log("failed while checking mount path stat [%s]", pathErr))
return pathErr
} else if !pathExists {
glog.Warning(log("skipping mount dir removal, path does not exist [%v]", mountPath))
klog.Warning(log("skipping mount dir removal, path does not exist [%v]", mountPath))
return nil
}
mounter := plug.host.GetMounter(plug.GetPluginName())
notMnt, err := mounter.IsLikelyNotMountPoint(mountPath)
if err != nil {
glog.Error(log("mount dir removal failed [%s]: %v", mountPath, err))
klog.Error(log("mount dir removal failed [%s]: %v", mountPath, err))
return err
}
if notMnt {
glog.V(4).Info(log("dir not mounted, deleting it [%s]", mountPath))
klog.V(4).Info(log("dir not mounted, deleting it [%s]", mountPath))
if err := os.Remove(mountPath); err != nil && !os.IsNotExist(err) {
glog.Error(log("failed to remove dir [%s]: %v", mountPath, err))
klog.Error(log("failed to remove dir [%s]: %v", mountPath, err))
return err
}
// remove volume data file as well
volPath := path.Dir(mountPath)
dataFile := path.Join(volPath, volDataFileName)
glog.V(4).Info(log("also deleting volume info data file [%s]", dataFile))
klog.V(4).Info(log("also deleting volume info data file [%s]", dataFile))
if err := os.Remove(dataFile); err != nil && !os.IsNotExist(err) {
glog.Error(log("failed to delete volume data file [%s]: %v", dataFile, err))
klog.Error(log("failed to delete volume data file [%s]: %v", dataFile, err))
return err
}
// remove volume path
glog.V(4).Info(log("deleting volume path [%s]", volPath))
klog.V(4).Info(log("deleting volume path [%s]", volPath))
if err := os.Remove(volPath); err != nil && !os.IsNotExist(err) {
glog.Error(log("failed to delete volume path [%s]: %v", volPath, err))
klog.Error(log("failed to delete volume path [%s]: %v", volPath, err))
return err
}
}
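removeMountDir assumes the mount point sits one level below the per-volume directory that also holds vol_data.json, so path.Dir(mountPath) yields the directory to clean. A small sketch of that path arithmetic; the pod-volume layout shown is illustrative, not guaranteed:

package main

import (
	"fmt"
	"path"
)

func main() {
	// Illustrative layout: <volume dir>/mount is the mount point and
	// <volume dir>/vol_data.json is the metadata file removed with it.
	mountPath := "/var/lib/kubelet/pods/uid-123/volumes/kubernetes.io~csi/pv-1/mount"
	volPath := path.Dir(mountPath)
	fmt.Println(volPath)                             // the volume dir, removed last
	fmt.Println(path.Join(volPath, "vol_data.json")) // the data file removed alongside the mount dir
}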

View File

@ -25,25 +25,35 @@ import (
"path"
"testing"
"reflect"
api "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1beta1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
fakeclient "k8s.io/client-go/kubernetes/fake"
csiapi "k8s.io/csi-api/pkg/apis/csi/v1alpha1"
fakecsi "k8s.io/csi-api/pkg/client/clientset/versioned/fake"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/kubernetes/pkg/volume/util"
)
var (
testDriver = "test-driver"
testVol = "vol-123"
testns = "test-ns"
testPodUID = types.UID("test-pod")
testDriver = "test-driver"
testVol = "vol-123"
testns = "test-ns"
testPod = "test-pod"
testPodUID = types.UID("test-pod")
testAccount = "test-service-account"
)
func TestMounterGetPath(t *testing.T) {
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
// TODO (vladimirvivien) specName with slashes will not work
@ -65,6 +75,7 @@ func TestMounterGetPath(t *testing.T) {
}
for _, tc := range testCases {
t.Logf("test case: %s", tc.name)
registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t)
pv := makeTestPV(tc.specVolumeName, 10, testDriver, testVol)
spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
mounter, err := plug.NewMounter(
@ -85,90 +96,306 @@ func TestMounterGetPath(t *testing.T) {
}
}
func MounterSetUpTests(t *testing.T, podInfoEnabled bool) {
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIDriverRegistry, podInfoEnabled)()
tests := []struct {
name string
driver string
volumeContext map[string]string
expectedVolumeContext map[string]string
}{
{
name: "no pod info",
driver: "no-info",
volumeContext: nil,
expectedVolumeContext: nil,
},
{
name: "no CSIDriver -> no pod info",
driver: "unknown-driver",
volumeContext: nil,
expectedVolumeContext: nil,
},
{
name: "CSIDriver with PodInfoRequiredOnMount=nil -> no pod info",
driver: "nil",
volumeContext: nil,
expectedVolumeContext: nil,
},
{
name: "no pod info -> keep existing volumeContext",
driver: "no-info",
volumeContext: map[string]string{"foo": "bar"},
expectedVolumeContext: map[string]string{"foo": "bar"},
},
{
name: "add pod info",
driver: "info",
volumeContext: nil,
expectedVolumeContext: map[string]string{"csi.storage.k8s.io/pod.uid": "test-pod", "csi.storage.k8s.io/serviceAccount.name": "test-service-account", "csi.storage.k8s.io/pod.name": "test-pod", "csi.storage.k8s.io/pod.namespace": "test-ns"},
},
{
name: "add pod info -> keep existing volumeContext",
driver: "info",
volumeContext: map[string]string{"foo": "bar"},
expectedVolumeContext: map[string]string{"foo": "bar", "csi.storage.k8s.io/pod.uid": "test-pod", "csi.storage.k8s.io/serviceAccount.name": "test-service-account", "csi.storage.k8s.io/pod.name": "test-pod", "csi.storage.k8s.io/pod.namespace": "test-ns"},
},
}
emptyPodMountInfoVersion := ""
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
klog.Infof("Starting test %s", test.name)
fakeClient := fakeclient.NewSimpleClientset()
fakeCSIClient := fakecsi.NewSimpleClientset(
getCSIDriver("no-info", &emptyPodMountInfoVersion, nil),
getCSIDriver("info", &currentPodInfoMountVersion, nil),
getCSIDriver("nil", nil, nil),
)
plug, tmpDir := newTestPlugin(t, fakeClient, fakeCSIClient)
defer os.RemoveAll(tmpDir)
if utilfeature.DefaultFeatureGate.Enabled(features.CSIDriverRegistry) {
// Wait until the informer in CSI volume plugin has all CSIDrivers.
wait.PollImmediate(testInformerSyncPeriod, testInformerSyncTimeout, func() (bool, error) {
return plug.csiDriverInformer.Informer().HasSynced(), nil
})
}
registerFakePlugin(test.driver, "endpoint", []string{"1.0.0"}, t)
pv := makeTestPV("test-pv", 10, test.driver, testVol)
pv.Spec.CSI.VolumeAttributes = test.volumeContext
pv.Spec.MountOptions = []string{"foo=bar", "baz=qux"}
pvName := pv.GetName()
mounter, err := plug.NewMounter(
volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly),
&api.Pod{
ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns, Name: testPod},
Spec: api.PodSpec{
ServiceAccountName: testAccount,
},
},
volume.VolumeOptions{},
)
if err != nil {
t.Fatalf("failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Fatal("failed to create CSI mounter")
}
csiMounter := mounter.(*csiMountMgr)
csiMounter.csiClient = setupClient(t, true)
attachID := getAttachmentName(csiMounter.volumeID, string(csiMounter.driverName), string(plug.host.GetNodeName()))
attachment := &storage.VolumeAttachment{
ObjectMeta: meta.ObjectMeta{
Name: attachID,
},
Spec: storage.VolumeAttachmentSpec{
NodeName: "test-node",
Attacher: csiPluginName,
Source: storage.VolumeAttachmentSource{
PersistentVolumeName: &pvName,
},
},
Status: storage.VolumeAttachmentStatus{
Attached: false,
AttachError: nil,
DetachError: nil,
},
}
_, err = csiMounter.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
if err != nil {
t.Fatalf("failed to setup VolumeAttachment: %v", err)
}
// Mounter.SetUp()
fsGroup := int64(2000)
if err := csiMounter.SetUp(&fsGroup); err != nil {
t.Fatalf("mounter.Setup failed: %v", err)
}
//Test the default value of file system type is not overridden
if len(csiMounter.spec.PersistentVolume.Spec.CSI.FSType) != 0 {
t.Errorf("default value of file system type was overridden by type %s", csiMounter.spec.PersistentVolume.Spec.CSI.FSType)
}
path := csiMounter.GetPath()
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", path)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
// ensure call went all the way
pubs := csiMounter.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodePublishedVolumes()
vol, ok := pubs[csiMounter.volumeID]
if !ok {
t.Error("csi server may not have received NodePublishVolume call")
}
if vol.Path != csiMounter.GetPath() {
t.Errorf("csi server expected path %s, got %s", csiMounter.GetPath(), vol.Path)
}
if !reflect.DeepEqual(vol.MountFlags, pv.Spec.MountOptions) {
t.Errorf("csi server expected mount options %v, got %v", pv.Spec.MountOptions, vol.MountFlags)
}
if podInfoEnabled {
if !reflect.DeepEqual(vol.VolumeContext, test.expectedVolumeContext) {
t.Errorf("csi server expected volumeContext %+v, got %+v", test.expectedVolumeContext, vol.VolumeContext)
}
} else {
// CSIPodInfo feature is disabled, we expect no modifications to volumeContext.
if !reflect.DeepEqual(vol.VolumeContext, test.volumeContext) {
t.Errorf("csi server expected volumeContext %+v, got %+v", test.volumeContext, vol.VolumeContext)
}
}
})
}
}
func TestMounterSetUp(t *testing.T) {
t.Run("WithCSIPodInfo", func(t *testing.T) {
MounterSetUpTests(t, true)
})
t.Run("WithoutCSIPodInfo", func(t *testing.T) {
MounterSetUpTests(t, false)
})
}
func TestMounterSetUpWithFSGroup(t *testing.T) {
fakeClient := fakeclient.NewSimpleClientset()
plug, tmpDir := newTestPlugin(t, fakeClient, nil)
defer os.RemoveAll(tmpDir)
testCases := []struct {
name string
accessModes []api.PersistentVolumeAccessMode
readOnly bool
fsType string
setFsGroup bool
fsGroup int64
}{
{
name: "default fstype, with no fsgroup (should not apply fsgroup)",
accessModes: []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
},
readOnly: false,
fsType: "",
},
{
name: "default fstype with fsgroup (should not apply fsgroup)",
accessModes: []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
},
readOnly: false,
fsType: "",
setFsGroup: true,
fsGroup: 3000,
},
{
name: "fstype, fsgroup, RWM, ROM provided (should not apply fsgroup)",
accessModes: []api.PersistentVolumeAccessMode{
api.ReadWriteMany,
api.ReadOnlyMany,
},
fsType: "ext4",
setFsGroup: true,
fsGroup: 3000,
},
{
name: "fstype, fsgroup, RWO, but readOnly (should not apply fsgroup)",
accessModes: []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
},
readOnly: true,
fsType: "ext4",
setFsGroup: true,
fsGroup: 3000,
},
{
name: "fstype, fsgroup, RWO provided (should apply fsgroup)",
accessModes: []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
},
fsType: "ext4",
setFsGroup: true,
fsGroup: 3000,
},
}
for i, tc := range testCases {
t.Logf("Running test %s", tc.name)
volName := fmt.Sprintf("test-vol-%d", i)
registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t)
pv := makeTestPV("test-pv", 10, testDriver, volName)
pv.Spec.AccessModes = tc.accessModes
pvName := pv.GetName()
spec := volume.NewSpecFromPersistentVolume(pv, tc.readOnly)
if tc.fsType != "" {
spec.PersistentVolume.Spec.CSI.FSType = tc.fsType
}
mounter, err := plug.NewMounter(
spec,
&api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
volume.VolumeOptions{},
)
if err != nil {
t.Fatalf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Fatal("failed to create CSI mounter")
}
csiMounter := mounter.(*csiMountMgr)
csiMounter.csiClient = setupClient(t, true)
attachID := getAttachmentName(csiMounter.volumeID, string(csiMounter.driverName), string(plug.host.GetNodeName()))
attachment := makeTestAttachment(attachID, "test-node", pvName)
_, err = csiMounter.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
if err != nil {
t.Errorf("failed to setup VolumeAttachment: %v", err)
continue
}
// Mounter.SetUp()
var fsGroupPtr *int64
if tc.setFsGroup {
fsGroup := tc.fsGroup
fsGroupPtr = &fsGroup
}
if err := csiMounter.SetUp(fsGroupPtr); err != nil {
t.Fatalf("mounter.Setup failed: %v", err)
}
//Test the default value of file system type is not overridden
if len(csiMounter.spec.PersistentVolume.Spec.CSI.FSType) != len(tc.fsType) {
t.Errorf("file system type was overridden by type %s", csiMounter.spec.PersistentVolume.Spec.CSI.FSType)
}
// ensure call went all the way
pubs := csiMounter.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodePublishedVolumes()
if pubs[csiMounter.volumeID].Path != csiMounter.GetPath() {
t.Error("csi server may not have received NodePublishVolume call")
}
}
}
func TestUnmounterTeardown(t *testing.T) {
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t)
pv := makeTestPV("test-pv", 10, testDriver, testVol)
// save the data file prior to unmount
@ -216,7 +443,7 @@ func TestUnmounterTeardown(t *testing.T) {
}
func TestSaveVolumeData(t *testing.T) {
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
testCases := []struct {
name string
@ -262,3 +489,15 @@ func TestSaveVolumeData(t *testing.T) {
}
}
}
func getCSIDriver(name string, podInfoMountVersion *string, attachable *bool) *csiapi.CSIDriver {
return &csiapi.CSIDriver{
ObjectMeta: meta.ObjectMeta{
Name: name,
},
Spec: csiapi.CSIDriverSpec{
PodInfoOnMountVersion: podInfoMountVersion,
AttachRequired: attachable,
},
}
}

View File

@ -21,19 +21,29 @@ import (
"fmt"
"os"
"path"
"sort"
"strings"
"sync"
"time"
"github.com/golang/glog"
"context"
"k8s.io/klog"
api "k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilversion "k8s.io/apimachinery/pkg/util/version"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
clientset "k8s.io/client-go/kubernetes"
csiapiinformer "k8s.io/csi-api/pkg/client/informers/externalversions"
csiinformer "k8s.io/csi-api/pkg/client/informers/externalversions/csi/v1alpha1"
csilister "k8s.io/csi-api/pkg/client/listers/csi/v1alpha1"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/csi/labelmanager"
"k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager"
)
const (
@ -48,11 +58,18 @@ const (
volNameSep = "^"
volDataFileName = "vol_data.json"
fsTypeBlockName = "block"
// TODO: increase to something useful
csiResyncPeriod = time.Minute
)
var deprecatedSocketDirVersions = []string{"0.1.0", "0.2.0", "0.3.0", "0.4.0"}
type csiPlugin struct {
host volume.VolumeHost
blockEnabled bool
csiDriverLister csilister.CSIDriverLister
csiDriverInformer csiinformer.CSIDriverInformer
}
// ProbeVolumePlugins returns implemented plugins
@ -68,8 +85,9 @@ func ProbeVolumePlugins() []volume.VolumePlugin {
var _ volume.VolumePlugin = &csiPlugin{}
type csiDriver struct {
driverName string
driverEndpoint string
highestSupportedVersion *utilversion.Version
}
type csiDriversStore struct {
@ -77,43 +95,163 @@ type csiDriversStore struct {
sync.RWMutex
}
// RegistrationHandler is the handler which is fed to the pluginwatcher API.
type RegistrationHandler struct {
}
// TODO (verult) consider using a struct instead of global variables
// csiDrivers map keep track of all registered CSI drivers on the node and their
// corresponding sockets
var csiDrivers csiDriversStore
var nim nodeinfomanager.Interface
// PluginHandler is the plugin registration handler interface passed to the
// pluginwatcher module in kubelet
var PluginHandler = &RegistrationHandler{}
// ValidatePlugin is called by kubelet's plugin watcher upon detection
// of a new registration socket opened by CSI Driver registrar side car.
func (h *RegistrationHandler) ValidatePlugin(pluginName string, endpoint string, versions []string, foundInDeprecatedDir bool) error {
klog.Infof(log("Trying to validate a new CSI Driver with name: %s endpoint: %s versions: %s, foundInDeprecatedDir: %v",
pluginName, endpoint, strings.Join(versions, ","), foundInDeprecatedDir))
if foundInDeprecatedDir {
// CSI 0.x drivers used /var/lib/kubelet/plugins as the socket dir.
// This was deprecated as the socket dir for kubelet drivers, in lieu of a dedicated dir /var/lib/kubelet/plugins_registry
// The deprecated dir will only be allowed for a whitelisted set of old versions.
// CSI 1.x drivers should use the /var/lib/kubelet/plugins_registry
if !isDeprecatedSocketDirAllowed(versions) {
err := fmt.Errorf("socket for CSI driver %q versions %v was found in a deprecated dir. Drivers implementing CSI 1.x+ must use the new dir", pluginName, versions)
klog.Error(err)
return err
}
}
_, err := h.validateVersions("ValidatePlugin", pluginName, endpoint, versions)
return err
}
// RegisterPlugin is called when a plugin can be registered
func (h *RegistrationHandler) RegisterPlugin(pluginName string, endpoint string, versions []string) error {
klog.Infof(log("Register new plugin with name: %s at endpoint: %s", pluginName, endpoint))
highestSupportedVersion, err := h.validateVersions("RegisterPlugin", pluginName, endpoint, versions)
if err != nil {
return err
}
func() {
// Storing endpoint of newly registered CSI driver into the map, where CSI driver name will be the key
// all other CSI components will be able to get the actual socket of CSI drivers by its name.
// It's not necessary to lock the entire RegistrationCallback() function because only the CSI
// client depends on this driver map, and the CSI client does not depend on node information
// updated in the rest of the function.
csiDrivers.Lock()
defer csiDrivers.Unlock()
csiDrivers.driversMap[pluginName] = csiDriver{driverName: pluginName, driverEndpoint: endpoint, highestSupportedVersion: highestSupportedVersion}
}()
// Get node info from the driver.
csi, err := newCsiDriverClient(csiDriverName(pluginName))
if err != nil {
return err
}
// TODO (verult) retry with exponential backoff, possibly added in csi client library.
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
defer cancel()
driverNodeID, maxVolumePerNode, accessibleTopology, err := csi.NodeGetInfo(ctx)
if err != nil {
klog.Error(log("registrationHandler.RegisterPlugin failed at CSI.NodeGetInfo: %v", err))
if unregErr := unregisterDriver(pluginName); unregErr != nil {
klog.Error(log("registrationHandler.RegisterPlugin failed to unregister plugin due to previous: %v", unregErr))
return unregErr
}
return err
}
err = nim.InstallCSIDriver(pluginName, driverNodeID, maxVolumePerNode, accessibleTopology)
if err != nil {
klog.Error(log("registrationHandler.RegisterPlugin failed at AddNodeInfo: %v", err))
if unregErr := unregisterDriver(pluginName); unregErr != nil {
klog.Error(log("registrationHandler.RegisterPlugin failed to unregister plugin due to previous error: %v", unregErr))
return unregErr
}
return err
}
return nil
}
func (h *RegistrationHandler) validateVersions(callerName, pluginName string, endpoint string, versions []string) (*utilversion.Version, error) {
if len(versions) == 0 {
err := fmt.Errorf("%s for CSI driver %q failed. Plugin returned an empty list for supported versions", callerName, pluginName)
klog.Error(err)
return nil, err
}
// Validate version
newDriverHighestVersion, err := highestSupportedVersion(versions)
if err != nil {
err := fmt.Errorf("%s for CSI driver %q failed. None of the versions specified %q are supported. err=%v", callerName, pluginName, versions, err)
klog.Error(err)
return nil, err
}
// Check for existing drivers with the same name
var existingDriver csiDriver
driverExists := false
func() {
csiDrivers.RLock()
defer csiDrivers.RUnlock()
existingDriver, driverExists = csiDrivers.driversMap[pluginName]
}()
if driverExists {
if !existingDriver.highestSupportedVersion.LessThan(newDriverHighestVersion) {
err := fmt.Errorf("%s for CSI driver %q failed. Another driver with the same name is already registered with a higher supported version: %q", callerName, pluginName, existingDriver.highestSupportedVersion)
klog.Error(err)
return nil, err
}
}
return newDriverHighestVersion, nil
}
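The re-registration rule buried in validateVersions is: a driver may replace an existing registration of the same name only if it advertises a strictly higher supported version. A sketch of just that comparison, using the same utilversion package; the function name is illustrative:

package main

import (
	"fmt"

	utilversion "k8s.io/apimachinery/pkg/util/version"
)

// allowReplacement returns true only when the candidate's highest
// supported version is strictly greater than the existing one, matching
// the !existing.LessThan(candidate) rejection above.
func allowReplacement(existing, candidate string) bool {
	return utilversion.MustParseGeneric(existing).LessThan(utilversion.MustParseGeneric(candidate))
}

func main() {
	fmt.Println(allowReplacement("1.0.0", "1.2.0")) // true: newer driver wins
	fmt.Println(allowReplacement("1.2.0", "1.2.0")) // false: equal versions are rejected
}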
// DeRegisterPlugin is called when a plugin removed its socket, signaling
// it is no longer available
func (h *RegistrationHandler) DeRegisterPlugin(pluginName string) {
klog.V(4).Info(log("registrationHandler.DeRegisterPlugin request for plugin %s", pluginName))
if err := unregisterDriver(pluginName); err != nil {
klog.Error(log("registrationHandler.DeRegisterPlugin failed: %v", err))
}
}
func (p *csiPlugin) Init(host volume.VolumeHost) error {
glog.Info(log("plugin initializing..."))
p.host = host
if utilfeature.DefaultFeatureGate.Enabled(features.CSIDriverRegistry) {
csiClient := host.GetCSIClient()
if csiClient == nil {
klog.Warning("The client for CSI Custom Resources is not available, skipping informer initialization")
} else {
// Start informer for CSIDrivers.
factory := csiapiinformer.NewSharedInformerFactory(csiClient, csiResyncPeriod)
p.csiDriverInformer = factory.Csi().V1alpha1().CSIDrivers()
p.csiDriverLister = p.csiDriverInformer.Lister()
go factory.Start(wait.NeverStop)
}
}
// Initializing csiDrivers map and label management channels
csiDrivers = csiDriversStore{driversMap: map[string]csiDriver{}}
nim = nodeinfomanager.NewNodeInfoManager(host.GetNodeName(), host)
// TODO(#70514) Init CSINodeInfo object if the CRD exists and create Driver
// objects for migrated drivers.
return nil
}
@ -127,7 +265,7 @@ func (p *csiPlugin) GetPluginName() string {
func (p *csiPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
csi, err := getCSISourceFromSpec(spec)
if err != nil {
glog.Error(log("plugin.GetVolumeName failed to extract volume source from spec: %v", err))
klog.Error(log("plugin.GetVolumeName failed to extract volume source from spec: %v", err))
return "", err
}
@ -160,11 +298,14 @@ func (p *csiPlugin) NewMounter(
k8s := p.host.GetKubeClient()
if k8s == nil {
glog.Error(log("failed to get a kubernetes client"))
klog.Error(log("failed to get a kubernetes client"))
return nil, errors.New("failed to get a Kubernetes client")
}
csi, err := newCsiDriverClient(csiDriverName(pvSource.Driver))
if err != nil {
return nil, err
}
mounter := &csiMountMgr{
plugin: p,
@ -172,7 +313,7 @@ func (p *csiPlugin) NewMounter(
spec: spec,
pod: pod,
podUID: pod.UID,
driverName: csiDriverName(pvSource.Driver),
volumeID: pvSource.VolumeHandle,
specVolumeID: spec.Name(),
csiClient: csi,
@ -184,10 +325,10 @@ func (p *csiPlugin) NewMounter(
dataDir := path.Dir(dir) // dropoff /mount at end
if err := os.MkdirAll(dataDir, 0750); err != nil {
glog.Error(log("failed to create dir %#v: %v", dataDir, err))
klog.Error(log("failed to create dir %#v: %v", dataDir, err))
return nil, err
}
glog.V(4).Info(log("created path successfully [%s]", dataDir))
klog.V(4).Info(log("created path successfully [%s]", dataDir))
// persist volume info data for teardown
node := string(p.host.GetNodeName())
@ -201,21 +342,21 @@ func (p *csiPlugin) NewMounter(
}
if err := saveVolumeData(dataDir, volDataFileName, volData); err != nil {
glog.Error(log("failed to save volume info data: %v", err))
klog.Error(log("failed to save volume info data: %v", err))
if err := os.RemoveAll(dataDir); err != nil {
glog.Error(log("failed to remove dir after error [%s]: %v", dataDir, err))
klog.Error(log("failed to remove dir after error [%s]: %v", dataDir, err))
return nil, err
}
return nil, err
}
glog.V(4).Info(log("mounter created successfully"))
klog.V(4).Info(log("mounter created successfully"))
return mounter, nil
}
func (p *csiPlugin) NewUnmounter(specName string, podUID types.UID) (volume.Unmounter, error) {
glog.V(4).Infof(log("setting up unmounter for [name=%v, podUID=%v]", specName, podUID))
klog.V(4).Infof(log("setting up unmounter for [name=%v, podUID=%v]", specName, podUID))
unmounter := &csiMountMgr{
plugin: p,
@ -228,27 +369,31 @@ func (p *csiPlugin) NewUnmounter(specName string, podUID types.UID) (volume.Unmo
dataDir := path.Dir(dir) // dropoff /mount at end
data, err := loadVolumeData(dataDir, volDataFileName)
if err != nil {
glog.Error(log("unmounter failed to load volume data file [%s]: %v", dir, err))
klog.Error(log("unmounter failed to load volume data file [%s]: %v", dir, err))
return nil, err
}
unmounter.driverName = csiDriverName(data[volDataKey.driverName])
unmounter.volumeID = data[volDataKey.volHandle]
unmounter.csiClient, err = newCsiDriverClient(unmounter.driverName)
if err != nil {
return nil, err
}
return unmounter, nil
}
func (p *csiPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
glog.V(4).Info(log("plugin.ConstructVolumeSpec [pv.Name=%v, path=%v]", volumeName, mountPath))
klog.V(4).Info(log("plugin.ConstructVolumeSpec [pv.Name=%v, path=%v]", volumeName, mountPath))
volData, err := loadVolumeData(mountPath, volDataFileName)
if err != nil {
glog.Error(log("plugin.ConstructVolumeSpec failed loading volume data using [%s]: %v", mountPath, err))
klog.Error(log("plugin.ConstructVolumeSpec failed loading volume data using [%s]: %v", mountPath, err))
return nil, err
}
glog.V(4).Info(log("plugin.ConstructVolumeSpec extracted [%#v]", volData))
klog.V(4).Info(log("plugin.ConstructVolumeSpec extracted [%#v]", volData))
fsMode := api.PersistentVolumeFilesystem
pv := &api.PersistentVolume{
ObjectMeta: meta.ObjectMeta{
Name: volData[volDataKey.specVolID],
@ -260,6 +405,7 @@ func (p *csiPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.S
VolumeHandle: volData[volDataKey.volHandle],
},
},
VolumeMode: &fsMode,
},
}
@ -268,8 +414,11 @@ func (p *csiPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.S
func (p *csiPlugin) SupportsMountOption() bool {
// TODO (vladimirvivien) use CSI VolumeCapability.MountVolume.mount_flags
// to probe for the result for this method
// (bswartz) Until the CSI spec supports probing, our only option is to
// make plugins register their support for mount options or lack thereof
// directly with kubernetes.
return true
}
func (p *csiPlugin) SupportsBulkVolumeVerification() bool {
@ -279,10 +428,12 @@ func (p *csiPlugin) SupportsBulkVolumeVerification() bool {
// volume.AttachableVolumePlugin methods
var _ volume.AttachableVolumePlugin = &csiPlugin{}
var _ volume.DeviceMountableVolumePlugin = &csiPlugin{}
func (p *csiPlugin) NewAttacher() (volume.Attacher, error) {
k8s := p.host.GetKubeClient()
if k8s == nil {
glog.Error(log("unable to get kubernetes client from host"))
klog.Error(log("unable to get kubernetes client from host"))
return nil, errors.New("unable to get Kubernetes client")
}
@ -293,10 +444,14 @@ func (p *csiPlugin) NewAttacher() (volume.Attacher, error) {
}, nil
}
func (p *csiPlugin) NewDeviceMounter() (volume.DeviceMounter, error) {
return p.NewAttacher()
}
func (p *csiPlugin) NewDetacher() (volume.Detacher, error) {
k8s := p.host.GetKubeClient()
if k8s == nil {
glog.Error(log("unable to get kubernetes client from host"))
klog.Error(log("unable to get kubernetes client from host"))
return nil, errors.New("unable to get Kubernetes client")
}
@ -307,9 +462,13 @@ func (p *csiPlugin) NewDetacher() (volume.Detacher, error) {
}, nil
}
func (p *csiPlugin) NewDeviceUnmounter() (volume.DeviceUnmounter, error) {
return p.NewDetacher()
}
func (p *csiPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
m := p.host.GetMounter(p.GetPluginName())
return m.GetMountRefs(deviceMountPath)
}
// BlockVolumePlugin methods
@ -329,12 +488,15 @@ func (p *csiPlugin) NewBlockVolumeMapper(spec *volume.Spec, podRef *api.Pod, opt
return nil, err
}
glog.V(4).Info(log("setting up block mapper for [volume=%v,driver=%v]", pvSource.VolumeHandle, pvSource.Driver))
client := newCsiDriverClient(pvSource.Driver)
klog.V(4).Info(log("setting up block mapper for [volume=%v,driver=%v]", pvSource.VolumeHandle, pvSource.Driver))
client, err := newCsiDriverClient(csiDriverName(pvSource.Driver))
if err != nil {
return nil, err
}
k8s := p.host.GetKubeClient()
if k8s == nil {
glog.Error(log("failed to get a kubernetes client"))
klog.Error(log("failed to get a kubernetes client"))
return nil, errors.New("failed to get a Kubernetes client")
}
@ -343,9 +505,10 @@ func (p *csiPlugin) NewBlockVolumeMapper(spec *volume.Spec, podRef *api.Pod, opt
k8s: k8s,
plugin: p,
volumeID: pvSource.VolumeHandle,
driverName: csiDriverName(pvSource.Driver),
readOnly: readOnly,
spec: spec,
specName: spec.Name(),
podUID: podRef.UID,
}
@ -353,10 +516,10 @@ func (p *csiPlugin) NewBlockVolumeMapper(spec *volume.Spec, podRef *api.Pod, opt
dataDir := getVolumeDeviceDataDir(spec.Name(), p.host)
if err := os.MkdirAll(dataDir, 0750); err != nil {
glog.Error(log("failed to create data dir %s: %v", dataDir, err))
klog.Error(log("failed to create data dir %s: %v", dataDir, err))
return nil, err
}
glog.V(4).Info(log("created path successfully [%s]", dataDir))
klog.V(4).Info(log("created path successfully [%s]", dataDir))
// persist volume info data for teardown
node := string(p.host.GetNodeName())
@ -370,9 +533,9 @@ func (p *csiPlugin) NewBlockVolumeMapper(spec *volume.Spec, podRef *api.Pod, opt
}
if err := saveVolumeData(dataDir, volDataFileName, volData); err != nil {
glog.Error(log("failed to save volume info data: %v", err))
klog.Error(log("failed to save volume info data: %v", err))
if err := os.RemoveAll(dataDir); err != nil {
glog.Error(log("failed to remove dir after error [%s]: %v", dataDir, err))
klog.Error(log("failed to remove dir after error [%s]: %v", dataDir, err))
return nil, err
}
return nil, err
@ -386,7 +549,7 @@ func (p *csiPlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (vo
return nil, errors.New("CSIBlockVolume feature not enabled")
}
glog.V(4).Infof(log("setting up block unmapper for [Spec=%v, podUID=%v]", volName, podUID))
klog.V(4).Infof(log("setting up block unmapper for [Spec=%v, podUID=%v]", volName, podUID))
unmapper := &csiBlockMapper{
plugin: p,
podUID: podUID,
@ -397,12 +560,15 @@ func (p *csiPlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (vo
dataDir := getVolumeDeviceDataDir(unmapper.specName, p.host)
data, err := loadVolumeData(dataDir, volDataFileName)
if err != nil {
glog.Error(log("unmapper failed to load volume data file [%s]: %v", dataDir, err))
klog.Error(log("unmapper failed to load volume data file [%s]: %v", dataDir, err))
return nil, err
}
unmapper.driverName = csiDriverName(data[volDataKey.driverName])
unmapper.volumeID = data[volDataKey.volHandle]
unmapper.csiClient, err = newCsiDriverClient(unmapper.driverName)
if err != nil {
return nil, err
}
return unmapper, nil
}
@ -412,16 +578,16 @@ func (p *csiPlugin) ConstructBlockVolumeSpec(podUID types.UID, specVolName, mapP
return nil, errors.New("CSIBlockVolume feature not enabled")
}
glog.V(4).Infof("plugin.ConstructBlockVolumeSpec [podUID=%s, specVolName=%s, path=%s]", string(podUID), specVolName, mapPath)
klog.V(4).Infof("plugin.ConstructBlockVolumeSpec [podUID=%s, specVolName=%s, path=%s]", string(podUID), specVolName, mapPath)
dataDir := getVolumeDeviceDataDir(specVolName, p.host)
volData, err := loadVolumeData(dataDir, volDataFileName)
if err != nil {
glog.Error(log("plugin.ConstructBlockVolumeSpec failed loading volume data using [%s]: %v", mapPath, err))
klog.Error(log("plugin.ConstructBlockVolumeSpec failed loading volume data using [%s]: %v", mapPath, err))
return nil, err
}
glog.V(4).Info(log("plugin.ConstructBlockVolumeSpec extracted [%#v]", volData))
klog.V(4).Info(log("plugin.ConstructBlockVolumeSpec extracted [%#v]", volData))
blockMode := api.PersistentVolumeBlock
pv := &api.PersistentVolume{
@ -441,3 +607,129 @@ func (p *csiPlugin) ConstructBlockVolumeSpec(podUID types.UID, specVolName, mapP
return volume.NewSpecFromPersistentVolume(pv, false), nil
}
func (p *csiPlugin) skipAttach(driver string) (bool, error) {
if !utilfeature.DefaultFeatureGate.Enabled(features.CSIDriverRegistry) {
return false, nil
}
if p.csiDriverLister == nil {
return false, errors.New("CSIDriver lister does not exist")
}
csiDriver, err := p.csiDriverLister.Get(driver)
if err != nil {
if apierrs.IsNotFound(err) {
// Don't skip attach if CSIDriver does not exist
return false, nil
}
return false, err
}
if csiDriver.Spec.AttachRequired != nil && *csiDriver.Spec.AttachRequired == false {
return true, nil
}
return false, nil
}
func (p *csiPlugin) getPublishContext(client clientset.Interface, handle, driver, nodeName string) (map[string]string, error) {
skip, err := p.skipAttach(driver)
if err != nil {
return nil, err
}
if skip {
return nil, nil
}
attachID := getAttachmentName(handle, driver, nodeName)
// search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
attachment, err := client.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
if err != nil {
return nil, err // This err already has enough context ("VolumeAttachment xyz not found")
}
if attachment == nil {
err = errors.New("no existing VolumeAttachment found")
return nil, err
}
return attachment.Status.AttachmentMetadata, nil
}
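Taken together, skipAttach and getPublishContext implement one decision: drivers whose CSIDriver object set AttachRequired=false get a nil publish context, everyone else gets the VolumeAttachment's AttachmentMetadata. A compressed sketch, where lookup is a hypothetical stand-in for the VolumeAttachments().Get call:

package main

import "fmt"

// publishContext returns nil when attach is skipped, otherwise the
// attachment metadata fetched by the supplied lookup function.
func publishContext(skipAttach bool, lookup func(attachID string) map[string]string, attachID string) map[string]string {
	if skipAttach {
		return nil
	}
	return lookup(attachID)
}

func main() {
	lookup := func(id string) map[string]string {
		return map[string]string{"devicePath": "/dev/sdx"} // fake AttachmentMetadata
	}
	fmt.Println(publishContext(false, lookup, "csi-0badc0de"))
	fmt.Println(publishContext(true, lookup, "csi-0badc0de")) // empty: driver opted out of attach
}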
func unregisterDriver(driverName string) error {
func() {
csiDrivers.Lock()
defer csiDrivers.Unlock()
delete(csiDrivers.driversMap, driverName)
}()
if err := nim.UninstallCSIDriver(driverName); err != nil {
klog.Errorf("Error uninstalling CSI driver: %v", err)
return err
}
return nil
}
// Return the highest supported version
func highestSupportedVersion(versions []string) (*utilversion.Version, error) {
if len(versions) == 0 {
return nil, fmt.Errorf("CSI driver reporting empty array for supported versions")
}
// Sort by lowest to highest version
sort.Slice(versions, func(i, j int) bool {
parsedVersionI, err := utilversion.ParseGeneric(versions[i])
if err != nil {
// Push bad values to the bottom
return true
}
parsedVersionJ, err := utilversion.ParseGeneric(versions[j])
if err != nil {
// Push bad values to the bottom
return false
}
return parsedVersionI.LessThan(parsedVersionJ)
})
for i := len(versions) - 1; i >= 0; i-- {
highestSupportedVersion, err := utilversion.ParseGeneric(versions[i])
if err != nil {
return nil, err
}
if highestSupportedVersion.Major() <= 1 {
return highestSupportedVersion, nil
}
}
return nil, fmt.Errorf("None of the CSI versions reported by this driver are supported")
}
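A condensed, runnable restatement of the selection rule for experimentation: unparsable entries sort to the bottom, and the scan from the top skips anything above major version 1. This is a sketch, not the plugin's code:

package main

import (
	"fmt"
	"sort"

	utilversion "k8s.io/apimachinery/pkg/util/version"
)

// pickHighestSupported mirrors highestSupportedVersion above.
func pickHighestSupported(versions []string) (*utilversion.Version, error) {
	sort.Slice(versions, func(i, j int) bool {
		vi, err := utilversion.ParseGeneric(versions[i])
		if err != nil {
			return true // unparsable values sort low
		}
		vj, err := utilversion.ParseGeneric(versions[j])
		if err != nil {
			return false
		}
		return vi.LessThan(vj)
	})
	for i := len(versions) - 1; i >= 0; i-- {
		v, err := utilversion.ParseGeneric(versions[i])
		if err != nil {
			return nil, err
		}
		if v.Major() <= 1 {
			return v, nil
		}
	}
	return nil, fmt.Errorf("no supported version")
}

func main() {
	v, _ := pickHighestSupported([]string{"v1.2.3", "2.0.1", "v0.3.0"})
	fmt.Println(v) // 1.2.3 — 2.0.1 parses but its major version is above 1
}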
// Only drivers that implement CSI 0.x are allowed to use deprecated socket dir.
func isDeprecatedSocketDirAllowed(versions []string) bool {
for _, version := range versions {
if isV0Version(version) {
return true
}
}
return false
}
func isV0Version(version string) bool {
parsedVersion, err := utilversion.ParseGeneric(version)
if err != nil {
return false
}
return parsedVersion.Major() == 0
}
func isV1Version(version string) bool {
parsedVersion, err := utilversion.ParseGeneric(version)
if err != nil {
return false
}
return parsedVersion.Major() == 1
}
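And a sketch of the whitelist rule these helpers feed: the deprecated socket dir stays usable only while the driver still advertises some CSI 0.x version. The function name is illustrative:

package main

import (
	"fmt"

	utilversion "k8s.io/apimachinery/pkg/util/version"
)

// allowDeprecatedDir restates isDeprecatedSocketDirAllowed: any
// advertised CSI 0.x version keeps the old socket dir usable.
func allowDeprecatedDir(versions []string) bool {
	for _, raw := range versions {
		v, err := utilversion.ParseGeneric(raw)
		if err != nil {
			continue // unparsable versions are ignored, as in isV0Version
		}
		if v.Major() == 0 {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(allowDeprecatedDir([]string{"v0.3.0", "1.0.0"})) // true
	fmt.Println(allowDeprecatedDir([]string{"v1.0.0"}))          // false
}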

View File

@ -27,30 +27,36 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
fakeclient "k8s.io/client-go/kubernetes/fake"
utiltesting "k8s.io/client-go/util/testing"
fakecsi "k8s.io/csi-api/pkg/client/clientset/versioned/fake"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
// create a plugin mgr to load plugins and setup a fake client
func newTestPlugin(t *testing.T, client *fakeclient.Clientset, csiClient *fakecsi.Clientset) (*csiPlugin, string) {
tmpDir, err := utiltesting.MkTmpdir("csi-test")
if err != nil {
t.Fatalf("can't create temp dir: %v", err)
}
if client == nil {
client = fakeclient.NewSimpleClientset()
}
if csiClient == nil {
csiClient = fakecsi.NewSimpleClientset()
}
host := volumetest.NewFakeVolumeHostWithCSINodeName(
tmpDir,
client,
csiClient,
nil,
"fakeNode",
)
plugMgr := &volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host)
@ -65,6 +71,13 @@ func newTestPlugin(t *testing.T) (*csiPlugin, string) {
t.Fatalf("cannot assert plugin to be type csiPlugin")
}
if utilfeature.DefaultFeatureGate.Enabled(features.CSIDriverRegistry) {
// Wait until the informer in CSI volume plugin has all CSIDrivers.
wait.PollImmediate(testInformerSyncPeriod, testInformerSyncTimeout, func() (bool, error) {
return csiPlug.csiDriverInformer.Informer().HasSynced(), nil
})
}
return csiPlug, tmpDir
}
@ -91,8 +104,20 @@ func makeTestPV(name string, sizeGig int, driverName, volID string) *api.Persist
}
}
func registerFakePlugin(pluginName, endpoint string, versions []string, t *testing.T) {
csiDrivers = csiDriversStore{driversMap: map[string]csiDriver{}}
highestSupportedVersions, err := highestSupportedVersion(versions)
if err != nil {
t.Fatalf("unexpected error parsing versions (%v) for pluginName % q endpoint %q: %#v", versions, pluginName, endpoint, err)
}
csiDrivers.driversMap[pluginName] = csiDriver{driverName: pluginName, driverEndpoint: endpoint, highestSupportedVersion: highestSupportedVersions}
}
func TestPluginGetPluginName(t *testing.T) {
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
if plug.GetPluginName() != "kubernetes.io/csi" {
t.Errorf("unexpected plugin name %v", plug.GetPluginName())
@ -100,7 +125,9 @@ func TestPluginGetPluginName(t *testing.T) {
}
func TestPluginGetVolumeName(t *testing.T) {
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
testCases := []struct {
name string
@ -116,6 +143,7 @@ func TestPluginGetVolumeName(t *testing.T) {
for _, tc := range testCases {
t.Logf("testing: %s", tc.name)
registerFakePlugin(tc.driverName, "endpoint", []string{"0.3.0"}, t)
pv := makeTestPV("test-pv", 10, tc.driverName, tc.volName)
spec := volume.NewSpecFromPersistentVolume(pv, false)
name, err := plug.GetVolumeName(spec)
@ -129,9 +157,12 @@ func TestPluginGetVolumeName(t *testing.T) {
}
func TestPluginCanSupport(t *testing.T) {
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t)
pv := makeTestPV("test-pv", 10, testDriver, testVol)
spec := volume.NewSpecFromPersistentVolume(pv, false)
@ -141,7 +172,9 @@ func TestPluginCanSupport(t *testing.T) {
}
func TestPluginConstructVolumeSpec(t *testing.T) {
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
testCases := []struct {
@ -186,6 +219,14 @@ func TestPluginConstructVolumeSpec(t *testing.T) {
t.Errorf("expected volID %s, got volID %s", tc.data[volDataKey.volHandle], volHandle)
}
if spec.PersistentVolume.Spec.VolumeMode == nil {
t.Fatalf("Volume mode has not been set.")
}
if *spec.PersistentVolume.Spec.VolumeMode != api.PersistentVolumeFilesystem {
t.Errorf("Unexpected volume mode %q", *spec.PersistentVolume.Spec.VolumeMode)
}
if spec.Name() != tc.specVolID {
t.Errorf("Unexpected spec name %s", spec.Name())
}
@ -193,9 +234,12 @@ func TestPluginConstructVolumeSpec(t *testing.T) {
}
func TestPluginNewMounter(t *testing.T) {
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
registerFakePlugin(testDriver, "endpoint", []string{"1.2.0"}, t)
pv := makeTestPV("test-pv", 10, testDriver, testVol)
mounter, err := plug.NewMounter(
volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly),
@ -212,7 +256,7 @@ func TestPluginNewMounter(t *testing.T) {
csiMounter := mounter.(*csiMountMgr)
// validate mounter fields
if string(csiMounter.driverName) != testDriver {
t.Error("mounter driver name not set")
}
if csiMounter.volumeID != testVol {
@ -241,9 +285,12 @@ func TestPluginNewMounter(t *testing.T) {
}
func TestPluginNewUnmounter(t *testing.T) {
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t)
pv := makeTestPV("test-pv", 10, testDriver, testVol)
// save the data file to re-create client
@ -286,7 +333,9 @@ func TestPluginNewUnmounter(t *testing.T) {
}
func TestPluginNewAttacher(t *testing.T) {
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
attacher, err := plug.NewAttacher()
@ -304,7 +353,9 @@ func TestPluginNewAttacher(t *testing.T) {
}
func TestPluginNewDetacher(t *testing.T) {
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
detacher, err := plug.NewDetacher()
@ -322,9 +373,12 @@ func TestPluginNewDetacher(t *testing.T) {
}
func TestPluginNewBlockMapper(t *testing.T) {
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t)
pv := makeTestPV("test-block-pv", 10, testDriver, testVol)
mounter, err := plug.NewBlockVolumeMapper(
volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly),
@ -341,7 +395,7 @@ func TestPluginNewBlockMapper(t *testing.T) {
csiMapper := mounter.(*csiBlockMapper)
// validate mounter fields
if string(csiMapper.driverName) != testDriver {
t.Error("CSI block mapper missing driver name")
}
if csiMapper.volumeID != testVol {
@ -367,9 +421,12 @@ func TestPluginNewBlockMapper(t *testing.T) {
}
func TestPluginNewUnmapper(t *testing.T) {
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t)
pv := makeTestPV("test-pv", 10, testDriver, testVol)
// save the data file to re-create client
@ -415,7 +472,7 @@ func TestPluginNewUnmapper(t *testing.T) {
}
// test loaded vol data
if string(csiUnmapper.driverName) != testDriver {
t.Error("unmapper driverName not set")
}
if csiUnmapper.volumeID != testVol {
@ -424,7 +481,9 @@ func TestPluginNewUnmapper(t *testing.T) {
}
func TestPluginConstructBlockVolumeSpec(t *testing.T) {
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
testCases := []struct {
@ -463,6 +522,14 @@ func TestPluginConstructBlockVolumeSpec(t *testing.T) {
continue
}
if spec.PersistentVolume.Spec.VolumeMode == nil {
t.Fatalf("Volume mode has not been set.")
}
if *spec.PersistentVolume.Spec.VolumeMode != api.PersistentVolumeBlock {
t.Errorf("Unexpected volume mode %q", *spec.PersistentVolume.Spec.VolumeMode)
}
volHandle := spec.PersistentVolume.Spec.CSI.VolumeHandle
if volHandle != tc.data[volDataKey.volHandle] {
t.Errorf("expected volID %s, got volID %s", tc.data[volDataKey.volHandle], volHandle)
@ -473,3 +540,433 @@ func TestPluginConstructBlockVolumeSpec(t *testing.T) {
}
}
}
func TestValidatePlugin(t *testing.T) {
testCases := []struct {
pluginName string
endpoint string
versions []string
foundInDeprecatedDir bool
shouldFail bool
}{
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins_registry/myplugin/csi.sock",
versions: []string{"v1.0.0"},
foundInDeprecatedDir: false,
shouldFail: false,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins_registry/myplugin/csi.sock",
versions: []string{"0.3.0"},
foundInDeprecatedDir: false,
shouldFail: false,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins_registry/myplugin/csi.sock",
versions: []string{"0.2.0"},
foundInDeprecatedDir: false,
shouldFail: false,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins/myplugin/csi.sock",
versions: []string{"v1.0.0"},
foundInDeprecatedDir: true,
shouldFail: true,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins/myplugin/csi.sock",
versions: []string{"v0.3.0"},
foundInDeprecatedDir: true,
shouldFail: false,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins/myplugin/csi.sock",
versions: []string{"0.2.0"},
foundInDeprecatedDir: true,
shouldFail: false,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins_registry/myplugin/csi.sock",
versions: []string{"0.2.0", "v0.3.0"},
foundInDeprecatedDir: false,
shouldFail: false,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins/myplugin/csi.sock",
versions: []string{"0.2.0", "v0.3.0"},
foundInDeprecatedDir: true,
shouldFail: false,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins_registry/myplugin/csi.sock",
versions: []string{"0.2.0", "v1.0.0"},
foundInDeprecatedDir: false,
shouldFail: false,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins/myplugin/csi.sock",
versions: []string{"0.2.0", "v1.0.0"},
foundInDeprecatedDir: true,
shouldFail: false,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins_registry/myplugin/csi.sock",
versions: []string{"0.2.0", "v1.2.3"},
foundInDeprecatedDir: false,
shouldFail: false,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins/myplugin/csi.sock",
versions: []string{"0.2.0", "v1.2.3"},
foundInDeprecatedDir: true,
shouldFail: false,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins_registry/myplugin/csi.sock",
versions: []string{"v1.2.3", "v0.3.0"},
foundInDeprecatedDir: false,
shouldFail: false,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins/myplugin/csi.sock",
versions: []string{"v1.2.3", "v0.3.0"},
foundInDeprecatedDir: true,
shouldFail: false,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins_registry/myplugin/csi.sock",
versions: []string{"v1.2.3", "v0.3.0", "2.0.1"},
foundInDeprecatedDir: false,
shouldFail: false,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins/myplugin/csi.sock",
versions: []string{"v1.2.3", "v0.3.0", "2.0.1"},
foundInDeprecatedDir: true,
shouldFail: false,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins/myplugin/csi.sock",
versions: []string{"v0.3.0", "2.0.1"},
foundInDeprecatedDir: true,
shouldFail: false,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins_registry/myplugin/csi.sock",
versions: []string{"v1.2.3", "4.9.12", "v0.3.0", "2.0.1"},
foundInDeprecatedDir: false,
shouldFail: false,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins/myplugin/csi.sock",
versions: []string{"v1.2.3", "4.9.12", "v0.3.0", "2.0.1"},
foundInDeprecatedDir: true,
shouldFail: false,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins_registry/myplugin/csi.sock",
versions: []string{"v1.2.3", "boo", "v0.3.0", "2.0.1"},
foundInDeprecatedDir: false,
shouldFail: false,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins/myplugin/csi.sock",
versions: []string{"v1.2.3", "boo", "v0.3.0", "2.0.1"},
foundInDeprecatedDir: true,
shouldFail: false,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins_registry/myplugin/csi.sock",
versions: []string{"4.9.12", "2.0.1"},
foundInDeprecatedDir: false,
shouldFail: true,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins/myplugin/csi.sock",
versions: []string{"4.9.12", "2.0.1"},
foundInDeprecatedDir: true,
shouldFail: true,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins_registry/myplugin/csi.sock",
versions: []string{},
foundInDeprecatedDir: false,
shouldFail: true,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins/myplugin/csi.sock",
versions: []string{},
foundInDeprecatedDir: true,
shouldFail: true,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins_registry/myplugin/csi.sock",
versions: []string{"var", "boo", "foo"},
foundInDeprecatedDir: false,
shouldFail: true,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins/myplugin/csi.sock",
versions: []string{"var", "boo", "foo"},
foundInDeprecatedDir: true,
shouldFail: true,
},
}
for _, tc := range testCases {
// Arrange & Act
err := PluginHandler.ValidatePlugin(tc.pluginName, tc.endpoint, tc.versions, tc.foundInDeprecatedDir)
// Assert
if tc.shouldFail && err == nil {
t.Fatalf("expecting ValidatePlugin to fail, but got nil error for testcase: %#v", tc)
}
if !tc.shouldFail && err != nil {
t.Fatalf("unexpected error during ValidatePlugin for testcase: %#v\r\n err:%v", tc, err)
}
}
}
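Taken together, these cases encode the registration-path rule: a plugin discovered in the deprecated plugins directory is tolerated only while it still advertises at least one 0.x CSI version; plugins that speak only v1 (or nothing parseable) must register through the new plugins_registry socket path. A minimal sketch of that single rule, using a hypothetical helper name (the vendored ValidatePlugin additionally rejects version lists with no supported entry at all):

package main

import (
	"fmt"
	"strings"
)

// deprecatedDirOK is a hypothetical helper sketching only the
// deprecated-directory rule exercised above: a plugin found in the old
// plugins directory is accepted only if it still advertises some 0.x
// version; v1-only plugins must use the new registration path.
func deprecatedDirOK(versions []string, foundInDeprecatedDir bool) bool {
	if !foundInDeprecatedDir {
		return true
	}
	for _, v := range versions {
		if strings.HasPrefix(strings.TrimPrefix(v, "v"), "0.") {
			return true // still speaks v0.x: allowed during migration
		}
	}
	return false
}

func main() {
	fmt.Println(deprecatedDirOK([]string{"v1.0.0"}, true))          // false
	fmt.Println(deprecatedDirOK([]string{"0.2.0", "v1.0.0"}, true)) // true
}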
func TestValidatePluginExistingDriver(t *testing.T) {
testCases := []struct {
pluginName1 string
endpoint1 string
versions1 []string
pluginName2 string
endpoint2 string
versions2 []string
foundInDeprecatedDir2 bool
shouldFail bool
}{
{
pluginName1: "test.plugin",
endpoint1: "/var/log/kubelet/plugins_registry/myplugin/csi.sock",
versions1: []string{"v1.0.0"},
pluginName2: "test.plugin2",
endpoint2: "/var/log/kubelet/plugins_registry/myplugin/csi.sock",
versions2: []string{"v1.0.0"},
foundInDeprecatedDir2: false,
shouldFail: false,
},
{
pluginName1: "test.plugin",
endpoint1: "/var/log/kubelet/plugins_registry/myplugin/csi.sock",
versions1: []string{"v1.0.0"},
pluginName2: "test.plugin2",
endpoint2: "/var/log/kubelet/plugins/myplugin/csi.sock",
versions2: []string{"v1.0.0"},
foundInDeprecatedDir2: true,
shouldFail: true,
},
{
pluginName1: "test.plugin",
endpoint1: "/var/log/kubelet/plugins/myplugin/csi.sock",
versions1: []string{"v1.0.0"},
pluginName2: "test.plugin",
endpoint2: "/var/log/kubelet/plugins_registry/myplugin/csi.sock",
versions2: []string{"v1.0.0"},
foundInDeprecatedDir2: false,
shouldFail: true,
},
{
pluginName1: "test.plugin",
endpoint1: "/var/log/kubelet/plugins_registry/myplugin/csi.sock",
versions1: []string{"v1.0.0"},
pluginName2: "test.plugin",
endpoint2: "/var/log/kubelet/plugins_registry/myplugin/csi.sock",
versions2: []string{"v1.0.0"},
foundInDeprecatedDir2: false,
shouldFail: true,
},
{
pluginName1: "test.plugin",
endpoint1: "/var/log/kubelet/plugins_registry/myplugin/csi.sock",
versions1: []string{"v1.0.0"},
pluginName2: "test.plugin",
endpoint2: "/var/log/kubelet/plugins/myplugin/csi.sock",
versions2: []string{"v1.0.0"},
foundInDeprecatedDir2: true,
shouldFail: true,
},
{
pluginName1: "test.plugin",
endpoint1: "/var/log/kubelet/plugins/myplugin/csi.sock",
versions1: []string{"v0.3.0", "0.2.0"},
pluginName2: "test.plugin",
endpoint2: "/var/log/kubelet/plugins_registry/myplugin/csi.sock",
versions2: []string{"1.0.0"},
foundInDeprecatedDir2: false,
shouldFail: false,
},
{
pluginName1: "test.plugin",
endpoint1: "/var/log/kubelet/plugins/myplugin/csi.sock",
versions1: []string{"v0.3.0", "0.2.0"},
pluginName2: "test.plugin",
endpoint2: "/var/log/kubelet/plugins/myplugin/csi.sock",
versions2: []string{"1.0.0"},
foundInDeprecatedDir2: true,
shouldFail: true,
},
}
for _, tc := range testCases {
// Arrange & Act
csiDrivers = csiDriversStore{driversMap: map[string]csiDriver{}}
highestSupportedVersions1, err := highestSupportedVersion(tc.versions1)
if err != nil {
t.Fatalf("unexpected error parsing version for testcase: %#v", tc)
}
csiDrivers.driversMap[tc.pluginName1] = csiDriver{driverName: tc.pluginName1, driverEndpoint: tc.endpoint1, highestSupportedVersion: highestSupportedVersions1}
// Arrange & Act
err = PluginHandler.ValidatePlugin(tc.pluginName2, tc.endpoint2, tc.versions2, tc.foundInDeprecatedDir2)
// Assert
if tc.shouldFail && err == nil {
t.Fatalf("expecting ValidatePlugin to fail, but got nil error for testcase: %#v", tc)
}
if !tc.shouldFail && err != nil {
t.Fatalf("unexpected error during ValidatePlugin for testcase: %#v\r\n err:%v", tc, err)
}
}
}
func TestHighestSupportedVersion(t *testing.T) {
testCases := []struct {
versions []string
expectedHighestSupportedVersion string
shouldFail bool
}{
{
versions: []string{"v1.0.0"},
expectedHighestSupportedVersion: "1.0.0",
shouldFail: false,
},
{
versions: []string{"0.3.0"},
expectedHighestSupportedVersion: "0.3.0",
shouldFail: false,
},
{
versions: []string{"0.2.0"},
expectedHighestSupportedVersion: "0.2.0",
shouldFail: false,
},
{
versions: []string{"1.0.0"},
expectedHighestSupportedVersion: "1.0.0",
shouldFail: false,
},
{
versions: []string{"v0.3.0"},
expectedHighestSupportedVersion: "0.3.0",
shouldFail: false,
},
{
versions: []string{"0.2.0"},
expectedHighestSupportedVersion: "0.2.0",
shouldFail: false,
},
{
versions: []string{"0.2.0", "v0.3.0"},
expectedHighestSupportedVersion: "0.3.0",
shouldFail: false,
},
{
versions: []string{"0.2.0", "v1.0.0"},
expectedHighestSupportedVersion: "1.0.0",
shouldFail: false,
},
{
versions: []string{"0.2.0", "v1.2.3"},
expectedHighestSupportedVersion: "1.2.3",
shouldFail: false,
},
{
versions: []string{"v1.2.3", "v0.3.0"},
expectedHighestSupportedVersion: "1.2.3",
shouldFail: false,
},
{
versions: []string{"v1.2.3", "v0.3.0", "2.0.1"},
expectedHighestSupportedVersion: "1.2.3",
shouldFail: false,
},
{
versions: []string{"v1.2.3", "4.9.12", "v0.3.0", "2.0.1"},
expectedHighestSupportedVersion: "1.2.3",
shouldFail: false,
},
{
versions: []string{"4.9.12", "2.0.1"},
expectedHighestSupportedVersion: "",
shouldFail: true,
},
{
versions: []string{"v1.2.3", "boo", "v0.3.0", "2.0.1"},
expectedHighestSupportedVersion: "1.2.3",
shouldFail: false,
},
{
versions: []string{},
expectedHighestSupportedVersion: "",
shouldFail: true,
},
{
versions: []string{"var", "boo", "foo"},
expectedHighestSupportedVersion: "",
shouldFail: true,
},
}
for _, tc := range testCases {
// Arrange & Act
actual, err := highestSupportedVersion(tc.versions)
// Assert
if tc.shouldFail && err == nil {
t.Fatalf("expecting highestSupportedVersion to fail, but got nil error for testcase: %#v", tc)
}
if !tc.shouldFail && err != nil {
t.Fatalf("unexpected error during ValidatePlugin for testcase: %#v\r\n err:%v", tc, err)
}
if tc.expectedHighestSupportedVersion != "" {
result, err := actual.Compare(tc.expectedHighestSupportedVersion)
if err != nil {
t.Fatalf("comparison failed with %v for testcase %#v", err, tc)
}
if result != 0 {
t.Fatalf("expectedHighestSupportedVersion %v, but got %v for tc: %#v", tc.expectedHighestSupportedVersion, actual, tc)
}
}
}
}
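Read together, the cases pin down the selection rule: tolerate an optional leading "v", skip entries that do not parse as a major.minor.patch triple, and among the rest return the highest version whose major component is 0 or 1; if nothing qualifies, fail. A standalone approximation with hand-rolled parsing (the vendored helper uses Kubernetes' version utilities, so treat this as a sketch, not the implementation):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// pickHighestSupported approximates the behavior pinned down above: skip
// unparseable entries, ignore versions with a major component above 1, and
// return the highest remaining version.
func pickHighestSupported(versions []string) (string, error) {
	best := ""
	var bestNums [3]int
	for _, raw := range versions {
		parts := strings.SplitN(strings.TrimPrefix(raw, "v"), ".", 3)
		if len(parts) != 3 {
			continue // not a major.minor.patch triple
		}
		var nums [3]int
		ok := true
		for i, p := range parts {
			n, err := strconv.Atoi(p)
			if err != nil || n < 0 {
				ok = false
				break
			}
			nums[i] = n
		}
		if !ok || nums[0] > 1 {
			continue // unparseable, or a major version the kubelet does not support
		}
		if best == "" || nums[0] > bestNums[0] ||
			(nums[0] == bestNums[0] && nums[1] > bestNums[1]) ||
			(nums[0] == bestNums[0] && nums[1] == bestNums[1] && nums[2] > bestNums[2]) {
			best, bestNums = strings.TrimPrefix(raw, "v"), nums
		}
	}
	if best == "" {
		return "", fmt.Errorf("none of the reported versions %v are supported", versions)
	}
	return best, nil
}

func main() {
	fmt.Println(pickHighestSupported([]string{"v1.2.3", "4.9.12", "v0.3.0", "2.0.1"})) // 1.2.3 <nil>
	fmt.Println(pickHighestSupported([]string{"4.9.12", "2.0.1"}))                     // error
}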

View File

@ -22,19 +22,25 @@ import (
"os"
"path"
"github.com/golang/glog"
api "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/klog"
kstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
"time"
)
const (
testInformerSyncPeriod = 100 * time.Millisecond
testInformerSyncTimeout = 30 * time.Second
)
func getCredentialsFromSecret(k8s kubernetes.Interface, secretRef *api.SecretReference) (map[string]string, error) {
credentials := map[string]string{}
secret, err := k8s.CoreV1().Secrets(secretRef.Namespace).Get(secretRef.Name, meta.GetOptions{})
if err != nil {
glog.Errorf("failed to find the secret %s in the namespace %s with error: %v\n", secretRef.Name, secretRef.Namespace, err)
klog.Errorf("failed to find the secret %s in the namespace %s with error: %v\n", secretRef.Name, secretRef.Namespace, err)
return credentials, err
}
for key, value := range secret.Data {
@ -47,18 +53,18 @@ func getCredentialsFromSecret(k8s kubernetes.Interface, secretRef *api.SecretRef
// saveVolumeData persists parameter data as json file at the provided location
func saveVolumeData(dir string, fileName string, data map[string]string) error {
dataFilePath := path.Join(dir, fileName)
glog.V(4).Info(log("saving volume data file [%s]", dataFilePath))
klog.V(4).Info(log("saving volume data file [%s]", dataFilePath))
file, err := os.Create(dataFilePath)
if err != nil {
glog.Error(log("failed to save volume data file %s: %v", dataFilePath, err))
klog.Error(log("failed to save volume data file %s: %v", dataFilePath, err))
return err
}
defer file.Close()
if err := json.NewEncoder(file).Encode(data); err != nil {
glog.Error(log("failed to save volume data file %s: %v", dataFilePath, err))
klog.Error(log("failed to save volume data file %s: %v", dataFilePath, err))
return err
}
glog.V(4).Info(log("volume data file saved successfully [%s]", dataFilePath))
klog.V(4).Info(log("volume data file saved successfully [%s]", dataFilePath))
return nil
}
@ -66,17 +72,17 @@ func saveVolumeData(dir string, fileName string, data map[string]string) error {
func loadVolumeData(dir string, fileName string) (map[string]string, error) {
// remove /mount at the end
dataFileName := path.Join(dir, fileName)
glog.V(4).Info(log("loading volume data file [%s]", dataFileName))
klog.V(4).Info(log("loading volume data file [%s]", dataFileName))
file, err := os.Open(dataFileName)
if err != nil {
glog.Error(log("failed to open volume data file [%s]: %v", dataFileName, err))
klog.Error(log("failed to open volume data file [%s]: %v", dataFileName, err))
return nil, err
}
defer file.Close()
data := map[string]string{}
if err := json.NewDecoder(file).Decode(&data); err != nil {
glog.Error(log("failed to parse volume data file [%s]: %v", dataFileName, err))
klog.Error(log("failed to parse volume data file [%s]: %v", dataFileName, err))
return nil, err
}
@ -121,3 +127,16 @@ func getVolumeDeviceDataDir(specVolID string, host volume.VolumeHost) string {
sanitizedSpecVolID := kstrings.EscapeQualifiedNameForDisk(specVolID)
return path.Join(host.GetVolumeDevicePluginDir(csiPluginName), sanitizedSpecVolID, "data")
}
// hasReadWriteOnce returns true if modes contains v1.ReadWriteOnce
func hasReadWriteOnce(modes []api.PersistentVolumeAccessMode) bool {
if modes == nil {
return false
}
for _, mode := range modes {
if mode == api.ReadWriteOnce {
return true
}
}
return false
}
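saveVolumeData and loadVolumeData above are a plain JSON round-trip over a per-volume data file; a self-contained sketch of the same pattern (the temp directory and the vol_data.json-style file name here are illustrative):

package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"path"
)

func main() {
	dir, err := ioutil.TempDir("", "voldata")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	// Save: encode the volume attributes as JSON under dir/vol_data.json.
	dataFilePath := path.Join(dir, "vol_data.json")
	file, err := os.Create(dataFilePath)
	if err != nil {
		panic(err)
	}
	in := map[string]string{"driverName": "test-driver", "volumeHandle": "vol-01"}
	if err := json.NewEncoder(file).Encode(in); err != nil {
		panic(err)
	}
	file.Close()

	// Load: decode it back into a fresh map.
	f, err := os.Open(dataFilePath)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	out := map[string]string{}
	if err := json.NewDecoder(f).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out["driverName"]) // test-driver
}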

28
vendor/k8s.io/kubernetes/pkg/volume/csi/csiv0/BUILD generated vendored Normal file
View File

@ -0,0 +1,28 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["csi.pb.go"],
importpath = "k8s.io/kubernetes/pkg/volume/csi/csiv0",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/golang/protobuf/proto:go_default_library",
"//vendor/github.com/golang/protobuf/ptypes/wrappers:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

5007
vendor/k8s.io/kubernetes/pkg/volume/csi/csiv0/csi.pb.go generated vendored Normal file

File diff suppressed because it is too large

View File

@ -2,11 +2,14 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["fake_client.go"],
srcs = [
"fake_client.go",
"fake_closer.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/csi/fake",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/container-storage-interface/spec/lib/go/csi/v0:go_default_library",
"//vendor/github.com/container-storage-interface/spec/lib/go/csi:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
],
)

View File

@ -23,7 +23,7 @@ import (
"google.golang.org/grpc"
csipb "github.com/container-storage-interface/spec/lib/go/csi/v0"
csipb "github.com/container-storage-interface/spec/lib/go/csi"
)
// IdentityClient is a CSI identity client used for testing
@ -56,19 +56,26 @@ func (f *IdentityClient) Probe(ctx context.Context, in *csipb.ProbeRequest, opts
return nil, nil
}
type CSIVolume struct {
VolumeContext map[string]string
Path string
MountFlags []string
}
// NodeClient returns CSI node client
type NodeClient struct {
nodePublishedVolumes map[string]string
nodeStagedVolumes map[string]string
nodePublishedVolumes map[string]CSIVolume
nodeStagedVolumes map[string]CSIVolume
stageUnstageSet bool
nodeGetInfoResp *csipb.NodeGetInfoResponse
nextErr error
}
// NewNodeClient returns fake node client
func NewNodeClient(stageUnstageSet bool) *NodeClient {
return &NodeClient{
nodePublishedVolumes: make(map[string]string),
nodeStagedVolumes: make(map[string]string),
nodePublishedVolumes: make(map[string]CSIVolume),
nodeStagedVolumes: make(map[string]CSIVolume),
stageUnstageSet: stageUnstageSet,
}
}
@ -78,18 +85,25 @@ func (f *NodeClient) SetNextError(err error) {
f.nextErr = err
}
func (f *NodeClient) SetNodeGetInfoResp(resp *csipb.NodeGetInfoResponse) {
f.nodeGetInfoResp = resp
}
// GetNodePublishedVolumes returns node published volumes
func (f *NodeClient) GetNodePublishedVolumes() map[string]string {
func (f *NodeClient) GetNodePublishedVolumes() map[string]CSIVolume {
return f.nodePublishedVolumes
}
// GetNodeStagedVolumes returns node staged volumes
func (f *NodeClient) GetNodeStagedVolumes() map[string]string {
func (f *NodeClient) GetNodeStagedVolumes() map[string]CSIVolume {
return f.nodeStagedVolumes
}
func (f *NodeClient) AddNodeStagedVolume(volID, deviceMountPath string) {
f.nodeStagedVolumes[volID] = deviceMountPath
func (f *NodeClient) AddNodeStagedVolume(volID, deviceMountPath string, volumeContext map[string]string) {
f.nodeStagedVolumes[volID] = CSIVolume{
Path: deviceMountPath,
VolumeContext: volumeContext,
}
}
// NodePublishVolume implements CSI NodePublishVolume
@ -110,7 +124,11 @@ func (f *NodeClient) NodePublishVolume(ctx context.Context, req *csipb.NodePubli
if !strings.Contains(fsTypes, fsType) {
return nil, errors.New("invalid fstype")
}
f.nodePublishedVolumes[req.GetVolumeId()] = req.GetTargetPath()
f.nodePublishedVolumes[req.GetVolumeId()] = CSIVolume{
Path: req.GetTargetPath(),
VolumeContext: req.GetVolumeContext(),
MountFlags: req.GetVolumeCapability().GetMount().MountFlags,
}
return &csipb.NodePublishVolumeResponse{}, nil
}
@ -153,7 +171,10 @@ func (f *NodeClient) NodeStageVolume(ctx context.Context, req *csipb.NodeStageVo
return nil, errors.New("invalid fstype")
}
f.nodeStagedVolumes[req.GetVolumeId()] = req.GetStagingTargetPath()
f.nodeStagedVolumes[req.GetVolumeId()] = CSIVolume{
Path: req.GetStagingTargetPath(),
VolumeContext: req.GetVolumeContext(),
}
return &csipb.NodeStageVolumeResponse{}, nil
}
@ -174,9 +195,12 @@ func (f *NodeClient) NodeUnstageVolume(ctx context.Context, req *csipb.NodeUnsta
return &csipb.NodeUnstageVolumeResponse{}, nil
}
// NodeGetId implements method
func (f *NodeClient) NodeGetId(ctx context.Context, in *csipb.NodeGetIdRequest, opts ...grpc.CallOption) (*csipb.NodeGetIdResponse, error) {
return nil, nil
// NodeGetInfo implements csi method
func (f *NodeClient) NodeGetInfo(ctx context.Context, in *csipb.NodeGetInfoRequest, opts ...grpc.CallOption) (*csipb.NodeGetInfoResponse, error) {
if f.nextErr != nil {
return nil, f.nextErr
}
return f.nodeGetInfoResp, nil
}
// NodeGetCapabilities implements csi method
@ -198,6 +222,11 @@ func (f *NodeClient) NodeGetCapabilities(ctx context.Context, in *csipb.NodeGetC
return nil, nil
}
// NodeGetVolumeStats implements csi method
func (f *NodeClient) NodeGetVolumeStats(ctx context.Context, in *csipb.NodeGetVolumeStatsRequest, opts ...grpc.CallOption) (*csipb.NodeGetVolumeStatsResponse, error) {
return nil, nil
}
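With the map values widened from a bare path string to a CSIVolume record, tests can now assert on the volume context (and, for published volumes, mount flags) as well as the path. A sketch of how a test might use the fake (volume ID, path, and context values are illustrative):

package csi_test

import (
	"testing"

	"k8s.io/kubernetes/pkg/volume/csi/fake"
)

func TestFakeStagedVolumeSketch(t *testing.T) {
	client := fake.NewNodeClient(true /* stageUnstageSet */)
	client.AddNodeStagedVolume("vol-01", "/fake/staging/path", map[string]string{
		"attr": "value",
	})

	vol, ok := client.GetNodeStagedVolumes()["vol-01"]
	if !ok {
		t.Fatal("expected vol-01 to be staged")
	}
	if vol.Path != "/fake/staging/path" || vol.VolumeContext["attr"] != "value" {
		t.Errorf("unexpected staged volume record: %#v", vol)
	}
}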
// ControllerClient represents a CSI Controller client
type ControllerClient struct {
nextCapabilities []*csipb.ControllerServiceCapability

View File

@ -0,0 +1,47 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fake
import (
"testing"
)
func NewCloser(t *testing.T) *Closer {
return &Closer{
t: t,
}
}
type Closer struct {
wasCalled bool
t *testing.T
}
func (c *Closer) Close() error {
c.wasCalled = true
return nil
}
func (c *Closer) Check() *Closer {
c.t.Helper()
if !c.wasCalled {
c.t.Error("expected closer to have been called")
}
return c
}
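Typical use, inferred from the API: hand the Closer to the code under test as its io.Closer, then Check (usually via defer) that Close was actually called.

package csi_test

import (
	"io"
	"testing"

	"k8s.io/kubernetes/pkg/volume/csi/fake"
)

// Sketch only: the inline function stands in for whatever code under test
// is expected to close its connection.
func TestCloserSketch(t *testing.T) {
	closer := fake.NewCloser(t)
	defer closer.Check() // fails the test if Close was never called

	func(c io.Closer) {
		_ = c.Close()
	}(closer)
}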

View File

@ -1,30 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["labelmanager.go"],
importpath = "k8s.io/kubernetes/pkg/volume/csi/labelmanager",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/util/retry:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -1,251 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package labelmanager includes internal functions used to add/delete labels to
// kubernetes nodes for corresponding CSI drivers
package labelmanager // import "k8s.io/kubernetes/pkg/volume/csi/labelmanager"
import (
"encoding/json"
"fmt"
"github.com/golang/glog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/util/retry"
)
const (
// Name of node annotation that contains JSON map of driver names to node
// names
annotationKey = "csi.volume.kubernetes.io/nodeid"
csiPluginName = "kubernetes.io/csi"
)
// labelManagementStruct is struct of channels used for communication between the driver registration
// code and the go routine responsible for managing the node's labels
type labelManagerStruct struct {
nodeName types.NodeName
k8s kubernetes.Interface
}
// Interface implements an interface for managing labels of a node
type Interface interface {
AddLabels(driverName string) error
}
// NewLabelManager initializes labelManagerStruct and returns available interfaces
func NewLabelManager(nodeName types.NodeName, kubeClient kubernetes.Interface) Interface {
return labelManagerStruct{
nodeName: nodeName,
k8s: kubeClient,
}
}
// nodeLabelManager waits for labeling requests initiated by the driver's registration
// process.
func (lm labelManagerStruct) AddLabels(driverName string) error {
err := verifyAndAddNodeId(string(lm.nodeName), lm.k8s.CoreV1().Nodes(), driverName, string(lm.nodeName))
if err != nil {
return fmt.Errorf("failed to update node %s's annotation with error: %+v", lm.nodeName, err)
}
return nil
}
// Clones the given map and returns a new map with the given key and value added.
// Returns the given map, if annotationKey is empty.
func cloneAndAddAnnotation(
annotations map[string]string,
annotationKey,
annotationValue string) map[string]string {
if annotationKey == "" {
// Don't need to add an annotation.
return annotations
}
// Clone.
newAnnotations := map[string]string{}
for key, value := range annotations {
newAnnotations[key] = value
}
newAnnotations[annotationKey] = annotationValue
return newAnnotations
}
func verifyAndAddNodeId(
k8sNodeName string,
k8sNodesClient corev1.NodeInterface,
csiDriverName string,
csiDriverNodeId string) error {
// Add or update annotation on Node object
retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
// Retrieve the latest version of Node before attempting update, so that
// existing changes are not overwritten. RetryOnConflict uses
// exponential backoff to avoid exhausting the apiserver.
result, getErr := k8sNodesClient.Get(k8sNodeName, metav1.GetOptions{})
if getErr != nil {
glog.Errorf("Failed to get latest version of Node: %v", getErr)
return getErr // do not wrap error
}
var previousAnnotationValue string
if result.ObjectMeta.Annotations != nil {
previousAnnotationValue =
result.ObjectMeta.Annotations[annotationKey]
glog.V(3).Infof(
"previousAnnotationValue=%q", previousAnnotationValue)
}
existingDriverMap := map[string]string{}
if previousAnnotationValue != "" {
// Parse previousAnnotationValue as JSON
if err := json.Unmarshal([]byte(previousAnnotationValue), &existingDriverMap); err != nil {
return fmt.Errorf(
"failed to parse node's %q annotation value (%q) err=%v",
annotationKey,
previousAnnotationValue,
err)
}
}
if val, ok := existingDriverMap[csiDriverName]; ok {
if val == csiDriverNodeId {
// Value already exists in node annotation, nothing more to do
glog.V(1).Infof(
"The key value {%q: %q} alredy eixst in node %q annotation, no need to update: %v",
csiDriverName,
csiDriverNodeId,
annotationKey,
previousAnnotationValue)
return nil
}
}
// Add/update annotation value
existingDriverMap[csiDriverName] = csiDriverNodeId
jsonObj, err := json.Marshal(existingDriverMap)
if err != nil {
return fmt.Errorf(
"failed while trying to add key value {%q: %q} to node %q annotation. Existing value: %v",
csiDriverName,
csiDriverNodeId,
annotationKey,
previousAnnotationValue)
}
result.ObjectMeta.Annotations = cloneAndAddAnnotation(
result.ObjectMeta.Annotations,
annotationKey,
string(jsonObj))
_, updateErr := k8sNodesClient.Update(result)
if updateErr == nil {
fmt.Printf(
"Updated node %q successfully for CSI driver %q and CSI node name %q",
k8sNodeName,
csiDriverName,
csiDriverNodeId)
}
return updateErr // do not wrap error
})
if retryErr != nil {
return fmt.Errorf("node update failed: %v", retryErr)
}
return nil
}
// Fetches Kubernetes node API object corresponding to k8sNodeName.
// If the csiDriverName is present in the node annotation, it is removed.
func verifyAndDeleteNodeId(
k8sNodeName string,
k8sNodesClient corev1.NodeInterface,
csiDriverName string) error {
retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
// Retrieve the latest version of Node before attempting update, so that
// existing changes are not overwritten. RetryOnConflict uses
// exponential backoff to avoid exhausting the apiserver.
result, getErr := k8sNodesClient.Get(k8sNodeName, metav1.GetOptions{})
if getErr != nil {
glog.Errorf("failed to get latest version of Node: %v", getErr)
return getErr // do not wrap error
}
var previousAnnotationValue string
if result.ObjectMeta.Annotations != nil {
previousAnnotationValue =
result.ObjectMeta.Annotations[annotationKey]
glog.V(3).Infof(
"previousAnnotationValue=%q", previousAnnotationValue)
}
existingDriverMap := map[string]string{}
if previousAnnotationValue == "" {
// Annotation is empty, nothing to clean up
glog.V(1).Infof(
"The key %q does not exist in node %q annotation, no need to cleanup.",
csiDriverName,
annotationKey)
return nil
}
// Parse previousAnnotationValue as JSON
if err := json.Unmarshal([]byte(previousAnnotationValue), &existingDriverMap); err != nil {
return fmt.Errorf(
"failed to parse node's %q annotation value (%q) err=%v",
annotationKey,
previousAnnotationValue,
err)
}
if _, ok := existingDriverMap[csiDriverName]; !ok {
// Key does not exist in node annotation, nothing more to do
glog.V(1).Infof(
"The key %q does not exist in node %q annotation, no need to cleanup: %v",
csiDriverName,
annotationKey,
previousAnnotationValue)
return nil
}
// Add/update annotation value
delete(existingDriverMap, csiDriverName)
jsonObj, err := json.Marshal(existingDriverMap)
if err != nil {
return fmt.Errorf(
"failed while trying to remove key %q from node %q annotation. Existing data: %v",
csiDriverName,
annotationKey,
previousAnnotationValue)
}
result.ObjectMeta.Annotations = cloneAndAddAnnotation(
result.ObjectMeta.Annotations,
annotationKey,
string(jsonObj))
_, updateErr := k8sNodesClient.Update(result)
if updateErr == nil {
fmt.Printf(
"Updated node %q annotation to remove CSI driver %q.",
k8sNodeName,
csiDriverName)
}
return updateErr // do not wrap error
})
if retryErr != nil {
return fmt.Errorf("node update failed: %v", retryErr)
}
return nil
}

29
vendor/k8s.io/kubernetes/pkg/volume/csi/main_test.go generated vendored Normal file
View File

@ -0,0 +1,29 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package csi
import (
"testing"
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
_ "k8s.io/kubernetes/pkg/features"
)
func TestMain(m *testing.M) {
utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run)
}

View File

@ -0,0 +1,67 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["nodeinfomanager.go"],
importpath = "k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager",
visibility = ["//visibility:public"],
deps = [
"//pkg/features:go_default_library",
"//pkg/util/node:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/apis/csi/v1alpha1:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
go_test(
name = "go_default_test",
srcs = ["nodeinfomanager_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/apis/core/helper:go_default_library",
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/features:go_default_library",
"//pkg/volume/testing:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/testing:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/apis/csi/v1alpha1:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned/fake:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
],
)

View File

@ -0,0 +1,660 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package nodeinfomanager includes internal functions used to add/delete labels to
// kubernetes nodes for corresponding CSI drivers
package nodeinfomanager // import "k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager"
import (
"encoding/json"
"fmt"
"strings"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
csiv1alpha1 "k8s.io/csi-api/pkg/apis/csi/v1alpha1"
csiclientset "k8s.io/csi-api/pkg/client/clientset/versioned"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/features"
nodeutil "k8s.io/kubernetes/pkg/util/node"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
)
const (
// Name of node annotation that contains JSON map of driver names to node IDs
annotationKeyNodeID = "csi.volume.kubernetes.io/nodeid"
)
var (
nodeKind = v1.SchemeGroupVersion.WithKind("Node")
updateBackoff = wait.Backoff{
Steps: 4,
Duration: 10 * time.Millisecond,
Factor: 5.0,
Jitter: 0.1,
}
)
// nodeInfoManager contains necessary common dependencies to update node info on both
// the Node and CSINodeInfo objects.
type nodeInfoManager struct {
nodeName types.NodeName
volumeHost volume.VolumeHost
}
// If no update is needed, the function must return the same Node object as the input.
type nodeUpdateFunc func(*v1.Node) (newNode *v1.Node, updated bool, err error)
// Interface implements an interface for managing labels of a node
type Interface interface {
CreateCSINodeInfo() (*csiv1alpha1.CSINodeInfo, error)
// Record in the cluster the given node information from the CSI driver with the given name.
// Concurrent calls to InstallCSIDriver() are allowed, but they should not be intertwined with calls
// to other methods in this interface.
InstallCSIDriver(driverName string, driverNodeID string, maxVolumeLimit int64, topology map[string]string) error
// Remove in the cluster node information from the CSI driver with the given name.
// Concurrent calls to UninstallCSIDriver() are allowed, but they should not be intertwined with calls
// to other methods in this interface.
UninstallCSIDriver(driverName string) error
}
// NewNodeInfoManager initializes nodeInfoManager
func NewNodeInfoManager(
nodeName types.NodeName,
volumeHost volume.VolumeHost) Interface {
return &nodeInfoManager{
nodeName: nodeName,
volumeHost: volumeHost,
}
}
// InstallCSIDriver updates the node ID annotation in the Node object and CSIDrivers field in the
// CSINodeInfo object. If the CSINodeInfo object doesn't yet exist, it will be created.
// If multiple calls to InstallCSIDriver() are made in parallel, some calls might receive Node or
// CSINodeInfo update conflicts, which causes the function to retry the corresponding update.
func (nim *nodeInfoManager) InstallCSIDriver(driverName string, driverNodeID string, maxAttachLimit int64, topology map[string]string) error {
if driverNodeID == "" {
return fmt.Errorf("error adding CSI driver node info: driverNodeID must not be empty")
}
nodeUpdateFuncs := []nodeUpdateFunc{
updateNodeIDInNode(driverName, driverNodeID),
}
if utilfeature.DefaultFeatureGate.Enabled(features.CSINodeInfo) {
nodeUpdateFuncs = append(nodeUpdateFuncs, updateTopologyLabels(topology))
}
if utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) {
nodeUpdateFuncs = append(nodeUpdateFuncs, updateMaxAttachLimit(driverName, maxAttachLimit))
}
err := nim.updateNode(nodeUpdateFuncs...)
if err != nil {
return fmt.Errorf("error updating Node object with CSI driver node info: %v", err)
}
if utilfeature.DefaultFeatureGate.Enabled(features.CSINodeInfo) {
err = nim.updateCSINodeInfo(driverName, driverNodeID, topology)
if err != nil {
return fmt.Errorf("error updating CSINodeInfo object with CSI driver node info: %v", err)
}
}
return nil
}
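In use, the kubelet constructs one manager per node and calls InstallCSIDriver when a driver registers. A sketch of the call shape only; the driver name, node ID, limit, and topology below are made-up example values, and the VolumeHost is supplied by the kubelet:

package sketch

import (
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/volume"
	"k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager"
)

// registerDriverSketch shows the call shape only; every literal below is an
// illustrative value, not part of the vendored code.
func registerDriverSketch(host volume.VolumeHost) error {
	nim := nodeinfomanager.NewNodeInfoManager(types.NodeName("node-01"), host)
	return nim.InstallCSIDriver(
		"com.example.csi-driver", // driverName
		"example-node-id",        // driverNodeID; must be non-empty
		16,                       // maxAttachLimit; <= 0 skips the capacity update
		map[string]string{"topology.example.com/zone": "zone-a"},
	)
}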
// UninstallCSIDriver removes the node ID annotation from the Node object and CSIDrivers field from the
// CSINodeInfo object. If the CSINodeInfo object contains no CSIDrivers, it will be deleted.
// If multiple calls to UninstallCSIDriver() are made in parallel, some calls might receive Node or
// CSINodeInfo update conflicts, which causes the function to retry the corresponding update.
func (nim *nodeInfoManager) UninstallCSIDriver(driverName string) error {
if utilfeature.DefaultFeatureGate.Enabled(features.CSINodeInfo) {
err := nim.uninstallDriverFromCSINodeInfo(driverName)
if err != nil {
return fmt.Errorf("error uninstalling CSI driver from CSINodeInfo object %v", err)
}
}
err := nim.updateNode(
removeMaxAttachLimit(driverName),
removeNodeIDFromNode(driverName),
)
if err != nil {
return fmt.Errorf("error removing CSI driver node info from Node object %v", err)
}
return nil
}
func (nim *nodeInfoManager) updateNode(updateFuncs ...nodeUpdateFunc) error {
var updateErrs []error
err := wait.ExponentialBackoff(updateBackoff, func() (bool, error) {
if err := nim.tryUpdateNode(updateFuncs...); err != nil {
updateErrs = append(updateErrs, err)
return false, nil
}
return true, nil
})
if err != nil {
return fmt.Errorf("error updating node: %v; caused by: %v", err, utilerrors.NewAggregate(updateErrs))
}
return nil
}
// tryUpdateNode attempts a single update of the corresponding node object,
// modifying it by applying the given update functions sequentially.
// Because updateFuncs are applied sequentially, later updateFuncs should take into account
// the effects of previous updateFuncs to avoid potential conflicts. For example, if multiple
// functions update the same field, updates in the last function are persisted.
func (nim *nodeInfoManager) tryUpdateNode(updateFuncs ...nodeUpdateFunc) error {
// Retrieve the latest version of Node before attempting update, so that
// existing changes are not overwritten.
kubeClient := nim.volumeHost.GetKubeClient()
if kubeClient == nil {
return fmt.Errorf("error getting kube client")
}
nodeClient := kubeClient.CoreV1().Nodes()
originalNode, err := nodeClient.Get(string(nim.nodeName), metav1.GetOptions{})
if err != nil {
return err
}
node := originalNode.DeepCopy()
needUpdate := false
for _, update := range updateFuncs {
newNode, updated, err := update(node)
if err != nil {
return err
}
node = newNode
needUpdate = needUpdate || updated
}
if needUpdate {
// PatchNodeStatus can update both node's status and labels or annotations
// Updating status by directly updating node does not work
_, _, updateErr := nodeutil.PatchNodeStatus(kubeClient.CoreV1(), types.NodeName(node.Name), originalNode, node)
return updateErr
}
return nil
}
// Guarantees the map is non-nil if no error is returned.
func buildNodeIDMapFromAnnotation(node *v1.Node) (map[string]string, error) {
var previousAnnotationValue string
if node.ObjectMeta.Annotations != nil {
previousAnnotationValue =
node.ObjectMeta.Annotations[annotationKeyNodeID]
}
var existingDriverMap map[string]string
if previousAnnotationValue != "" {
// Parse previousAnnotationValue as JSON
if err := json.Unmarshal([]byte(previousAnnotationValue), &existingDriverMap); err != nil {
return nil, fmt.Errorf(
"failed to parse node's %q annotation value (%q) err=%v",
annotationKeyNodeID,
previousAnnotationValue,
err)
}
}
if existingDriverMap == nil {
return make(map[string]string), nil
}
return existingDriverMap, nil
}
// updateNodeIDInNode returns a function that updates a Node object with the given
// Node ID information.
func updateNodeIDInNode(
csiDriverName string,
csiDriverNodeID string) nodeUpdateFunc {
return func(node *v1.Node) (*v1.Node, bool, error) {
existingDriverMap, err := buildNodeIDMapFromAnnotation(node)
if err != nil {
return nil, false, err
}
if val, ok := existingDriverMap[csiDriverName]; ok {
if val == csiDriverNodeID {
// Value already exists in node annotation, nothing more to do
return node, false, nil
}
}
// Add/update annotation value
existingDriverMap[csiDriverName] = csiDriverNodeID
jsonObj, err := json.Marshal(existingDriverMap)
if err != nil {
return nil, false, fmt.Errorf(
"error while marshalling node ID map updated with driverName=%q, nodeID=%q: %v",
csiDriverName,
csiDriverNodeID,
err)
}
if node.ObjectMeta.Annotations == nil {
node.ObjectMeta.Annotations = make(map[string]string)
}
node.ObjectMeta.Annotations[annotationKeyNodeID] = string(jsonObj)
return node, true, nil
}
}
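The value written under the annotation key is a flat JSON object mapping driver name to node ID; a tiny illustration of the format (driver names and node ID are made up):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Illustrative: what updateNodeIDInNode stores under
	// csi.volume.kubernetes.io/nodeid after two drivers register.
	m := map[string]string{
		"com.example.csi-driver": "example-node-id",
		"io.example.other":       "example-node-id",
	}
	b, _ := json.Marshal(m)
	fmt.Println(string(b)) // {"com.example.csi-driver":"example-node-id","io.example.other":"example-node-id"}
}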
// removeNodeIDFromNode returns a function that removes node ID information matching the given
// driver name from a Node object.
func removeNodeIDFromNode(csiDriverName string) nodeUpdateFunc {
return func(node *v1.Node) (*v1.Node, bool, error) {
var previousAnnotationValue string
if node.ObjectMeta.Annotations != nil {
previousAnnotationValue =
node.ObjectMeta.Annotations[annotationKeyNodeID]
}
if previousAnnotationValue == "" {
return node, false, nil
}
// Parse previousAnnotationValue as JSON
existingDriverMap := map[string]string{}
if err := json.Unmarshal([]byte(previousAnnotationValue), &existingDriverMap); err != nil {
return nil, false, fmt.Errorf(
"failed to parse node's %q annotation value (%q) err=%v",
annotationKeyNodeID,
previousAnnotationValue,
err)
}
if _, ok := existingDriverMap[csiDriverName]; !ok {
// Value is already missing in node annotation, nothing more to do
return node, false, nil
}
// Delete annotation value
delete(existingDriverMap, csiDriverName)
if len(existingDriverMap) == 0 {
delete(node.ObjectMeta.Annotations, annotationKeyNodeID)
} else {
jsonObj, err := json.Marshal(existingDriverMap)
if err != nil {
return nil, false, fmt.Errorf(
"failed while trying to remove key %q from node %q annotation. Existing data: %v",
csiDriverName,
annotationKeyNodeID,
previousAnnotationValue)
}
node.ObjectMeta.Annotations[annotationKeyNodeID] = string(jsonObj)
}
return node, true, nil
}
}
// updateTopologyLabels returns a function that updates labels of a Node object with the given
// topology information.
func updateTopologyLabels(topology map[string]string) nodeUpdateFunc {
return func(node *v1.Node) (*v1.Node, bool, error) {
if len(topology) == 0 {
return node, false, nil
}
for k, v := range topology {
if curVal, exists := node.Labels[k]; exists && curVal != v {
return nil, false, fmt.Errorf("detected topology value collision: driver reported %q:%q but existing label is %q:%q", k, v, k, curVal)
}
}
if node.Labels == nil {
node.Labels = make(map[string]string)
}
for k, v := range topology {
node.Labels[k] = v
}
return node, true, nil
}
}
func (nim *nodeInfoManager) updateCSINodeInfo(
driverName string,
driverNodeID string,
topology map[string]string) error {
csiKubeClient := nim.volumeHost.GetCSIClient()
if csiKubeClient == nil {
return fmt.Errorf("error getting CSI client")
}
var updateErrs []error
err := wait.ExponentialBackoff(updateBackoff, func() (bool, error) {
if err := nim.tryUpdateCSINodeInfo(csiKubeClient, driverName, driverNodeID, topology); err != nil {
updateErrs = append(updateErrs, err)
return false, nil
}
return true, nil
})
if err != nil {
return fmt.Errorf("error updating CSINodeInfo: %v; caused by: %v", err, utilerrors.NewAggregate(updateErrs))
}
return nil
}
func (nim *nodeInfoManager) tryUpdateCSINodeInfo(
csiKubeClient csiclientset.Interface,
driverName string,
driverNodeID string,
topology map[string]string) error {
nodeInfo, err := csiKubeClient.CsiV1alpha1().CSINodeInfos().Get(string(nim.nodeName), metav1.GetOptions{})
if nodeInfo == nil || errors.IsNotFound(err) {
nodeInfo, err = nim.CreateCSINodeInfo()
}
if err != nil {
return err
}
return nim.installDriverToCSINodeInfo(nodeInfo, driverName, driverNodeID, topology)
}
func (nim *nodeInfoManager) CreateCSINodeInfo() (*csiv1alpha1.CSINodeInfo, error) {
kubeClient := nim.volumeHost.GetKubeClient()
if kubeClient == nil {
return nil, fmt.Errorf("error getting kube client")
}
csiKubeClient := nim.volumeHost.GetCSIClient()
if csiKubeClient == nil {
return nil, fmt.Errorf("error getting CSI client")
}
node, err := kubeClient.CoreV1().Nodes().Get(string(nim.nodeName), metav1.GetOptions{})
if err != nil {
return nil, err
}
nodeInfo := &csiv1alpha1.CSINodeInfo{
ObjectMeta: metav1.ObjectMeta{
Name: string(nim.nodeName),
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: nodeKind.Version,
Kind: nodeKind.Kind,
Name: node.Name,
UID: node.UID,
},
},
},
Spec: csiv1alpha1.CSINodeInfoSpec{
Drivers: []csiv1alpha1.CSIDriverInfoSpec{},
},
Status: csiv1alpha1.CSINodeInfoStatus{
Drivers: []csiv1alpha1.CSIDriverInfoStatus{},
},
}
return csiKubeClient.CsiV1alpha1().CSINodeInfos().Create(nodeInfo)
}
func (nim *nodeInfoManager) installDriverToCSINodeInfo(
nodeInfo *csiv1alpha1.CSINodeInfo,
driverName string,
driverNodeID string,
topology map[string]string) error {
csiKubeClient := nim.volumeHost.GetCSIClient()
if csiKubeClient == nil {
return fmt.Errorf("error getting CSI client")
}
topologyKeys := make(sets.String)
for k := range topology {
topologyKeys.Insert(k)
}
specModified := true
statusModified := true
// Clone driver list, omitting the driver that matches the given driverName
newDriverSpecs := []csiv1alpha1.CSIDriverInfoSpec{}
for _, driverInfoSpec := range nodeInfo.Spec.Drivers {
if driverInfoSpec.Name == driverName {
if driverInfoSpec.NodeID == driverNodeID &&
sets.NewString(driverInfoSpec.TopologyKeys...).Equal(topologyKeys) {
specModified = false
}
} else {
// Omit driverInfoSpec matching given driverName
newDriverSpecs = append(newDriverSpecs, driverInfoSpec)
}
}
newDriverStatuses := []csiv1alpha1.CSIDriverInfoStatus{}
for _, driverInfoStatus := range nodeInfo.Status.Drivers {
if driverInfoStatus.Name == driverName {
if driverInfoStatus.Available &&
/* TODO(https://github.com/kubernetes/enhancements/issues/625): Add actual migration status */
driverInfoStatus.VolumePluginMechanism == csiv1alpha1.VolumePluginMechanismInTree {
statusModified = false
}
} else {
// Omit driverInfoSpec matching given driverName
newDriverStatuses = append(newDriverStatuses, driverInfoStatus)
}
}
if !specModified && !statusModified {
return nil
}
// Append new driver
driverSpec := csiv1alpha1.CSIDriverInfoSpec{
Name: driverName,
NodeID: driverNodeID,
TopologyKeys: topologyKeys.List(),
}
driverStatus := csiv1alpha1.CSIDriverInfoStatus{
Name: driverName,
Available: true,
// TODO(https://github.com/kubernetes/enhancements/issues/625): Add actual migration status
VolumePluginMechanism: csiv1alpha1.VolumePluginMechanismInTree,
}
newDriverSpecs = append(newDriverSpecs, driverSpec)
newDriverStatuses = append(newDriverStatuses, driverStatus)
nodeInfo.Spec.Drivers = newDriverSpecs
nodeInfo.Status.Drivers = newDriverStatuses
err := validateCSINodeInfo(nodeInfo)
if err != nil {
return err
}
_, err = csiKubeClient.CsiV1alpha1().CSINodeInfos().Update(nodeInfo)
return err
}
func (nim *nodeInfoManager) uninstallDriverFromCSINodeInfo(
csiDriverName string) error {
csiKubeClient := nim.volumeHost.GetCSIClient()
if csiKubeClient == nil {
return fmt.Errorf("error getting CSI client")
}
var updateErrs []error
err := wait.ExponentialBackoff(updateBackoff, func() (bool, error) {
if err := nim.tryUninstallDriverFromCSINodeInfo(csiKubeClient, csiDriverName); err != nil {
updateErrs = append(updateErrs, err)
return false, nil
}
return true, nil
})
if err != nil {
return fmt.Errorf("error updating CSINodeInfo: %v; caused by: %v", err, utilerrors.NewAggregate(updateErrs))
}
return nil
}
func (nim *nodeInfoManager) tryUninstallDriverFromCSINodeInfo(
csiKubeClient csiclientset.Interface,
csiDriverName string) error {
nodeInfoClient := csiKubeClient.CsiV1alpha1().CSINodeInfos()
nodeInfo, err := nodeInfoClient.Get(string(nim.nodeName), metav1.GetOptions{})
if err != nil {
return err // do not wrap error
}
hasModified := false
newDriverStatuses := []csiv1alpha1.CSIDriverInfoStatus{}
for _, driverStatus := range nodeInfo.Status.Drivers {
if driverStatus.Name == csiDriverName {
// Uninstall the driver if we find it
hasModified = driverStatus.Available
driverStatus.Available = false
}
newDriverStatuses = append(newDriverStatuses, driverStatus)
}
nodeInfo.Status.Drivers = newDriverStatuses
if !hasModified {
// No changes, don't update
return nil
}
err = validateCSINodeInfo(nodeInfo)
if err != nil {
return err
}
_, updateErr := nodeInfoClient.Update(nodeInfo)
return updateErr // do not wrap error
}
func updateMaxAttachLimit(driverName string, maxLimit int64) nodeUpdateFunc {
return func(node *v1.Node) (*v1.Node, bool, error) {
if maxLimit <= 0 {
klog.V(4).Infof("skipping adding attach limit for %s", driverName)
return node, false, nil
}
if node.Status.Capacity == nil {
node.Status.Capacity = v1.ResourceList{}
}
if node.Status.Allocatable == nil {
node.Status.Allocatable = v1.ResourceList{}
}
limitKeyName := util.GetCSIAttachLimitKey(driverName)
node.Status.Capacity[v1.ResourceName(limitKeyName)] = *resource.NewQuantity(maxLimit, resource.DecimalSI)
node.Status.Allocatable[v1.ResourceName(limitKeyName)] = *resource.NewQuantity(maxLimit, resource.DecimalSI)
return node, true, nil
}
}
func removeMaxAttachLimit(driverName string) nodeUpdateFunc {
return func(node *v1.Node) (*v1.Node, bool, error) {
limitKey := v1.ResourceName(util.GetCSIAttachLimitKey(driverName))
capacityExists := false
if node.Status.Capacity != nil {
_, capacityExists = node.Status.Capacity[limitKey]
}
allocatableExists := false
if node.Status.Allocatable != nil {
_, allocatableExists = node.Status.Allocatable[limitKey]
}
if !capacityExists && !allocatableExists {
return node, false, nil
}
delete(node.Status.Capacity, limitKey)
if len(node.Status.Capacity) == 0 {
node.Status.Capacity = nil
}
delete(node.Status.Allocatable, limitKey)
if len(node.Status.Allocatable) == 0 {
node.Status.Allocatable = nil
}
return node, true, nil
}
}
// validateCSINodeInfo ensures members of CSINodeInfo object satisfies map and set semantics.
// Before calling CSINodeInfoInterface.Update(), validateCSINodeInfo() should be invoked to
// make sure the CSINodeInfo is compliant
func validateCSINodeInfo(nodeInfo *csiv1alpha1.CSINodeInfo) error {
if len(nodeInfo.Status.Drivers) < 1 {
return fmt.Errorf("at least one Driver entry is required in driver statuses")
}
if len(nodeInfo.Spec.Drivers) < 1 {
return fmt.Errorf("at least one Driver entry is required in driver specs")
}
if len(nodeInfo.Status.Drivers) != len(nodeInfo.Spec.Drivers) {
return fmt.Errorf("")
}
// check for duplicate entries for the same driver in statuses
var errors []string
driverNamesInStatuses := make(sets.String)
for _, driverInfo := range nodeInfo.Status.Drivers {
if driverNamesInStatuses.Has(driverInfo.Name) {
errors = append(errors, fmt.Sprintf("duplicate entries found for driver: %s in driver statuses", driverInfo.Name))
}
driverNamesInStatuses.Insert(driverInfo.Name)
}
// check for duplicate entries for the same driver in specs
driverNamesInSpecs := make(sets.String)
for _, driverInfo := range nodeInfo.Spec.Drivers {
if driverNamesInSpecs.Has(driverInfo.Name) {
errors = append(errors, fmt.Sprintf("duplicate entries found for driver: %s in driver specs", driverInfo.Name))
}
driverNamesInSpecs.Insert(driverInfo.Name)
topoKeys := make(sets.String)
for _, key := range driverInfo.TopologyKeys {
if topoKeys.Has(key) {
errors = append(errors, fmt.Sprintf("duplicate topology keys %s found for driver %s in driver specs", key, driverInfo.Name))
}
topoKeys.Insert(key)
}
}
// check all entries in specs and status match
if !driverNamesInSpecs.Equal(driverNamesInStatuses) {
errors = append(errors, fmt.Sprintf("list of drivers in specs: %v does not match list of drivers in statuses: %v", driverNamesInSpecs.List(), driverNamesInStatuses.List()))
}
if len(errors) == 0 {
return nil
}
return fmt.Errorf("%s", strings.Join(errors, ", "))
}
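validateCSINodeInfo enforces set semantics and spec/status agreement. For reference, a minimal object that passes it, built with the same types the file imports (driver name, node ID, and topology key are illustrative):

package sketch

import csiv1alpha1 "k8s.io/csi-api/pkg/apis/csi/v1alpha1"

// minimalCSINodeInfo passes validateCSINodeInfo: one driver, listed exactly
// once in both spec and status, with unique topology keys. Dropping the
// status entry or duplicating the driver would make validation fail.
func minimalCSINodeInfo() *csiv1alpha1.CSINodeInfo {
	return &csiv1alpha1.CSINodeInfo{
		Spec: csiv1alpha1.CSINodeInfoSpec{
			Drivers: []csiv1alpha1.CSIDriverInfoSpec{{
				Name:         "com.example.csi-driver",
				NodeID:       "example-node-id",
				TopologyKeys: []string{"topology.example.com/zone"},
			}},
		},
		Status: csiv1alpha1.CSINodeInfoStatus{
			Drivers: []csiv1alpha1.CSIDriverInfoStatus{{
				Name:                  "com.example.csi-driver",
				Available:             true,
				VolumePluginMechanism: csiv1alpha1.VolumePluginMechanismInTree,
			}},
		},
	}
}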

File diff suppressed because it is too large

View File

@ -16,10 +16,10 @@ go_library(
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
@ -30,14 +30,14 @@ go_test(
deps = [
"//pkg/fieldpath:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/empty_dir:go_default_library",
"//pkg/volume/emptydir:go_default_library",
"//pkg/volume/testing:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
],
)

View File

@ -18,10 +18,7 @@ package downwardapi
import (
"fmt"
"path"
"path/filepath"
"sort"
"strings"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
@ -32,7 +29,7 @@ import (
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"github.com/golang/glog"
"k8s.io/klog"
)
// ProbeVolumePlugins is the entry point for plugin detection in a package.
@ -173,22 +170,23 @@ func (b *downwardAPIVolumeMounter) SetUp(fsGroup *int64) error {
}
func (b *downwardAPIVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
glog.V(3).Infof("Setting up a downwardAPI volume %v for pod %v/%v at %v", b.volName, b.pod.Namespace, b.pod.Name, dir)
klog.V(3).Infof("Setting up a downwardAPI volume %v for pod %v/%v at %v", b.volName, b.pod.Namespace, b.pod.Name, dir)
// Wrap EmptyDir. Here we rely on the idempotency of the wrapped plugin to avoid repeatedly mounting
wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec(), b.pod, *b.opts)
if err != nil {
glog.Errorf("Couldn't setup downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error())
klog.Errorf("Couldn't setup downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error())
return err
}
data, err := CollectData(b.source.Items, b.pod, b.plugin.host, b.source.DefaultMode)
if err != nil {
glog.Errorf("Error preparing data for downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error())
klog.Errorf("Error preparing data for downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error())
return err
}
setupSuccess := false
if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
glog.Errorf("Unable to setup downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error())
klog.Errorf("Unable to setup downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error())
return err
}
@ -196,25 +194,41 @@ func (b *downwardAPIVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
return err
}
defer func() {
// Clean up directories if setup fails
if !setupSuccess {
unmounter, unmountCreateErr := b.plugin.NewUnmounter(b.volName, b.podUID)
if unmountCreateErr != nil {
klog.Errorf("error cleaning up mount %s after failure. Create unmounter failed with %v", b.volName, unmountCreateErr)
return
}
tearDownErr := unmounter.TearDown()
if tearDownErr != nil {
klog.Errorf("error tearing down volume %s with : %v", b.volName, tearDownErr)
}
}
}()
writerContext := fmt.Sprintf("pod %v/%v volume %v", b.pod.Namespace, b.pod.Name, b.volName)
writer, err := volumeutil.NewAtomicWriter(dir, writerContext)
if err != nil {
glog.Errorf("Error creating atomic writer: %v", err)
klog.Errorf("Error creating atomic writer: %v", err)
return err
}
err = writer.Write(data)
if err != nil {
glog.Errorf("Error writing payload to dir: %v", err)
klog.Errorf("Error writing payload to dir: %v", err)
return err
}
err = volume.SetVolumeOwnership(b, fsGroup)
if err != nil {
glog.Errorf("Error applying volume ownership settings for group: %v", fsGroup)
klog.Errorf("Error applying volume ownership settings for group: %v", fsGroup)
return err
}
setupSuccess = true
return nil
}
@ -241,10 +255,10 @@ func CollectData(items []v1.DownwardAPIVolumeFile, pod *v1.Pod, host volume.Volu
if fileInfo.FieldRef != nil {
// TODO: unify with Kubelet.podFieldSelectorRuntimeValue
if values, err := fieldpath.ExtractFieldPathAsString(pod, fileInfo.FieldRef.FieldPath); err != nil {
glog.Errorf("Unable to extract field %s: %s", fileInfo.FieldRef.FieldPath, err.Error())
klog.Errorf("Unable to extract field %s: %s", fileInfo.FieldRef.FieldPath, err.Error())
errlist = append(errlist, err)
} else {
fileProjection.Data = []byte(sortLines(values))
fileProjection.Data = []byte(values)
}
} else if fileInfo.ResourceFieldRef != nil {
containerName := fileInfo.ResourceFieldRef.ContainerName
@ -252,10 +266,10 @@ func CollectData(items []v1.DownwardAPIVolumeFile, pod *v1.Pod, host volume.Volu
if err != nil {
errlist = append(errlist, err)
} else if values, err := resource.ExtractResourceValueByContainerNameAndNodeAllocatable(fileInfo.ResourceFieldRef, pod, containerName, nodeAllocatable); err != nil {
glog.Errorf("Unable to extract field %s: %s", fileInfo.ResourceFieldRef.Resource, err.Error())
klog.Errorf("Unable to extract field %s: %s", fileInfo.ResourceFieldRef.Resource, err.Error())
errlist = append(errlist, err)
} else {
fileProjection.Data = []byte(sortLines(values))
fileProjection.Data = []byte(values)
}
}
@ -264,14 +278,6 @@ func CollectData(items []v1.DownwardAPIVolumeFile, pod *v1.Pod, host volume.Volu
return data, utilerrors.NewAggregate(errlist)
}
// sortLines sorts the strings generated from map based data
// (annotations and labels)
func sortLines(values string) string {
splitted := strings.Split(values, "\n")
sort.Strings(splitted)
return strings.Join(splitted, "\n")
}
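Dropping sortLines is a behavior change, not a refactor: the helper re-sorted every physical line of the rendered payload, so an annotation value that itself contains newlines (the new "multiline": "c\nb\na" test case in the test file below exercises this) had its lines scattered across the file. A worked illustration, with the rendered form approximated:

package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	// Approximate fieldpath rendering of the test's annotations;
	// the exact quoting is assumed here for illustration.
	rendered := "a1=\"value1\"\na2=\"value2\"\nmultiline=\"c\nb\na\""

	// Old behavior: sortLines reordered physical lines, splitting
	// the quoted multi-line value apart:
	// a" / a1="value1" / a2="value2" / b / multiline="c
	lines := strings.Split(rendered, "\n")
	sort.Strings(lines)
	fmt.Println(strings.Join(lines, "\n"))

	// New behavior: the payload is written verbatim.
	fmt.Println(rendered)
}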
func (d *downwardAPIVolume) GetPath() string {
return d.plugin.host.GetPodVolumeDir(d.podUID, utilstrings.EscapeQualifiedNameForDisk(downwardAPIPluginName), d.volName)
}
@ -292,10 +298,6 @@ func (c *downwardAPIVolumeUnmounter) TearDownAt(dir string) error {
return volumeutil.UnmountViaEmptyDir(dir, c.plugin.host, c.volName, wrappedVolumeSpec(), c.podUID)
}
func (b *downwardAPIVolumeMounter) getMetaDir() string {
return path.Join(b.plugin.host.GetPodPluginDir(b.podUID, utilstrings.EscapeQualifiedNameForDisk(downwardAPIPluginName)), b.volName)
}
func getVolumeSource(spec *volume.Spec) (*v1.DownwardAPIVolumeSource, bool) {
var readOnly bool
var volumeSource *v1.DownwardAPIVolumeSource

View File

@ -31,7 +31,7 @@ import (
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/fieldpath"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/empty_dir"
"k8s.io/kubernetes/pkg/volume/emptydir"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
@ -47,7 +47,7 @@ func newTestHost(t *testing.T, clientset clientset.Interface) (string, volume.Vo
if err != nil {
t.Fatalf("can't make a temp rootdir: %v", err)
}
return tempDir, volumetest.NewFakeVolumeHost(tempDir, clientset, empty_dir.ProbeVolumePlugins())
return tempDir, volumetest.NewFakeVolumeHost(tempDir, clientset, emptydir.ProbeVolumePlugins())
}
func TestCanSupport(t *testing.T) {
@ -82,8 +82,9 @@ func TestDownwardAPI(t *testing.T) {
"key3": "value3",
}
annotations := map[string]string{
"a1": "value1",
"a2": "value2",
"a1": "value1",
"a2": "value2",
"multiline": "c\nb\na",
}
testCases := []struct {
name string
@ -318,8 +319,8 @@ func doVerifyLinesInFile(t *testing.T, volumePath, filename string, expected str
t.Errorf(err.Error())
return
}
actualStr := sortLines(string(data))
expectedStr := sortLines(expected)
actualStr := string(data)
expectedStr := expected
if actualStr != expectedStr {
t.Errorf("Found `%s`, expected `%s`", actualStr, expectedStr)
}

View File

@ -1,106 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"empty_dir.go",
] + select({
"@io_bazel_rules_go//go/platform:android": [
"empty_dir_unsupported.go",
],
"@io_bazel_rules_go//go/platform:darwin": [
"empty_dir_unsupported.go",
],
"@io_bazel_rules_go//go/platform:dragonfly": [
"empty_dir_unsupported.go",
],
"@io_bazel_rules_go//go/platform:freebsd": [
"empty_dir_unsupported.go",
],
"@io_bazel_rules_go//go/platform:linux": [
"empty_dir_linux.go",
],
"@io_bazel_rules_go//go/platform:nacl": [
"empty_dir_unsupported.go",
],
"@io_bazel_rules_go//go/platform:netbsd": [
"empty_dir_unsupported.go",
],
"@io_bazel_rules_go//go/platform:openbsd": [
"empty_dir_unsupported.go",
],
"@io_bazel_rules_go//go/platform:plan9": [
"empty_dir_unsupported.go",
],
"@io_bazel_rules_go//go/platform:solaris": [
"empty_dir_unsupported.go",
],
"@io_bazel_rules_go//go/platform:windows": [
"empty_dir_unsupported.go",
],
"//conditions:default": [],
}),
importpath = "k8s.io/kubernetes/pkg/volume/empty_dir",
deps = [
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
] + select({
"@io_bazel_rules_go//go/platform:linux": [
"//vendor/golang.org/x/sys/unix:go_default_library",
],
"//conditions:default": [],
}),
)
go_test(
name = "go_default_test",
srcs = select({
"@io_bazel_rules_go//go/platform:linux": [
"empty_dir_test.go",
],
"//conditions:default": [],
}),
embed = [":go_default_library"],
deps = select({
"@io_bazel_rules_go//go/platform:linux": [
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
],
"//conditions:default": [],
}),
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

vendor/k8s.io/kubernetes/pkg/volume/emptydir/BUILD generated vendored Normal file
View File

@ -0,0 +1,68 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"empty_dir.go",
"empty_dir_linux.go",
"empty_dir_unsupported.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/emptydir",
deps = [
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
] + select({
"@io_bazel_rules_go//go/platform:linux": [
"//vendor/golang.org/x/sys/unix:go_default_library",
],
"//conditions:default": [],
}),
)
go_test(
name = "go_default_test",
srcs = ["empty_dir_test.go"],
embed = [":go_default_library"],
deps = select({
"@io_bazel_rules_go//go/platform:linux": [
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
],
"//conditions:default": [],
}),
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@ -14,6 +14,6 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Package empty_dir contains the internal representation of emptyDir
// Package emptydir contains the internal representation of emptyDir
// volumes.
package empty_dir // import "k8s.io/kubernetes/pkg/volume/empty_dir"
package emptydir // import "k8s.io/kubernetes/pkg/volume/emptydir"

View File

@ -14,18 +14,18 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package empty_dir
package emptydir
import (
"fmt"
"os"
"path"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/util/mount"
stringsutil "k8s.io/kubernetes/pkg/util/strings"
@ -40,7 +40,7 @@ import (
// http://issue.k8s.io/2630
const perm os.FileMode = 0777
// This is the primary entrypoint for volume plugins.
// ProbeVolumePlugins is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{
&emptyDirPlugin{nil},
@ -184,7 +184,7 @@ func (ed *emptyDir) GetAttributes() volume.Attributes {
// CanMount checks, prior to mount operations, that the required components (binaries, etc.)
// needed to mount the volume are available on the underlying node.
// If not, it returns an error.
func (b *emptyDir) CanMount() error {
func (ed *emptyDir) CanMount() error {
return nil
}
@ -253,7 +253,7 @@ func (ed *emptyDir) setupTmpfs(dir string) error {
return nil
}
glog.V(3).Infof("pod %v: mounting tmpfs for volume %v", ed.pod.UID, ed.volName)
klog.V(3).Infof("pod %v: mounting tmpfs for volume %v", ed.pod.UID, ed.volName)
return ed.mounter.Mount("tmpfs", dir, "tmpfs", nil /* options */)
}
@ -281,7 +281,7 @@ func (ed *emptyDir) setupHugepages(dir string) error {
return err
}
glog.V(3).Infof("pod %v: mounting hugepages for volume %v", ed.pod.UID, ed.volName)
klog.V(3).Infof("pod %v: mounting hugepages for volume %v", ed.pod.UID, ed.volName)
return ed.mounter.Mount("nodev", dir, "hugetlbfs", []string{pageSizeMountOption})
}
@ -311,7 +311,7 @@ func getPageSizeMountOptionFromPod(pod *v1.Pod) (string, error) {
}
if !pageSizeFound {
return "", fmt.Errorf("hugePages storage requested, but there is no resource request for huge pages.")
return "", fmt.Errorf("hugePages storage requested, but there is no resource request for huge pages")
}
return fmt.Sprintf("%s=%s", hugePagesPageSizeMountOption, pageSize.String()), nil
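getPageSizeMountOptionFromPod boils the pod's hugepages requests down to a single hugetlbfs mount option. A hedged sketch of the resulting string, assuming a pod requesting hugepages-2Mi and assuming the option key is "pagesize":

package volumesketch

import "fmt"

// hugepagesMountOption mirrors the fmt.Sprintf above; the constant
// value "pagesize" is an assumption for this sketch.
func hugepagesMountOption(pageSize string) string {
	const hugePagesPageSizeMountOption = "pagesize"
	return fmt.Sprintf("%s=%s", hugePagesPageSizeMountOption, pageSize)
}

// hugepagesMountOption("2Mi") yields "pagesize=2Mi", which
// setupHugepages passes to
// mounter.Mount("nodev", dir, "hugetlbfs", []string{option}).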
@ -349,7 +349,7 @@ func (ed *emptyDir) setupDir(dir string) error {
}
if fileinfo.Mode().Perm() != perm.Perm() {
glog.Errorf("Expected directory %q permissions to be: %s; got: %s", dir, perm.Perm(), fileinfo.Mode().Perm())
klog.Errorf("Expected directory %q permissions to be: %s; got: %s", dir, perm.Perm(), fileinfo.Mode().Perm())
}
}
@ -370,7 +370,7 @@ func (ed *emptyDir) TearDownAt(dir string) error {
if pathExists, pathErr := volumeutil.PathExists(dir); pathErr != nil {
return fmt.Errorf("Error checking if path exists: %v", pathErr)
} else if !pathExists {
glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir)
klog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir)
return nil
}

View File

@ -16,13 +16,13 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package empty_dir
package emptydir
import (
"fmt"
"github.com/golang/glog"
"golang.org/x/sys/unix"
"k8s.io/klog"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/util/mount"
@ -40,7 +40,7 @@ type realMountDetector struct {
}
func (m *realMountDetector) GetMountMedium(path string) (v1.StorageMedium, bool, error) {
glog.V(5).Infof("Determining mount medium of %v", path)
klog.V(5).Infof("Determining mount medium of %v", path)
notMnt, err := m.mounter.IsLikelyNotMountPoint(path)
if err != nil {
return v1.StorageMediumDefault, false, fmt.Errorf("IsLikelyNotMountPoint(%q): %v", path, err)
@ -50,7 +50,7 @@ func (m *realMountDetector) GetMountMedium(path string) (v1.StorageMedium, bool,
return v1.StorageMediumDefault, false, fmt.Errorf("statfs(%q): %v", path, err)
}
glog.V(5).Infof("Statfs_t of %v: %+v", path, buf)
klog.V(5).Infof("Statfs_t of %v: %+v", path, buf)
if buf.Type == linuxTmpfsMagic {
return v1.StorageMediumMemory, !notMnt, nil
} else if int64(buf.Type) == linuxHugetlbfsMagic {
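GetMountMedium classifies the backing medium by the statfs filesystem magic of the path. A standalone Linux-only sketch of the same check; the magic constants are assumed values, not taken from this file:

package volumesketch

import "golang.org/x/sys/unix"

const (
	linuxTmpfsMagic     = 0x01021994 // assumed TMPFS_MAGIC
	linuxHugetlbfsMagic = 0x958458f6 // assumed HUGETLBFS_MAGIC
)

// mediumOf reports "memory", "hugepages", or "default" for a path,
// mirroring the branch shown above.
func mediumOf(path string) (string, error) {
	var buf unix.Statfs_t
	if err := unix.Statfs(path, &buf); err != nil {
		return "", err
	}
	switch int64(buf.Type) {
	case linuxTmpfsMagic:
		return "memory", nil
	case linuxHugetlbfsMagic:
		return "hugepages", nil
	default:
		return "default", nil
	}
}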

View File

@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package empty_dir
package emptydir
import (
"os"

View File

@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package empty_dir
package emptydir
import (
"k8s.io/api/core/v1"

View File

@ -23,11 +23,11 @@ go_library(
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/volumepathhandler:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
@ -43,11 +43,11 @@ go_test(
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
],
)

View File

@ -23,10 +23,10 @@ import (
"strings"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
@ -40,18 +40,26 @@ type fcAttacher struct {
var _ volume.Attacher = &fcAttacher{}
var _ volume.DeviceMounter = &fcAttacher{}
var _ volume.AttachableVolumePlugin = &fcPlugin{}
var _ volume.DeviceMountableVolumePlugin = &fcPlugin{}
func (plugin *fcPlugin) NewAttacher() (volume.Attacher, error) {
return &fcAttacher{
host: plugin.host,
manager: &FCUtil{},
manager: &fcUtil{},
}, nil
}
func (plugin *fcPlugin) NewDeviceMounter() (volume.DeviceMounter, error) {
return plugin.NewAttacher()
}
func (plugin *fcPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
mounter := plugin.host.GetMounter(plugin.GetPluginName())
return mount.GetMountRefs(mounter, deviceMountPath)
return mounter.GetMountRefs(deviceMountPath)
}
func (attacher *fcAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
@ -70,7 +78,7 @@ func (attacher *fcAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName ty
func (attacher *fcAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) {
mounter, err := volumeSpecToMounter(spec, attacher.host)
if err != nil {
glog.Warningf("failed to get fc mounter: %v", err)
klog.Warningf("failed to get fc mounter: %v", err)
return "", err
}
return attacher.manager.AttachDisk(*mounter)
@ -80,7 +88,7 @@ func (attacher *fcAttacher) GetDeviceMountPath(
spec *volume.Spec) (string, error) {
mounter, err := volumeSpecToMounter(spec, attacher.host)
if err != nil {
glog.Warningf("failed to get fc mounter: %v", err)
klog.Warningf("failed to get fc mounter: %v", err)
return "", err
}
@ -129,13 +137,19 @@ type fcDetacher struct {
var _ volume.Detacher = &fcDetacher{}
var _ volume.DeviceUnmounter = &fcDetacher{}
func (plugin *fcPlugin) NewDetacher() (volume.Detacher, error) {
return &fcDetacher{
mounter: plugin.host.GetMounter(plugin.GetPluginName()),
manager: &FCUtil{},
manager: &fcUtil{},
}, nil
}
func (plugin *fcPlugin) NewDeviceUnmounter() (volume.DeviceUnmounter, error) {
return plugin.NewDetacher()
}
func (detacher *fcDetacher) Detach(volumeName string, nodeName types.NodeName) error {
return nil
}
@ -144,7 +158,7 @@ func (detacher *fcDetacher) UnmountDevice(deviceMountPath string) error {
// Specify device name for DetachDisk later
devName, _, err := mount.GetDeviceNameFromMount(detacher.mounter, deviceMountPath)
if err != nil {
glog.Errorf("fc: failed to get device from mnt: %s\nError: %v", deviceMountPath, err)
klog.Errorf("fc: failed to get device from mnt: %s\nError: %v", deviceMountPath, err)
return err
}
// Unmount for deviceMountPath(=globalPDPath)
@ -157,7 +171,7 @@ func (detacher *fcDetacher) UnmountDevice(deviceMountPath string) error {
if err != nil {
return fmt.Errorf("fc: failed to detach disk: %s\nError: %v", devName, err)
}
glog.V(4).Infof("fc: successfully detached disk: %s", devName)
klog.V(4).Infof("fc: successfully detached disk: %s", devName)
return nil
}
@ -192,7 +206,7 @@ func volumeSpecToMounter(spec *volume.Spec, host volume.VolumeHost) (*fcDiskMoun
if err != nil {
return nil, err
}
glog.V(5).Infof("fc: volumeSpecToMounter volumeMode %s", volumeMode)
klog.V(5).Infof("fc: volumeSpecToMounter volumeMode %s", volumeMode)
return &fcDiskMounter{
fcDisk: fcDisk,
fsType: fc.FSType,

View File

@ -19,9 +19,10 @@ package fc
import (
"os"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
)
// Abstract interface to disk operations.
@ -42,14 +43,14 @@ func diskSetUp(manager diskManager, b fcDiskMounter, volPath string, mounter mou
noMnt, err := mounter.IsLikelyNotMountPoint(volPath)
if err != nil && !os.IsNotExist(err) {
glog.Errorf("cannot validate mountpoint: %s", volPath)
klog.Errorf("cannot validate mountpoint: %s", volPath)
return err
}
if !noMnt {
return nil
}
if err := os.MkdirAll(volPath, 0750); err != nil {
glog.Errorf("failed to mkdir:%s", volPath)
klog.Errorf("failed to mkdir:%s", volPath)
return err
}
// Perform a bind mount to the full path to allow duplicate mounts of the same disk.
@ -57,27 +58,28 @@ func diskSetUp(manager diskManager, b fcDiskMounter, volPath string, mounter mou
if b.readOnly {
options = append(options, "ro")
}
err = mounter.Mount(globalPDPath, volPath, "", options)
mountOptions := util.JoinMountOptions(options, b.mountOptions)
err = mounter.Mount(globalPDPath, volPath, "", mountOptions)
if err != nil {
glog.Errorf("Failed to bind mount: source:%s, target:%s, err:%v", globalPDPath, volPath, err)
klog.Errorf("Failed to bind mount: source:%s, target:%s, err:%v", globalPDPath, volPath, err)
noMnt, mntErr := b.mounter.IsLikelyNotMountPoint(volPath)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return err
}
if !noMnt {
if mntErr = b.mounter.Unmount(volPath); mntErr != nil {
glog.Errorf("Failed to unmount: %v", mntErr)
klog.Errorf("Failed to unmount: %v", mntErr)
return err
}
noMnt, mntErr = b.mounter.IsLikelyNotMountPoint(volPath)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return err
}
if !noMnt {
// will most likely retry on next sync loop.
glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", volPath)
klog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", volPath)
return err
}
}
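This hunk also starts honoring PV mountOptions: util.JoinMountOptions merges the plugin's own bind-mount options with those from the spec before calling Mount. An illustrative stand-in for the merge (the real helper lives in pkg/volume/util and may order its output differently):

package volumesketch

// joinMountOptions is an illustrative stand-in for
// util.JoinMountOptions: merge user-supplied PV mountOptions with
// the options the plugin itself needs (e.g. "bind", "ro"),
// dropping duplicates.
func joinMountOptions(userOptions, systemOptions []string) []string {
	seen := map[string]bool{}
	var merged []string
	for _, opts := range [][]string{systemOptions, userOptions} {
		for _, o := range opts {
			if o != "" && !seen[o] {
				seen[o] = true
				merged = append(merged, o)
			}
		}
	}
	return merged
}

// joinMountOptions([]string{"noatime"}, []string{"bind", "ro"})
// yields {"bind", "ro", "noatime"}, which diskSetUp passes to
// mounter.Mount(globalPDPath, volPath, "", mountOptions).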

View File

@ -22,11 +22,11 @@ import (
"strconv"
"strings"
"github.com/golang/glog"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/util/mount"
utilstrings "k8s.io/kubernetes/pkg/util/strings"
@ -35,7 +35,7 @@ import (
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
)
// This is the primary entrypoint for volume plugins.
// ProbeVolumePlugins is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&fcPlugin{nil}}
}
@ -106,7 +106,7 @@ func (plugin *fcPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
func (plugin *fcPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
// Inject real implementations here, test through the internal function.
return plugin.newMounterInternal(spec, pod.UID, &FCUtil{}, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName()))
return plugin.newMounterInternal(spec, pod.UID, &fcUtil{}, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName()))
}
func (plugin *fcPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface, exec mount.Exec) (volume.Mounter, error) {
@ -137,22 +137,24 @@ func (plugin *fcPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID,
if err != nil {
return nil, err
}
glog.V(5).Infof("fc: newMounterInternal volumeMode %s", volumeMode)
klog.V(5).Infof("fc: newMounterInternal volumeMode %s", volumeMode)
return &fcDiskMounter{
fcDisk: fcDisk,
fsType: fc.FSType,
volumeMode: volumeMode,
readOnly: readOnly,
mounter: &mount.SafeFormatAndMount{Interface: mounter, Exec: exec},
deviceUtil: util.NewDeviceHandler(util.NewIOHandler()),
fcDisk: fcDisk,
fsType: fc.FSType,
volumeMode: volumeMode,
readOnly: readOnly,
mounter: &mount.SafeFormatAndMount{Interface: mounter, Exec: exec},
deviceUtil: util.NewDeviceHandler(util.NewIOHandler()),
mountOptions: []string{},
}, nil
}
return &fcDiskMounter{
fcDisk: fcDisk,
fsType: fc.FSType,
readOnly: readOnly,
mounter: &mount.SafeFormatAndMount{Interface: mounter, Exec: exec},
deviceUtil: util.NewDeviceHandler(util.NewIOHandler()),
fcDisk: fcDisk,
fsType: fc.FSType,
readOnly: readOnly,
mounter: &mount.SafeFormatAndMount{Interface: mounter, Exec: exec},
deviceUtil: util.NewDeviceHandler(util.NewIOHandler()),
mountOptions: util.MountOptionFromSpec(spec),
}, nil
}
@ -164,7 +166,7 @@ func (plugin *fcPlugin) NewBlockVolumeMapper(spec *volume.Spec, pod *v1.Pod, _ v
if pod != nil {
uid = pod.UID
}
return plugin.newBlockVolumeMapperInternal(spec, uid, &FCUtil{}, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName()))
return plugin.newBlockVolumeMapperInternal(spec, uid, &fcUtil{}, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName()))
}
func (plugin *fcPlugin) newBlockVolumeMapperInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface, exec mount.Exec) (volume.BlockVolumeMapper, error) {
@ -196,7 +198,7 @@ func (plugin *fcPlugin) newBlockVolumeMapperInternal(spec *volume.Spec, podUID t
func (plugin *fcPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
// Inject real implementations here, test through the internal function.
return plugin.newUnmounterInternal(volName, podUID, &FCUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
return plugin.newUnmounterInternal(volName, podUID, &fcUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *fcPlugin) newUnmounterInternal(volName string, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Unmounter, error) {
@ -214,7 +216,7 @@ func (plugin *fcPlugin) newUnmounterInternal(volName string, podUID types.UID, m
}
func (plugin *fcPlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (volume.BlockVolumeUnmapper, error) {
return plugin.newUnmapperInternal(volName, podUID, &FCUtil{})
return plugin.newUnmapperInternal(volName, podUID, &fcUtil{})
}
func (plugin *fcPlugin) newUnmapperInternal(volName string, podUID types.UID, manager diskManager) (volume.BlockVolumeUnmapper, error) {
@ -237,7 +239,7 @@ func (plugin *fcPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volu
// globalPDPath : plugins/kubernetes.io/fc/50060e801049cfd1-lun-0
var globalPDPath string
mounter := plugin.host.GetMounter(plugin.GetPluginName())
paths, err := mount.GetMountRefs(mounter, mountPath)
paths, err := mounter.GetMountRefs(mountPath)
if err != nil {
return nil, err
}
@ -274,7 +276,7 @@ func (plugin *fcPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volu
FC: &v1.FCVolumeSource{TargetWWNs: []string{wwnLun[0]}, Lun: &lun32},
},
}
glog.V(5).Infof("ConstructVolumeSpec: TargetWWNs: %v, Lun: %v",
klog.V(5).Infof("ConstructVolumeSpec: TargetWWNs: %v, Lun: %v",
fcVolume.VolumeSource.FC.TargetWWNs, *fcVolume.VolumeSource.FC.Lun)
} else {
fcVolume = &v1.Volume{
@ -283,7 +285,7 @@ func (plugin *fcPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volu
FC: &v1.FCVolumeSource{WWIDs: []string{volumeInfo}},
},
}
glog.V(5).Infof("ConstructVolumeSpec: WWIDs: %v", fcVolume.VolumeSource.FC.WWIDs)
klog.V(5).Infof("ConstructVolumeSpec: WWIDs: %v", fcVolume.VolumeSource.FC.WWIDs)
}
return volume.NewSpecFromVolume(fcVolume), nil
}
@ -302,7 +304,7 @@ func (plugin *fcPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, m
if err != nil {
return nil, err
}
glog.V(5).Infof("globalMapPathUUID: %v, err: %v", globalMapPathUUID, err)
klog.V(5).Infof("globalMapPathUUID: %v, err: %v", globalMapPathUUID, err)
// Retrieve volumePluginDependentPath from globalMapPathUUID
// globalMapPathUUID examples:
@ -326,13 +328,13 @@ func (plugin *fcPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, m
lun32 := int32(lun)
fcPV = createPersistentVolumeFromFCVolumeSource(volumeName,
v1.FCVolumeSource{TargetWWNs: []string{wwnLun[0]}, Lun: &lun32})
glog.V(5).Infof("ConstructBlockVolumeSpec: TargetWWNs: %v, Lun: %v",
klog.V(5).Infof("ConstructBlockVolumeSpec: TargetWWNs: %v, Lun: %v",
fcPV.Spec.PersistentVolumeSource.FC.TargetWWNs,
*fcPV.Spec.PersistentVolumeSource.FC.Lun)
} else {
fcPV = createPersistentVolumeFromFCVolumeSource(volumeName,
v1.FCVolumeSource{WWIDs: []string{volumeInfo}})
glog.V(5).Infof("ConstructBlockVolumeSpec: WWIDs: %v", fcPV.Spec.PersistentVolumeSource.FC.WWIDs)
klog.V(5).Infof("ConstructBlockVolumeSpec: WWIDs: %v", fcPV.Spec.PersistentVolumeSource.FC.WWIDs)
}
return volume.NewSpecFromPersistentVolume(fcPV, false), nil
}
@ -361,7 +363,7 @@ func (fc *fcDisk) GetPath() string {
func (fc *fcDisk) fcGlobalMapPath(spec *volume.Spec) (string, error) {
mounter, err := volumeSpecToMounter(spec, fc.plugin.host)
if err != nil {
glog.Warningf("failed to get fc mounter: %v", err)
klog.Warningf("failed to get fc mounter: %v", err)
return "", err
}
return fc.manager.MakeGlobalVDPDName(*mounter.fcDisk), nil
@ -374,11 +376,12 @@ func (fc *fcDisk) fcPodDeviceMapPath() (string, string) {
type fcDiskMounter struct {
*fcDisk
readOnly bool
fsType string
volumeMode v1.PersistentVolumeMode
mounter *mount.SafeFormatAndMount
deviceUtil util.DeviceUtil
readOnly bool
fsType string
volumeMode v1.PersistentVolumeMode
mounter *mount.SafeFormatAndMount
deviceUtil util.DeviceUtil
mountOptions []string
}
var _ volume.Mounter = &fcDiskMounter{}
@ -406,7 +409,7 @@ func (b *fcDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
// diskSetUp checks mountpoints and prevent repeated calls
err := diskSetUp(b.manager, *b, dir, b.mounter, fsGroup)
if err != nil {
glog.Errorf("fc: failed to setup")
klog.Errorf("fc: failed to setup")
}
return err
}
@ -459,12 +462,12 @@ func (c *fcDiskUnmapper) TearDownDevice(mapPath, devicePath string) error {
if err != nil {
return fmt.Errorf("fc: failed to detach disk: %s\nError: %v", mapPath, err)
}
glog.V(4).Infof("fc: %q is unmounted, deleting the directory", mapPath)
klog.V(4).Infof("fc: %q is unmounted, deleting the directory", mapPath)
err = os.RemoveAll(mapPath)
if err != nil {
return fmt.Errorf("fc: failed to delete the directory: %s\nError: %v", mapPath, err)
}
glog.V(4).Infof("fc: successfully detached disk: %s", mapPath)
klog.V(4).Infof("fc: successfully detached disk: %s", mapPath)
return nil
}

View File

@ -96,7 +96,7 @@ type fakeDiskManager struct {
detachCalled bool
}
func NewFakeDiskManager() *fakeDiskManager {
func newFakeDiskManager() *fakeDiskManager {
return &fakeDiskManager{
tmpDir: utiltesting.MkTmpdirOrDie("fc_test"),
}
@ -161,7 +161,7 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
if err != nil {
t.Errorf("Can't find the plugin by name")
}
fakeManager := NewFakeDiskManager()
fakeManager := newFakeDiskManager()
defer fakeManager.Cleanup()
fakeMounter := &mount.FakeMounter{}
fakeExec := mount.NewFakeExec(nil)
@ -190,7 +190,7 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
}
}
fakeManager2 := NewFakeDiskManager()
fakeManager2 := newFakeDiskManager()
defer fakeManager2.Cleanup()
unmounter, err := plug.(*fcPlugin).newUnmounterInternal("vol1", types.UID("poduid"), fakeManager2, fakeMounter)
if err != nil {
@ -224,7 +224,7 @@ func doTestPluginNilMounter(t *testing.T, spec *volume.Spec) {
if err != nil {
t.Errorf("Can't find the plugin by name")
}
fakeManager := NewFakeDiskManager()
fakeManager := newFakeDiskManager()
defer fakeManager.Cleanup()
fakeMounter := &mount.FakeMounter{}
fakeExec := mount.NewFakeExec(nil)
@ -254,6 +254,7 @@ func TestPluginVolume(t *testing.T) {
func TestPluginPersistentVolume(t *testing.T) {
lun := int32(0)
fs := v1.PersistentVolumeFilesystem
vol := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "vol1",
@ -266,6 +267,7 @@ func TestPluginPersistentVolume(t *testing.T) {
Lun: &lun,
},
},
VolumeMode: &fs,
},
}
doTestPlugin(t, volume.NewSpecFromPersistentVolume(vol, false))
@ -285,6 +287,7 @@ func TestPluginVolumeWWIDs(t *testing.T) {
}
func TestPluginPersistentVolumeWWIDs(t *testing.T) {
fs := v1.PersistentVolumeFilesystem
vol := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "vol1",
@ -296,6 +299,7 @@ func TestPluginPersistentVolumeWWIDs(t *testing.T) {
FSType: "ext4",
},
},
VolumeMode: &fs,
},
}
doTestPlugin(t, volume.NewSpecFromPersistentVolume(vol, false))
@ -314,6 +318,7 @@ func TestPluginVolumeNoDiskInfo(t *testing.T) {
}
func TestPluginPersistentVolumeNoDiskInfo(t *testing.T) {
fs := v1.PersistentVolumeFilesystem
vol := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "vol1",
@ -324,6 +329,7 @@ func TestPluginPersistentVolumeNoDiskInfo(t *testing.T) {
FSType: "ext4",
},
},
VolumeMode: &fs,
},
}
doTestPluginNilMounter(t, volume.NewSpecFromPersistentVolume(vol, false))
@ -337,6 +343,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
defer os.RemoveAll(tmpDir)
lun := int32(0)
fs := v1.PersistentVolumeFilesystem
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "pvA",
@ -352,6 +359,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
ClaimRef: &v1.ObjectReference{
Name: "claimA",
},
VolumeMode: &fs,
},
}
@ -362,6 +370,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
},
Spec: v1.PersistentVolumeClaimSpec{
VolumeName: "pvA",
VolumeMode: &fs,
},
Status: v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimBound,
@ -441,7 +450,7 @@ func Test_ConstructVolumeSpec(t *testing.T) {
"/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod2",
}
for _, path := range mountPaths {
refs, err := mount.GetMountRefs(fm, path)
refs, err := fm.GetMountRefs(path)
if err != nil {
t.Errorf("couldn't get mountrefs. err: %v", err)
}
@ -488,7 +497,7 @@ func Test_ConstructVolumeSpecNoRefs(t *testing.T) {
"/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod1",
}
for _, path := range mountPaths {
refs, _ := mount.GetMountRefs(fm, path)
refs, _ := fm.GetMountRefs(path)
var globalPDPath string
for _, ref := range refs {
if strings.Contains(ref, "kubernetes.io/fc") {

View File

@ -24,13 +24,12 @@ import (
"path/filepath"
"strings"
"github.com/golang/glog"
"k8s.io/api/core/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
)
type ioHandler interface {
@ -62,15 +61,15 @@ func (handler *osIOHandler) WriteFile(filename string, data []byte, perm os.File
// given a wwn and lun, find the device and associated devicemapper parent
func findDisk(wwn, lun string, io ioHandler, deviceUtil volumeutil.DeviceUtil) (string, string) {
fc_path := "-fc-0x" + wwn + "-lun-" + lun
dev_path := byPath
if dirs, err := io.ReadDir(dev_path); err == nil {
fcPath := "-fc-0x" + wwn + "-lun-" + lun
devPath := byPath
if dirs, err := io.ReadDir(devPath); err == nil {
for _, f := range dirs {
name := f.Name()
if strings.Contains(name, fc_path) {
if disk, err1 := io.EvalSymlinks(dev_path + name); err1 == nil {
if strings.Contains(name, fcPath) {
if disk, err1 := io.EvalSymlinks(devPath + name); err1 == nil {
dm := deviceUtil.FindMultipathDeviceForDevice(disk)
glog.Infof("fc: find disk: %v, dm: %v", disk, dm)
klog.Infof("fc: find disk: %v, dm: %v", disk, dm)
return disk, dm
}
}
@ -90,41 +89,41 @@ func findDiskWWIDs(wwid string, io ioHandler, deviceUtil volumeutil.DeviceUtil)
// The wwid could contain whitespace, which is replaced with an
// underscore when the wwid is exposed under /dev/by-id.
fc_path := "scsi-" + wwid
dev_id := byID
if dirs, err := io.ReadDir(dev_id); err == nil {
fcPath := "scsi-" + wwid
devID := byID
if dirs, err := io.ReadDir(devID); err == nil {
for _, f := range dirs {
name := f.Name()
if name == fc_path {
disk, err := io.EvalSymlinks(dev_id + name)
if name == fcPath {
disk, err := io.EvalSymlinks(devID + name)
if err != nil {
glog.V(2).Infof("fc: failed to find a corresponding disk from symlink[%s], error %v", dev_id+name, err)
klog.V(2).Infof("fc: failed to find a corresponding disk from symlink[%s], error %v", devID+name, err)
return "", ""
}
dm := deviceUtil.FindMultipathDeviceForDevice(disk)
glog.Infof("fc: find disk: %v, dm: %v", disk, dm)
klog.Infof("fc: find disk: %v, dm: %v", disk, dm)
return disk, dm
}
}
}
glog.V(2).Infof("fc: failed to find a disk [%s]", dev_id+fc_path)
klog.V(2).Infof("fc: failed to find a disk [%s]", devID+fcPath)
return "", ""
}
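As the comment above notes, whitespace in a wwid is normalized before the device appears under /dev/disk/by-id with a scsi- prefix. A small sketch of the entry name findDiskWWIDs matches, using a hypothetical wwid:

package volumesketch

import "strings"

// byIDName renders the /dev/disk/by-id entry findDiskWWIDs searches
// for. The hypothetical wwid "3600508b4 00105e21" is exposed as
// "scsi-3600508b4_00105e21" (whitespace becomes underscores).
func byIDName(wwid string) string {
	return "scsi-" + strings.Replace(wwid, " ", "_", -1)
}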
// Removes a scsi device based upon /dev/sdX name
func removeFromScsiSubsystem(deviceName string, io ioHandler) {
fileName := "/sys/block/" + deviceName + "/device/delete"
glog.V(4).Infof("fc: remove device from scsi-subsystem: path: %s", fileName)
klog.V(4).Infof("fc: remove device from scsi-subsystem: path: %s", fileName)
data := []byte("1")
io.WriteFile(fileName, data, 0666)
}
// rescan scsi bus
func scsiHostRescan(io ioHandler) {
scsi_path := "/sys/class/scsi_host/"
if dirs, err := io.ReadDir(scsi_path); err == nil {
scsiPath := "/sys/class/scsi_host/"
if dirs, err := io.ReadDir(scsiPath); err == nil {
for _, f := range dirs {
name := scsi_path + f.Name() + "/scan"
name := scsiPath + f.Name() + "/scan"
data := []byte("- - -")
io.WriteFile(name, data, 0666)
}
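The "- - -" written to each scan file is the kernel's wildcard triple (channel, target, lun), asking every SCSI host to probe for new devices. A standalone sketch of the same rescan (requires root and a Linux sysfs; illustrative only):

package volumesketch

import (
	"io/ioutil"
	"path/filepath"
)

// rescanAllSCSIHosts mirrors scsiHostRescan above: writing the
// wildcard triple to each host's scan file triggers device discovery.
func rescanAllSCSIHosts() error {
	hosts, err := filepath.Glob("/sys/class/scsi_host/host*/scan")
	if err != nil {
		return err
	}
	for _, scan := range hosts {
		if err := ioutil.WriteFile(scan, []byte("- - -"), 0666); err != nil {
			return err
		}
	}
	return nil
}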
@ -135,33 +134,31 @@ func scsiHostRescan(io ioHandler) {
func makePDNameInternal(host volume.VolumeHost, wwns []string, lun string, wwids []string) string {
if len(wwns) != 0 {
return path.Join(host.GetPluginDir(fcPluginName), wwns[0]+"-lun-"+lun)
} else {
return path.Join(host.GetPluginDir(fcPluginName), wwids[0])
}
return path.Join(host.GetPluginDir(fcPluginName), wwids[0])
}
// make a directory like /var/lib/kubelet/plugins/kubernetes.io/fc/volumeDevices/target-lun-0
func makeVDPDNameInternal(host volume.VolumeHost, wwns []string, lun string, wwids []string) string {
if len(wwns) != 0 {
return path.Join(host.GetVolumeDevicePluginDir(fcPluginName), wwns[0]+"-lun-"+lun)
} else {
return path.Join(host.GetVolumeDevicePluginDir(fcPluginName), wwids[0])
}
return path.Join(host.GetVolumeDevicePluginDir(fcPluginName), wwids[0])
}
type FCUtil struct{}
type fcUtil struct{}
func (util *FCUtil) MakeGlobalPDName(fc fcDisk) string {
func (util *fcUtil) MakeGlobalPDName(fc fcDisk) string {
return makePDNameInternal(fc.plugin.host, fc.wwns, fc.lun, fc.wwids)
}
// Global volume device plugin dir
func (util *FCUtil) MakeGlobalVDPDName(fc fcDisk) string {
func (util *fcUtil) MakeGlobalVDPDName(fc fcDisk) string {
return makeVDPDNameInternal(fc.plugin.host, fc.wwns, fc.lun, fc.wwids)
}
func searchDisk(b fcDiskMounter) (string, error) {
var diskIds []string
var diskIDs []string
var disk string
var dm string
io := b.io
@ -170,9 +167,9 @@ func searchDisk(b fcDiskMounter) (string, error) {
lun := b.lun
if len(wwns) != 0 {
diskIds = wwns
diskIDs = wwns
} else {
diskIds = wwids
diskIDs = wwids
}
rescaned := false
@ -180,11 +177,11 @@ func searchDisk(b fcDiskMounter) (string, error) {
// First phase: search existing device paths; if a multipath dm is found, exit the loop.
// Otherwise, in the second phase, rescan the scsi bus and search again, returning any findings.
for true {
for _, diskId := range diskIds {
for _, diskID := range diskIDs {
if len(wwns) != 0 {
disk, dm = findDisk(diskId, lun, io, b.deviceUtil)
disk, dm = findDisk(diskID, lun, io, b.deviceUtil)
} else {
disk, dm = findDiskWWIDs(diskId, io, b.deviceUtil)
disk, dm = findDiskWWIDs(diskID, io, b.deviceUtil)
}
// if multipath device is found, break
if dm != "" {
@ -212,7 +209,7 @@ func searchDisk(b fcDiskMounter) (string, error) {
return disk, nil
}
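searchDisk is a two-phase probe, as the loop comments describe: consult existing device paths first, and only when nothing (or no multipath dm) turns up, rescan the scsi bus once and look again. The control flow, reduced to a sketch:

package volumesketch

// probeTwice captures searchDisk's retry shape: find() consults the
// current state, rescan() asks the kernel to refresh it, and the
// rescan is attempted at most once.
func probeTwice(find func() (string, bool), rescan func()) (string, bool) {
	rescanned := false
	for {
		if dev, ok := find(); ok {
			return dev, true
		}
		if rescanned {
			return "", false // already rescanned; give up
		}
		rescan()
		rescanned = true
	}
}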
func (util *FCUtil) AttachDisk(b fcDiskMounter) (string, error) {
func (util *fcUtil) AttachDisk(b fcDiskMounter) (string, error) {
devicePath, err := searchDisk(b)
if err != nil {
return "", err
@ -221,7 +218,7 @@ func (util *FCUtil) AttachDisk(b fcDiskMounter) (string, error) {
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
// If the volumeMode is 'Block', plugin don't have to format the volume.
// The globalPDPath will be created by operationexecutor. Just return devicePath here.
glog.V(5).Infof("fc: AttachDisk volumeMode: %s, devicePath: %s", b.volumeMode, devicePath)
klog.V(5).Infof("fc: AttachDisk volumeMode: %s, devicePath: %s", b.volumeMode, devicePath)
if b.volumeMode == v1.PersistentVolumeBlock {
return devicePath, nil
}
@ -238,7 +235,7 @@ func (util *FCUtil) AttachDisk(b fcDiskMounter) (string, error) {
return devicePath, fmt.Errorf("Heuristic determination of mount point failed:%v", err)
}
if !noMnt {
glog.Infof("fc: %s already mounted", globalPDPath)
klog.Infof("fc: %s already mounted", globalPDPath)
return devicePath, nil
}
@ -251,7 +248,7 @@ func (util *FCUtil) AttachDisk(b fcDiskMounter) (string, error) {
}
// DetachDisk removes scsi device file such as /dev/sdX from the node.
func (util *FCUtil) DetachDisk(c fcDiskUnmounter, devicePath string) error {
func (util *fcUtil) DetachDisk(c fcDiskUnmounter, devicePath string) error {
var devices []string
// devicePath might be like /dev/mapper/mpathX. Find destination.
dstPath, err := c.io.EvalSymlinks(devicePath)
@ -265,24 +262,24 @@ func (util *FCUtil) DetachDisk(c fcDiskUnmounter, devicePath string) error {
// Add single devicepath to devices
devices = append(devices, dstPath)
}
glog.V(4).Infof("fc: DetachDisk devicePath: %v, dstPath: %v, devices: %v", devicePath, dstPath, devices)
klog.V(4).Infof("fc: DetachDisk devicePath: %v, dstPath: %v, devices: %v", devicePath, dstPath, devices)
var lastErr error
for _, device := range devices {
err := util.detachFCDisk(c.io, device)
if err != nil {
glog.Errorf("fc: detachFCDisk failed. device: %v err: %v", device, err)
klog.Errorf("fc: detachFCDisk failed. device: %v err: %v", device, err)
lastErr = fmt.Errorf("fc: detachFCDisk failed. device: %v err: %v", device, err)
}
}
if lastErr != nil {
glog.Errorf("fc: last error occurred during detach disk:\n%v", lastErr)
klog.Errorf("fc: last error occurred during detach disk:\n%v", lastErr)
return lastErr
}
return nil
}
// detachFCDisk removes scsi device file such as /dev/sdX from the node.
func (util *FCUtil) detachFCDisk(io ioHandler, devicePath string) error {
func (util *fcUtil) detachFCDisk(io ioHandler, devicePath string) error {
// Remove scsi device from the node.
if !strings.HasPrefix(devicePath, "/dev/") {
return fmt.Errorf("fc detach disk: invalid device name: %s", devicePath)
@ -295,7 +292,7 @@ func (util *FCUtil) detachFCDisk(io ioHandler, devicePath string) error {
// DetachBlockFCDisk detaches a volume from kubelet node, removes scsi device file
// such as /dev/sdX from the node, and then removes loopback for the scsi device.
func (util *FCUtil) DetachBlockFCDisk(c fcDiskUnmapper, mapPath, devicePath string) error {
func (util *fcUtil) DetachBlockFCDisk(c fcDiskUnmapper, mapPath, devicePath string) error {
// Check if devicePath is valid
if len(devicePath) != 0 {
if pathExists, pathErr := checkPathExists(devicePath); !pathExists || pathErr != nil {
@ -304,7 +301,7 @@ func (util *FCUtil) DetachBlockFCDisk(c fcDiskUnmapper, mapPath, devicePath stri
} else {
// TODO: FC plugin can't obtain the devicePath from kubelet because devicePath
// in volume object isn't updated when volume is attached to kubelet node.
glog.Infof("fc: devicePath is empty. Try to retrieve FC configuration from global map path: %v", mapPath)
klog.Infof("fc: devicePath is empty. Try to retrieve FC configuration from global map path: %v", mapPath)
}
// Check if global map path is valid
@ -335,7 +332,7 @@ func (util *FCUtil) DetachBlockFCDisk(c fcDiskUnmapper, mapPath, devicePath stri
for _, fi := range fis {
if strings.Contains(fi.Name(), volumeInfo) {
devicePath = path.Join(searchPath, fi.Name())
glog.V(5).Infof("fc: updated devicePath: %s", devicePath)
klog.V(5).Infof("fc: updated devicePath: %s", devicePath)
break
}
}
@ -346,27 +343,13 @@ func (util *FCUtil) DetachBlockFCDisk(c fcDiskUnmapper, mapPath, devicePath stri
if err != nil {
return err
}
glog.V(4).Infof("fc: find destination device path from symlink: %v", dstPath)
klog.V(4).Infof("fc: find destination device path from symlink: %v", dstPath)
// Get loopback device which takes an fd lock on the device before detaching a volume from the node.
// TODO: This is a workaround for issue #54108
// Currently local attach plugins such as FC, iSCSI, RBD can't obtain devicePath during
// GenerateUnmapDeviceFunc() in operation_generator. As a result, these plugins fail to get
// and remove the loopback device, which then remains on the kubelet node. To avoid the problem,
// local attach plugins need to remove the loopback device during TearDownDevice().
var devices []string
blkUtil := volumepathhandler.NewBlockVolumePathHandler()
dm := c.deviceUtil.FindMultipathDeviceForDevice(dstPath)
if len(dm) != 0 {
dstPath = dm
}
loop, err := volumepathhandler.BlockVolumePathHandler.GetLoopDevice(blkUtil, dstPath)
if err != nil {
if err.Error() != volumepathhandler.ErrDeviceNotFound {
return fmt.Errorf("fc: failed to get loopback for destination path: %v, err: %v", dstPath, err)
}
glog.Warning("fc: loopback for destination path: %s not found", dstPath)
}
// Detach volume from kubelet node
if len(dm) != 0 {
@ -380,21 +363,14 @@ func (util *FCUtil) DetachBlockFCDisk(c fcDiskUnmapper, mapPath, devicePath stri
for _, device := range devices {
err = util.detachFCDisk(c.io, device)
if err != nil {
glog.Errorf("fc: detachFCDisk failed. device: %v err: %v", device, err)
klog.Errorf("fc: detachFCDisk failed. device: %v err: %v", device, err)
lastErr = fmt.Errorf("fc: detachFCDisk failed. device: %v err: %v", device, err)
}
}
if lastErr != nil {
glog.Errorf("fc: last error occurred during detach disk:\n%v", lastErr)
klog.Errorf("fc: last error occurred during detach disk:\n%v", lastErr)
return lastErr
}
if len(loop) != 0 {
// The volume was successfully detached from node. We can safely remove the loopback.
err = volumepathhandler.BlockVolumePathHandler.RemoveLoopDevice(blkUtil, loop)
if err != nil {
return fmt.Errorf("fc: failed to remove loopback :%v, err: %v", loop, err)
}
}
return nil
}
@ -402,7 +378,7 @@ func checkPathExists(path string) (bool, error) {
if pathExists, pathErr := volumeutil.PathExists(path); pathErr != nil {
return pathExists, fmt.Errorf("Error checking if path exists: %v", pathErr)
} else if !pathExists {
glog.Warningf("Warning: Unmap skipped because path does not exist: %v", path)
klog.Warningf("Warning: Unmap skipped because path does not exist: %v", path)
return pathExists, nil
}
return true, nil

View File

@ -14,6 +14,8 @@ go_library(
"detacher.go",
"detacher-defaults.go",
"driver-call.go",
"expander.go",
"expander-defaults.go",
"fake_watcher.go",
"mounter.go",
"mounter-defaults.go",
@ -32,11 +34,12 @@ go_library(
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/github.com/fsnotify/fsnotify:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],
)
@ -60,12 +63,13 @@ go_test(
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
"//test/utils/harness:go_default_library",
"//vendor/github.com/fsnotify/fsnotify:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
"//vendor/k8s.io/utils/exec/testing:go_default_library",
],

View File

@ -19,7 +19,7 @@ package flexvolume
import (
"time"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
@ -30,13 +30,13 @@ type attacherDefaults flexVolumeAttacher
// Attach is part of the volume.Attacher interface
func (a *attacherDefaults) Attach(spec *volume.Spec, hostName types.NodeName) (string, error) {
glog.Warning(logPrefix(a.plugin.flexVolumePlugin), "using default Attach for volume ", spec.Name(), ", host ", hostName)
klog.Warning(logPrefix(a.plugin.flexVolumePlugin), "using default Attach for volume ", spec.Name(), ", host ", hostName)
return "", nil
}
// WaitForAttach is part of the volume.Attacher interface
func (a *attacherDefaults) WaitForAttach(spec *volume.Spec, devicePath string, timeout time.Duration) (string, error) {
glog.Warning(logPrefix(a.plugin.flexVolumePlugin), "using default WaitForAttach for volume ", spec.Name(), ", device ", devicePath)
klog.Warning(logPrefix(a.plugin.flexVolumePlugin), "using default WaitForAttach for volume ", spec.Name(), ", device ", devicePath)
return devicePath, nil
}
@ -47,7 +47,7 @@ func (a *attacherDefaults) GetDeviceMountPath(spec *volume.Spec, mountsDir strin
// MountDevice is part of the volume.Attacher interface
func (a *attacherDefaults) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string, mounter mount.Interface) error {
glog.Warning(logPrefix(a.plugin.flexVolumePlugin), "using default MountDevice for volume ", spec.Name(), ", device ", devicePath, ", deviceMountPath ", deviceMountPath)
klog.Warning(logPrefix(a.plugin.flexVolumePlugin), "using default MountDevice for volume ", spec.Name(), ", device ", devicePath, ", deviceMountPath ", deviceMountPath)
volSourceFSType, err := getFSType(spec)
if err != nil {

View File

@ -19,9 +19,9 @@ package flexvolume
import (
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/volume"
)
@ -31,6 +31,8 @@ type flexVolumeAttacher struct {
var _ volume.Attacher = &flexVolumeAttacher{}
var _ volume.DeviceMounter = &flexVolumeAttacher{}
// Attach is part of the volume.Attacher interface
func (a *flexVolumeAttacher) Attach(spec *volume.Spec, hostName types.NodeName) (string, error) {
@ -89,9 +91,8 @@ func (a *flexVolumeAttacher) MountDevice(spec *volume.Spec, devicePath string, d
// plugin does not implement attach interface.
if devicePath != "" {
return (*attacherDefaults)(a).MountDevice(spec, devicePath, deviceMountPath, a.plugin.host.GetMounter(a.plugin.GetPluginName()))
} else {
return nil
}
return nil
}
return err
}
@ -111,7 +112,7 @@ func (a *flexVolumeAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName t
} else if err == nil {
if !status.Attached {
volumesAttachedCheck[spec] = false
glog.V(2).Infof("VolumesAreAttached: check volume (%q) is no longer attached", spec.Name())
klog.V(2).Infof("VolumesAreAttached: check volume (%q) is no longer attached", spec.Name())
}
} else {
return nil, err

View File

@ -22,53 +22,66 @@ import (
"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/test/utils/harness"
)
func TestAttach(t *testing.T) {
func TestAttach(tt *testing.T) {
t := harness.For(tt)
defer t.Close()
spec := fakeVolumeSpec()
plugin, _ := testPlugin()
plugin, _ := testPlugin(t)
plugin.runner = fakeRunner(
assertDriverCall(t, notSupportedOutput(), attachCmd,
specJson(plugin, spec, nil), "localhost"),
specJSON(plugin, spec, nil), "localhost"),
)
a, _ := plugin.NewAttacher()
a.Attach(spec, "localhost")
}
func TestWaitForAttach(t *testing.T) {
func TestWaitForAttach(tt *testing.T) {
t := harness.For(tt)
defer t.Close()
spec := fakeVolumeSpec()
var pod *v1.Pod
plugin, _ := testPlugin()
plugin, _ := testPlugin(t)
plugin.runner = fakeRunner(
assertDriverCall(t, notSupportedOutput(), waitForAttachCmd, "/dev/sdx",
specJson(plugin, spec, nil)),
specJSON(plugin, spec, nil)),
)
a, _ := plugin.NewAttacher()
a.WaitForAttach(spec, "/dev/sdx", pod, 1*time.Second)
}
func TestMountDevice(t *testing.T) {
func TestMountDevice(tt *testing.T) {
t := harness.For(tt)
defer t.Close()
spec := fakeVolumeSpec()
plugin, rootDir := testPlugin()
plugin, rootDir := testPlugin(t)
plugin.runner = fakeRunner(
assertDriverCall(t, notSupportedOutput(), mountDeviceCmd, rootDir+"/mount-dir", "/dev/sdx",
specJson(plugin, spec, nil)),
specJSON(plugin, spec, nil)),
)
a, _ := plugin.NewAttacher()
a.MountDevice(spec, "/dev/sdx", rootDir+"/mount-dir")
}
func TestIsVolumeAttached(t *testing.T) {
func TestIsVolumeAttached(tt *testing.T) {
t := harness.For(tt)
defer t.Close()
spec := fakeVolumeSpec()
plugin, _ := testPlugin()
plugin, _ := testPlugin(t)
plugin.runner = fakeRunner(
assertDriverCall(t, notSupportedOutput(), isAttached, specJson(plugin, spec, nil), "localhost"),
assertDriverCall(t, notSupportedOutput(), isAttached, specJSON(plugin, spec, nil), "localhost"),
)
a, _ := plugin.NewAttacher()
specs := []*volume.Spec{spec}

View File

@ -18,22 +18,18 @@ package flexvolume
import (
"encoding/json"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/volume"
volumetesting "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/kubernetes/test/utils/harness"
"k8s.io/utils/exec"
fakeexec "k8s.io/utils/exec/testing"
)
func testPlugin() (*flexVolumeAttachablePlugin, string) {
rootDir, err := utiltesting.MkTmpdir("flexvolume_test")
if err != nil {
panic("error creating temp dir: " + err.Error())
}
func testPlugin(h *harness.Harness) (*flexVolumeAttachablePlugin, string) {
rootDir := h.TempDir("", "flexvolume_test")
return &flexVolumeAttachablePlugin{
flexVolumePlugin: &flexVolumePlugin{
driverName: "test",
@ -44,7 +40,7 @@ func testPlugin() (*flexVolumeAttachablePlugin, string) {
}, rootDir
}
func assertDriverCall(t *testing.T, output fakeexec.FakeCombinedOutputAction, expectedCommand string, expectedArgs ...string) fakeexec.FakeCommandAction {
func assertDriverCall(t *harness.Harness, output fakeexec.FakeCombinedOutputAction, expectedCommand string, expectedArgs ...string) fakeexec.FakeCommandAction {
return func(cmd string, args ...string) exec.Cmd {
if cmd != "/plugin/test" {
t.Errorf("Wrong executable called: got %v, expected %v", cmd, "/plugin/test")
@ -80,11 +76,11 @@ func fakeResultOutput(result interface{}) fakeexec.FakeCombinedOutputAction {
}
func successOutput() fakeexec.FakeCombinedOutputAction {
return fakeResultOutput(&DriverStatus{StatusSuccess, "", "", "", true, nil})
return fakeResultOutput(&DriverStatus{StatusSuccess, "", "", "", true, nil, 0})
}
func notSupportedOutput() fakeexec.FakeCombinedOutputAction {
return fakeResultOutput(&DriverStatus{StatusNotSupported, "", "", "", false, nil})
return fakeResultOutput(&DriverStatus{StatusNotSupported, "", "", "", false, nil, 0})
}
func sameArgs(args, expectedArgs []string) bool {
@ -129,7 +125,7 @@ func fakePersistentVolumeSpec() *volume.Spec {
return volume.NewSpecFromPersistentVolume(vol, false)
}
func specJson(plugin *flexVolumeAttachablePlugin, spec *volume.Spec, extraOptions map[string]string) string {
func specJSON(plugin *flexVolumeAttachablePlugin, spec *volume.Spec, extraOptions map[string]string) string {
o, err := NewOptionsForDriver(spec, plugin.host, extraOptions)
if err != nil {
panic("Failed to convert spec: " + err.Error())

View File

@ -19,8 +19,8 @@ package flexvolume
import (
"time"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/volume/util"
)
@ -28,18 +28,18 @@ type detacherDefaults flexVolumeDetacher
// Detach is part of the volume.Detacher interface.
func (d *detacherDefaults) Detach(volumeName string, hostName types.NodeName) error {
glog.Warning(logPrefix(d.plugin.flexVolumePlugin), "using default Detach for volume ", volumeName, ", host ", hostName)
klog.Warning(logPrefix(d.plugin.flexVolumePlugin), "using default Detach for volume ", volumeName, ", host ", hostName)
return nil
}
// WaitForDetach is part of the volume.Detacher interface.
func (d *detacherDefaults) WaitForDetach(devicePath string, timeout time.Duration) error {
glog.Warning(logPrefix(d.plugin.flexVolumePlugin), "using default WaitForDetach for device ", devicePath)
klog.Warning(logPrefix(d.plugin.flexVolumePlugin), "using default WaitForDetach for device ", devicePath)
return nil
}
// UnmountDevice is part of the volume.Detacher interface.
func (d *detacherDefaults) UnmountDevice(deviceMountPath string) error {
glog.Warning(logPrefix(d.plugin.flexVolumePlugin), "using default UnmountDevice for device mount path ", deviceMountPath)
klog.Warning(logPrefix(d.plugin.flexVolumePlugin), "using default UnmountDevice for device mount path ", deviceMountPath)
return util.UnmountPath(deviceMountPath, d.plugin.host.GetMounter(d.plugin.GetPluginName()))
}

View File

@ -20,8 +20,8 @@ import (
"fmt"
"os"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
)
@ -32,6 +32,8 @@ type flexVolumeDetacher struct {
var _ volume.Detacher = &flexVolumeDetacher{}
var _ volume.DeviceUnmounter = &flexVolumeDetacher{}
// Detach is part of the volume.Detacher interface.
func (d *flexVolumeDetacher) Detach(volumeName string, hostName types.NodeName) error {
@ -49,19 +51,26 @@ func (d *flexVolumeDetacher) Detach(volumeName string, hostName types.NodeName)
// UnmountDevice is part of the volume.Detacher interface.
func (d *flexVolumeDetacher) UnmountDevice(deviceMountPath string) error {
if pathExists, pathErr := util.PathExists(deviceMountPath); pathErr != nil {
return fmt.Errorf("Error checking if path exists: %v", pathErr)
} else if !pathExists {
glog.Warningf("Warning: Unmount skipped because path does not exist: %v", deviceMountPath)
pathExists, pathErr := util.PathExists(deviceMountPath)
if !pathExists {
klog.Warningf("Warning: Unmount skipped because path does not exist: %v", deviceMountPath)
return nil
}
if pathErr != nil && !util.IsCorruptedMnt(pathErr) {
return fmt.Errorf("Error checking path: %v", pathErr)
}
notmnt, err := isNotMounted(d.plugin.host.GetMounter(d.plugin.GetPluginName()), deviceMountPath)
if err != nil {
return err
if util.IsCorruptedMnt(err) {
notmnt = false // A corrupted mount point is assumed to still be mounted.
} else {
return err
}
}
if notmnt {
glog.Warningf("Warning: Path: %v already unmounted", deviceMountPath)
klog.Warningf("Warning: Path: %v already unmounted", deviceMountPath)
} else {
call := d.plugin.NewDriverCall(unmountDeviceCmd)
call.Append(deviceMountPath)
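Net effect of the rewritten UnmountDevice: a missing path is now a clean no-op, a corrupted mount record (util.IsCorruptedMnt) no longer aborts the teardown but is treated as still mounted so the driver's unmountdevice call still runs, and only genuine stat errors fail the operation.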

@@ -18,10 +18,15 @@ package flexvolume
import (
"testing"
"k8s.io/kubernetes/test/utils/harness"
)
func TestDetach(t *testing.T) {
plugin, _ := testPlugin()
func TestDetach(tt *testing.T) {
t := harness.For(tt)
defer t.Close()
plugin, _ := testPlugin(t)
plugin.runner = fakeRunner(
assertDriverCall(t, notSupportedOutput(), detachCmd,
"sdx", "localhost"),
@@ -31,8 +36,11 @@ func TestDetach(t *testing.T) {
d.Detach("sdx", "localhost")
}
func TestUnmountDevice(t *testing.T) {
plugin, rootDir := testPlugin()
func TestUnmountDevice(tt *testing.T) {
t := harness.For(tt)
defer t.Close()
plugin, rootDir := testPlugin(t)
plugin.runner = fakeRunner(
assertDriverCall(t, notSupportedOutput(), unmountDeviceCmd,
rootDir+"/mount-dir"),

@@ -22,7 +22,7 @@ import (
"fmt"
"time"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/volume"
)
@@ -44,6 +44,9 @@ const (
mountCmd = "mount"
unmountCmd = "unmount"
expandVolumeCmd = "expandvolume"
expandFSCmd = "expandfs"
// Option keys
optionFSType = "kubernetes.io/fsType"
optionReadWrite = "kubernetes.io/readwrite"
@@ -67,7 +70,7 @@ const (
)
var (
TimeoutError = fmt.Errorf("Timeout")
errTimeout = fmt.Errorf("Timeout")
)
// DriverCall implements the basic contract between FlexVolume and its driver.
@@ -92,10 +95,12 @@ func (plugin *flexVolumePlugin) NewDriverCallWithTimeout(command string, timeout
}
}
// Append appends arg to the driver call's argument list.
func (dc *DriverCall) Append(arg string) {
dc.args = append(dc.args, arg)
}
// AppendSpec appends volume spec to driver call argument list
func (dc *DriverCall) AppendSpec(spec *volume.Spec, host volume.VolumeHost, extraOptions map[string]string) error {
optionsForDriver, err := NewOptionsForDriver(spec, host, extraOptions)
if err != nil {
@@ -111,6 +116,7 @@ func (dc *DriverCall) AppendSpec(spec *volume.Spec, host volume.VolumeHost, extr
return nil
}
// Run executes the driver call
func (dc *DriverCall) Run() (*DriverStatus, error) {
if dc.plugin.isUnsupported(dc.Command) {
return nil, errors.New(StatusNotSupported)
@@ -131,17 +137,17 @@ func (dc *DriverCall) Run() (*DriverStatus, error) {
output, execErr := cmd.CombinedOutput()
if execErr != nil {
if timeout {
return nil, TimeoutError
return nil, errTimeout
}
_, err := handleCmdResponse(dc.Command, output)
if err == nil {
glog.Errorf("FlexVolume: driver bug: %s: exec error (%s) but no error in response.", execPath, execErr)
klog.Errorf("FlexVolume: driver bug: %s: exec error (%s) but no error in response.", execPath, execErr)
return nil, execErr
}
if isCmdNotSupportedErr(err) {
dc.plugin.unsupported(dc.Command)
} else {
glog.Warningf("FlexVolume: driver call failed: executable: %s, args: %s, error: %s, output: %q", execPath, dc.args, execErr.Error(), output)
klog.Warningf("FlexVolume: driver call failed: executable: %s, args: %s, error: %s, output: %q", execPath, dc.args, execErr.Error(), output)
}
return nil, err
}
@@ -160,6 +166,7 @@ func (dc *DriverCall) Run() (*DriverStatus, error) {
// OptionsForDriver represents the spec given to the driver.
type OptionsForDriver map[string]string
// NewOptionsForDriver creates driver options for the given volume spec.
func NewOptionsForDriver(spec *volume.Spec, host volume.VolumeHost, extraOptions map[string]string) (OptionsForDriver, error) {
volSourceFSType, err := getFSType(spec)
@@ -217,17 +224,26 @@ type DriverStatus struct {
// By default we assume all the capabilities are supported.
// If the plugin does not support a capability, it can return false for that capability.
Capabilities *DriverCapabilities `json:",omitempty"`
// The actual size of the volume, in bytes, after resizing is done.
ActualVolumeSize int64 `json:"volumeNewSize,omitempty"`
}
// DriverCapabilities represents what the driver can do.
type DriverCapabilities struct {
Attach bool `json:"attach"`
SELinuxRelabel bool `json:"selinuxRelabel"`
Attach bool `json:"attach"`
SELinuxRelabel bool `json:"selinuxRelabel"`
SupportsMetrics bool `json:"supportsMetrics"`
FSGroup bool `json:"fsGroup"`
RequiresFSResize bool `json:"requiresFSResize"`
}
func defaultCapabilities() *DriverCapabilities {
return &DriverCapabilities{
Attach: true,
SELinuxRelabel: true,
Attach: true,
SELinuxRelabel: true,
SupportsMetrics: false,
FSGroup: true,
RequiresFSResize: true,
}
}
@@ -248,14 +264,14 @@ func handleCmdResponse(cmd string, output []byte) (*DriverStatus, error) {
Capabilities: defaultCapabilities(),
}
if err := json.Unmarshal(output, &status); err != nil {
glog.Errorf("Failed to unmarshal output for command: %s, output: %q, error: %s", cmd, string(output), err.Error())
klog.Errorf("Failed to unmarshal output for command: %s, output: %q, error: %s", cmd, string(output), err.Error())
return nil, err
} else if status.Status == StatusNotSupported {
glog.V(5).Infof("%s command is not supported by the driver", cmd)
klog.V(5).Infof("%s command is not supported by the driver", cmd)
return nil, errors.New(status.Status)
} else if status.Status != StatusSuccess {
errMsg := fmt.Sprintf("%s command failed, status: %s, reason: %s", cmd, status.Status, status.Message)
glog.Errorf(errMsg)
klog.Errorf(errMsg)
return nil, fmt.Errorf("%s", errMsg)
}
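Note that handleCmdResponse seeds the status with defaultCapabilities() before json.Unmarshal runs, so any capability key the driver omits keeps its default, and existing drivers that predate supportsMetrics, fsGroup, and requiresFSResize keep working. A small self-contained sketch of that merge behavior (field declarations copied from the hunk above):

package main

import (
	"encoding/json"
	"fmt"
)

type DriverCapabilities struct {
	Attach           bool `json:"attach"`
	SELinuxRelabel   bool `json:"selinuxRelabel"`
	SupportsMetrics  bool `json:"supportsMetrics"`
	FSGroup          bool `json:"fsGroup"`
	RequiresFSResize bool `json:"requiresFSResize"`
}

func main() {
	// Pre-seed with the defaults, as handleCmdResponse does.
	caps := &DriverCapabilities{Attach: true, SELinuxRelabel: true, FSGroup: true, RequiresFSResize: true}
	// An older driver only reports the keys it knows about.
	if err := json.Unmarshal([]byte(`{"attach": false, "selinuxRelabel": true}`), caps); err != nil {
		panic(err)
	}
	// The omitted keys keep their defaulted values:
	// &{Attach:false SELinuxRelabel:true SupportsMetrics:false FSGroup:true RequiresFSResize:true}
	fmt.Printf("%+v\n", caps)
}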

@@ -0,0 +1,45 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flexvolume
import (
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
)
type expanderDefaults struct {
plugin *flexVolumePlugin
}
func newExpanderDefaults(plugin *flexVolumePlugin) *expanderDefaults {
return &expanderDefaults{plugin}
}
func (e *expanderDefaults) ExpandVolumeDevice(spec *volume.Spec, newSize resource.Quantity, oldSize resource.Quantity) (resource.Quantity, error) {
klog.Warning(logPrefix(e.plugin), "using default expand for volume ", spec.Name(), ", to size ", newSize, " from ", oldSize)
return newSize, nil
}
// The default ExpandFS performs the generic filesystem resize (util.GenericResizeFS),
// used when the driver itself does not implement the expandfs call.
func (e *expanderDefaults) ExpandFS(spec *volume.Spec, devicePath, deviceMountPath string, _, _ resource.Quantity) error {
klog.Warning(logPrefix(e.plugin), "using default filesystem resize for volume ", spec.Name(), ", at ", devicePath)
_, err := util.GenericResizeFS(e.plugin.host, e.plugin.GetPluginName(), devicePath, deviceMountPath)
return err
}
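In practice a driver can implement expandvolume (grow the backing storage) and still answer the expandfs call with the not-supported status; the plugin then falls back to this default, which resizes the filesystem with the same generic resizer (util.GenericResizeFS) used elsewhere in the volume code. An illustrative opt-out response from the driver (StatusNotSupported is the "Not supported" status string used throughout this package):

{"status": "Not supported"}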

@@ -0,0 +1,67 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flexvolume
import (
"fmt"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/kubernetes/pkg/volume"
"strconv"
)
func (plugin *flexVolumePlugin) ExpandVolumeDevice(spec *volume.Spec, newSize resource.Quantity, oldSize resource.Quantity) (resource.Quantity, error) {
call := plugin.NewDriverCall(expandVolumeCmd)
call.AppendSpec(spec, plugin.host, nil)
devicePath, err := plugin.getDeviceMountPath(spec)
if err != nil {
return newSize, err
}
call.Append(devicePath)
call.Append(strconv.FormatInt(newSize.Value(), 10))
call.Append(strconv.FormatInt(oldSize.Value(), 10))
_, err = call.Run()
if isCmdNotSupportedErr(err) {
return newExpanderDefaults(plugin).ExpandVolumeDevice(spec, newSize, oldSize)
}
return newSize, err
}
func (plugin *flexVolumePlugin) ExpandFS(spec *volume.Spec, devicePath, deviceMountPath string, newSize, oldSize resource.Quantity) error {
// This method is called after spec.PersistentVolume.Spec.Capacity has
// been updated to the new size. The underlying driver thus sees the
// _new_ (requested) size and can find out the _current_ size from its
// underlying storage implementation.
if spec.PersistentVolume == nil {
return fmt.Errorf("PersistentVolume not found for spec: %s", spec.Name())
}
call := plugin.NewDriverCall(expandFSCmd)
call.AppendSpec(spec, plugin.host, nil)
call.Append(devicePath)
call.Append(deviceMountPath)
call.Append(strconv.FormatInt(newSize.Value(), 10))
call.Append(strconv.FormatInt(oldSize.Value(), 10))
_, err := call.Run()
if isCmdNotSupportedErr(err) {
return newExpanderDefaults(plugin).ExpandFS(spec, devicePath, deviceMountPath, newSize, oldSize)
}
return err
}
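Putting the two entry points together: the driver receives the JSON-encoded options first (via AppendSpec), then the positional arguments, with both sizes passed as decimal byte counts and the new size before the old one. Schematically (argument order read off the Append calls above; the placeholders are illustrative):

<driver> expandvolume <spec-options-json> <device-mount-path> <new-size-bytes> <old-size-bytes>
<driver> expandfs <spec-options-json> <device-path> <device-mount-path> <new-size-bytes> <old-size-bytes>

Either call may answer with the not-supported status to fall back to the defaults above. Note that expandvolume passes the result of getDeviceMountPath even though the local variable is named devicePath.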

@@ -29,7 +29,7 @@ type fakeWatcher struct {
var _ utilfs.FSWatcher = &fakeWatcher{}
func NewFakeWatcher() *fakeWatcher {
func newFakeWatcher() *fakeWatcher {
return &fakeWatcher{
watches: nil,
}

@@ -28,6 +28,7 @@ import (
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/utils/exec"
)
const execScriptTempl1 = `#!/usr/bin/env bash
@@ -173,8 +174,9 @@ func TestCanSupport(t *testing.T) {
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
runner := exec.New()
installPluginUnderTest(t, "kubernetes.io", "fakeAttacher", tmpDir, execScriptTempl1, nil)
plugMgr.InitPlugins(nil, GetDynamicPluginProber(tmpDir), volumetest.NewFakeVolumeHost("fake", nil, nil))
plugMgr.InitPlugins(nil, GetDynamicPluginProber(tmpDir, runner), volumetest.NewFakeVolumeHost("fake", nil, nil))
plugin, err := plugMgr.FindPluginByName("flexvolume-kubernetes.io/fakeAttacher")
if err != nil {
t.Fatalf("Can't find the plugin by name")
@@ -201,8 +203,9 @@ func TestGetAccessModes(t *testing.T) {
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
runner := exec.New()
installPluginUnderTest(t, "kubernetes.io", "fakeAttacher", tmpDir, execScriptTempl1, nil)
plugMgr.InitPlugins(nil, GetDynamicPluginProber(tmpDir), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plugMgr.InitPlugins(nil, GetDynamicPluginProber(tmpDir, runner), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plugin, err := plugMgr.FindPersistentPluginByName("flexvolume-kubernetes.io/fakeAttacher")
if err != nil {
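Both tests reflect the new GetDynamicPluginProber signature, which takes the exec interface used to invoke driver binaries instead of constructing one internally (presumably so tests can inject a fake runner). The wiring, as in the hunks above:

runner := exec.New() // from k8s.io/utils/exec
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(nil, GetDynamicPluginProber(pluginDir, runner), volumetest.NewFakeVolumeHost("fake", nil, nil))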

@@ -17,7 +17,7 @@ limitations under the License.
package flexvolume
import (
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/volume"
)
@@ -27,7 +27,7 @@ type mounterDefaults flexVolumeMounter
// SetUpAt is part of the volume.Mounter interface.
// This implementation relies on the attacher's device mount path and does a bind mount to dir.
func (f *mounterDefaults) SetUpAt(dir string, fsGroup *int64) error {
glog.Warning(logPrefix(f.plugin), "using default SetUpAt to ", dir)
klog.Warning(logPrefix(f.plugin), "using default SetUpAt to ", dir)
src, err := f.plugin.getDeviceMountPath(f.spec)
if err != nil {
@@ -43,7 +43,7 @@ func (f *mounterDefaults) SetUpAt(dir string, fsGroup *int64) error {
// Returns the default volume attributes.
func (f *mounterDefaults) GetAttributes() volume.Attributes {
glog.V(5).Infof(logPrefix(f.plugin), "using default GetAttributes")
klog.V(5).Infof(logPrefix(f.plugin), "using default GetAttributes")
return volume.Attributes{
ReadOnly: f.readOnly,
Managed: !f.readOnly,

@@ -32,7 +32,6 @@ type flexVolumeMounter struct {
// the considered volume spec
spec *volume.Spec
readOnly bool
volume.MetricsNil
}
var _ volume.Mounter = &flexVolumeMounter{}
@@ -93,7 +92,9 @@ func (f *flexVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
}
if !f.readOnly {
volume.SetVolumeOwnership(f, fsGroup)
if f.plugin.capabilities.FSGroup {
volume.SetVolumeOwnership(f, fsGroup)
}
}
return nil
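SetVolumeOwnership is now gated on the driver's advertised fsGroup capability, which defaults to true, so a driver that manages permissions itself can opt out of the fsGroup-based ownership pass. Assuming capabilities are taken from the driver's init response like the other capability flags, an opting-out driver would report something like (illustrative):

{"status": "Success", "capabilities": {"attach": false, "fsGroup": false}}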

@@ -23,9 +23,13 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/test/utils/harness"
)
func TestSetUpAt(t *testing.T) {
func TestSetUpAt(tt *testing.T) {
t := harness.For(tt)
defer t.Close()
spec := fakeVolumeSpec()
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@@ -39,12 +43,12 @@ func TestSetUpAt(t *testing.T) {
}
mounter := &mount.FakeMounter{}
plugin, rootDir := testPlugin()
plugin, rootDir := testPlugin(t)
plugin.unsupportedCommands = []string{"unsupportedCmd"}
plugin.runner = fakeRunner(
// first call without fsGroup
assertDriverCall(t, successOutput(), mountCmd, rootDir+"/mount-dir",
specJson(plugin, spec, map[string]string{
specJSON(plugin, spec, map[string]string{
optionKeyPodName: "my-pod",
optionKeyPodNamespace: "my-ns",
optionKeyPodUID: "my-uid",
@@ -53,7 +57,7 @@ func TestSetUpAt(t *testing.T) {
// second test has fsGroup
assertDriverCall(t, notSupportedOutput(), mountCmd, rootDir+"/mount-dir",
specJson(plugin, spec, map[string]string{
specJSON(plugin, spec, map[string]string{
optionFSGroup: "42",
optionKeyPodName: "my-pod",
optionKeyPodNamespace: "my-ns",
@@ -61,7 +65,7 @@ func TestSetUpAt(t *testing.T) {
optionKeyServiceAccountName: "my-sa",
})),
assertDriverCall(t, fakeVolumeNameOutput("sdx"), getVolumeNameCmd,
specJson(plugin, spec, nil)),
specJSON(plugin, spec, nil)),
)
m, _ := plugin.newMounterInternal(spec, pod, mounter, plugin.runner)
