Fresh dep ensure

Mike Cronce
2018-11-26 13:23:56 -05:00
parent 93cb8a04d7
commit 407478ab9a
9016 changed files with 551394 additions and 279685 deletions

View File

@ -14,22 +14,27 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//pkg/features:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/csi/labelmanager:go_default_library",
"//pkg/volume/csi/csiv0:go_default_library",
"//pkg/volume/csi/nodeinfomanager:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/github.com/container-storage-interface/spec/lib/go/csi/v0:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/client/informers/externalversions:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/client/informers/externalversions/csi/v1alpha1:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/client/listers/csi/v1alpha1:go_default_library",
"//vendor/github.com/container-storage-interface/spec/lib/go/csi:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/storage/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
@ -41,26 +46,34 @@ go_test(
"csi_client_test.go",
"csi_mounter_test.go",
"csi_plugin_test.go",
"main_test.go",
],
embed = [":go_default_library"],
deps = [
"//pkg/features:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/csi/fake:go_default_library",
"//pkg/volume/testing:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/github.com/container-storage-interface/spec/lib/go/csi/v0:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/storage/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/testing:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/apis/csi/v1alpha1:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned/fake:go_default_library",
"//vendor/github.com/container-storage-interface/spec/lib/go/csi:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
@ -75,8 +88,9 @@ filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/volume/csi/csiv0:all-srcs",
"//pkg/volume/csi/fake:all-srcs",
"//pkg/volume/csi/labelmanager:all-srcs",
"//pkg/volume/csi/nodeinfomanager:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],

View File

@ -27,9 +27,8 @@ import (
"strings"
"time"
"github.com/golang/glog"
"k8s.io/klog"
csipb "github.com/container-storage-interface/spec/lib/go/csi/v0"
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1beta1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
@ -56,18 +55,30 @@ type csiAttacher struct {
// volume.Attacher methods
var _ volume.Attacher = &csiAttacher{}
var _ volume.DeviceMounter = &csiAttacher{}
func (c *csiAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
if spec == nil {
glog.Error(log("attacher.Attach missing volume.Spec"))
klog.Error(log("attacher.Attach missing volume.Spec"))
return "", errors.New("missing spec")
}
csiSource, err := getCSISourceFromSpec(spec)
if err != nil {
glog.Error(log("attacher.Attach failed to get CSI persistent source: %v", err))
klog.Error(log("attacher.Attach failed to get CSI persistent source: %v", err))
return "", err
}
skip, err := c.plugin.skipAttach(csiSource.Driver)
if err != nil {
klog.Error(log("attacher.Attach failed to find if driver is attachable: %v", err))
return "", err
}
if skip {
klog.V(4).Infof(log("skipping attach for driver %s", csiSource.Driver))
return "", nil
}
node := string(nodeName)
pvName := spec.PersistentVolume.GetName()
attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, node)
@ -83,46 +94,58 @@ func (c *csiAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string
PersistentVolumeName: &pvName,
},
},
Status: storage.VolumeAttachmentStatus{Attached: false},
}
_, err = c.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
alreadyExist := false
if err != nil {
if !apierrs.IsAlreadyExists(err) {
glog.Error(log("attacher.Attach failed: %v", err))
klog.Error(log("attacher.Attach failed: %v", err))
return "", err
}
alreadyExist = true
}
if alreadyExist {
glog.V(4).Info(log("attachment [%v] for volume [%v] already exists (will not be recreated)", attachID, csiSource.VolumeHandle))
klog.V(4).Info(log("attachment [%v] for volume [%v] already exists (will not be recreated)", attachID, csiSource.VolumeHandle))
} else {
glog.V(4).Info(log("attachment [%v] for volume [%v] created successfully", attachID, csiSource.VolumeHandle))
klog.V(4).Info(log("attachment [%v] for volume [%v] created successfully", attachID, csiSource.VolumeHandle))
}
if _, err := c.waitForVolumeAttachment(csiSource.VolumeHandle, attachID, csiTimeout); err != nil {
return "", err
}
glog.V(4).Info(log("attacher.Attach finished OK with VolumeAttachment object [%s]", attachID))
klog.V(4).Info(log("attacher.Attach finished OK with VolumeAttachment object [%s]", attachID))
// TODO(71164): In 1.15, return empty devicePath
return attachID, nil
}
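
The new skipAttach calls above consult the CSIDriver informer, but the method body is outside this diff. Based on the test fixtures added below (AttachRequired=false skips attach; true, unset, or a missing CSIDriver object still require it, all gated on the CSIDriverRegistry feature), here is a minimal sketch of the decision with hypothetical names:

```go
package main

import (
	"fmt"

	csiv1alpha1 "k8s.io/csi-api/pkg/apis/csi/v1alpha1"
)

// skipAttachSketch mirrors the behavior the new tests expect; in the real
// plugin the CSIDriver object would come from the csiDriverInformer's lister,
// and the check only runs when the CSIDriverRegistry feature gate is enabled.
func skipAttachSketch(driver *csiv1alpha1.CSIDriver) bool {
	if driver == nil {
		// Unknown driver: fall back to requiring attach.
		return false
	}
	if driver.Spec.AttachRequired != nil && !*driver.Spec.AttachRequired {
		// Driver declared itself non-attachable.
		return true
	}
	// AttachRequired true or unset: a VolumeAttachment is still needed.
	return false
}

func main() {
	attachRequired := false
	fmt.Println(skipAttachSketch(&csiv1alpha1.CSIDriver{
		Spec: csiv1alpha1.CSIDriverSpec{AttachRequired: &attachRequired},
	})) // true
	fmt.Println(skipAttachSketch(nil)) // false
}
```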
func (c *csiAttacher) WaitForAttach(spec *volume.Spec, attachID string, pod *v1.Pod, timeout time.Duration) (string, error) {
func (c *csiAttacher) WaitForAttach(spec *volume.Spec, _ string, pod *v1.Pod, timeout time.Duration) (string, error) {
source, err := getCSISourceFromSpec(spec)
if err != nil {
glog.Error(log("attacher.WaitForAttach failed to extract CSI volume source: %v", err))
klog.Error(log("attacher.WaitForAttach failed to extract CSI volume source: %v", err))
return "", err
}
attachID := getAttachmentName(source.VolumeHandle, source.Driver, string(c.plugin.host.GetNodeName()))
skip, err := c.plugin.skipAttach(source.Driver)
if err != nil {
klog.Error(log("attacher.Attach failed to find if driver is attachable: %v", err))
return "", err
}
if skip {
klog.V(4).Infof(log("Driver is not attachable, skip waiting for attach"))
return "", nil
}
return c.waitForVolumeAttachment(source.VolumeHandle, attachID, timeout)
}
func (c *csiAttacher) waitForVolumeAttachment(volumeHandle, attachID string, timeout time.Duration) (string, error) {
glog.V(4).Info(log("probing for updates from CSI driver for [attachment.ID=%v]", attachID))
klog.V(4).Info(log("probing for updates from CSI driver for [attachment.ID=%v]", attachID))
timer := time.NewTimer(timeout) // TODO (vladimirvivien) investigate making this configurable
defer timer.Stop()
@ -131,27 +154,19 @@ func (c *csiAttacher) waitForVolumeAttachment(volumeHandle, attachID string, tim
}
func (c *csiAttacher) waitForVolumeAttachmentInternal(volumeHandle, attachID string, timer *time.Timer, timeout time.Duration) (string, error) {
glog.V(4).Info(log("probing VolumeAttachment [id=%v]", attachID))
klog.V(4).Info(log("probing VolumeAttachment [id=%v]", attachID))
attach, err := c.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
if err != nil {
glog.Error(log("attacher.WaitForAttach failed for volume [%s] (will continue to try): %v", volumeHandle, err))
klog.Error(log("attacher.WaitForAttach failed for volume [%s] (will continue to try): %v", volumeHandle, err))
return "", fmt.Errorf("volume %v has GET error for volume attachment %v: %v", volumeHandle, attachID, err)
}
successful, err := verifyAttachmentStatus(attach, volumeHandle)
if err != nil {
return "", err
}
// if being deleted, fail fast
if attach.GetDeletionTimestamp() != nil {
glog.Error(log("VolumeAttachment [%s] has deletion timestamp, will not continue to wait for attachment", attachID))
return "", errors.New("volume attachment is being deleted")
}
// attachment OK
if attach.Status.Attached {
if successful {
return attachID, nil
}
// driver reports attach error
attachErr := attach.Status.AttachError
if attachErr != nil {
glog.Error(log("attachment for %v failed: %v", volumeHandle, attachErr.Message))
return "", errors.New(attachErr.Message)
}
watcher, err := c.k8s.StorageV1beta1().VolumeAttachments().Watch(meta.SingleObject(meta.ObjectMeta{Name: attachID, ResourceVersion: attach.ResourceVersion}))
if err != nil {
@ -165,31 +180,23 @@ func (c *csiAttacher) waitForVolumeAttachmentInternal(volumeHandle, attachID str
select {
case event, ok := <-ch:
if !ok {
glog.Errorf("[attachment.ID=%v] watch channel had been closed", attachID)
klog.Errorf("[attachment.ID=%v] watch channel had been closed", attachID)
return "", errors.New("volume attachment watch channel had been closed")
}
switch event.Type {
case watch.Added, watch.Modified:
attach, _ := event.Object.(*storage.VolumeAttachment)
// if being deleted, fail fast
if attach.GetDeletionTimestamp() != nil {
glog.Error(log("VolumeAttachment [%s] has deletion timestamp, will not continue to wait for attachment", attachID))
return "", errors.New("volume attachment is being deleted")
successful, err := verifyAttachmentStatus(attach, volumeHandle)
if err != nil {
return "", err
}
// attachment OK
if attach.Status.Attached {
if successful {
return attachID, nil
}
// driver reports attach error
attachErr := attach.Status.AttachError
if attachErr != nil {
glog.Error(log("attachment for %v failed: %v", volumeHandle, attachErr.Message))
return "", errors.New(attachErr.Message)
}
case watch.Deleted:
// if deleted, fail fast
glog.Error(log("VolumeAttachment [%s] has been deleted, will not continue to wait for attachment", attachID))
klog.Error(log("VolumeAttachment [%s] has been deleted, will not continue to wait for attachment", attachID))
return "", errors.New("volume attachment has been deleted")
case watch.Error:
@ -198,36 +205,66 @@ func (c *csiAttacher) waitForVolumeAttachmentInternal(volumeHandle, attachID str
}
case <-timer.C:
glog.Error(log("attacher.WaitForAttach timeout after %v [volume=%v; attachment.ID=%v]", timeout, volumeHandle, attachID))
klog.Error(log("attacher.WaitForAttach timeout after %v [volume=%v; attachment.ID=%v]", timeout, volumeHandle, attachID))
return "", fmt.Errorf("attachment timeout for volume %v", volumeHandle)
}
}
}
func verifyAttachmentStatus(attachment *storage.VolumeAttachment, volumeHandle string) (bool, error) {
// if being deleted, fail fast
if attachment.GetDeletionTimestamp() != nil {
klog.Error(log("VolumeAttachment [%s] has deletion timestamp, will not continue to wait for attachment", attachment.Name))
return false, errors.New("volume attachment is being deleted")
}
// attachment OK
if attachment.Status.Attached {
return true, nil
}
// driver reports attach error
attachErr := attachment.Status.AttachError
if attachErr != nil {
klog.Error(log("attachment for %v failed: %v", volumeHandle, attachErr.Message))
return false, errors.New(attachErr.Message)
}
return false, nil
}
func (c *csiAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
glog.V(4).Info(log("probing attachment status for %d volume(s) ", len(specs)))
klog.V(4).Info(log("probing attachment status for %d volume(s) ", len(specs)))
attached := make(map[*volume.Spec]bool)
for _, spec := range specs {
if spec == nil {
glog.Error(log("attacher.VolumesAreAttached missing volume.Spec"))
klog.Error(log("attacher.VolumesAreAttached missing volume.Spec"))
return nil, errors.New("missing spec")
}
source, err := getCSISourceFromSpec(spec)
if err != nil {
glog.Error(log("attacher.VolumesAreAttached failed: %v", err))
klog.Error(log("attacher.VolumesAreAttached failed: %v", err))
continue
}
skip, err := c.plugin.skipAttach(source.Driver)
if err != nil {
klog.Error(log("Failed to check CSIDriver for %s: %s", source.Driver, err))
} else {
if skip {
// This volume is not attachable, pretend it's attached
attached[spec] = true
continue
}
}
attachID := getAttachmentName(source.VolumeHandle, source.Driver, string(nodeName))
glog.V(4).Info(log("probing attachment status for VolumeAttachment %v", attachID))
klog.V(4).Info(log("probing attachment status for VolumeAttachment %v", attachID))
attach, err := c.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
if err != nil {
glog.Error(log("attacher.VolumesAreAttached failed for attach.ID=%v: %v", attachID, err))
attached[spec] = false
klog.Error(log("attacher.VolumesAreAttached failed for attach.ID=%v: %v", attachID, err))
continue
}
glog.V(4).Info(log("attacher.VolumesAreAttached attachment [%v] has status.attached=%t", attachID, attach.Status.Attached))
klog.V(4).Info(log("attacher.VolumesAreAttached attachment [%v] has status.attached=%t", attachID, attach.Status.Attached))
attached[spec] = attach.Status.Attached
}
@ -235,27 +272,32 @@ func (c *csiAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.No
}
func (c *csiAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) {
glog.V(4).Info(log("attacher.GetDeviceMountPath(%v)", spec))
klog.V(4).Info(log("attacher.GetDeviceMountPath(%v)", spec))
deviceMountPath, err := makeDeviceMountPath(c.plugin, spec)
if err != nil {
glog.Error(log("attacher.GetDeviceMountPath failed to make device mount path: %v", err))
klog.Error(log("attacher.GetDeviceMountPath failed to make device mount path: %v", err))
return "", err
}
glog.V(4).Infof("attacher.GetDeviceMountPath succeeded, deviceMountPath: %s", deviceMountPath)
klog.V(4).Infof("attacher.GetDeviceMountPath succeeded, deviceMountPath: %s", deviceMountPath)
return deviceMountPath, nil
}
func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) (err error) {
glog.V(4).Infof(log("attacher.MountDevice(%s, %s)", devicePath, deviceMountPath))
klog.V(4).Infof(log("attacher.MountDevice(%s, %s)", devicePath, deviceMountPath))
if deviceMountPath == "" {
err = fmt.Errorf("attacher.MountDevice failed, deviceMountPath is empty")
return err
}
mounted, err := isDirMounted(c.plugin, deviceMountPath)
if err != nil {
glog.Error(log("attacher.MountDevice failed while checking mount status for dir [%s]", deviceMountPath))
klog.Error(log("attacher.MountDevice failed while checking mount status for dir [%s]", deviceMountPath))
return err
}
if mounted {
glog.V(4).Info(log("attacher.MountDevice skipping mount, dir already mounted [%s]", deviceMountPath))
klog.V(4).Info(log("attacher.MountDevice skipping mount, dir already mounted [%s]", deviceMountPath))
return nil
}
@ -265,77 +307,64 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo
}
csiSource, err := getCSISourceFromSpec(spec)
if err != nil {
glog.Error(log("attacher.MountDevice failed to get CSI persistent source: %v", err))
klog.Error(log("attacher.MountDevice failed to get CSI persistent source: %v", err))
return err
}
// Store volume metadata for UnmountDevice. Keep it around even if the
// driver does not support NodeStage, UnmountDevice still needs it.
if err = os.MkdirAll(deviceMountPath, 0750); err != nil {
glog.Error(log("attacher.MountDevice failed to create dir %#v: %v", deviceMountPath, err))
klog.Error(log("attacher.MountDevice failed to create dir %#v: %v", deviceMountPath, err))
return err
}
glog.V(4).Info(log("created target path successfully [%s]", deviceMountPath))
klog.V(4).Info(log("created target path successfully [%s]", deviceMountPath))
dataDir := filepath.Dir(deviceMountPath)
data := map[string]string{
volDataKey.volHandle: csiSource.VolumeHandle,
volDataKey.driverName: csiSource.Driver,
}
if err = saveVolumeData(dataDir, volDataFileName, data); err != nil {
glog.Error(log("failed to save volume info data: %v", err))
klog.Error(log("failed to save volume info data: %v", err))
if cleanerr := os.RemoveAll(dataDir); err != nil {
glog.Error(log("failed to remove dir after error [%s]: %v", dataDir, cleanerr))
klog.Error(log("failed to remove dir after error [%s]: %v", dataDir, cleanerr))
}
return err
}
defer func() {
if err != nil {
// clean up metadata
glog.Errorf(log("attacher.MountDevice failed: %v", err))
klog.Errorf(log("attacher.MountDevice failed: %v", err))
if err := removeMountDir(c.plugin, deviceMountPath); err != nil {
glog.Error(log("attacher.MountDevice failed to remove mount dir after errir [%s]: %v", deviceMountPath, err))
klog.Error(log("attacher.MountDevice failed to remove mount dir after errir [%s]: %v", deviceMountPath, err))
}
}
}()
if c.csiClient == nil {
c.csiClient = newCsiDriverClient(csiSource.Driver)
c.csiClient, err = newCsiDriverClient(csiDriverName(csiSource.Driver))
if err != nil {
klog.Errorf(log("attacher.MountDevice failed to create newCsiDriverClient: %v", err))
return err
}
}
csi := c.csiClient
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
defer cancel()
// Check whether "STAGE_UNSTAGE_VOLUME" is set
stageUnstageSet, err := hasStageUnstageCapability(ctx, csi)
stageUnstageSet, err := csi.NodeSupportsStageUnstage(ctx)
if err != nil {
return err
}
if !stageUnstageSet {
glog.Infof(log("attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..."))
klog.Infof(log("attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..."))
// defer does *not* remove the metadata file and it's correct - UnmountDevice needs it there.
return nil
}
// Start MountDevice
if deviceMountPath == "" {
err = fmt.Errorf("attacher.MountDevice failed, deviceMountPath is empty")
return err
}
nodeName := string(c.plugin.host.GetNodeName())
attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, nodeName)
// search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
attachment, err := c.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
if err != nil {
return err // This err already has enough context ("VolumeAttachment xyz not found")
}
if attachment == nil {
err = errors.New("no existing VolumeAttachment found")
return err
}
publishVolumeInfo := attachment.Status.AttachmentMetadata
publishContext, err := c.plugin.getPublishContext(c.k8s, csiSource.VolumeHandle, csiSource.Driver, nodeName)
nodeStageSecrets := map[string]string{}
if csiSource.NodeStageSecretRef != nil {
@ -356,7 +385,7 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo
fsType := csiSource.FSType
err = csi.NodeStageVolume(ctx,
csiSource.VolumeHandle,
publishVolumeInfo,
publishContext,
deviceMountPath,
fsType,
accessMode,
@ -367,21 +396,23 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo
return err
}
glog.V(4).Infof(log("attacher.MountDevice successfully requested NodeStageVolume [%s]", deviceMountPath))
klog.V(4).Infof(log("attacher.MountDevice successfully requested NodeStageVolume [%s]", deviceMountPath))
return nil
}
var _ volume.Detacher = &csiAttacher{}
var _ volume.DeviceUnmounter = &csiAttacher{}
func (c *csiAttacher) Detach(volumeName string, nodeName types.NodeName) error {
// volumeName in format driverName<SEP>volumeHandle generated by plugin.GetVolumeName()
if volumeName == "" {
glog.Error(log("detacher.Detach missing value for parameter volumeName"))
klog.Error(log("detacher.Detach missing value for parameter volumeName"))
return errors.New("missing expected parameter volumeName")
}
parts := strings.Split(volumeName, volNameSep)
if len(parts) != 2 {
glog.Error(log("detacher.Detach insufficient info encoded in volumeName"))
klog.Error(log("detacher.Detach insufficient info encoded in volumeName"))
return errors.New("volumeName missing expected data")
}
@ -391,19 +422,19 @@ func (c *csiAttacher) Detach(volumeName string, nodeName types.NodeName) error {
if err := c.k8s.StorageV1beta1().VolumeAttachments().Delete(attachID, nil); err != nil {
if apierrs.IsNotFound(err) {
// object deleted or never existed, done
glog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] not found, object deleted", attachID, volID))
klog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] not found, object deleted", attachID, volID))
return nil
}
glog.Error(log("detacher.Detach failed to delete VolumeAttachment [%s]: %v", attachID, err))
klog.Error(log("detacher.Detach failed to delete VolumeAttachment [%s]: %v", attachID, err))
return err
}
glog.V(4).Info(log("detacher deleted ok VolumeAttachment.ID=%s", attachID))
klog.V(4).Info(log("detacher deleted ok VolumeAttachment.ID=%s", attachID))
return c.waitForVolumeDetachment(volID, attachID)
}
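
For reference, Detach round-trips the encoding described in the comment above (driverName<SEP>volumeHandle). A self-contained sketch of that split; the separator value used here is an assumption, since volNameSep's definition is not part of this diff:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// Assumed separator for illustration only; the real constant is defined
// elsewhere in the plugin and is not shown in this hunk.
const volNameSep = "^"

// splitVolumeName reverses the driverName<SEP>volumeHandle encoding that
// plugin.GetVolumeName() produces, as Detach does above.
func splitVolumeName(volumeName string) (driver, handle string, err error) {
	parts := strings.Split(volumeName, volNameSep)
	if len(parts) != 2 {
		return "", "", errors.New("volumeName missing expected data")
	}
	return parts[0], parts[1], nil
}

func main() {
	d, h, _ := splitVolumeName("csi-hostpath" + volNameSep + "vol-0001")
	fmt.Println(d, h) // csi-hostpath vol-0001
}
```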
func (c *csiAttacher) waitForVolumeDetachment(volumeHandle, attachID string) error {
glog.V(4).Info(log("probing for updates from CSI driver for [attachment.ID=%v]", attachID))
klog.V(4).Info(log("probing for updates from CSI driver for [attachment.ID=%v]", attachID))
timeout := c.waitSleepTime * 10
timer := time.NewTimer(timeout) // TODO (vladimirvivien) investigate making this configurable
@ -413,21 +444,21 @@ func (c *csiAttacher) waitForVolumeDetachment(volumeHandle, attachID string) err
}
func (c *csiAttacher) waitForVolumeDetachmentInternal(volumeHandle, attachID string, timer *time.Timer, timeout time.Duration) error {
glog.V(4).Info(log("probing VolumeAttachment [id=%v]", attachID))
klog.V(4).Info(log("probing VolumeAttachment [id=%v]", attachID))
attach, err := c.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
if err != nil {
if apierrs.IsNotFound(err) {
//object deleted or never existed, done
glog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] not found, object deleted", attachID, volumeHandle))
klog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] not found, object deleted", attachID, volumeHandle))
return nil
}
glog.Error(log("detacher.WaitForDetach failed for volume [%s] (will continue to try): %v", volumeHandle, err))
klog.Error(log("detacher.WaitForDetach failed for volume [%s] (will continue to try): %v", volumeHandle, err))
return err
}
// driver reports attach error
detachErr := attach.Status.DetachError
if detachErr != nil {
glog.Error(log("detachment for VolumeAttachment [%v] for volume [%s] failed: %v", attachID, volumeHandle, detachErr.Message))
klog.Error(log("detachment for VolumeAttachment [%v] for volume [%s] failed: %v", attachID, volumeHandle, detachErr.Message))
return errors.New(detachErr.Message)
}
@ -442,7 +473,7 @@ func (c *csiAttacher) waitForVolumeDetachmentInternal(volumeHandle, attachID str
select {
case event, ok := <-ch:
if !ok {
glog.Errorf("[attachment.ID=%v] watch channel had been closed", attachID)
klog.Errorf("[attachment.ID=%v] watch channel had been closed", attachID)
return errors.New("volume attachment watch channel had been closed")
}
@ -452,12 +483,12 @@ func (c *csiAttacher) waitForVolumeDetachmentInternal(volumeHandle, attachID str
// driver reports attach error
detachErr := attach.Status.DetachError
if detachErr != nil {
glog.Error(log("detachment for VolumeAttachment [%v] for volume [%s] failed: %v", attachID, volumeHandle, detachErr.Message))
klog.Error(log("detachment for VolumeAttachment [%v] for volume [%s] failed: %v", attachID, volumeHandle, detachErr.Message))
return errors.New(detachErr.Message)
}
case watch.Deleted:
//object deleted
glog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] has been deleted", attachID, volumeHandle))
klog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] has been deleted", attachID, volumeHandle))
return nil
case watch.Error:
@ -466,14 +497,14 @@ func (c *csiAttacher) waitForVolumeDetachmentInternal(volumeHandle, attachID str
}
case <-timer.C:
glog.Error(log("detacher.WaitForDetach timeout after %v [volume=%v; attachment.ID=%v]", timeout, volumeHandle, attachID))
klog.Error(log("detacher.WaitForDetach timeout after %v [volume=%v; attachment.ID=%v]", timeout, volumeHandle, attachID))
return fmt.Errorf("detachment timeout for volume %v", volumeHandle)
}
}
}
func (c *csiAttacher) UnmountDevice(deviceMountPath string) error {
glog.V(4).Info(log("attacher.UnmountDevice(%s)", deviceMountPath))
klog.V(4).Info(log("attacher.UnmountDevice(%s)", deviceMountPath))
// Setup
var driverName, volID string
@ -483,31 +514,35 @@ func (c *csiAttacher) UnmountDevice(deviceMountPath string) error {
driverName = data[volDataKey.driverName]
volID = data[volDataKey.volHandle]
} else {
glog.Error(log("UnmountDevice failed to load volume data file [%s]: %v", dataDir, err))
klog.Error(log("UnmountDevice failed to load volume data file [%s]: %v", dataDir, err))
// The volume might have been mounted by old CSI volume plugin. Fall back to the old behavior: read PV from API server
driverName, volID, err = getDriverAndVolNameFromDeviceMountPath(c.k8s, deviceMountPath)
if err != nil {
glog.Errorf(log("attacher.UnmountDevice failed to get driver and volume name from device mount path: %v", err))
klog.Errorf(log("attacher.UnmountDevice failed to get driver and volume name from device mount path: %v", err))
return err
}
}
if c.csiClient == nil {
c.csiClient = newCsiDriverClient(driverName)
c.csiClient, err = newCsiDriverClient(csiDriverName(driverName))
if err != nil {
klog.Errorf(log("attacher.UnmountDevice failed to create newCsiDriverClient: %v", err))
return err
}
}
csi := c.csiClient
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
defer cancel()
// Check whether "STAGE_UNSTAGE_VOLUME" is set
stageUnstageSet, err := hasStageUnstageCapability(ctx, csi)
stageUnstageSet, err := csi.NodeSupportsStageUnstage(ctx)
if err != nil {
glog.Errorf(log("attacher.UnmountDevice failed to check whether STAGE_UNSTAGE_VOLUME set: %v", err))
klog.Errorf(log("attacher.UnmountDevice failed to check whether STAGE_UNSTAGE_VOLUME set: %v", err))
return err
}
if !stageUnstageSet {
glog.Infof(log("attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice..."))
klog.Infof(log("attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice..."))
// Just delete the global directory + json file
if err := removeMountDir(c.plugin, deviceMountPath); err != nil {
return fmt.Errorf("failed to clean up gloubal mount %s: %s", dataDir, err)
@ -522,7 +557,7 @@ func (c *csiAttacher) UnmountDevice(deviceMountPath string) error {
deviceMountPath)
if err != nil {
glog.Errorf(log("attacher.UnmountDevice failed: %v", err))
klog.Errorf(log("attacher.UnmountDevice failed: %v", err))
return err
}
@ -531,28 +566,10 @@ func (c *csiAttacher) UnmountDevice(deviceMountPath string) error {
return fmt.Errorf("failed to clean up gloubal mount %s: %s", dataDir, err)
}
glog.V(4).Infof(log("attacher.UnmountDevice successfully requested NodeStageVolume [%s]", deviceMountPath))
klog.V(4).Infof(log("attacher.UnmountDevice successfully requested NodeStageVolume [%s]", deviceMountPath))
return nil
}
func hasStageUnstageCapability(ctx context.Context, csi csiClient) (bool, error) {
capabilities, err := csi.NodeGetCapabilities(ctx)
if err != nil {
return false, err
}
stageUnstageSet := false
if capabilities == nil {
return false, nil
}
for _, capability := range capabilities {
if capability.GetRpc().GetType() == csipb.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME {
stageUnstageSet = true
}
}
return stageUnstageSet, nil
}
// getAttachmentName returns csi-<sha256(volName,csiDriverName,NodeName)>
func getAttachmentName(volName, csiDriverName, nodeName string) string {
result := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", volName, csiDriverName, nodeName)))
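
The hunk is cut off before getAttachmentName's return statement. A standalone illustration of the naming scheme the comment describes; the final "csi-%x" hex rendering is an assumption based on that comment:

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// attachmentName illustrates the scheme: the VolumeAttachment object name is
// a "csi-" prefix plus the SHA-256 of the concatenated volume, driver, and
// node names. The hex formatting is assumed; this hunk ends before the
// function's return.
func attachmentName(volName, driverName, nodeName string) string {
	result := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", volName, driverName, nodeName)))
	return fmt.Sprintf("csi-%x", result)
}

func main() {
	// Deterministic: the same (volume, driver, node) triple always yields
	// the same attachment name, so Attach and WaitForAttach can recompute it.
	fmt.Println(attachmentName("test-vol", "attachable", "node"))
}
```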

View File

@ -29,14 +29,26 @@ import (
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
clientset "k8s.io/client-go/kubernetes"
fakeclient "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
utiltesting "k8s.io/client-go/util/testing"
fakecsi "k8s.io/csi-api/pkg/client/clientset/versioned/fake"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
var (
bFalse = false
bTrue = true
)
func makeTestAttachment(attachID, nodeName, pvName string) *storage.VolumeAttachment {
return &storage.VolumeAttachment{
ObjectMeta: meta.ObjectMeta{
@ -57,6 +69,40 @@ func makeTestAttachment(attachID, nodeName, pvName string) *storage.VolumeAttach
}
}
func markVolumeAttached(t *testing.T, client clientset.Interface, watch *watch.RaceFreeFakeWatcher, attachID string, status storage.VolumeAttachmentStatus) {
ticker := time.NewTicker(10 * time.Millisecond)
var attach *storage.VolumeAttachment
var err error
defer ticker.Stop()
// wait for attachment to be saved
for i := 0; i < 100; i++ {
attach, err = client.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
if err != nil {
if apierrs.IsNotFound(err) {
<-ticker.C
continue
}
t.Error(err)
}
if attach != nil {
klog.Infof("stopping wait")
break
}
}
klog.Infof("stopped wait")
if attach == nil {
t.Logf("attachment not found for id:%v", attachID)
} else {
attach.Status = status
_, err := client.StorageV1beta1().VolumeAttachments().Update(attach)
if err != nil {
t.Error(err)
}
watch.Modify(attach)
}
}
func TestAttacherAttach(t *testing.T) {
testCases := []struct {
@ -120,8 +166,7 @@ func TestAttacherAttach(t *testing.T) {
// attacher loop
for i, tc := range testCases {
t.Logf("test case: %s", tc.name)
plug, fakeWatcher, tmpDir, _ := newTestWatchPlugin(t)
plug, fakeWatcher, tmpDir, _ := newTestWatchPlugin(t, nil)
defer os.RemoveAll(tmpDir)
attacher, err := plug.NewAttacher()
@ -146,42 +191,211 @@ func TestAttacherAttach(t *testing.T) {
}
}(tc.attachID, tc.nodeName, tc.shouldFail)
// update attachment to avoid long waitForAttachment
ticker := time.NewTicker(10 * time.Millisecond)
defer ticker.Stop()
// wait for attachment to be saved
var attach *storage.VolumeAttachment
for i := 0; i < 100; i++ {
attach, err = csiAttacher.k8s.StorageV1beta1().VolumeAttachments().Get(tc.attachID, meta.GetOptions{})
if err != nil {
if apierrs.IsNotFound(err) {
<-ticker.C
continue
}
t.Error(err)
var status storage.VolumeAttachmentStatus
if tc.injectAttacherError {
status.Attached = false
status.AttachError = &storage.VolumeError{
Message: "attacher error",
}
if attach != nil {
break
}
}
if attach == nil {
t.Logf("attachment not found for id:%v", tc.attachID)
} else {
if tc.injectAttacherError {
attach.Status.Attached = false
attach.Status.AttachError = &storage.VolumeError{
Message: "attacher error",
}
} else {
attach.Status.Attached = true
}
_, err = csiAttacher.k8s.StorageV1beta1().VolumeAttachments().Update(attach)
if err != nil {
t.Error(err)
}
fakeWatcher.Modify(attach)
status.Attached = true
}
markVolumeAttached(t, csiAttacher.k8s, fakeWatcher, tc.attachID, status)
}
}
func TestAttacherWithCSIDriver(t *testing.T) {
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIDriverRegistry, true)()
tests := []struct {
name string
driver string
expectVolumeAttachment bool
}{
{
name: "CSIDriver not attachable",
driver: "not-attachable",
expectVolumeAttachment: false,
},
{
name: "CSIDriver is attachable",
driver: "attachable",
expectVolumeAttachment: true,
},
{
name: "CSIDriver.AttachRequired not set -> failure",
driver: "nil",
expectVolumeAttachment: true,
},
{
name: "CSIDriver does not exist not set -> failure",
driver: "unknown",
expectVolumeAttachment: true,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fakeCSIClient := fakecsi.NewSimpleClientset(
getCSIDriver("not-attachable", nil, &bFalse),
getCSIDriver("attachable", nil, &bTrue),
getCSIDriver("nil", nil, nil),
)
plug, fakeWatcher, tmpDir, _ := newTestWatchPlugin(t, fakeCSIClient)
defer os.RemoveAll(tmpDir)
attacher, err := plug.NewAttacher()
if err != nil {
t.Fatalf("failed to create new attacher: %v", err)
}
csiAttacher := attacher.(*csiAttacher)
spec := volume.NewSpecFromPersistentVolume(makeTestPV("test-pv", 10, test.driver, "test-vol"), false)
expectedAttachID := getAttachmentName("test-vol", test.driver, "node")
status := storage.VolumeAttachmentStatus{
Attached: true,
}
if test.expectVolumeAttachment {
go markVolumeAttached(t, csiAttacher.k8s, fakeWatcher, expectedAttachID, status)
}
attachID, err := csiAttacher.Attach(spec, types.NodeName("node"))
if err != nil {
t.Errorf("Attach() failed: %s", err)
}
if test.expectVolumeAttachment && attachID == "" {
t.Errorf("Epected attachID, got nothing")
}
if !test.expectVolumeAttachment && attachID != "" {
t.Errorf("Epected empty attachID, got %q", attachID)
}
})
}
}
func TestAttacherWaitForVolumeAttachmentWithCSIDriver(t *testing.T) {
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIDriverRegistry, true)()
// In order to detect if the volume plugin would skip WaitForAttach for non-attachable drivers,
// we do not instantiate any VolumeAttachment. So if the plugin does not skip attach, WaitForVolumeAttachment
// will return an error that volume attachment was not found.
tests := []struct {
name string
driver string
expectError bool
}{
{
name: "CSIDriver not attachable -> success",
driver: "not-attachable",
expectError: false,
},
{
name: "CSIDriver is attachable -> failure",
driver: "attachable",
expectError: true,
},
{
name: "CSIDriver.AttachRequired not set -> failure",
driver: "nil",
expectError: true,
},
{
name: "CSIDriver does not exist not set -> failure",
driver: "unknown",
expectError: true,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fakeCSIClient := fakecsi.NewSimpleClientset(
getCSIDriver("not-attachable", nil, &bFalse),
getCSIDriver("attachable", nil, &bTrue),
getCSIDriver("nil", nil, nil),
)
plug, tmpDir := newTestPlugin(t, nil, fakeCSIClient)
defer os.RemoveAll(tmpDir)
attacher, err := plug.NewAttacher()
if err != nil {
t.Fatalf("failed to create new attacher: %v", err)
}
csiAttacher := attacher.(*csiAttacher)
spec := volume.NewSpecFromPersistentVolume(makeTestPV("test-pv", 10, test.driver, "test-vol"), false)
_, err = csiAttacher.WaitForAttach(spec, "", nil, time.Second)
if err != nil && !test.expectError {
t.Errorf("Unexpected error: %s", err)
}
if err == nil && test.expectError {
t.Errorf("Expected error, got none")
}
})
}
}
func TestAttacherWaitForAttach(t *testing.T) {
tests := []struct {
name string
driver string
makeAttachment func() *storage.VolumeAttachment
expectedAttachID string
expectError bool
}{
{
name: "successful attach",
driver: "attachable",
makeAttachment: func() *storage.VolumeAttachment {
testAttachID := getAttachmentName("test-vol", "attachable", "node")
successfulAttachment := makeTestAttachment(testAttachID, "node", "test-pv")
successfulAttachment.Status.Attached = true
return successfulAttachment
},
expectedAttachID: getAttachmentName("test-vol", "attachable", "node"),
expectError: false,
},
{
name: "failed attach",
driver: "attachable",
expectError: true,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
plug, _, tmpDir, _ := newTestWatchPlugin(t, nil)
defer os.RemoveAll(tmpDir)
attacher, err := plug.NewAttacher()
if err != nil {
t.Fatalf("failed to create new attacher: %v", err)
}
csiAttacher := attacher.(*csiAttacher)
spec := volume.NewSpecFromPersistentVolume(makeTestPV("test-pv", 10, test.driver, "test-vol"), false)
if test.makeAttachment != nil {
attachment := test.makeAttachment()
_, err = csiAttacher.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
if err != nil {
t.Fatalf("failed to create VolumeAttachment: %v", err)
}
gotAttachment, err := csiAttacher.k8s.StorageV1beta1().VolumeAttachments().Get(attachment.Name, meta.GetOptions{})
if err != nil {
t.Fatalf("failed to get created VolumeAttachment: %v", err)
}
t.Logf("created test VolumeAttachment %+v", gotAttachment)
}
attachID, err := csiAttacher.WaitForAttach(spec, "", nil, time.Second)
if err != nil && !test.expectError {
t.Errorf("Unexpected error: %s", err)
}
if err == nil && test.expectError {
t.Errorf("Expected error, got none")
}
if attachID != test.expectedAttachID {
t.Errorf("Expected attachID %q, got %q", test.expectedAttachID, attachID)
}
})
}
}
@ -237,7 +451,7 @@ func TestAttacherWaitForVolumeAttachment(t *testing.T) {
}
for i, tc := range testCases {
plug, fakeWatcher, tmpDir, _ := newTestWatchPlugin(t)
plug, fakeWatcher, tmpDir, _ := newTestWatchPlugin(t, nil)
defer os.RemoveAll(tmpDir)
attacher, err := plug.NewAttacher()
@ -287,7 +501,7 @@ func TestAttacherWaitForVolumeAttachment(t *testing.T) {
}
func TestAttacherVolumesAreAttached(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
attacher, err := plug.NewAttacher()
@ -374,7 +588,7 @@ func TestAttacherDetach(t *testing.T) {
for _, tc := range testCases {
t.Logf("running test: %v", tc.name)
plug, fakeWatcher, tmpDir, client := newTestWatchPlugin(t)
plug, fakeWatcher, tmpDir, client := newTestWatchPlugin(t, nil)
defer os.RemoveAll(tmpDir)
if tc.reactor != nil {
client.PrependReactor("*", "*", tc.reactor)
@ -423,7 +637,7 @@ func TestAttacherDetach(t *testing.T) {
func TestAttacherGetDeviceMountPath(t *testing.T) {
// Setup
// Create a new attacher
plug, _, tmpDir, _ := newTestWatchPlugin(t)
plug, _, tmpDir, _ := newTestWatchPlugin(t, nil)
defer os.RemoveAll(tmpDir)
attacher, err0 := plug.NewAttacher()
if err0 != nil {
@ -532,7 +746,7 @@ func TestAttacherMountDevice(t *testing.T) {
// Setup
// Create a new attacher
plug, fakeWatcher, tmpDir, _ := newTestWatchPlugin(t)
plug, fakeWatcher, tmpDir, _ := newTestWatchPlugin(t, nil)
defer os.RemoveAll(tmpDir)
attacher, err0 := plug.NewAttacher()
if err0 != nil {
@ -541,6 +755,10 @@ func TestAttacherMountDevice(t *testing.T) {
csiAttacher := attacher.(*csiAttacher)
csiAttacher.csiClient = setupClient(t, tc.stageUnstageSet)
if tc.deviceMountPath != "" {
tc.deviceMountPath = filepath.Join(tmpDir, tc.deviceMountPath)
}
nodeName := string(csiAttacher.plugin.host.GetNodeName())
// Create spec
@ -585,12 +803,12 @@ func TestAttacherMountDevice(t *testing.T) {
t.Errorf("got wrong number of staged volumes, expecting %v got: %v", numStaged, len(staged))
}
if tc.stageUnstageSet {
gotPath, ok := staged[tc.volName]
vol, ok := staged[tc.volName]
if !ok {
t.Errorf("could not find staged volume: %s", tc.volName)
}
if gotPath != tc.deviceMountPath {
t.Errorf("expected mount path: %s. got: %s", tc.deviceMountPath, gotPath)
if vol.Path != tc.deviceMountPath {
t.Errorf("expected mount path: %s. got: %s", tc.deviceMountPath, vol.Path)
}
}
}
@ -649,6 +867,8 @@ func TestAttacherUnmountDevice(t *testing.T) {
},
{
testName: "stage_unstage not set no vars should not fail",
deviceMountPath: "plugins/csi/pv/test-pv-name/globalmount",
jsonFile: `{"driverName":"test-driver","volumeHandle":"test-vol1"}`,
stageUnstageSet: false,
},
}
@ -657,7 +877,7 @@ func TestAttacherUnmountDevice(t *testing.T) {
t.Logf("Running test case: %s", tc.testName)
// Setup
// Create a new attacher
plug, _, tmpDir, _ := newTestWatchPlugin(t)
plug, _, tmpDir, _ := newTestWatchPlugin(t, nil)
defer os.RemoveAll(tmpDir)
attacher, err0 := plug.NewAttacher()
if err0 != nil {
@ -672,7 +892,7 @@ func TestAttacherUnmountDevice(t *testing.T) {
// Add the volume to NodeStagedVolumes
cdc := csiAttacher.csiClient.(*fakeCsiDriverClient)
cdc.nodeClient.AddNodeStagedVolume(tc.volID, tc.deviceMountPath)
cdc.nodeClient.AddNodeStagedVolume(tc.volID, tc.deviceMountPath, nil)
// Make JSON for this object
if tc.deviceMountPath != "" {
@ -743,7 +963,7 @@ func TestAttacherUnmountDevice(t *testing.T) {
}
// create a plugin mgr to load plugins and setup a fake client
func newTestWatchPlugin(t *testing.T) (*csiPlugin, *watch.RaceFreeFakeWatcher, string, *fakeclient.Clientset) {
func newTestWatchPlugin(t *testing.T, csiClient *fakecsi.Clientset) (*csiPlugin, *watch.RaceFreeFakeWatcher, string, *fakeclient.Clientset) {
tmpDir, err := utiltesting.MkTmpdir("csi-test")
if err != nil {
t.Fatalf("can't create temp dir: %v", err)
@ -753,10 +973,15 @@ func newTestWatchPlugin(t *testing.T) (*csiPlugin, *watch.RaceFreeFakeWatcher, s
fakeWatcher := watch.NewRaceFreeFake()
fakeClient.Fake.PrependWatchReactor("*", core.DefaultWatchReactor(fakeWatcher, nil))
fakeClient.Fake.WatchReactionChain = fakeClient.Fake.WatchReactionChain[:1]
host := volumetest.NewFakeVolumeHost(
if csiClient == nil {
csiClient = fakecsi.NewSimpleClientset()
}
host := volumetest.NewFakeVolumeHostWithCSINodeName(
tmpDir,
fakeClient,
csiClient,
nil,
"node",
)
plugMgr := &volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host)
@ -771,5 +996,12 @@ func newTestWatchPlugin(t *testing.T) (*csiPlugin, *watch.RaceFreeFakeWatcher, s
t.Fatalf("cannot assert plugin to be type csiPlugin")
}
if utilfeature.DefaultFeatureGate.Enabled(features.CSIDriverRegistry) {
// Wait until the informer in CSI volume plugin has all CSIDrivers.
wait.PollImmediate(testInformerSyncPeriod, testInformerSyncTimeout, func() (bool, error) {
return csiPlug.csiDriverInformer.Informer().HasSynced(), nil
})
}
return csiPlug, fakeWatcher, tmpDir, fakeClient
}

View File

@ -21,22 +21,26 @@ import (
"errors"
"fmt"
"os"
"path"
"path/filepath"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1beta1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
kstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
ioutil "k8s.io/kubernetes/pkg/volume/util"
)
type csiBlockMapper struct {
k8s kubernetes.Interface
csiClient csiClient
plugin *csiPlugin
driverName string
driverName csiDriverName
specName string
volumeID string
readOnly bool
@ -47,76 +51,61 @@ type csiBlockMapper struct {
var _ volume.BlockVolumeMapper = &csiBlockMapper{}
// GetGlobalMapPath returns a path (on the node) where the devicePath will be symlinked to
// Example: plugins/kubernetes.io/csi/volumeDevices/{volumeID}
// GetGlobalMapPath returns a global map path (on the node) to a device file which will be symlinked to
// Example: plugins/kubernetes.io/csi/volumeDevices/{pvname}/dev
func (m *csiBlockMapper) GetGlobalMapPath(spec *volume.Spec) (string, error) {
dir := getVolumeDevicePluginDir(spec.Name(), m.plugin.host)
glog.V(4).Infof(log("blockMapper.GetGlobalMapPath = %s", dir))
klog.V(4).Infof(log("blockMapper.GetGlobalMapPath = %s", dir))
return dir, nil
}
// GetPodDeviceMapPath returns pod's device map path and volume name
// path: pods/{podUid}/volumeDevices/kubernetes.io~csi/, {volumeID}
// getStagingPath returns a staging path for a directory (on the node) that should be used on NodeStageVolume/NodeUnstageVolume
// Example: plugins/kubernetes.io/csi/volumeDevices/staging/{pvname}
func (m *csiBlockMapper) getStagingPath() string {
sanitizedSpecVolID := kstrings.EscapeQualifiedNameForDisk(m.specName)
return path.Join(m.plugin.host.GetVolumeDevicePluginDir(csiPluginName), "staging", sanitizedSpecVolID)
}
// getPublishPath returns a publish path for a file (on the node) that should be used on NodePublishVolume/NodeUnpublishVolume
// Example: plugins/kubernetes.io/csi/volumeDevices/publish/{pvname}
func (m *csiBlockMapper) getPublishPath() string {
sanitizedSpecVolID := kstrings.EscapeQualifiedNameForDisk(m.specName)
return path.Join(m.plugin.host.GetVolumeDevicePluginDir(csiPluginName), "publish", sanitizedSpecVolID)
}
// GetPodDeviceMapPath returns pod's device file which will be mapped to a volume
// returns: pods/{podUid}/volumeDevices/kubernetes.io~csi, {pvname}
func (m *csiBlockMapper) GetPodDeviceMapPath() (string, string) {
path, specName := m.plugin.host.GetPodVolumeDeviceDir(m.podUID, csiPluginName), m.specName
glog.V(4).Infof(log("blockMapper.GetPodDeviceMapPath = %s", path))
path := m.plugin.host.GetPodVolumeDeviceDir(m.podUID, kstrings.EscapeQualifiedNameForDisk(csiPluginName))
specName := m.specName
klog.V(4).Infof(log("blockMapper.GetPodDeviceMapPath [path=%s; name=%s]", path, specName))
return path, specName
}
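
Taken together, the three path helpers above give each block PV its own global, staging, and publish location under the plugin's volumeDevices directory. A small sketch of the resulting layout, assuming a conventional kubelet root:

```go
package main

import (
	"fmt"
	"path"
)

func main() {
	// Assumed kubelet root for illustration; the {pvname} segments follow
	// the Example comments in this diff.
	base := "/var/lib/kubelet/plugins/kubernetes.io/csi/volumeDevices"
	pvName := "test-pv"

	fmt.Println(path.Join(base, pvName, "dev"))     // GetGlobalMapPath: device symlink location
	fmt.Println(path.Join(base, "staging", pvName)) // getStagingPath: NodeStage/NodeUnstage target
	fmt.Println(path.Join(base, "publish", pvName)) // getPublishPath: NodePublish/NodeUnpublish target
}
```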
// SetUpDevice ensures the device is attached and returns the path where the device is located.
func (m *csiBlockMapper) SetUpDevice() (string, error) {
if !m.plugin.blockEnabled {
return "", errors.New("CSIBlockVolume feature not enabled")
}
// stageVolumeForBlock stages a block volume to stagingPath
func (m *csiBlockMapper) stageVolumeForBlock(
ctx context.Context,
csi csiClient,
accessMode v1.PersistentVolumeAccessMode,
csiSource *v1.CSIPersistentVolumeSource,
attachment *storage.VolumeAttachment,
) (string, error) {
klog.V(4).Infof(log("blockMapper.stageVolumeForBlock called"))
glog.V(4).Infof(log("blockMapper.SetupDevice called"))
if m.spec == nil {
glog.Error(log("blockMapper.Map spec is nil"))
return "", fmt.Errorf("spec is nil")
}
csiSource, err := getCSISourceFromSpec(m.spec)
if err != nil {
glog.Error(log("blockMapper.SetupDevice failed to get CSI persistent source: %v", err))
return "", err
}
globalMapPath, err := m.GetGlobalMapPath(m.spec)
if err != nil {
glog.Error(log("blockMapper.SetupDevice failed to get global map path: %v", err))
return "", err
}
csi := m.csiClient
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
defer cancel()
stagingPath := m.getStagingPath()
klog.V(4).Infof(log("blockMapper.stageVolumeForBlock stagingPath set [%s]", stagingPath))
// Check whether "STAGE_UNSTAGE_VOLUME" is set
stageUnstageSet, err := hasStageUnstageCapability(ctx, csi)
stageUnstageSet, err := csi.NodeSupportsStageUnstage(ctx)
if err != nil {
glog.Error(log("blockMapper.SetupDevice failed to check STAGE_UNSTAGE_VOLUME capability: %v", err))
klog.Error(log("blockMapper.stageVolumeForBlock failed to check STAGE_UNSTAGE_VOLUME capability: %v", err))
return "", err
}
if !stageUnstageSet {
glog.Infof(log("blockMapper.SetupDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..."))
klog.Infof(log("blockMapper.stageVolumeForBlock STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..."))
return "", nil
}
// Start MountDevice
nodeName := string(m.plugin.host.GetNodeName())
attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, nodeName)
// search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
attachment, err := m.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
if err != nil {
glog.Error(log("blockMapper.SetupDevice failed to get volume attachment [id=%v]: %v", attachID, err))
return "", err
}
if attachment == nil {
glog.Error(log("blockMapper.SetupDevice unable to find VolumeAttachment [id=%s]", attachID))
return "", errors.New("no existing VolumeAttachment found")
}
publishVolumeInfo := attachment.Status.AttachmentMetadata
nodeStageSecrets := map[string]string{}
@ -128,95 +117,126 @@ func (m *csiBlockMapper) SetUpDevice() (string, error) {
}
}
// create globalMapPath before call to NodeStageVolume
if err := os.MkdirAll(globalMapPath, 0750); err != nil {
glog.Error(log("blockMapper.SetupDevice failed to create dir %s: %v", globalMapPath, err))
// Create the stagingPath directory before the call to NodeStageVolume
if err := os.MkdirAll(stagingPath, 0750); err != nil {
klog.Error(log("blockMapper.stageVolumeForBlock failed to create dir %s: %v", stagingPath, err))
return "", err
}
glog.V(4).Info(log("blockMapper.SetupDevice created global device map path successfully [%s]", globalMapPath))
//TODO (vladimirvivien) implement better AccessModes mapping between k8s and CSI
accessMode := v1.ReadWriteOnce
if m.spec.PersistentVolume.Spec.AccessModes != nil {
accessMode = m.spec.PersistentVolume.Spec.AccessModes[0]
}
klog.V(4).Info(log("blockMapper.stageVolumeForBlock created stagingPath directory successfully [%s]", stagingPath))
// Request to stage a block volume to stagingPath.
// The driver is expected to create a driver-specific resource at stagingPath and to
// attach the block volume to the node.
err = csi.NodeStageVolume(ctx,
csiSource.VolumeHandle,
publishVolumeInfo,
globalMapPath,
stagingPath,
fsTypeBlockName,
accessMode,
nodeStageSecrets,
csiSource.VolumeAttributes)
if err != nil {
glog.Error(log("blockMapper.SetupDevice failed: %v", err))
if err := os.RemoveAll(globalMapPath); err != nil {
glog.Error(log("blockMapper.SetupDevice failed to remove dir after a NodeStageVolume() error [%s]: %v", globalMapPath, err))
}
klog.Error(log("blockMapper.stageVolumeForBlock failed: %v", err))
return "", err
}
glog.V(4).Infof(log("blockMapper.SetupDevice successfully requested NodeStageVolume [%s]", globalMapPath))
return globalMapPath, nil
klog.V(4).Infof(log("blockMapper.stageVolumeForBlock successfully requested NodeStageVolume [%s]", stagingPath))
return stagingPath, nil
}
func (m *csiBlockMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, volumeMapName string, podUID types.UID) error {
if !m.plugin.blockEnabled {
return errors.New("CSIBlockVolume feature not enabled")
// publishVolumeForBlock publishes a block volume to publishPath
func (m *csiBlockMapper) publishVolumeForBlock(
ctx context.Context,
csi csiClient,
accessMode v1.PersistentVolumeAccessMode,
csiSource *v1.CSIPersistentVolumeSource,
attachment *storage.VolumeAttachment,
stagingPath string,
) (string, error) {
klog.V(4).Infof(log("blockMapper.publishVolumeForBlock called"))
publishVolumeInfo := attachment.Status.AttachmentMetadata
nodePublishSecrets := map[string]string{}
var err error
if csiSource.NodePublishSecretRef != nil {
nodePublishSecrets, err = getCredentialsFromSecret(m.k8s, csiSource.NodePublishSecretRef)
if err != nil {
klog.Errorf("blockMapper.publishVolumeForBlock failed to get NodePublishSecretRef %s/%s: %v",
csiSource.NodePublishSecretRef.Namespace, csiSource.NodePublishSecretRef.Name, err)
return "", err
}
}
glog.V(4).Infof(log("blockMapper.MapDevice mapping block device %s", devicePath))
publishPath := m.getPublishPath()
// Setup a parent directory for publishPath before call to NodePublishVolume
publishDir := filepath.Dir(publishPath)
if err := os.MkdirAll(publishDir, 0750); err != nil {
klog.Error(log("blockMapper.publishVolumeForBlock failed to create dir %s: %v", publishDir, err))
return "", err
}
klog.V(4).Info(log("blockMapper.publishVolumeForBlock created directory for publishPath successfully [%s]", publishDir))
// Request to publish a block volume to publishPath.
// The driver is expected to place the block volume at publishPath, either by bind-mounting the device file
// there or by creating a device file at that path.
// The parent directory of publishPath is created by k8s, but the driver is responsible for creating publishPath itself.
// If the driver does not implement NodeStageVolume, attaching the block volume to the node may be done here.
err = csi.NodePublishVolume(
ctx,
m.volumeID,
m.readOnly,
stagingPath,
publishPath,
accessMode,
publishVolumeInfo,
csiSource.VolumeAttributes,
nodePublishSecrets,
fsTypeBlockName,
[]string{},
)
if err != nil {
klog.Errorf(log("blockMapper.publishVolumeForBlock failed: %v", err))
return "", err
}
return publishPath, nil
}
// SetUpDevice ensures the device is attached and returns the path where the device is located.
func (m *csiBlockMapper) SetUpDevice() (string, error) {
if !m.plugin.blockEnabled {
return "", errors.New("CSIBlockVolume feature not enabled")
}
klog.V(4).Infof(log("blockMapper.SetUpDevice called"))
// Get csiSource from spec
if m.spec == nil {
glog.Error(log("blockMapper.MapDevice spec is nil"))
return fmt.Errorf("spec is nil")
klog.Error(log("blockMapper.SetUpDevice spec is nil"))
return "", fmt.Errorf("spec is nil")
}
csiSource, err := getCSISourceFromSpec(m.spec)
if err != nil {
glog.Error(log("blockMapper.Map failed to get CSI persistent source: %v", err))
return err
klog.Error(log("blockMapper.SetUpDevice failed to get CSI persistent source: %v", err))
return "", err
}
dir := filepath.Join(volumeMapPath, volumeMapName)
csi := m.csiClient
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
defer cancel()
// Search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
nodeName := string(m.plugin.host.GetNodeName())
attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, nodeName)
// search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
attachment, err := m.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
if err != nil {
glog.Error(log("blockMapper.MapDevice failed to get volume attachment [id=%v]: %v", attachID, err))
return err
klog.Error(log("blockMapper.SetupDevice failed to get volume attachment [id=%v]: %v", attachID, err))
return "", err
}
if attachment == nil {
glog.Error(log("blockMapper.MapDevice unable to find VolumeAttachment [id=%s]", attachID))
return errors.New("no existing VolumeAttachment found")
klog.Error(log("blockMapper.SetupDevice unable to find VolumeAttachment [id=%s]", attachID))
return "", errors.New("no existing VolumeAttachment found")
}
@ -224,60 +244,117 @@ func (m *csiBlockMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, vol
//TODO (vladimirvivien) implement better AccessModes mapping between k8s and CSI
accessMode := v1.ReadWriteOnce
if m.spec.PersistentVolume.Spec.AccessModes != nil {
accessMode = m.spec.PersistentVolume.Spec.AccessModes[0]
}
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
defer cancel()
// Call NodeStageVolume
stagingPath, err := m.stageVolumeForBlock(ctx, m.csiClient, accessMode, csiSource, attachment)
if err != nil {
return "", err
}
// Call NodePublishVolume
publishPath, err := m.publishVolumeForBlock(ctx, m.csiClient, accessMode, csiSource, attachment, stagingPath)
if err != nil {
return "", err
}
return publishPath, nil
}
func (m *csiBlockMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, volumeMapName string, podUID types.UID) error {
return ioutil.MapBlockVolume(devicePath, globalMapPath, volumeMapPath, volumeMapName, podUID)
}
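// Editorial sketch (not part of this diff): ioutil.MapBlockVolume is the shared
// kubelet helper that records the block mapping; judging from the symlink checks
// in csi_block_test.go further below, it links the device file at
// {globalMapPath}/{podUID} and {volumeMapPath}/{volName}.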
// unpublishVolumeForBlock unpublishes a block volume from publishPath
func (m *csiBlockMapper) unpublishVolumeForBlock(ctx context.Context, csi csiClient, publishPath string) error {
// Request to unpublish a block volume from publishPath.
// Expectation for driver is to remove block volume from the publishPath, by unmounting bind-mounted device file
// or deleting device file.
// Driver is responsible for deleting publishPath itself.
// If the driver doesn't implement NodeUnstageVolume, detaching the block volume from the node may be done here.
if err := csi.NodeUnpublishVolume(ctx, m.volumeID, publishPath); err != nil {
klog.Error(log("blockMapper.unpublishVolumeForBlock failed: %v", err))
return err
}
klog.V(4).Infof(log("blockMapper.unpublishVolumeForBlock NodeUnpublished successfully [%s]", publishPath))
return nil
}
// unstageVolumeForBlock unstages a block volume from stagingPath
func (m *csiBlockMapper) unstageVolumeForBlock(ctx context.Context, csi csiClient, stagingPath string) error {
// Check whether "STAGE_UNSTAGE_VOLUME" is set
stageUnstageSet, err := csi.NodeSupportsStageUnstage(ctx)
if err != nil {
klog.Error(log("blockMapper.unstageVolumeForBlock failed to check STAGE_UNSTAGE_VOLUME capability: %v", err))
return err
}
if !stageUnstageSet {
klog.Infof(log("blockMapper.unstageVolumeForBlock STAGE_UNSTAGE_VOLUME capability not set. Skipping unstageVolumeForBlock ..."))
return nil
}
// Request to unstage a block volume from stagingPath.
// Expected implementation for driver is removing driver specific resource in stagingPath and
// detaching the block volume from the node.
if err := csi.NodeUnstageVolume(ctx, m.volumeID, stagingPath); err != nil {
klog.Errorf(log("blockMapper.unstageVolumeForBlock failed: %v", err))
return err
}
klog.V(4).Infof(log("blockMapper.unstageVolumeForBlock NodeUnstageVolume successfully [%s]", stagingPath))
// Remove stagingPath directory and its contents
if err := os.RemoveAll(stagingPath); err != nil {
klog.Error(log("blockMapper.unstageVolumeForBlock failed to remove staging path after NodeUnstageVolume() error [%s]: %v", stagingPath, err))
return err
}
return nil
}
var _ volume.BlockVolumeUnmapper = &csiBlockMapper{}
// TearDownDevice removes traces of the SetUpDevice.
func (m *csiBlockMapper) TearDownDevice(globalMapPath, devicePath string) error {
if !m.plugin.blockEnabled {
return errors.New("CSIBlockVolume feature not enabled")
}
glog.V(4).Infof(log("unmapper.TearDownDevice(globalMapPath=%s; devicePath=%s)", globalMapPath, devicePath))
klog.V(4).Infof(log("unmapper.TearDownDevice(globalMapPath=%s; devicePath=%s)", globalMapPath, devicePath))
csi := m.csiClient
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
defer cancel()
// unmap global device map path
if err := csi.NodeUnstageVolume(ctx, m.volumeID, globalMapPath); err != nil {
glog.Errorf(log("blockMapper.TearDownDevice failed: %v", err))
return err
}
glog.V(4).Infof(log("blockMapper.TearDownDevice NodeUnstageVolume successfully [%s]", globalMapPath))
// request to remove pod volume map path also
podVolumePath, volumeName := m.GetPodDeviceMapPath()
podVolumeMapPath := filepath.Join(podVolumePath, volumeName)
if err := csi.NodeUnpublishVolume(ctx, m.volumeID, podVolumeMapPath); err != nil {
glog.Error(log("blockMapper.TearDownDevice failed: %v", err))
return err
// Call NodeUnpublishVolume
publishPath := m.getPublishPath()
if _, err := os.Stat(publishPath); err != nil {
if os.IsNotExist(err) {
klog.V(4).Infof(log("blockMapper.TearDownDevice publishPath(%s) has already been deleted, skip calling NodeUnpublishVolume", publishPath))
} else {
return err
}
} else {
err := m.unpublishVolumeForBlock(ctx, m.csiClient, publishPath)
if err != nil {
return err
}
}
glog.V(4).Infof(log("blockMapper.TearDownDevice NodeUnpublished successfully [%s]", podVolumeMapPath))
// Call NodeUnstageVolume
stagingPath := m.getStagingPath()
if _, err := os.Stat(stagingPath); err != nil {
if os.IsNotExist(err) {
klog.V(4).Infof(log("blockMapper.TearDownDevice stagingPath(%s) has already been deleted, skip calling NodeUnstageVolume", stagingPath))
} else {
return err
}
} else {
err := m.unstageVolumeForBlock(ctx, m.csiClient, stagingPath)
if err != nil {
return err
}
}
return nil
}
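// Editorial sketch (not from this diff): the os.Stat/os.IsNotExist guards above make
// TearDownDevice safe to retry. The bare pattern, for reference:
//
//	if _, err := os.Stat(path); err != nil {
//		if os.IsNotExist(err) {
//			return nil // already cleaned up, nothing to do
//		}
//		return err // unexpected stat failure, surface it
//	}
//	// path still exists: perform the cleanup RPC, then remove the path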


@ -25,13 +25,34 @@ import (
api "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
fakeclient "k8s.io/client-go/kubernetes/fake"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
func prepareBlockMapperTest(plug *csiPlugin, specVolumeName string, t *testing.T) (*csiBlockMapper, *volume.Spec, *api.PersistentVolume, error) {
registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t)
pv := makeTestPV(specVolumeName, 10, testDriver, testVol)
spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
mapper, err := plug.NewBlockVolumeMapper(
spec,
&api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
volume.VolumeOptions{},
)
if err != nil {
return nil, nil, nil, fmt.Errorf("Failed to make a new Mapper: %v", err)
}
csiMapper := mapper.(*csiBlockMapper)
return csiMapper, spec, pv, nil
}
func TestBlockMapperGetGlobalMapPath(t *testing.T) {
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
// TODO (vladimirvivien) specName with slashes will not work
@ -53,17 +74,10 @@ func TestBlockMapperGetGlobalMapPath(t *testing.T) {
}
for _, tc := range testCases {
t.Logf("test case: %s", tc.name)
csiMapper, spec, _, err := prepareBlockMapperTest(plug, tc.specVolumeName, t)
if err != nil {
t.Fatalf("Failed to make a new Mapper: %v", err)
}
path, err := csiMapper.GetGlobalMapPath(spec)
if err != nil {
@ -76,35 +90,147 @@ func TestBlockMapperGetGlobalMapPath(t *testing.T) {
}
}
func TestBlockMapperGetStagingPath(t *testing.T) {
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
testCases := []struct {
name string
specVolumeName string
path string
}{
{
name: "simple specName",
specVolumeName: "spec-0",
path: path.Join(tmpDir, fmt.Sprintf("plugins/kubernetes.io/csi/volumeDevices/staging/%s", "spec-0")),
},
{
name: "specName with dots",
specVolumeName: "test.spec.1",
path: path.Join(tmpDir, fmt.Sprintf("plugins/kubernetes.io/csi/volumeDevices/staging/%s", "test.spec.1")),
},
}
for _, tc := range testCases {
t.Logf("test case: %s", tc.name)
csiMapper, _, _, err := prepareBlockMapperTest(plug, tc.specVolumeName, t)
if err != nil {
t.Fatalf("Failed to make a new Mapper: %v", err)
}
path := csiMapper.getStagingPath()
if tc.path != path {
t.Errorf("expecting path %s, got %s", tc.path, path)
}
}
}
func TestBlockMapperGetPublishPath(t *testing.T) {
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
testCases := []struct {
name string
specVolumeName string
path string
}{
{
name: "simple specName",
specVolumeName: "spec-0",
path: path.Join(tmpDir, fmt.Sprintf("plugins/kubernetes.io/csi/volumeDevices/publish/%s", "spec-0")),
},
{
name: "specName with dots",
specVolumeName: "test.spec.1",
path: path.Join(tmpDir, fmt.Sprintf("plugins/kubernetes.io/csi/volumeDevices/publish/%s", "test.spec.1")),
},
}
for _, tc := range testCases {
t.Logf("test case: %s", tc.name)
csiMapper, _, _, err := prepareBlockMapperTest(plug, tc.specVolumeName, t)
if err != nil {
t.Fatalf("Failed to make a new Mapper: %v", err)
}
path := csiMapper.getPublishPath()
if tc.path != path {
t.Errorf("expecting path %s, got %s", tc.path, path)
}
}
}
func TestBlockMapperGetDeviceMapPath(t *testing.T) {
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
testCases := []struct {
name string
specVolumeName string
path string
}{
{
name: "simple specName",
specVolumeName: "spec-0",
path: path.Join(tmpDir, fmt.Sprintf("pods/%s/volumeDevices/kubernetes.io~csi", testPodUID)),
},
{
name: "specName with dots",
specVolumeName: "test.spec.1",
path: path.Join(tmpDir, fmt.Sprintf("pods/%s/volumeDevices/kubernetes.io~csi", testPodUID)),
},
}
for _, tc := range testCases {
t.Logf("test case: %s", tc.name)
csiMapper, _, _, err := prepareBlockMapperTest(plug, tc.specVolumeName, t)
if err != nil {
t.Fatalf("Failed to make a new Mapper: %v", err)
}
path, volName := csiMapper.GetPodDeviceMapPath()
if tc.path != path {
t.Errorf("expecting path %s, got %s", tc.path, path)
}
if tc.specVolumeName != volName {
t.Errorf("expecting volName %s, got %s", tc.specVolumeName, volName)
}
}
}
func TestBlockMapperSetupDevice(t *testing.T) {
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
fakeClient := fakeclient.NewSimpleClientset()
host := volumetest.NewFakeVolumeHostWithCSINodeName(
tmpDir,
fakeClient,
nil,
nil,
"fakeNode",
)
plug.host = host
pv := makeTestPV("test-pv", 10, testDriver, testVol)
csiMapper, _, pv, err := prepareBlockMapperTest(plug, "test-pv", t)
if err != nil {
t.Fatalf("Failed to make a new Mapper: %v", err)
}
pvName := pv.GetName()
nodeName := string(plug.host.GetNodeName())
spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
// MapDevice
mapper, err := plug.NewBlockVolumeMapper(
spec,
&api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
volume.VolumeOptions{},
)
if err != nil {
t.Fatalf("failed to create new mapper: %v", err)
}
csiMapper := mapper.(*csiBlockMapper)
csiMapper.csiClient = setupClient(t, true)
attachID := getAttachmentName(csiMapper.volumeID, csiMapper.driverName, string(nodeName))
attachID := getAttachmentName(csiMapper.volumeID, string(csiMapper.driverName), string(nodeName))
attachment := makeTestAttachment(attachID, nodeName, pvName)
attachment.Status.Attached = true
_, err = csiMapper.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
@ -118,50 +244,60 @@ func TestBlockMapperSetupDevice(t *testing.T) {
t.Fatalf("mapper failed to SetupDevice: %v", err)
}
// Check if SetUpDevice returns the right path
publishPath := csiMapper.getPublishPath()
if devicePath != publishPath {
t.Fatalf("mapper.SetupDevice returned unexpected path %s instead of %v", devicePath, publishPath)
}
// Check if NodeStageVolume staged to the right path
stagingPath := csiMapper.getStagingPath()
svols := csiMapper.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodeStagedVolumes()
svol, ok := svols[csiMapper.volumeID]
if !ok {
t.Error("csi server may not have received NodeStageVolume call")
}
if svol.Path != stagingPath {
t.Errorf("csi server expected device path %s, got %s", stagingPath, svol.Path)
}
vols := csiMapper.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodeStagedVolumes()
if vols[csiMapper.volumeID] != devicePath {
// Check if NodePublishVolume published to the right path
pvols := csiMapper.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodePublishedVolumes()
pvol, ok := pvols[csiMapper.volumeID]
if !ok {
t.Error("csi server may not have received NodePublishVolume call")
}
if pvol.Path != publishPath {
t.Errorf("csi server expected path %s, got %s", publishPath, pvol.Path)
}
}
func TestBlockMapperMapDevice(t *testing.T) {
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
fakeClient := fakeclient.NewSimpleClientset()
host := volumetest.NewFakeVolumeHostWithCSINodeName(
tmpDir,
fakeClient,
nil,
nil,
"fakeNode",
)
plug.host = host
pv := makeTestPV("test-pv", 10, testDriver, testVol)
csiMapper, _, pv, err := prepareBlockMapperTest(plug, "test-pv", t)
if err != nil {
t.Fatalf("Failed to make a new Mapper: %v", err)
}
pvName := pv.GetName()
nodeName := string(plug.host.GetNodeName())
spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
// MapDevice
mapper, err := plug.NewBlockVolumeMapper(
spec,
&api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
volume.VolumeOptions{},
)
if err != nil {
t.Fatalf("failed to create new mapper: %v", err)
}
csiMapper := mapper.(*csiBlockMapper)
csiMapper.csiClient = setupClient(t, true)
attachID := getAttachmentName(csiMapper.volumeID, csiMapper.driverName, string(nodeName))
attachID := getAttachmentName(csiMapper.volumeID, string(csiMapper.driverName), string(nodeName))
attachment := makeTestAttachment(attachID, nodeName, pvName)
attachment.Status.Attached = true
_, err = csiMapper.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
@ -179,6 +315,16 @@ func TestBlockMapperMapDevice(t *testing.T) {
t.Fatalf("mapper failed to GetGlobalMapPath: %v", err)
}
// The actual SetUpDevice should create a symlink to, or a bind mount of, the device in devicePath.
// Create dummy file there before calling MapDevice to test it properly.
fd, err := os.Create(devicePath)
if err != nil {
t.Fatalf("mapper failed to create dummy file in devicePath: %v", err)
}
if err := fd.Close(); err != nil {
t.Fatalf("mapper failed to close dummy file in devicePath: %v", err)
}
// Map device to global and pod device map path
volumeMapPath, volName := csiMapper.GetPodDeviceMapPath()
err = csiMapper.MapDevice(devicePath, globalMapPath, volumeMapPath, volName, csiMapper.podUID)
@ -186,33 +332,48 @@ func TestBlockMapperMapDevice(t *testing.T) {
if err != nil {
t.Fatalf("mapper failed to MapDevice: %v", err)
}
// Check if symlink {globalMapPath}/{podUID} exists
globalMapFilePath := filepath.Join(globalMapPath, string(csiMapper.podUID))
if _, err := os.Stat(globalMapFilePath); err != nil {
if os.IsNotExist(err) {
t.Errorf("mapper.MapDevice failed, symlink in globalMapPath not created: %v", err)
t.Errorf("mapper.MapDevice devicePath:%v, globalMapPath: %v, globalMapFilePath: %v",
devicePath, globalMapPath, globalMapFilePath)
} else {
t.Errorf("mapper.MapDevice failed: %v", err)
}
}
// Check if symlink {volumeMapPath}/{volName} exists
volumeMapFilePath := filepath.Join(volumeMapPath, volName)
if _, err := os.Stat(volumeMapFilePath); err != nil {
if os.IsNotExist(err) {
t.Errorf("mapper.MapDevice failed, symlink in volumeMapPath not created: %v", err)
} else {
t.Errorf("mapper.MapDevice failed: %v", err)
}
}
}
func TestBlockMapperTearDownDevice(t *testing.T) {
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
fakeClient := fakeclient.NewSimpleClientset()
host := volumetest.NewFakeVolumeHostWithCSINodeName(
tmpDir,
fakeClient,
nil,
nil,
"fakeNode",
)
plug.host = host
pv := makeTestPV("test-pv", 10, testDriver, testVol)
spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
_, spec, pv, err := prepareBlockMapperTest(plug, "test-pv", t)
if err != nil {
t.Fatalf("Failed to make a new Mapper: %v", err)
}
// save volume data
dir := getVolumeDeviceDataDir(pv.ObjectMeta.Name, plug.host)
@ -250,15 +411,15 @@ func TestBlockMapperTearDownDevice(t *testing.T) {
t.Fatal(err)
}
// ensure csi client call and node unpublished
pubs := csiUnmapper.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodePublishedVolumes()
if _, ok := pubs[csiUnmapper.volumeID]; ok {
t.Error("csi server may not have received NodeUnpublishVolume call")
}
// ensure csi client call and node unstaged
vols := csiUnmapper.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodeStagedVolumes()
if _, ok := vols[csiUnmapper.volumeID]; ok {
t.Error("csi server may not have received NodeUnstageVolume call")
}
}


@ -20,18 +20,26 @@ import (
"context"
"errors"
"fmt"
"io"
"net"
"time"
csipb "github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/golang/glog"
csipbv1 "github.com/container-storage-interface/spec/lib/go/csi"
"google.golang.org/grpc"
api "k8s.io/api/core/v1"
utilversion "k8s.io/apimachinery/pkg/util/version"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/features"
csipbv0 "k8s.io/kubernetes/pkg/volume/csi/csiv0"
)
type csiClient interface {
NodeGetInfo(ctx context.Context) (
nodeID string,
maxVolumePerNode int64,
accessibleTopology map[string]string,
err error)
NodePublishVolume(
ctx context.Context,
volumeid string,
@ -39,10 +47,11 @@ type csiClient interface {
stagingTargetPath string,
targetPath string,
accessMode api.PersistentVolumeAccessMode,
publishContext map[string]string,
volumeContext map[string]string,
secrets map[string]string,
fsType string,
mountOptions []string,
) error
NodeUnpublishVolume(
ctx context.Context,
@ -55,24 +64,176 @@ type csiClient interface {
stagingTargetPath string,
fsType string,
accessMode api.PersistentVolumeAccessMode,
secrets map[string]string,
volumeContext map[string]string,
) error
NodeUnstageVolume(ctx context.Context, volID, stagingTargetPath string) error
NodeSupportsStageUnstage(ctx context.Context) (bool, error)
}
// Strongly typed address
type csiAddr string
// Strongly typed driver name
type csiDriverName string
// csiClient encapsulates all csi-plugin methods
type csiDriverClient struct {
driverName csiDriverName
addr csiAddr
nodeV1ClientCreator nodeV1ClientCreator
nodeV0ClientCreator nodeV0ClientCreator
}
var _ csiClient = &csiDriverClient{}
type nodeV1ClientCreator func(addr csiAddr) (
nodeClient csipbv1.NodeClient,
closer io.Closer,
err error,
)
type nodeV0ClientCreator func(addr csiAddr) (
nodeClient csipbv0.NodeClient,
closer io.Closer,
err error,
)
// newV1NodeClient creates a new NodeClient with the internally used gRPC
// connection set up. It also returns a closer which must be called to close
// the gRPC connection when the NodeClient is not used anymore.
// This is the default implementation for the nodeV1ClientCreator, used in
// newCsiDriverClient.
func newV1NodeClient(addr csiAddr) (nodeClient csipbv1.NodeClient, closer io.Closer, err error) {
var conn *grpc.ClientConn
conn, err = newGrpcConn(addr)
if err != nil {
return nil, nil, err
}
nodeClient = csipbv1.NewNodeClient(conn)
return nodeClient, conn, nil
}
// newV0NodeClient creates a new NodeClient with the internally used gRPC
// connection set up. It also returns a closer which must be called to close
// the gRPC connection when the NodeClient is not used anymore.
// This is the default implementation for the nodeV0ClientCreator, used in
// newCsiDriverClient.
func newV0NodeClient(addr csiAddr) (nodeClient csipbv0.NodeClient, closer io.Closer, err error) {
var conn *grpc.ClientConn
conn, err = newGrpcConn(addr)
if err != nil {
return nil, nil, err
}
nodeClient = csipbv0.NewNodeClient(conn)
return nodeClient, conn, nil
}
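// Editorial sketch (not part of this diff): both creators follow the same
// open-call-close pattern used by the v0/v1 helpers below. A caller typically does:
//
//	nodeClient, closer, err := newV1NodeClient(addr)
//	if err != nil {
//		return err
//	}
//	defer closer.Close() // closes the underlying *grpc.ClientConn
//	_, err = nodeClient.NodeGetCapabilities(ctx, &csipbv1.NodeGetCapabilitiesRequest{})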
func newCsiDriverClient(driverName csiDriverName) (*csiDriverClient, error) {
if driverName == "" {
return nil, fmt.Errorf("driver name is empty")
}
addr := fmt.Sprintf(csiAddrTemplate, driverName)
requiresV0Client := true
if utilfeature.DefaultFeatureGate.Enabled(features.KubeletPluginsWatcher) {
var existingDriver csiDriver
driverExists := false
func() {
csiDrivers.RLock()
defer csiDrivers.RUnlock()
existingDriver, driverExists = csiDrivers.driversMap[string(driverName)]
}()
if !driverExists {
return nil, fmt.Errorf("driver name %s not found in the list of registered CSI drivers", driverName)
}
addr = existingDriver.driverEndpoint
requiresV0Client = versionRequiresV0Client(existingDriver.highestSupportedVersion)
}
nodeV1ClientCreator := newV1NodeClient
nodeV0ClientCreator := newV0NodeClient
if requiresV0Client {
nodeV1ClientCreator = nil
} else {
nodeV0ClientCreator = nil
}
return &csiDriverClient{
driverName: driverName,
addr: csiAddr(addr),
nodeV1ClientCreator: nodeV1ClientCreator,
nodeV0ClientCreator: nodeV0ClientCreator,
}, nil
}
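// Editorial sketch (hypothetical driver name, not part of this diff): with the
// KubeletPluginsWatcher gate enabled, a registered driver whose
// highestSupportedVersion is 0.x gets only nodeV0ClientCreator set, so every RPC
// below dispatches to its *V0 variant:
//
//	client, err := newCsiDriverClient(csiDriverName("com.example.csi-driver"))
//	if err != nil {
//		return err
//	}
//	supported, err := client.NodeSupportsStageUnstage(ctx) // routed to v0 or v1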
func (c *csiDriverClient) NodeGetInfo(ctx context.Context) (
nodeID string,
maxVolumePerNode int64,
accessibleTopology map[string]string,
err error) {
klog.V(4).Info(log("calling NodeGetInfo rpc"))
if c.nodeV1ClientCreator != nil {
return c.nodeGetInfoV1(ctx)
} else if c.nodeV0ClientCreator != nil {
return c.nodeGetInfoV0(ctx)
}
err = fmt.Errorf("failed to call NodeGetInfo. Both nodeV1ClientCreator and nodeV0ClientCreator are nil")
return nodeID, maxVolumePerNode, accessibleTopology, err
}
func (c *csiDriverClient) nodeGetInfoV1(ctx context.Context) (
nodeID string,
maxVolumePerNode int64,
accessibleTopology map[string]string,
err error) {
nodeClient, closer, err := c.nodeV1ClientCreator(c.addr)
if err != nil {
return "", 0, nil, err
}
defer closer.Close()
res, err := nodeClient.NodeGetInfo(ctx, &csipbv1.NodeGetInfoRequest{})
if err != nil {
return "", 0, nil, err
}
topology := res.GetAccessibleTopology()
if topology != nil {
accessibleTopology = topology.Segments
}
return res.GetNodeId(), res.GetMaxVolumesPerNode(), accessibleTopology, nil
}
func (c *csiDriverClient) nodeGetInfoV0(ctx context.Context) (
nodeID string,
maxVolumePerNode int64,
accessibleTopology map[string]string,
err error) {
nodeClient, closer, err := c.nodeV0ClientCreator(c.addr)
if err != nil {
return "", 0, nil, err
}
defer closer.Close()
res, err := nodeClient.NodeGetInfo(ctx, &csipbv0.NodeGetInfoRequest{})
if err != nil {
return "", 0, nil, err
}
topology := res.GetAccessibleTopology()
if topology != nil {
accessibleTopology = topology.Segments
}
return res.GetNodeId(), res.GetMaxVolumesPerNode(), accessibleTopology, nil
}
func (c *csiDriverClient) NodePublishVolume(
@ -82,36 +243,82 @@ func (c *csiDriverClient) NodePublishVolume(
stagingTargetPath string,
targetPath string,
accessMode api.PersistentVolumeAccessMode,
publishContext map[string]string,
volumeContext map[string]string,
secrets map[string]string,
fsType string,
mountOptions []string,
) error {
glog.V(4).Info(log("calling NodePublishVolume rpc [volid=%s,target_path=%s]", volID, targetPath))
klog.V(4).Info(log("calling NodePublishVolume rpc [volid=%s,target_path=%s]", volID, targetPath))
if volID == "" {
return errors.New("missing volume id")
}
if targetPath == "" {
return errors.New("missing target path")
}
if c.nodeV1ClientCreator != nil {
return c.nodePublishVolumeV1(
ctx,
volID,
readOnly,
stagingTargetPath,
targetPath,
accessMode,
publishContext,
volumeContext,
secrets,
fsType,
mountOptions,
)
} else if c.nodeV0ClientCreator != nil {
return c.nodePublishVolumeV0(
ctx,
volID,
readOnly,
stagingTargetPath,
targetPath,
accessMode,
publishContext,
volumeContext,
secrets,
fsType,
mountOptions,
)
}
return fmt.Errorf("failed to call NodePublishVolume. Both nodeV1ClientCreator and nodeV0ClientCreator are nil")
}
func (c *csiDriverClient) nodePublishVolumeV1(
ctx context.Context,
volID string,
readOnly bool,
stagingTargetPath string,
targetPath string,
accessMode api.PersistentVolumeAccessMode,
publishContext map[string]string,
volumeContext map[string]string,
secrets map[string]string,
fsType string,
mountOptions []string,
) error {
nodeClient, closer, err := c.nodeV1ClientCreator(c.addr)
if err != nil {
return err
}
defer closer.Close()
req := &csipb.NodePublishVolumeRequest{
VolumeId: volID,
TargetPath: targetPath,
Readonly: readOnly,
PublishInfo: volumeInfo,
VolumeAttributes: volumeAttribs,
NodePublishSecrets: nodePublishSecrets,
VolumeCapability: &csipb.VolumeCapability{
AccessMode: &csipb.VolumeCapability_AccessMode{
Mode: asCSIAccessMode(accessMode),
req := &csipbv1.NodePublishVolumeRequest{
VolumeId: volID,
TargetPath: targetPath,
Readonly: readOnly,
PublishContext: publishContext,
VolumeContext: volumeContext,
Secrets: secrets,
VolumeCapability: &csipbv1.VolumeCapability{
AccessMode: &csipbv1.VolumeCapability_AccessMode{
Mode: asCSIAccessModeV1(accessMode),
},
},
}
@ -120,13 +327,67 @@ func (c *csiDriverClient) NodePublishVolume(
}
if fsType == fsTypeBlockName {
req.VolumeCapability.AccessType = &csipb.VolumeCapability_Block{
Block: &csipb.VolumeCapability_BlockVolume{},
req.VolumeCapability.AccessType = &csipbv1.VolumeCapability_Block{
Block: &csipbv1.VolumeCapability_BlockVolume{},
}
} else {
req.VolumeCapability.AccessType = &csipb.VolumeCapability_Mount{
Mount: &csipb.VolumeCapability_MountVolume{
FsType: fsType,
req.VolumeCapability.AccessType = &csipbv1.VolumeCapability_Mount{
Mount: &csipbv1.VolumeCapability_MountVolume{
FsType: fsType,
MountFlags: mountOptions,
},
}
}
_, err = nodeClient.NodePublishVolume(ctx, req)
return err
}
func (c *csiDriverClient) nodePublishVolumeV0(
ctx context.Context,
volID string,
readOnly bool,
stagingTargetPath string,
targetPath string,
accessMode api.PersistentVolumeAccessMode,
publishContext map[string]string,
volumeContext map[string]string,
secrets map[string]string,
fsType string,
mountOptions []string,
) error {
nodeClient, closer, err := c.nodeV0ClientCreator(c.addr)
if err != nil {
return err
}
defer closer.Close()
req := &csipbv0.NodePublishVolumeRequest{
VolumeId: volID,
TargetPath: targetPath,
Readonly: readOnly,
PublishInfo: publishContext,
VolumeAttributes: volumeContext,
NodePublishSecrets: secrets,
VolumeCapability: &csipbv0.VolumeCapability{
AccessMode: &csipbv0.VolumeCapability_AccessMode{
Mode: asCSIAccessModeV0(accessMode),
},
},
}
if stagingTargetPath != "" {
req.StagingTargetPath = stagingTargetPath
}
if fsType == fsTypeBlockName {
req.VolumeCapability.AccessType = &csipbv0.VolumeCapability_Block{
Block: &csipbv0.VolumeCapability_BlockVolume{},
}
} else {
req.VolumeCapability.AccessType = &csipbv0.VolumeCapability_Mount{
Mount: &csipbv0.VolumeCapability_MountVolume{
FsType: fsType,
MountFlags: mountOptions,
},
}
}
@ -136,7 +397,7 @@ func (c *csiDriverClient) NodePublishVolume(
}
func (c *csiDriverClient) NodeUnpublishVolume(ctx context.Context, volID string, targetPath string) error {
glog.V(4).Info(log("calling NodeUnpublishVolume rpc: [volid=%s, target_path=%s", volID, targetPath))
klog.V(4).Info(log("calling NodeUnpublishVolume rpc: [volid=%s, target_path=%s", volID, targetPath))
if volID == "" {
return errors.New("missing volume id")
}
@ -144,14 +405,39 @@ func (c *csiDriverClient) NodeUnpublishVolume(ctx context.Context, volID string,
return errors.New("missing target path")
}
if c.nodeV1ClientCreator != nil {
return c.nodeUnpublishVolumeV1(ctx, volID, targetPath)
} else if c.nodeV0ClientCreator != nil {
return c.nodeUnpublishVolumeV0(ctx, volID, targetPath)
}
return fmt.Errorf("failed to call NodeUnpublishVolume. Both nodeV1ClientCreator and nodeV0ClientCreator are nil")
}
func (c *csiDriverClient) nodeUnpublishVolumeV1(ctx context.Context, volID string, targetPath string) error {
nodeClient, closer, err := c.nodeV1ClientCreator(c.addr)
if err != nil {
return err
}
defer closer.Close()
req := &csipb.NodeUnpublishVolumeRequest{
req := &csipbv1.NodeUnpublishVolumeRequest{
VolumeId: volID,
TargetPath: targetPath,
}
_, err = nodeClient.NodeUnpublishVolume(ctx, req)
return err
}
func (c *csiDriverClient) nodeUnpublishVolumeV0(ctx context.Context, volID string, targetPath string) error {
nodeClient, closer, err := c.nodeV0ClientCreator(c.addr)
if err != nil {
return err
}
defer closer.Close()
req := &csipbv0.NodeUnpublishVolumeRequest{
VolumeId: volID,
TargetPath: targetPath,
}
@ -162,14 +448,14 @@ func (c *csiDriverClient) NodeUnpublishVolume(ctx context.Context, volID string,
func (c *csiDriverClient) NodeStageVolume(ctx context.Context,
volID string,
publishContext map[string]string,
stagingTargetPath string,
fsType string,
accessMode api.PersistentVolumeAccessMode,
secrets map[string]string,
volumeContext map[string]string,
) error {
glog.V(4).Info(log("calling NodeStageVolume rpc [volid=%s,staging_target_path=%s]", volID, stagingTargetPath))
klog.V(4).Info(log("calling NodeStageVolume rpc [volid=%s,staging_target_path=%s]", volID, stagingTargetPath))
if volID == "" {
return errors.New("missing volume id")
}
@ -177,33 +463,96 @@ func (c *csiDriverClient) NodeStageVolume(ctx context.Context,
return errors.New("missing staging target path")
}
if c.nodeV1ClientCreator != nil {
return c.nodeStageVolumeV1(ctx, volID, publishContext, stagingTargetPath, fsType, accessMode, secrets, volumeContext)
} else if c.nodeV0ClientCreator != nil {
return c.nodeStageVolumeV0(ctx, volID, publishContext, stagingTargetPath, fsType, accessMode, secrets, volumeContext)
}
return fmt.Errorf("failed to call NodeStageVolume. Both nodeV1ClientCreator and nodeV0ClientCreator are nil")
}
func (c *csiDriverClient) nodeStageVolumeV1(
ctx context.Context,
volID string,
publishContext map[string]string,
stagingTargetPath string,
fsType string,
accessMode api.PersistentVolumeAccessMode,
secrets map[string]string,
volumeContext map[string]string,
) error {
nodeClient, closer, err := c.nodeV1ClientCreator(c.addr)
if err != nil {
return err
}
defer closer.Close()
req := &csipb.NodeStageVolumeRequest{
req := &csipbv1.NodeStageVolumeRequest{
VolumeId: volID,
PublishContext: publishContext,
StagingTargetPath: stagingTargetPath,
VolumeCapability: &csipb.VolumeCapability{
AccessMode: &csipb.VolumeCapability_AccessMode{
Mode: asCSIAccessMode(accessMode),
VolumeCapability: &csipbv1.VolumeCapability{
AccessMode: &csipbv1.VolumeCapability_AccessMode{
Mode: asCSIAccessModeV1(accessMode),
},
},
Secrets: secrets,
VolumeContext: volumeContext,
}
if fsType == fsTypeBlockName {
req.VolumeCapability.AccessType = &csipb.VolumeCapability_Block{
Block: &csipb.VolumeCapability_BlockVolume{},
req.VolumeCapability.AccessType = &csipbv1.VolumeCapability_Block{
Block: &csipbv1.VolumeCapability_BlockVolume{},
}
} else {
req.VolumeCapability.AccessType = &csipb.VolumeCapability_Mount{
Mount: &csipb.VolumeCapability_MountVolume{
req.VolumeCapability.AccessType = &csipbv1.VolumeCapability_Mount{
Mount: &csipbv1.VolumeCapability_MountVolume{
FsType: fsType,
},
}
}
_, err = nodeClient.NodeStageVolume(ctx, req)
return err
}
func (c *csiDriverClient) nodeStageVolumeV0(
ctx context.Context,
volID string,
publishContext map[string]string,
stagingTargetPath string,
fsType string,
accessMode api.PersistentVolumeAccessMode,
secrets map[string]string,
volumeContext map[string]string,
) error {
nodeClient, closer, err := c.nodeV0ClientCreator(c.addr)
if err != nil {
return err
}
defer closer.Close()
req := &csipbv0.NodeStageVolumeRequest{
VolumeId: volID,
PublishInfo: publishContext,
StagingTargetPath: stagingTargetPath,
VolumeCapability: &csipbv0.VolumeCapability{
AccessMode: &csipbv0.VolumeCapability_AccessMode{
Mode: asCSIAccessModeV0(accessMode),
},
},
NodeStageSecrets: secrets,
VolumeAttributes: volumeContext,
}
if fsType == fsTypeBlockName {
req.VolumeCapability.AccessType = &csipbv0.VolumeCapability_Block{
Block: &csipbv0.VolumeCapability_BlockVolume{},
}
} else {
req.VolumeCapability.AccessType = &csipbv0.VolumeCapability_Mount{
Mount: &csipbv0.VolumeCapability_MountVolume{
FsType: fsType,
},
}
@ -214,7 +563,7 @@ func (c *csiDriverClient) NodeStageVolume(ctx context.Context,
}
func (c *csiDriverClient) NodeUnstageVolume(ctx context.Context, volID, stagingTargetPath string) error {
glog.V(4).Info(log("calling NodeUnstageVolume rpc [volid=%s,staging_target_path=%s]", volID, stagingTargetPath))
klog.V(4).Info(log("calling NodeUnstageVolume rpc [volid=%s,staging_target_path=%s]", volID, stagingTargetPath))
if volID == "" {
return errors.New("missing volume id")
}
@ -222,14 +571,23 @@ func (c *csiDriverClient) NodeUnstageVolume(ctx context.Context, volID, stagingT
return errors.New("missing staging target path")
}
if c.nodeV1ClientCreator != nil {
return c.nodeUnstageVolumeV1(ctx, volID, stagingTargetPath)
} else if c.nodeV0ClientCreator != nil {
return c.nodeUnstageVolumeV0(ctx, volID, stagingTargetPath)
}
return fmt.Errorf("failed to call NodeUnstageVolume. Both nodeV1ClientCreator and nodeV0ClientCreator are nil")
}
func (c *csiDriverClient) nodeUnstageVolumeV1(ctx context.Context, volID, stagingTargetPath string) error {
nodeClient, closer, err := c.nodeV1ClientCreator(c.addr)
if err != nil {
return err
}
defer closer.Close()
req := &csipb.NodeUnstageVolumeRequest{
req := &csipbv1.NodeUnstageVolumeRequest{
VolumeId: volID,
StagingTargetPath: stagingTargetPath,
}
@ -237,57 +595,128 @@ func (c *csiDriverClient) NodeUnstageVolume(ctx context.Context, volID, stagingT
return err
}
func (c *csiDriverClient) nodeUnstageVolumeV0(ctx context.Context, volID, stagingTargetPath string) error {
nodeClient, closer, err := c.nodeV0ClientCreator(c.addr)
if err != nil {
return err
}
defer closer.Close()
req := &csipb.NodeGetCapabilitiesRequest{}
req := &csipbv0.NodeUnstageVolumeRequest{
VolumeId: volID,
StagingTargetPath: stagingTargetPath,
}
_, err = nodeClient.NodeUnstageVolume(ctx, req)
return err
}
func (c *csiDriverClient) NodeSupportsStageUnstage(ctx context.Context) (bool, error) {
klog.V(4).Info(log("calling NodeGetCapabilities rpc to determine if NodeSupportsStageUnstage"))
if c.nodeV1ClientCreator != nil {
return c.nodeSupportsStageUnstageV1(ctx)
} else if c.nodeV0ClientCreator != nil {
return c.nodeSupportsStageUnstageV0(ctx)
}
return false, fmt.Errorf("failed to call NodeSupportsStageUnstage. Both nodeV1ClientCreator and nodeV0ClientCreator are nil")
}
func (c *csiDriverClient) nodeSupportsStageUnstageV1(ctx context.Context) (bool, error) {
nodeClient, closer, err := c.nodeV1ClientCreator(c.addr)
if err != nil {
return false, err
}
defer closer.Close()
req := &csipbv1.NodeGetCapabilitiesRequest{}
resp, err := nodeClient.NodeGetCapabilities(ctx, req)
if err != nil {
return false, err
}
capabilities := resp.GetCapabilities()
stageUnstageSet := false
if capabilities == nil {
return false, nil
}
for _, capability := range capabilities {
if capability.GetRpc().GetType() == csipbv1.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME {
stageUnstageSet = true
}
}
return stageUnstageSet, nil
}
func (c *csiDriverClient) nodeSupportsStageUnstageV0(ctx context.Context) (bool, error) {
nodeClient, closer, err := c.nodeV0ClientCreator(c.addr)
if err != nil {
return false, err
}
defer closer.Close()
req := &csipbv0.NodeGetCapabilitiesRequest{}
resp, err := nodeClient.NodeGetCapabilities(ctx, req)
if err != nil {
return false, err
}
capabilities := resp.GetCapabilities()
stageUnstageSet := false
if capabilities == nil {
return false, nil
}
for _, capability := range capabilities {
if capability.GetRpc().GetType() == csipbv0.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME {
stageUnstageSet = true
}
}
return stageUnstageSet, nil
}
func asCSIAccessModeV1(am api.PersistentVolumeAccessMode) csipbv1.VolumeCapability_AccessMode_Mode {
switch am {
case api.ReadWriteOnce:
return csipbv1.VolumeCapability_AccessMode_SINGLE_NODE_WRITER
case api.ReadOnlyMany:
return csipbv1.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY
case api.ReadWriteMany:
return csipbv1.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER
}
return csipbv1.VolumeCapability_AccessMode_UNKNOWN
}
func asCSIAccessModeV0(am api.PersistentVolumeAccessMode) csipbv0.VolumeCapability_AccessMode_Mode {
switch am {
case api.ReadWriteOnce:
return csipbv0.VolumeCapability_AccessMode_SINGLE_NODE_WRITER
case api.ReadOnlyMany:
return csipbv0.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY
case api.ReadWriteMany:
return csipbv0.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER
}
return csipbv0.VolumeCapability_AccessMode_UNKNOWN
}
func newGrpcConn(addr csiAddr) (*grpc.ClientConn, error) {
network := "unix"
glog.V(4).Infof(log("creating new gRPC connection for [%s://%s]", network, addr))
klog.V(4).Infof(log("creating new gRPC connection for [%s://%s]", network, addr))
return grpc.Dial(
string(addr),
grpc.WithInsecure(),
grpc.WithDialer(func(target string, timeout time.Duration) (net.Conn, error) {
return net.Dial(network, target)
}),
)
}
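// Editorial note (assumption, not stated in this hunk): addr is a filesystem path
// to the driver's UNIX domain socket (the csiAddrTemplate value used by
// newCsiDriverClient, typically of the form /var/lib/kubelet/plugins/<driver>/csi.sock),
// which is why the custom dialer above forces the "unix" network instead of TCP.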
func versionRequiresV0Client(version *utilversion.Version) bool {
if version != nil && version.Major() == 0 {
return true
}
return false
}
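// Editorial sketch (not part of this diff), using MustParseGeneric from
// k8s.io/apimachinery/pkg/util/version:
//
//	versionRequiresV0Client(utilversion.MustParseGeneric("0.3.0")) // true:  use CSI v0 client
//	versionRequiresV0Client(utilversion.MustParseGeneric("1.0.0")) // false: use CSI v1 client
//	versionRequiresV0Client(nil)                                   // false: defaults to v1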


@ -19,9 +19,11 @@ package csi
import (
"context"
"errors"
"io"
"reflect"
"testing"
csipb "github.com/container-storage-interface/spec/lib/go/csi/v0"
csipbv1 "github.com/container-storage-interface/spec/lib/go/csi"
api "k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/volume/csi/fake"
)
@ -38,6 +40,19 @@ func newFakeCsiDriverClient(t *testing.T, stagingCapable bool) *fakeCsiDriverCli
}
}
func (c *fakeCsiDriverClient) NodeGetInfo(ctx context.Context) (
nodeID string,
maxVolumePerNode int64,
accessibleTopology map[string]string,
err error) {
resp, err := c.nodeClient.NodeGetInfo(ctx, &csipbv1.NodeGetInfoRequest{})
topology := resp.GetAccessibleTopology()
if topology != nil {
accessibleTopology = topology.Segments
}
return resp.GetNodeId(), resp.GetMaxVolumesPerNode(), accessibleTopology, err
}
func (c *fakeCsiDriverClient) NodePublishVolume(
ctx context.Context,
volID string,
@ -45,26 +60,28 @@ func (c *fakeCsiDriverClient) NodePublishVolume(
stagingTargetPath string,
targetPath string,
accessMode api.PersistentVolumeAccessMode,
publishContext map[string]string,
volumeContext map[string]string,
secrets map[string]string,
fsType string,
mountOptions []string,
) error {
c.t.Log("calling fake.NodePublishVolume...")
req := &csipb.NodePublishVolumeRequest{
VolumeId: volID,
TargetPath: targetPath,
Readonly: readOnly,
PublishInfo: volumeInfo,
VolumeAttributes: volumeAttribs,
NodePublishSecrets: nodePublishSecrets,
VolumeCapability: &csipb.VolumeCapability{
AccessMode: &csipb.VolumeCapability_AccessMode{
Mode: asCSIAccessMode(accessMode),
req := &csipbv1.NodePublishVolumeRequest{
VolumeId: volID,
TargetPath: targetPath,
Readonly: readOnly,
PublishContext: publishContext,
VolumeContext: volumeContext,
Secrets: secrets,
VolumeCapability: &csipbv1.VolumeCapability{
AccessMode: &csipbv1.VolumeCapability_AccessMode{
Mode: asCSIAccessModeV1(accessMode),
},
AccessType: &csipb.VolumeCapability_Mount{
Mount: &csipb.VolumeCapability_MountVolume{
FsType: fsType,
AccessType: &csipbv1.VolumeCapability_Mount{
Mount: &csipbv1.VolumeCapability_MountVolume{
FsType: fsType,
MountFlags: mountOptions,
},
},
},
@ -76,7 +93,7 @@ func (c *fakeCsiDriverClient) NodePublishVolume(
func (c *fakeCsiDriverClient) NodeUnpublishVolume(ctx context.Context, volID string, targetPath string) error {
c.t.Log("calling fake.NodeUnpublishVolume...")
req := &csipb.NodeUnpublishVolumeRequest{
req := &csipbv1.NodeUnpublishVolumeRequest{
VolumeId: volID,
TargetPath: targetPath,
}
@ -87,30 +104,30 @@ func (c *fakeCsiDriverClient) NodeUnpublishVolume(ctx context.Context, volID str
func (c *fakeCsiDriverClient) NodeStageVolume(ctx context.Context,
volID string,
publishContext map[string]string,
stagingTargetPath string,
fsType string,
accessMode api.PersistentVolumeAccessMode,
secrets map[string]string,
volumeContext map[string]string,
) error {
c.t.Log("calling fake.NodeStageVolume...")
req := &csipb.NodeStageVolumeRequest{
req := &csipbv1.NodeStageVolumeRequest{
VolumeId: volID,
PublishInfo: publishInfo,
PublishContext: publishContext,
StagingTargetPath: stagingTargetPath,
VolumeCapability: &csipb.VolumeCapability{
AccessMode: &csipb.VolumeCapability_AccessMode{
Mode: asCSIAccessMode(accessMode),
VolumeCapability: &csipbv1.VolumeCapability{
AccessMode: &csipbv1.VolumeCapability_AccessMode{
Mode: asCSIAccessModeV1(accessMode),
},
AccessType: &csipb.VolumeCapability_Mount{
Mount: &csipb.VolumeCapability_MountVolume{
AccessType: &csipbv1.VolumeCapability_Mount{
Mount: &csipbv1.VolumeCapability_MountVolume{
FsType: fsType,
},
},
},
Secrets: secrets,
VolumeContext: volumeContext,
}
_, err := c.nodeClient.NodeStageVolume(ctx, req)
@ -119,7 +136,7 @@ func (c *fakeCsiDriverClient) NodeStageVolume(ctx context.Context,
func (c *fakeCsiDriverClient) NodeUnstageVolume(ctx context.Context, volID, stagingTargetPath string) error {
c.t.Log("calling fake.NodeUnstageVolume...")
req := &csipb.NodeUnstageVolumeRequest{
req := &csipbv1.NodeUnstageVolumeRequest{
VolumeId: volID,
StagingTargetPath: stagingTargetPath,
}
@ -127,20 +144,109 @@ func (c *fakeCsiDriverClient) NodeUnstageVolume(ctx context.Context, volID, stag
return err
}
func (c *fakeCsiDriverClient) NodeSupportsStageUnstage(ctx context.Context) (bool, error) {
c.t.Log("calling fake.NodeGetCapabilities for NodeSupportsStageUnstage...")
req := &csipbv1.NodeGetCapabilitiesRequest{}
resp, err := c.nodeClient.NodeGetCapabilities(ctx, req)
if err != nil {
return false, err
}
capabilities := resp.GetCapabilities()
stageUnstageSet := false
if capabilities == nil {
return false, nil
}
for _, capability := range capabilities {
if capability.GetRpc().GetType() == csipbv1.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME {
stageUnstageSet = true
}
}
return stageUnstageSet, nil
}
func setupClient(t *testing.T, stageUnstageSet bool) csiClient {
return newFakeCsiDriverClient(t, stageUnstageSet)
}
func checkErr(t *testing.T, expectedAnError bool, actualError error) {
t.Helper()
errOccurred := actualError != nil
if expectedAnError && !errOccurred {
t.Error("expected an error")
}
if !expectedAnError && errOccurred {
t.Errorf("expected no error, got: %v", actualError)
}
}
func TestClientNodeGetInfo(t *testing.T) {
testCases := []struct {
name string
expectedNodeID string
expectedMaxVolumePerNode int64
expectedAccessibleTopology map[string]string
mustFail bool
err error
}{
{
name: "test ok",
expectedNodeID: "node1",
expectedMaxVolumePerNode: 16,
expectedAccessibleTopology: map[string]string{"com.example.csi-topology/zone": "zone1"},
},
{
name: "grpc error",
mustFail: true,
err: errors.New("grpc error"),
},
}
for _, tc := range testCases {
t.Logf("test case: %s", tc.name)
fakeCloser := fake.NewCloser(t)
client := &csiDriverClient{
driverName: "Fake Driver Name",
nodeV1ClientCreator: func(addr csiAddr) (csipbv1.NodeClient, io.Closer, error) {
nodeClient := fake.NewNodeClient(false /* stagingCapable */)
nodeClient.SetNextError(tc.err)
nodeClient.SetNodeGetInfoResp(&csipbv1.NodeGetInfoResponse{
NodeId: tc.expectedNodeID,
MaxVolumesPerNode: tc.expectedMaxVolumePerNode,
AccessibleTopology: &csipbv1.Topology{
Segments: tc.expectedAccessibleTopology,
},
})
return nodeClient, fakeCloser, nil
},
}
nodeID, maxVolumePerNode, accessibleTopology, err := client.NodeGetInfo(context.Background())
checkErr(t, tc.mustFail, err)
if nodeID != tc.expectedNodeID {
t.Errorf("expected nodeID: %v; got: %v", tc.expectedNodeID, nodeID)
}
if maxVolumePerNode != tc.expectedMaxVolumePerNode {
t.Errorf("expected maxVolumePerNode: %v; got: %v", tc.expectedMaxVolumePerNode, maxVolumePerNode)
}
if !reflect.DeepEqual(accessibleTopology, tc.expectedAccessibleTopology) {
t.Errorf("expected accessibleTopology: %v; got: %v", tc.expectedAccessibleTopology, accessibleTopology)
}
if !tc.mustFail {
fakeCloser.Check()
}
}
}
func TestClientNodePublishVolume(t *testing.T) {
testCases := []struct {
name string
@ -157,11 +263,18 @@ func TestClientNodePublishVolume(t *testing.T) {
{name: "grpc error", volID: "vol-test", targetPath: "/test/path", mustFail: true, err: errors.New("grpc error")},
}
for _, tc := range testCases {
t.Logf("test case: %s", tc.name)
fakeCloser := fake.NewCloser(t)
client := &csiDriverClient{
driverName: "Fake Driver Name",
nodeV1ClientCreator: func(addr csiAddr) (csipbv1.NodeClient, io.Closer, error) {
nodeClient := fake.NewNodeClient(false /* stagingCapable */)
nodeClient.SetNextError(tc.err)
return nodeClient, fakeCloser, nil
},
}
err := client.NodePublishVolume(
context.Background(),
tc.volID,
@ -173,10 +286,12 @@ func TestClientNodePublishVolume(t *testing.T) {
map[string]string{"attr0": "val0"},
map[string]string{},
tc.fsType,
[]string{},
)
checkErr(t, tc.mustFail, err)
if tc.mustFail && err == nil {
t.Error("test must fail, but err is nil")
if !tc.mustFail {
fakeCloser.Check()
}
}
}
@ -195,14 +310,23 @@ func TestClientNodeUnpublishVolume(t *testing.T) {
{name: "grpc error", volID: "vol-test", targetPath: "/test/path", mustFail: true, err: errors.New("grpc error")},
}
for _, tc := range testCases {
t.Logf("test case: %s", tc.name)
fakeCloser := fake.NewCloser(t)
client := &csiDriverClient{
driverName: "Fake Driver Name",
nodeV1ClientCreator: func(addr csiAddr) (csipbv1.NodeClient, io.Closer, error) {
nodeClient := fake.NewNodeClient(false /* stagingCapable */)
nodeClient.SetNextError(tc.err)
return nodeClient, fakeCloser, nil
},
}
err := client.NodeUnpublishVolume(context.Background(), tc.volID, tc.targetPath)
if tc.mustFail && err == nil {
t.Error("test must fail, but err is nil")
checkErr(t, tc.mustFail, err)
if !tc.mustFail {
fakeCloser.Check()
}
}
}
@ -213,7 +337,7 @@ func TestClientNodeStageVolume(t *testing.T) {
volID string
stagingTargetPath string
fsType string
secrets map[string]string
mustFail bool
err error
}{
@ -224,11 +348,18 @@ func TestClientNodeStageVolume(t *testing.T) {
{name: "grpc error", volID: "vol-test", stagingTargetPath: "/test/path", mustFail: true, err: errors.New("grpc error")},
}
for _, tc := range testCases {
t.Logf("Running test case: %s", tc.name)
fakeCloser := fake.NewCloser(t)
client := &csiDriverClient{
driverName: "Fake Driver Name",
nodeV1ClientCreator: func(addr csiAddr) (csipbv1.NodeClient, io.Closer, error) {
nodeClient := fake.NewNodeClient(false /* stagingCapable */)
nodeClient.SetNextError(tc.err)
return nodeClient, fakeCloser, nil
},
}
err := client.NodeStageVolume(
context.Background(),
tc.volID,
@ -236,12 +367,13 @@ func TestClientNodeStageVolume(t *testing.T) {
tc.stagingTargetPath,
tc.fsType,
api.ReadWriteOnce,
tc.secrets,
map[string]string{"attr0": "val0"},
)
checkErr(t, tc.mustFail, err)
if tc.mustFail && err == nil {
t.Error("test must fail, but err is nil")
if !tc.mustFail {
fakeCloser.Check()
}
}
}
@ -260,17 +392,26 @@ func TestClientNodeUnstageVolume(t *testing.T) {
{name: "grpc error", volID: "vol-test", stagingTargetPath: "/test/path", mustFail: true, err: errors.New("grpc error")},
}
for _, tc := range testCases {
t.Logf("Running test case: %s", tc.name)
fakeCloser := fake.NewCloser(t)
client := &csiDriverClient{
driverName: "Fake Driver Name",
nodeV1ClientCreator: func(addr csiAddr) (csipbv1.NodeClient, io.Closer, error) {
nodeClient := fake.NewNodeClient(false /* stagingCapable */)
nodeClient.SetNextError(tc.err)
return nodeClient, fakeCloser, nil
},
}
err := client.NodeUnstageVolume(
context.Background(),
tc.volID, tc.stagingTargetPath,
)
if tc.mustFail && err == nil {
t.Error("test must fail, but err is nil")
checkErr(t, tc.mustFail, err)
if !tc.mustFail {
fakeCloser.Check()
}
}
}


@ -23,12 +23,14 @@ import (
"os"
"path"
"github.com/golang/glog"
"k8s.io/klog"
api "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/features"
kstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
@ -49,21 +51,22 @@ var (
"nodeName",
"attachmentID",
}
currentPodInfoMountVersion = "v1"
)
type csiMountMgr struct {
csiClient csiClient
k8s kubernetes.Interface
plugin *csiPlugin
driverName csiDriverName
volumeID string
specVolumeID string
readOnly bool
spec *volume.Spec
pod *api.Pod
podUID types.UID
options volume.VolumeOptions
publishContext map[string]string
volume.MetricsNil
}
@ -72,7 +75,7 @@ var _ volume.Volume = &csiMountMgr{}
func (c *csiMountMgr) GetPath() string {
dir := path.Join(getTargetPath(c.podUID, c.specVolumeID, c.plugin.host), "/mount")
glog.V(4).Info(log("mounter.GetPath generated [%s]", dir))
klog.V(4).Info(log("mounter.GetPath generated [%s]", dir))
return dir
}
@ -93,61 +96,51 @@ func (c *csiMountMgr) SetUp(fsGroup *int64) error {
}
func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error {
glog.V(4).Infof(log("Mounter.SetUpAt(%s)", dir))
klog.V(4).Infof(log("Mounter.SetUpAt(%s)", dir))
mounted, err := isDirMounted(c.plugin, dir)
if err != nil {
glog.Error(log("mounter.SetUpAt failed while checking mount status for dir [%s]", dir))
klog.Error(log("mounter.SetUpAt failed while checking mount status for dir [%s]", dir))
return err
}
if mounted {
glog.V(4).Info(log("mounter.SetUpAt skipping mount, dir already mounted [%s]", dir))
klog.V(4).Info(log("mounter.SetUpAt skipping mount, dir already mounted [%s]", dir))
return nil
}
csiSource, err := getCSISourceFromSpec(c.spec)
if err != nil {
glog.Error(log("mounter.SetupAt failed to get CSI persistent source: %v", err))
klog.Error(log("mounter.SetupAt failed to get CSI persistent source: %v", err))
return err
}
csi := c.csiClient
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
defer cancel()
// Check for STAGE_UNSTAGE_VOLUME set and populate deviceMountPath if so
deviceMountPath := ""
stageUnstageSet, err := csi.NodeSupportsStageUnstage(ctx)
if err != nil {
glog.Error(log("mounter.SetUpAt failed to check for STAGE_UNSTAGE_VOLUME capabilty: %v", err))
klog.Error(log("mounter.SetUpAt failed to check for STAGE_UNSTAGE_VOLUME capabilty: %v", err))
return err
}
if stageUnstageSet {
deviceMountPath, err = makeDeviceMountPath(c.plugin, c.spec)
if err != nil {
glog.Error(log("mounter.SetUpAt failed to make device mount path: %v", err))
klog.Error(log("mounter.SetUpAt failed to make device mount path: %v", err))
return err
}
}
// search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
if c.publishContext == nil {
nodeName := string(c.plugin.host.GetNodeName())
c.publishContext, err = c.plugin.getPublishContext(c.k8s, c.volumeID, string(c.driverName), nodeName)
if err != nil {
glog.Error(log("mounter.SetupAt failed while getting volume attachment [id=%v]: %v", attachID, err))
return err
}
}
attribs := csiSource.VolumeAttributes
@ -163,10 +156,10 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error {
// create target_dir before call to NodePublish
if err := os.MkdirAll(dir, 0750); err != nil {
glog.Error(log("mouter.SetUpAt failed to create dir %#v: %v", dir, err))
klog.Error(log("mouter.SetUpAt failed to create dir %#v: %v", dir, err))
return err
}
glog.V(4).Info(log("created target path successfully [%s]", dir))
klog.V(4).Info(log("created target path successfully [%s]", dir))
//TODO (vladimirvivien) implement better AccessModes mapping between k8s and CSI
accessMode := api.ReadWriteOnce
@ -174,6 +167,22 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error {
accessMode = c.spec.PersistentVolume.Spec.AccessModes[0]
}
// Inject pod information into volume_attributes
podAttrs, err := c.podAttributes()
if err != nil {
klog.Error(log("mouter.SetUpAt failed to assemble volume attributes: %v", err))
return err
}
if podAttrs != nil {
if attribs == nil {
attribs = podAttrs
} else {
for k, v := range podAttrs {
attribs[k] = v
}
}
}
fsType := csiSource.FSType
err = csi.NodePublishVolume(
ctx,
@ -182,58 +191,85 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error {
deviceMountPath,
dir,
accessMode,
c.volumeInfo,
c.publishContext,
attribs,
nodePublishSecrets,
fsType,
c.spec.PersistentVolume.Spec.MountOptions,
)
if err != nil {
glog.Errorf(log("mounter.SetupAt failed: %v", err))
klog.Errorf(log("mounter.SetupAt failed: %v", err))
if removeMountDirErr := removeMountDir(c.plugin, dir); removeMountDirErr != nil {
glog.Error(log("mounter.SetupAt failed to remove mount dir after a NodePublish() error [%s]: %v", dir, removeMountDirErr))
klog.Error(log("mounter.SetupAt failed to remove mount dir after a NodePublish() error [%s]: %v", dir, removeMountDirErr))
}
return err
}
// apply volume ownership
if !c.readOnly && fsGroup != nil {
err := volume.SetVolumeOwnership(c, fsGroup)
if err != nil {
// attempt to rollback mount.
glog.Error(log("mounter.SetupAt failed to set fsgroup volume ownership for [%s]: %v", c.volumeID, err))
glog.V(4).Info(log("mounter.SetupAt attempting to unpublish volume %s due to previous error", c.volumeID))
if unpubErr := csi.NodeUnpublishVolume(ctx, c.volumeID, dir); unpubErr != nil {
glog.Error(log(
"mounter.SetupAt failed to unpublish volume [%s]: %v (caused by previous NodePublish error: %v)",
c.volumeID, unpubErr, err,
))
return fmt.Errorf("%v (caused by %v)", unpubErr, err)
}
// The following logic is derived from https://github.com/kubernetes/kubernetes/issues/66323
// if fstype is "", then skip fsgroup (could be indication of non-block filesystem)
// if fstype is provided and pv.AccessMode == ReadWriteOnly, then apply fsgroup
if unmountErr := removeMountDir(c.plugin, dir); unmountErr != nil {
glog.Error(log(
"mounter.SetupAt failed to clean mount dir [%s]: %v (caused by previous NodePublish error: %v)",
dir, unmountErr, err,
))
return fmt.Errorf("%v (caused by %v)", unmountErr, err)
}
return err
err = c.applyFSGroup(fsType, fsGroup)
if err != nil {
// attempt to rollback mount.
fsGrpErr := fmt.Errorf("applyFSGroup failed for vol %s: %v", c.volumeID, err)
if unpubErr := csi.NodeUnpublishVolume(ctx, c.volumeID, dir); unpubErr != nil {
klog.Error(log("NodeUnpublishVolume failed for [%s]: %v", c.volumeID, unpubErr))
return fsGrpErr
}
glog.V(4).Info(log("mounter.SetupAt sets fsGroup to [%d] for %s", *fsGroup, c.volumeID))
if unmountErr := removeMountDir(c.plugin, dir); unmountErr != nil {
klog.Error(log("removeMountDir failed for [%s]: %v", dir, unmountErr))
return fsGrpErr
}
return fsGrpErr
}
glog.V(4).Infof(log("mounter.SetUp successfully requested NodePublish [%s]", dir))
klog.V(4).Infof(log("mounter.SetUp successfully requested NodePublish [%s]", dir))
return nil
}
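// podAttributes returns the csi.storage.k8s.io/* pod metadata to inject into
// the volume attributes, or nil when the CSIDriverRegistry feature gate is off
// or the driver has not opted in via PodInfoOnMountVersion.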
func (c *csiMountMgr) podAttributes() (map[string]string, error) {
if !utilfeature.DefaultFeatureGate.Enabled(features.CSIDriverRegistry) {
return nil, nil
}
if c.plugin.csiDriverLister == nil {
return nil, errors.New("CSIDriver lister does not exist")
}
csiDriver, err := c.plugin.csiDriverLister.Get(string(c.driverName))
if err != nil {
if apierrs.IsNotFound(err) {
klog.V(4).Infof(log("CSIDriver %q not found, not adding pod information", c.driverName))
return nil, nil
}
return nil, err
}
// if PodInfoOnMountVersion is not set, or does not match the current version, do not add pod attributes
if csiDriver.Spec.PodInfoOnMountVersion == nil || *csiDriver.Spec.PodInfoOnMountVersion != currentPodInfoMountVersion {
klog.V(4).Infof(log("CSIDriver %q does not require pod information", c.driverName))
return nil, nil
}
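// These csi.storage.k8s.io/* keys are the pod fields that opted-in drivers
// receive in the volume context on NodePublish.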
attrs := map[string]string{
"csi.storage.k8s.io/pod.name": c.pod.Name,
"csi.storage.k8s.io/pod.namespace": c.pod.Namespace,
"csi.storage.k8s.io/pod.uid": string(c.pod.UID),
"csi.storage.k8s.io/serviceAccount.name": c.pod.Spec.ServiceAccountName,
}
klog.V(4).Infof(log("CSIDriver %q requires pod information", c.driverName))
return attrs, nil
}
func (c *csiMountMgr) GetAttributes() volume.Attributes {
mounter := c.plugin.host.GetMounter(c.plugin.GetPluginName())
path := c.GetPath()
supportSelinux, err := mounter.GetSELinuxSupport(path)
if err != nil {
glog.V(2).Info(log("error checking for SELinux support: %s", err))
klog.V(2).Info(log("error checking for SELinux support: %s", err))
// Best guess
supportSelinux = false
}
@ -251,19 +287,19 @@ func (c *csiMountMgr) TearDown() error {
return c.TearDownAt(c.GetPath())
}
func (c *csiMountMgr) TearDownAt(dir string) error {
glog.V(4).Infof(log("Unmounter.TearDown(%s)", dir))
klog.V(4).Infof(log("Unmounter.TearDown(%s)", dir))
// is dir even mounted ?
// TODO (vladimirvivien) this check may not work for an emptyDir or local storage
// see https://github.com/kubernetes/kubernetes/pull/56836#discussion_r155834524
mounted, err := isDirMounted(c.plugin, dir)
if err != nil {
glog.Error(log("unmounter.Teardown failed while checking mount status for dir [%s]: %v", dir, err))
klog.Error(log("unmounter.Teardown failed while checking mount status for dir [%s]: %v", dir, err))
return err
}
if !mounted {
glog.V(4).Info(log("unmounter.Teardown skipping unmount, dir not mounted [%s]", dir))
klog.V(4).Info(log("unmounter.Teardown skipping unmount, dir not mounted [%s]", dir))
return nil
}
@ -274,16 +310,53 @@ func (c *csiMountMgr) TearDownAt(dir string) error {
defer cancel()
if err := csi.NodeUnpublishVolume(ctx, volID, dir); err != nil {
glog.Errorf(log("mounter.TearDownAt failed: %v", err))
klog.Errorf(log("mounter.TearDownAt failed: %v", err))
return err
}
// clean mount point dir
if err := removeMountDir(c.plugin, dir); err != nil {
glog.Error(log("mounter.TearDownAt failed to clean mount dir [%s]: %v", dir, err))
klog.Error(log("mounter.TearDownAt failed to clean mount dir [%s]: %v", dir, err))
return err
}
glog.V(4).Infof(log("mounte.TearDownAt successfully unmounted dir [%s]", dir))
klog.V(4).Infof(log("mounte.TearDownAt successfully unmounted dir [%s]", dir))
return nil
}
// applyFSGroup applies volume ownership; it derives its logic
// from https://github.com/kubernetes/kubernetes/issues/66323
// 1) if fstype is "", then skip fsgroup (could be an indication of a non-block filesystem)
// 2) if fstype is provided, pv.AccessMode == ReadWriteOnce, and !c.spec.ReadOnly, then apply fsgroup
func (c *csiMountMgr) applyFSGroup(fsType string, fsGroup *int64) error {
if fsGroup != nil {
if fsType == "" {
klog.V(4).Info(log("mounter.SetupAt WARNING: skipping fsGroup, fsType not provided"))
return nil
}
accessModes := c.spec.PersistentVolume.Spec.AccessModes
if c.spec.PersistentVolume.Spec.AccessModes == nil {
klog.V(4).Info(log("mounter.SetupAt WARNING: skipping fsGroup, access modes not provided"))
return nil
}
if !hasReadWriteOnce(accessModes) {
klog.V(4).Info(log("mounter.SetupAt WARNING: skipping fsGroup, only support ReadWriteOnce access mode"))
return nil
}
if c.readOnly {
klog.V(4).Info(log("mounter.SetupAt WARNING: skipping fsGroup, volume is readOnly"))
return nil
}
err := volume.SetVolumeOwnership(c, fsGroup)
if err != nil {
return err
}
klog.V(4).Info(log("mounter.SetupAt fsGroup [%d] applied successfully to %s", *fsGroup, c.volumeID))
}
return nil
}
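// Illustrative walk-through (not in the original source): a PV with fsType
// "ext4", accessModes [ReadWriteOnce], readOnly=false and fsGroup=2000 passes
// every guard above and has ownership applied; the same PV with fsType ""
// skips fsGroup handling entirely.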
@ -293,7 +366,7 @@ func isDirMounted(plug *csiPlugin, dir string) (bool, error) {
mounter := plug.host.GetMounter(plug.GetPluginName())
notMnt, err := mounter.IsLikelyNotMountPoint(dir)
if err != nil && !os.IsNotExist(err) {
glog.Error(log("isDirMounted IsLikelyNotMountPoint test failed for dir [%v]", dir))
klog.Error(log("isDirMounted IsLikelyNotMountPoint test failed for dir [%v]", dir))
return false, err
}
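// IsLikelyNotMountPoint reports true when dir is NOT a mount point,
// so the result is inverted here.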
return !notMnt, nil
@ -301,39 +374,39 @@ func isDirMounted(plug *csiPlugin, dir string) (bool, error) {
// removeMountDir cleans the mount dir when dir is not mounted and removes the volume data file in dir
func removeMountDir(plug *csiPlugin, mountPath string) error {
glog.V(4).Info(log("removing mount path [%s]", mountPath))
klog.V(4).Info(log("removing mount path [%s]", mountPath))
if pathExists, pathErr := util.PathExists(mountPath); pathErr != nil {
glog.Error(log("failed while checking mount path stat [%s]", pathErr))
klog.Error(log("failed while checking mount path stat [%s]", pathErr))
return pathErr
} else if !pathExists {
glog.Warning(log("skipping mount dir removal, path does not exist [%v]", mountPath))
klog.Warning(log("skipping mount dir removal, path does not exist [%v]", mountPath))
return nil
}
mounter := plug.host.GetMounter(plug.GetPluginName())
notMnt, err := mounter.IsLikelyNotMountPoint(mountPath)
if err != nil {
glog.Error(log("mount dir removal failed [%s]: %v", mountPath, err))
klog.Error(log("mount dir removal failed [%s]: %v", mountPath, err))
return err
}
if notMnt {
glog.V(4).Info(log("dir not mounted, deleting it [%s]", mountPath))
klog.V(4).Info(log("dir not mounted, deleting it [%s]", mountPath))
if err := os.Remove(mountPath); err != nil && !os.IsNotExist(err) {
glog.Error(log("failed to remove dir [%s]: %v", mountPath, err))
klog.Error(log("failed to remove dir [%s]: %v", mountPath, err))
return err
}
// remove volume data file as well
volPath := path.Dir(mountPath)
dataFile := path.Join(volPath, volDataFileName)
glog.V(4).Info(log("also deleting volume info data file [%s]", dataFile))
klog.V(4).Info(log("also deleting volume info data file [%s]", dataFile))
if err := os.Remove(dataFile); err != nil && !os.IsNotExist(err) {
glog.Error(log("failed to delete volume data file [%s]: %v", dataFile, err))
klog.Error(log("failed to delete volume data file [%s]: %v", dataFile, err))
return err
}
// remove volume path
glog.V(4).Info(log("deleting volume path [%s]", volPath))
klog.V(4).Info(log("deleting volume path [%s]", volPath))
if err := os.Remove(volPath); err != nil && !os.IsNotExist(err) {
glog.Error(log("failed to delete volume path [%s]: %v", volPath, err))
klog.Error(log("failed to delete volume path [%s]: %v", volPath, err))
return err
}
}

View File

@ -25,25 +25,35 @@ import (
"path"
"testing"
"reflect"
api "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1beta1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
fakeclient "k8s.io/client-go/kubernetes/fake"
csiapi "k8s.io/csi-api/pkg/apis/csi/v1alpha1"
fakecsi "k8s.io/csi-api/pkg/client/clientset/versioned/fake"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/kubernetes/pkg/volume/util"
)
var (
testDriver = "test-driver"
testVol = "vol-123"
testns = "test-ns"
testPodUID = types.UID("test-pod")
testDriver = "test-driver"
testVol = "vol-123"
testns = "test-ns"
testPod = "test-pod"
testPodUID = types.UID("test-pod")
testAccount = "test-service-account"
)
func TestMounterGetPath(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
// TODO (vladimirvivien) specName with slashes will not work
@ -65,6 +75,7 @@ func TestMounterGetPath(t *testing.T) {
}
for _, tc := range testCases {
t.Logf("test case: %s", tc.name)
registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t)
pv := makeTestPV(tc.specVolumeName, 10, testDriver, testVol)
spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
mounter, err := plug.NewMounter(
@ -85,90 +96,306 @@ func TestMounterGetPath(t *testing.T) {
}
}
func MounterSetUpTests(t *testing.T, podInfoEnabled bool) {
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIDriverRegistry, podInfoEnabled)()
tests := []struct {
name string
driver string
volumeContext map[string]string
expectedVolumeContext map[string]string
}{
{
name: "no pod info",
driver: "no-info",
volumeContext: nil,
expectedVolumeContext: nil,
},
{
name: "no CSIDriver -> no pod info",
driver: "unknown-driver",
volumeContext: nil,
expectedVolumeContext: nil,
},
{
name: "CSIDriver with PodInfoRequiredOnMount=nil -> no pod info",
driver: "nil",
volumeContext: nil,
expectedVolumeContext: nil,
},
{
name: "no pod info -> keep existing volumeContext",
driver: "no-info",
volumeContext: map[string]string{"foo": "bar"},
expectedVolumeContext: map[string]string{"foo": "bar"},
},
{
name: "add pod info",
driver: "info",
volumeContext: nil,
expectedVolumeContext: map[string]string{"csi.storage.k8s.io/pod.uid": "test-pod", "csi.storage.k8s.io/serviceAccount.name": "test-service-account", "csi.storage.k8s.io/pod.name": "test-pod", "csi.storage.k8s.io/pod.namespace": "test-ns"},
},
{
name: "add pod info -> keep existing volumeContext",
driver: "info",
volumeContext: map[string]string{"foo": "bar"},
expectedVolumeContext: map[string]string{"foo": "bar", "csi.storage.k8s.io/pod.uid": "test-pod", "csi.storage.k8s.io/serviceAccount.name": "test-service-account", "csi.storage.k8s.io/pod.name": "test-pod", "csi.storage.k8s.io/pod.namespace": "test-ns"},
},
}
emptyPodMountInfoVersion := ""
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
klog.Infof("Starting test %s", test.name)
fakeClient := fakeclient.NewSimpleClientset()
fakeCSIClient := fakecsi.NewSimpleClientset(
getCSIDriver("no-info", &emptyPodMountInfoVersion, nil),
getCSIDriver("info", &currentPodInfoMountVersion, nil),
getCSIDriver("nil", nil, nil),
)
plug, tmpDir := newTestPlugin(t, fakeClient, fakeCSIClient)
defer os.RemoveAll(tmpDir)
if utilfeature.DefaultFeatureGate.Enabled(features.CSIDriverRegistry) {
// Wait until the informer in CSI volume plugin has all CSIDrivers.
wait.PollImmediate(testInformerSyncPeriod, testInformerSyncTimeout, func() (bool, error) {
return plug.csiDriverInformer.Informer().HasSynced(), nil
})
}
registerFakePlugin(test.driver, "endpoint", []string{"1.0.0"}, t)
pv := makeTestPV("test-pv", 10, test.driver, testVol)
pv.Spec.CSI.VolumeAttributes = test.volumeContext
pv.Spec.MountOptions = []string{"foo=bar", "baz=qux"}
pvName := pv.GetName()
mounter, err := plug.NewMounter(
volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly),
&api.Pod{
ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns, Name: testPod},
Spec: api.PodSpec{
ServiceAccountName: testAccount,
},
},
volume.VolumeOptions{},
)
if err != nil {
t.Fatalf("failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Fatal("failed to create CSI mounter")
}
csiMounter := mounter.(*csiMountMgr)
csiMounter.csiClient = setupClient(t, true)
attachID := getAttachmentName(csiMounter.volumeID, string(csiMounter.driverName), string(plug.host.GetNodeName()))
attachment := &storage.VolumeAttachment{
ObjectMeta: meta.ObjectMeta{
Name: attachID,
},
Spec: storage.VolumeAttachmentSpec{
NodeName: "test-node",
Attacher: csiPluginName,
Source: storage.VolumeAttachmentSource{
PersistentVolumeName: &pvName,
},
},
Status: storage.VolumeAttachmentStatus{
Attached: false,
AttachError: nil,
DetachError: nil,
},
}
_, err = csiMounter.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
if err != nil {
t.Fatalf("failed to setup VolumeAttachment: %v", err)
}
// Mounter.SetUp()
fsGroup := int64(2000)
if err := csiMounter.SetUp(&fsGroup); err != nil {
t.Fatalf("mounter.Setup failed: %v", err)
}
// Test that the default value of the file system type is not overridden
if len(csiMounter.spec.PersistentVolume.Spec.CSI.FSType) != 0 {
t.Errorf("default value of file system type was overridden by type %s", csiMounter.spec.PersistentVolume.Spec.CSI.FSType)
}
path := csiMounter.GetPath()
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", path)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
// ensure call went all the way
pubs := csiMounter.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodePublishedVolumes()
vol, ok := pubs[csiMounter.volumeID]
if !ok {
t.Error("csi server may not have received NodePublishVolume call")
}
if vol.Path != csiMounter.GetPath() {
t.Errorf("csi server expected path %s, got %s", csiMounter.GetPath(), vol.Path)
}
if !reflect.DeepEqual(vol.MountFlags, pv.Spec.MountOptions) {
t.Errorf("csi server expected mount options %v, got %v", pv.Spec.MountOptions, vol.MountFlags)
}
if podInfoEnabled {
if !reflect.DeepEqual(vol.VolumeContext, test.expectedVolumeContext) {
t.Errorf("csi server expected volumeContext %+v, got %+v", test.expectedVolumeContext, vol.VolumeContext)
}
} else {
// CSIPodInfo feature is disabled, we expect no modifications to volumeContext.
if !reflect.DeepEqual(vol.VolumeContext, test.volumeContext) {
t.Errorf("csi server expected volumeContext %+v, got %+v", test.volumeContext, vol.VolumeContext)
}
}
})
}
}
func TestMounterSetUp(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer os.RemoveAll(tmpDir)
t.Run("WithCSIPodInfo", func(t *testing.T) {
MounterSetUpTests(t, true)
})
t.Run("WithoutCSIPodInfo", func(t *testing.T) {
MounterSetUpTests(t, false)
})
}
func TestMounterSetUpWithFSGroup(t *testing.T) {
fakeClient := fakeclient.NewSimpleClientset()
host := volumetest.NewFakeVolumeHostWithNodeName(
tmpDir,
fakeClient,
nil,
"fakeNode",
)
plug.host = host
pv := makeTestPV("test-pv", 10, testDriver, testVol)
pvName := pv.GetName()
plug, tmpDir := newTestPlugin(t, fakeClient, nil)
defer os.RemoveAll(tmpDir)
mounter, err := plug.NewMounter(
volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly),
&api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
volume.VolumeOptions{},
)
if err != nil {
t.Fatalf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Fatal("failed to create CSI mounter")
}
csiMounter := mounter.(*csiMountMgr)
csiMounter.csiClient = setupClient(t, true)
attachID := getAttachmentName(csiMounter.volumeID, csiMounter.driverName, string(plug.host.GetNodeName()))
attachment := &storage.VolumeAttachment{
ObjectMeta: meta.ObjectMeta{
Name: attachID,
},
Spec: storage.VolumeAttachmentSpec{
NodeName: "test-node",
Attacher: csiPluginName,
Source: storage.VolumeAttachmentSource{
PersistentVolumeName: &pvName,
testCases := []struct {
name string
accessModes []api.PersistentVolumeAccessMode
readOnly bool
fsType string
setFsGroup bool
fsGroup int64
}{
{
name: "default fstype, with no fsgroup (should not apply fsgroup)",
accessModes: []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
},
readOnly: false,
fsType: "",
},
Status: storage.VolumeAttachmentStatus{
Attached: false,
AttachError: nil,
DetachError: nil,
{
name: "default fstype with fsgroup (should not apply fsgroup)",
accessModes: []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
},
readOnly: false,
fsType: "",
setFsGroup: true,
fsGroup: 3000,
},
{
name: "fstype, fsgroup, RWM, ROM provided (should not apply fsgroup)",
accessModes: []api.PersistentVolumeAccessMode{
api.ReadWriteMany,
api.ReadOnlyMany,
},
fsType: "ext4",
setFsGroup: true,
fsGroup: 3000,
},
{
name: "fstype, fsgroup, RWO, but readOnly (should not apply fsgroup)",
accessModes: []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
},
readOnly: true,
fsType: "ext4",
setFsGroup: true,
fsGroup: 3000,
},
{
name: "fstype, fsgroup, RWO provided (should apply fsgroup)",
accessModes: []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
},
fsType: "ext4",
setFsGroup: true,
fsGroup: 3000,
},
}
_, err = csiMounter.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
if err != nil {
t.Fatalf("failed to setup VolumeAttachment: %v", err)
}
// Mounter.SetUp()
fsGroup := int64(2000)
if err := csiMounter.SetUp(&fsGroup); err != nil {
t.Fatalf("mounter.Setup failed: %v", err)
}
for i, tc := range testCases {
t.Logf("Running test %s", tc.name)
//Test the default value of file system type is not overridden
if len(csiMounter.spec.PersistentVolume.Spec.CSI.FSType) != 0 {
t.Errorf("default value of file system type was overridden by type %s", csiMounter.spec.PersistentVolume.Spec.CSI.FSType)
}
volName := fmt.Sprintf("test-vol-%d", i)
registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t)
pv := makeTestPV("test-pv", 10, testDriver, volName)
pv.Spec.AccessModes = tc.accessModes
pvName := pv.GetName()
path := csiMounter.GetPath()
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", path)
} else {
t.Errorf("SetUp() failed: %v", err)
spec := volume.NewSpecFromPersistentVolume(pv, tc.readOnly)
if tc.fsType != "" {
spec.PersistentVolume.Spec.CSI.FSType = tc.fsType
}
}
// ensure call went all the way
pubs := csiMounter.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodePublishedVolumes()
if pubs[csiMounter.volumeID] != csiMounter.GetPath() {
t.Error("csi server may not have received NodePublishVolume call")
mounter, err := plug.NewMounter(
spec,
&api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
volume.VolumeOptions{},
)
if err != nil {
t.Fatalf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Fatal("failed to create CSI mounter")
}
csiMounter := mounter.(*csiMountMgr)
csiMounter.csiClient = setupClient(t, true)
attachID := getAttachmentName(csiMounter.volumeID, string(csiMounter.driverName), string(plug.host.GetNodeName()))
attachment := makeTestAttachment(attachID, "test-node", pvName)
_, err = csiMounter.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
if err != nil {
t.Errorf("failed to setup VolumeAttachment: %v", err)
continue
}
// Mounter.SetUp()
var fsGroupPtr *int64
if tc.setFsGroup {
fsGroup := tc.fsGroup
fsGroupPtr = &fsGroup
}
if err := csiMounter.SetUp(fsGroupPtr); err != nil {
t.Fatalf("mounter.Setup failed: %v", err)
}
// Test that the file system type is not overridden
if csiMounter.spec.PersistentVolume.Spec.CSI.FSType != tc.fsType {
t.Errorf("file system type was overridden by type %s", csiMounter.spec.PersistentVolume.Spec.CSI.FSType)
}
// ensure call went all the way
pubs := csiMounter.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodePublishedVolumes()
if pubs[csiMounter.volumeID].Path != csiMounter.GetPath() {
t.Error("csi server may not have received NodePublishVolume call")
}
}
}
func TestUnmounterTeardown(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t)
pv := makeTestPV("test-pv", 10, testDriver, testVol)
// save the data file prior to unmount
@ -216,7 +443,7 @@ func TestUnmounterTeardown(t *testing.T) {
}
func TestSaveVolumeData(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
testCases := []struct {
name string
@ -262,3 +489,15 @@ func TestSaveVolumeData(t *testing.T) {
}
}
}
func getCSIDriver(name string, podInfoMountVersion *string, attachable *bool) *csiapi.CSIDriver {
return &csiapi.CSIDriver{
ObjectMeta: meta.ObjectMeta{
Name: name,
},
Spec: csiapi.CSIDriverSpec{
PodInfoOnMountVersion: podInfoMountVersion,
AttachRequired: attachable,
},
}
}

View File

@ -21,19 +21,29 @@ import (
"fmt"
"os"
"path"
"sort"
"strings"
"sync"
"time"
"github.com/golang/glog"
"context"
"k8s.io/klog"
api "k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilversion "k8s.io/apimachinery/pkg/util/version"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
clientset "k8s.io/client-go/kubernetes"
csiapiinformer "k8s.io/csi-api/pkg/client/informers/externalversions"
csiinformer "k8s.io/csi-api/pkg/client/informers/externalversions/csi/v1alpha1"
csilister "k8s.io/csi-api/pkg/client/listers/csi/v1alpha1"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/csi/labelmanager"
"k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager"
)
const (
@ -48,11 +58,18 @@ const (
volNameSep = "^"
volDataFileName = "vol_data.json"
fsTypeBlockName = "block"
// TODO: increase to something useful
csiResyncPeriod = time.Minute
)
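// deprecatedSocketDirVersions lists the CSI spec versions that may still
// register from the deprecated socket dir (see isDeprecatedSocketDirAllowed).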
var deprecatedSocketDirVersions = []string{"0.1.0", "0.2.0", "0.3.0", "0.4.0"}
type csiPlugin struct {
host volume.VolumeHost
blockEnabled bool
host volume.VolumeHost
blockEnabled bool
csiDriverLister csilister.CSIDriverLister
csiDriverInformer csiinformer.CSIDriverInformer
}
// ProbeVolumePlugins returns implemented plugins
@ -68,8 +85,9 @@ func ProbeVolumePlugins() []volume.VolumePlugin {
var _ volume.VolumePlugin = &csiPlugin{}
type csiDriver struct {
driverName string
driverEndpoint string
driverName string
driverEndpoint string
highestSupportedVersion *utilversion.Version
}
type csiDriversStore struct {
@ -77,43 +95,163 @@ type csiDriversStore struct {
sync.RWMutex
}
// RegistrationHandler is the handler which is fed to the pluginwatcher API.
type RegistrationHandler struct {
}
// TODO (verult) consider using a struct instead of global variables
// csiDrivers keeps track of all CSI drivers registered on the node and their
// corresponding sockets
var csiDrivers csiDriversStore
var lm labelmanager.Interface
var nim nodeinfomanager.Interface
// RegistrationCallback is called by kubelet's plugin watcher upon detection
// PluginHandler is the plugin registration handler interface passed to the
// pluginwatcher module in kubelet
var PluginHandler = &RegistrationHandler{}
// ValidatePlugin is called by kubelet's plugin watcher upon detection
// of a new registration socket opened by CSI Driver registrar side car.
func RegistrationCallback(pluginName string, endpoint string, versions []string, socketPath string) (error, chan bool) {
func (h *RegistrationHandler) ValidatePlugin(pluginName string, endpoint string, versions []string, foundInDeprecatedDir bool) error {
klog.Infof(log("Trying to validate a new CSI Driver with name: %s endpoint: %s versions: %s, foundInDeprecatedDir: %v",
pluginName, endpoint, strings.Join(versions, ","), foundInDeprecatedDir))
glog.Infof(log("Callback from kubelet with plugin name: %s endpoint: %s versions: %s socket path: %s",
pluginName, endpoint, strings.Join(versions, ","), socketPath))
if endpoint == "" {
endpoint = socketPath
if foundInDeprecatedDir {
// CSI 0.x drivers used /var/lib/kubelet/plugins as the socket dir.
// This was deprecated as the socket dir for kubelet drivers, in favor of a dedicated dir /var/lib/kubelet/plugins_registry.
// The deprecated dir is only allowed for a whitelisted set of old versions.
// CSI 1.x+ drivers must use /var/lib/kubelet/plugins_registry.
if !isDeprecatedSocketDirAllowed(versions) {
err := fmt.Errorf("socket for CSI driver %q versions %v was found in a deprecated dir. Drivers implementing CSI 1.x+ must use the new dir", pluginName, versions)
klog.Error(err)
return err
}
}
// Calling nodeLabelManager to update label for newly registered CSI driver
err := lm.AddLabels(pluginName)
_, err := h.validateVersions("ValidatePlugin", pluginName, endpoint, versions)
return err
}
// RegisterPlugin is called when a plugin can be registered
func (h *RegistrationHandler) RegisterPlugin(pluginName string, endpoint string, versions []string) error {
klog.Infof(log("Register new plugin with name: %s at endpoint: %s", pluginName, endpoint))
highestSupportedVersion, err := h.validateVersions("RegisterPlugin", pluginName, endpoint, versions)
if err != nil {
return err, nil
return err
}
// Storing endpoint of newly registered CSI driver into the map, where CSI driver name will be the key
// all other CSI components will be able to get the actual socket of CSI drivers by its name.
csiDrivers.Lock()
defer csiDrivers.Unlock()
csiDrivers.driversMap[pluginName] = csiDriver{driverName: pluginName, driverEndpoint: endpoint}
return nil, nil
func() {
// Store the endpoint of the newly registered CSI driver in the map, keyed by
// driver name, so all other CSI components can look up a driver's socket by name.
// It's not necessary to lock the entire RegisterPlugin() function because only
// the CSI client depends on this driver map, and the CSI client does not depend
// on node information updated in the rest of the function.
csiDrivers.Lock()
defer csiDrivers.Unlock()
csiDrivers.driversMap[pluginName] = csiDriver{driverName: pluginName, driverEndpoint: endpoint, highestSupportedVersion: highestSupportedVersion}
}()
// Get node info from the driver.
csi, err := newCsiDriverClient(csiDriverName(pluginName))
if err != nil {
return err
}
// TODO (verult) retry with exponential backoff, possibly added in csi client library.
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
defer cancel()
driverNodeID, maxVolumePerNode, accessibleTopology, err := csi.NodeGetInfo(ctx)
if err != nil {
klog.Error(log("registrationHandler.RegisterPlugin failed at CSI.NodeGetInfo: %v", err))
if unregErr := unregisterDriver(pluginName); unregErr != nil {
klog.Error(log("registrationHandler.RegisterPlugin failed to unregister plugin due to previous: %v", unregErr))
return unregErr
}
return err
}
err = nim.InstallCSIDriver(pluginName, driverNodeID, maxVolumePerNode, accessibleTopology)
if err != nil {
klog.Error(log("registrationHandler.RegisterPlugin failed at AddNodeInfo: %v", err))
if unregErr := unregisterDriver(pluginName); unregErr != nil {
klog.Error(log("registrationHandler.RegisterPlugin failed to unregister plugin due to previous error: %v", unregErr))
return unregErr
}
return err
}
return nil
}
func (h *RegistrationHandler) validateVersions(callerName, pluginName string, endpoint string, versions []string) (*utilversion.Version, error) {
if len(versions) == 0 {
err := fmt.Errorf("%s for CSI driver %q failed. Plugin returned an empty list for supported versions", callerName, pluginName)
klog.Error(err)
return nil, err
}
// Validate version
newDriverHighestVersion, err := highestSupportedVersion(versions)
if err != nil {
err := fmt.Errorf("%s for CSI driver %q failed. None of the versions specified %q are supported. err=%v", callerName, pluginName, versions, err)
klog.Error(err)
return nil, err
}
// Check for existing drivers with the same name
var existingDriver csiDriver
driverExists := false
func() {
csiDrivers.RLock()
defer csiDrivers.RUnlock()
existingDriver, driverExists = csiDrivers.driversMap[pluginName]
}()
if driverExists {
if !existingDriver.highestSupportedVersion.LessThan(newDriverHighestVersion) {
err := fmt.Errorf("%s for CSI driver %q failed. Another driver with the same name is already registered with a higher supported version: %q", callerName, pluginName, existingDriver.highestSupportedVersion)
klog.Error(err)
return nil, err
}
}
return newDriverHighestVersion, nil
}
// DeRegisterPlugin is called when a plugin removed its socket, signaling
// it is no longer available
func (h *RegistrationHandler) DeRegisterPlugin(pluginName string) {
klog.V(4).Info(log("registrationHandler.DeRegisterPlugin request for plugin %s", pluginName))
if err := unregisterDriver(pluginName); err != nil {
klog.Error(log("registrationHandler.DeRegisterPlugin failed: %v", err))
}
}
func (p *csiPlugin) Init(host volume.VolumeHost) error {
glog.Info(log("plugin initializing..."))
p.host = host
if utilfeature.DefaultFeatureGate.Enabled(features.CSIDriverRegistry) {
csiClient := host.GetCSIClient()
if csiClient == nil {
klog.Warning("The client for CSI Custom Resources is not available, skipping informer initialization")
} else {
// Start informer for CSIDrivers.
factory := csiapiinformer.NewSharedInformerFactory(csiClient, csiResyncPeriod)
p.csiDriverInformer = factory.Csi().V1alpha1().CSIDrivers()
p.csiDriverLister = p.csiDriverInformer.Lister()
go factory.Start(wait.NeverStop)
}
}
// Initializing csiDrivers map and label management channels
csiDrivers = csiDriversStore{driversMap: map[string]csiDriver{}}
lm = labelmanager.NewLabelManager(host.GetNodeName(), host.GetKubeClient())
nim = nodeinfomanager.NewNodeInfoManager(host.GetNodeName(), host)
// TODO(#70514) Init CSINodeInfo object if the CRD exists and create Driver
// objects for migrated drivers.
return nil
}
@ -127,7 +265,7 @@ func (p *csiPlugin) GetPluginName() string {
func (p *csiPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
csi, err := getCSISourceFromSpec(spec)
if err != nil {
glog.Error(log("plugin.GetVolumeName failed to extract volume source from spec: %v", err))
klog.Error(log("plugin.GetVolumeName failed to extract volume source from spec: %v", err))
return "", err
}
@ -160,11 +298,14 @@ func (p *csiPlugin) NewMounter(
k8s := p.host.GetKubeClient()
if k8s == nil {
glog.Error(log("failed to get a kubernetes client"))
klog.Error(log("failed to get a kubernetes client"))
return nil, errors.New("failed to get a Kubernetes client")
}
csi := newCsiDriverClient(pvSource.Driver)
csi, err := newCsiDriverClient(csiDriverName(pvSource.Driver))
if err != nil {
return nil, err
}
mounter := &csiMountMgr{
plugin: p,
@ -172,7 +313,7 @@ func (p *csiPlugin) NewMounter(
spec: spec,
pod: pod,
podUID: pod.UID,
driverName: pvSource.Driver,
driverName: csiDriverName(pvSource.Driver),
volumeID: pvSource.VolumeHandle,
specVolumeID: spec.Name(),
csiClient: csi,
@ -184,10 +325,10 @@ func (p *csiPlugin) NewMounter(
dataDir := path.Dir(dir) // dropoff /mount at end
if err := os.MkdirAll(dataDir, 0750); err != nil {
glog.Error(log("failed to create dir %#v: %v", dataDir, err))
klog.Error(log("failed to create dir %#v: %v", dataDir, err))
return nil, err
}
glog.V(4).Info(log("created path successfully [%s]", dataDir))
klog.V(4).Info(log("created path successfully [%s]", dataDir))
// persist volume info data for teardown
node := string(p.host.GetNodeName())
@ -201,21 +342,21 @@ func (p *csiPlugin) NewMounter(
}
if err := saveVolumeData(dataDir, volDataFileName, volData); err != nil {
glog.Error(log("failed to save volume info data: %v", err))
klog.Error(log("failed to save volume info data: %v", err))
if err := os.RemoveAll(dataDir); err != nil {
glog.Error(log("failed to remove dir after error [%s]: %v", dataDir, err))
klog.Error(log("failed to remove dir after error [%s]: %v", dataDir, err))
return nil, err
}
return nil, err
}
glog.V(4).Info(log("mounter created successfully"))
klog.V(4).Info(log("mounter created successfully"))
return mounter, nil
}
func (p *csiPlugin) NewUnmounter(specName string, podUID types.UID) (volume.Unmounter, error) {
glog.V(4).Infof(log("setting up unmounter for [name=%v, podUID=%v]", specName, podUID))
klog.V(4).Infof(log("setting up unmounter for [name=%v, podUID=%v]", specName, podUID))
unmounter := &csiMountMgr{
plugin: p,
@ -228,27 +369,31 @@ func (p *csiPlugin) NewUnmounter(specName string, podUID types.UID) (volume.Unmo
dataDir := path.Dir(dir) // dropoff /mount at end
data, err := loadVolumeData(dataDir, volDataFileName)
if err != nil {
glog.Error(log("unmounter failed to load volume data file [%s]: %v", dir, err))
klog.Error(log("unmounter failed to load volume data file [%s]: %v", dir, err))
return nil, err
}
unmounter.driverName = data[volDataKey.driverName]
unmounter.driverName = csiDriverName(data[volDataKey.driverName])
unmounter.volumeID = data[volDataKey.volHandle]
unmounter.csiClient = newCsiDriverClient(unmounter.driverName)
unmounter.csiClient, err = newCsiDriverClient(unmounter.driverName)
if err != nil {
return nil, err
}
return unmounter, nil
}
func (p *csiPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
glog.V(4).Info(log("plugin.ConstructVolumeSpec [pv.Name=%v, path=%v]", volumeName, mountPath))
klog.V(4).Info(log("plugin.ConstructVolumeSpec [pv.Name=%v, path=%v]", volumeName, mountPath))
volData, err := loadVolumeData(mountPath, volDataFileName)
if err != nil {
glog.Error(log("plugin.ConstructVolumeSpec failed loading volume data using [%s]: %v", mountPath, err))
klog.Error(log("plugin.ConstructVolumeSpec failed loading volume data using [%s]: %v", mountPath, err))
return nil, err
}
glog.V(4).Info(log("plugin.ConstructVolumeSpec extracted [%#v]", volData))
klog.V(4).Info(log("plugin.ConstructVolumeSpec extracted [%#v]", volData))
fsMode := api.PersistentVolumeFilesystem
pv := &api.PersistentVolume{
ObjectMeta: meta.ObjectMeta{
Name: volData[volDataKey.specVolID],
@ -260,6 +405,7 @@ func (p *csiPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.S
VolumeHandle: volData[volDataKey.volHandle],
},
},
VolumeMode: &fsMode,
},
}
@ -268,8 +414,11 @@ func (p *csiPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.S
func (p *csiPlugin) SupportsMountOption() bool {
// TODO (vladimirvivien) use CSI VolumeCapability.MountVolume.mount_flags
// to probe for the result for this method:w
return false
// to probe for the result for this method
// (bswartz) Until the CSI spec supports probing, our only option is to
// make plugins register their support for mount options or lack thereof
// directly with kubernetes.
return true
}
func (p *csiPlugin) SupportsBulkVolumeVerification() bool {
@ -279,10 +428,12 @@ func (p *csiPlugin) SupportsBulkVolumeVerification() bool {
// volume.AttachableVolumePlugin methods
var _ volume.AttachableVolumePlugin = &csiPlugin{}
var _ volume.DeviceMountableVolumePlugin = &csiPlugin{}
func (p *csiPlugin) NewAttacher() (volume.Attacher, error) {
k8s := p.host.GetKubeClient()
if k8s == nil {
glog.Error(log("unable to get kubernetes client from host"))
klog.Error(log("unable to get kubernetes client from host"))
return nil, errors.New("unable to get Kubernetes client")
}
@ -293,10 +444,14 @@ func (p *csiPlugin) NewAttacher() (volume.Attacher, error) {
}, nil
}
func (p *csiPlugin) NewDeviceMounter() (volume.DeviceMounter, error) {
return p.NewAttacher()
}
func (p *csiPlugin) NewDetacher() (volume.Detacher, error) {
k8s := p.host.GetKubeClient()
if k8s == nil {
glog.Error(log("unable to get kubernetes client from host"))
klog.Error(log("unable to get kubernetes client from host"))
return nil, errors.New("unable to get Kubernetes client")
}
@ -307,9 +462,13 @@ func (p *csiPlugin) NewDetacher() (volume.Detacher, error) {
}, nil
}
func (p *csiPlugin) NewDeviceUnmounter() (volume.DeviceUnmounter, error) {
return p.NewDetacher()
}
func (p *csiPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
m := p.host.GetMounter(p.GetPluginName())
return mount.GetMountRefs(m, deviceMountPath)
return m.GetMountRefs(deviceMountPath)
}
// BlockVolumePlugin methods
@ -329,12 +488,15 @@ func (p *csiPlugin) NewBlockVolumeMapper(spec *volume.Spec, podRef *api.Pod, opt
return nil, err
}
glog.V(4).Info(log("setting up block mapper for [volume=%v,driver=%v]", pvSource.VolumeHandle, pvSource.Driver))
client := newCsiDriverClient(pvSource.Driver)
klog.V(4).Info(log("setting up block mapper for [volume=%v,driver=%v]", pvSource.VolumeHandle, pvSource.Driver))
client, err := newCsiDriverClient(csiDriverName(pvSource.Driver))
if err != nil {
return nil, err
}
k8s := p.host.GetKubeClient()
if k8s == nil {
glog.Error(log("failed to get a kubernetes client"))
klog.Error(log("failed to get a kubernetes client"))
return nil, errors.New("failed to get a Kubernetes client")
}
@ -343,9 +505,10 @@ func (p *csiPlugin) NewBlockVolumeMapper(spec *volume.Spec, podRef *api.Pod, opt
k8s: k8s,
plugin: p,
volumeID: pvSource.VolumeHandle,
driverName: pvSource.Driver,
driverName: csiDriverName(pvSource.Driver),
readOnly: readOnly,
spec: spec,
specName: spec.Name(),
podUID: podRef.UID,
}
@ -353,10 +516,10 @@ func (p *csiPlugin) NewBlockVolumeMapper(spec *volume.Spec, podRef *api.Pod, opt
dataDir := getVolumeDeviceDataDir(spec.Name(), p.host)
if err := os.MkdirAll(dataDir, 0750); err != nil {
glog.Error(log("failed to create data dir %s: %v", dataDir, err))
klog.Error(log("failed to create data dir %s: %v", dataDir, err))
return nil, err
}
glog.V(4).Info(log("created path successfully [%s]", dataDir))
klog.V(4).Info(log("created path successfully [%s]", dataDir))
// persist volume info data for teardown
node := string(p.host.GetNodeName())
@ -370,9 +533,9 @@ func (p *csiPlugin) NewBlockVolumeMapper(spec *volume.Spec, podRef *api.Pod, opt
}
if err := saveVolumeData(dataDir, volDataFileName, volData); err != nil {
glog.Error(log("failed to save volume info data: %v", err))
klog.Error(log("failed to save volume info data: %v", err))
if err := os.RemoveAll(dataDir); err != nil {
glog.Error(log("failed to remove dir after error [%s]: %v", dataDir, err))
klog.Error(log("failed to remove dir after error [%s]: %v", dataDir, err))
return nil, err
}
return nil, err
@ -386,7 +549,7 @@ func (p *csiPlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (vo
return nil, errors.New("CSIBlockVolume feature not enabled")
}
glog.V(4).Infof(log("setting up block unmapper for [Spec=%v, podUID=%v]", volName, podUID))
klog.V(4).Infof(log("setting up block unmapper for [Spec=%v, podUID=%v]", volName, podUID))
unmapper := &csiBlockMapper{
plugin: p,
podUID: podUID,
@ -397,12 +560,15 @@ func (p *csiPlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (vo
dataDir := getVolumeDeviceDataDir(unmapper.specName, p.host)
data, err := loadVolumeData(dataDir, volDataFileName)
if err != nil {
glog.Error(log("unmapper failed to load volume data file [%s]: %v", dataDir, err))
klog.Error(log("unmapper failed to load volume data file [%s]: %v", dataDir, err))
return nil, err
}
unmapper.driverName = data[volDataKey.driverName]
unmapper.driverName = csiDriverName(data[volDataKey.driverName])
unmapper.volumeID = data[volDataKey.volHandle]
unmapper.csiClient = newCsiDriverClient(unmapper.driverName)
unmapper.csiClient, err = newCsiDriverClient(unmapper.driverName)
if err != nil {
return nil, err
}
return unmapper, nil
}
@ -412,16 +578,16 @@ func (p *csiPlugin) ConstructBlockVolumeSpec(podUID types.UID, specVolName, mapP
return nil, errors.New("CSIBlockVolume feature not enabled")
}
glog.V(4).Infof("plugin.ConstructBlockVolumeSpec [podUID=%s, specVolName=%s, path=%s]", string(podUID), specVolName, mapPath)
klog.V(4).Infof("plugin.ConstructBlockVolumeSpec [podUID=%s, specVolName=%s, path=%s]", string(podUID), specVolName, mapPath)
dataDir := getVolumeDeviceDataDir(specVolName, p.host)
volData, err := loadVolumeData(dataDir, volDataFileName)
if err != nil {
glog.Error(log("plugin.ConstructBlockVolumeSpec failed loading volume data using [%s]: %v", mapPath, err))
klog.Error(log("plugin.ConstructBlockVolumeSpec failed loading volume data using [%s]: %v", mapPath, err))
return nil, err
}
glog.V(4).Info(log("plugin.ConstructBlockVolumeSpec extracted [%#v]", volData))
klog.V(4).Info(log("plugin.ConstructBlockVolumeSpec extracted [%#v]", volData))
blockMode := api.PersistentVolumeBlock
pv := &api.PersistentVolume{
@ -441,3 +607,129 @@ func (p *csiPlugin) ConstructBlockVolumeSpec(podUID types.UID, specVolName, mapP
return volume.NewSpecFromPersistentVolume(pv, false), nil
}
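// skipAttach reports whether the attach (VolumeAttachment) step can be skipped
// for the given driver: only when the CSIDriverRegistry feature is enabled and
// the driver's CSIDriver object explicitly sets AttachRequired=false.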
func (p *csiPlugin) skipAttach(driver string) (bool, error) {
if !utilfeature.DefaultFeatureGate.Enabled(features.CSIDriverRegistry) {
return false, nil
}
if p.csiDriverLister == nil {
return false, errors.New("CSIDriver lister does not exist")
}
csiDriver, err := p.csiDriverLister.Get(driver)
if err != nil {
if apierrs.IsNotFound(err) {
// Don't skip attach if CSIDriver does not exist
return false, nil
}
return false, err
}
if csiDriver.Spec.AttachRequired != nil && *csiDriver.Spec.AttachRequired == false {
return true, nil
}
return false, nil
}
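// getPublishContext returns the attachment metadata to pass to
// NodePublishVolume, or nil when the driver does not require attach.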
func (p *csiPlugin) getPublishContext(client clientset.Interface, handle, driver, nodeName string) (map[string]string, error) {
skip, err := p.skipAttach(driver)
if err != nil {
return nil, err
}
if skip {
return nil, nil
}
attachID := getAttachmentName(handle, driver, nodeName)
// search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
attachment, err := client.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
if err != nil {
return nil, err // This err already has enough context ("VolumeAttachment xyz not found")
}
if attachment == nil {
err = errors.New("no existing VolumeAttachment found")
return nil, err
}
return attachment.Status.AttachmentMetadata, nil
}
func unregisterDriver(driverName string) error {
func() {
csiDrivers.Lock()
defer csiDrivers.Unlock()
delete(csiDrivers.driversMap, driverName)
}()
if err := nim.UninstallCSIDriver(driverName); err != nil {
klog.Errorf("Error uninstalling CSI driver: %v", err)
return err
}
return nil
}
// highestSupportedVersion returns the highest of the reported versions whose major version is at most 1.
func highestSupportedVersion(versions []string) (*utilversion.Version, error) {
if len(versions) == 0 {
return nil, fmt.Errorf("CSI driver reporting empty array for supported versions")
}
// Sort by lowest to highest version
sort.Slice(versions, func(i, j int) bool {
parsedVersionI, err := utilversion.ParseGeneric(versions[i])
if err != nil {
// Push bad values to the bottom
return true
}
parsedVersionJ, err := utilversion.ParseGeneric(versions[j])
if err != nil {
// Push bad values to the bottom
return false
}
return parsedVersionI.LessThan(parsedVersionJ)
})
for i := len(versions) - 1; i >= 0; i-- {
highestSupportedVersion, err := utilversion.ParseGeneric(versions[i])
if err != nil {
return nil, err
}
if highestSupportedVersion.Major() <= 1 {
return highestSupportedVersion, nil
}
}
return nil, fmt.Errorf("None of the CSI versions reported by this driver are supported")
}
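// Illustrative example (not from the original source): for
// []string{"0.3.0", "1.0.0", "2.0.0"} the slice sorts ascending and the scan
// from the top returns 1.0.0, since the 2.x entry exceeds the supported major.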
// Only drivers that implement CSI 0.x are allowed to use deprecated socket dir.
func isDeprecatedSocketDirAllowed(versions []string) bool {
for _, version := range versions {
if isV0Version(version) {
return true
}
}
return false
}
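// For example, {"0.3.0", "1.0.0"} is allowed (a 0.x version is present),
// while {"1.0.0"} alone is not.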
func isV0Version(version string) bool {
parsedVersion, err := utilversion.ParseGeneric(version)
if err != nil {
return false
}
return parsedVersion.Major() == 0
}
func isV1Version(version string) bool {
parsedVersion, err := utilversion.ParseGeneric(version)
if err != nil {
return false
}
return parsedVersion.Major() == 1
}

View File

@ -27,30 +27,36 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
fakeclient "k8s.io/client-go/kubernetes/fake"
utiltesting "k8s.io/client-go/util/testing"
fakecsi "k8s.io/csi-api/pkg/client/clientset/versioned/fake"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
// create a plugin mgr to load plugins and setup a fake client
func newTestPlugin(t *testing.T) (*csiPlugin, string) {
err := utilfeature.DefaultFeatureGate.Set("CSIBlockVolume=true")
if err != nil {
t.Fatalf("Failed to enable feature gate for CSIBlockVolume: %v", err)
}
func newTestPlugin(t *testing.T, client *fakeclient.Clientset, csiClient *fakecsi.Clientset) (*csiPlugin, string) {
tmpDir, err := utiltesting.MkTmpdir("csi-test")
if err != nil {
t.Fatalf("can't create temp dir: %v", err)
}
fakeClient := fakeclient.NewSimpleClientset()
host := volumetest.NewFakeVolumeHost(
if client == nil {
client = fakeclient.NewSimpleClientset()
}
if csiClient == nil {
csiClient = fakecsi.NewSimpleClientset()
}
host := volumetest.NewFakeVolumeHostWithCSINodeName(
tmpDir,
fakeClient,
client,
csiClient,
nil,
"fakeNode",
)
plugMgr := &volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host)
@ -65,6 +71,13 @@ func newTestPlugin(t *testing.T) (*csiPlugin, string) {
t.Fatalf("cannot assert plugin to be type csiPlugin")
}
if utilfeature.DefaultFeatureGate.Enabled(features.CSIDriverRegistry) {
// Wait until the informer in CSI volume plugin has all CSIDrivers.
wait.PollImmediate(testInformerSyncPeriod, testInformerSyncTimeout, func() (bool, error) {
return csiPlug.csiDriverInformer.Informer().HasSynced(), nil
})
}
return csiPlug, tmpDir
}
@ -91,8 +104,20 @@ func makeTestPV(name string, sizeGig int, driverName, volID string) *api.Persist
}
}
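// registerFakePlugin seeds the global csiDrivers map directly, bypassing the
// kubelet plugin watcher, so tests can construct CSI clients for a fake driver.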
func registerFakePlugin(pluginName, endpoint string, versions []string, t *testing.T) {
csiDrivers = csiDriversStore{driversMap: map[string]csiDriver{}}
highestSupportedVersions, err := highestSupportedVersion(versions)
if err != nil {
t.Fatalf("unexpected error parsing versions (%v) for pluginName % q endpoint %q: %#v", versions, pluginName, endpoint, err)
}
csiDrivers.driversMap[pluginName] = csiDriver{driverName: pluginName, driverEndpoint: endpoint, highestSupportedVersion: highestSupportedVersions}
}
func TestPluginGetPluginName(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
if plug.GetPluginName() != "kubernetes.io/csi" {
t.Errorf("unexpected plugin name %v", plug.GetPluginName())
@ -100,7 +125,9 @@ func TestPluginGetPluginName(t *testing.T) {
}
func TestPluginGetVolumeName(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
testCases := []struct {
name string
@ -116,6 +143,7 @@ func TestPluginGetVolumeName(t *testing.T) {
for _, tc := range testCases {
t.Logf("testing: %s", tc.name)
registerFakePlugin(tc.driverName, "endpoint", []string{"0.3.0"}, t)
pv := makeTestPV("test-pv", 10, tc.driverName, tc.volName)
spec := volume.NewSpecFromPersistentVolume(pv, false)
name, err := plug.GetVolumeName(spec)
@ -129,9 +157,12 @@ func TestPluginGetVolumeName(t *testing.T) {
}
func TestPluginCanSupport(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t)
pv := makeTestPV("test-pv", 10, testDriver, testVol)
spec := volume.NewSpecFromPersistentVolume(pv, false)
@ -141,7 +172,9 @@ func TestPluginCanSupport(t *testing.T) {
}
func TestPluginConstructVolumeSpec(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
testCases := []struct {
@ -186,6 +219,14 @@ func TestPluginConstructVolumeSpec(t *testing.T) {
t.Errorf("expected volID %s, got volID %s", tc.data[volDataKey.volHandle], volHandle)
}
if spec.PersistentVolume.Spec.VolumeMode == nil {
t.Fatalf("Volume mode has not been set.")
}
if *spec.PersistentVolume.Spec.VolumeMode != api.PersistentVolumeFilesystem {
t.Errorf("Unexpected volume mode %q", *spec.PersistentVolume.Spec.VolumeMode)
}
if spec.Name() != tc.specVolID {
t.Errorf("Unexpected spec name %s", spec.Name())
}
@ -193,9 +234,12 @@ func TestPluginConstructVolumeSpec(t *testing.T) {
}
func TestPluginNewMounter(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
registerFakePlugin(testDriver, "endpoint", []string{"1.2.0"}, t)
pv := makeTestPV("test-pv", 10, testDriver, testVol)
mounter, err := plug.NewMounter(
volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly),
@ -212,7 +256,7 @@ func TestPluginNewMounter(t *testing.T) {
csiMounter := mounter.(*csiMountMgr)
// validate mounter fields
if csiMounter.driverName != testDriver {
if string(csiMounter.driverName) != testDriver {
t.Error("mounter driver name not set")
}
if csiMounter.volumeID != testVol {
@ -241,9 +285,12 @@ func TestPluginNewMounter(t *testing.T) {
}
func TestPluginNewUnmounter(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t)
pv := makeTestPV("test-pv", 10, testDriver, testVol)
// save the data file to re-create client
@ -286,7 +333,9 @@ func TestPluginNewUnmounter(t *testing.T) {
}
func TestPluginNewAttacher(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
attacher, err := plug.NewAttacher()
@ -304,7 +353,9 @@ func TestPluginNewAttacher(t *testing.T) {
}
func TestPluginNewDetacher(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
detacher, err := plug.NewDetacher()
@ -322,9 +373,12 @@ func TestPluginNewDetacher(t *testing.T) {
}
func TestPluginNewBlockMapper(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t)
pv := makeTestPV("test-block-pv", 10, testDriver, testVol)
mounter, err := plug.NewBlockVolumeMapper(
volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly),
@ -341,7 +395,7 @@ func TestPluginNewBlockMapper(t *testing.T) {
csiMapper := mounter.(*csiBlockMapper)
// validate mounter fields
if csiMapper.driverName != testDriver {
if string(csiMapper.driverName) != testDriver {
t.Error("CSI block mapper missing driver name")
}
if csiMapper.volumeID != testVol {
@ -367,9 +421,12 @@ func TestPluginNewBlockMapper(t *testing.T) {
}
func TestPluginNewUnmapper(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t)
pv := makeTestPV("test-pv", 10, testDriver, testVol)
// save the data file to re-create client
@ -415,7 +472,7 @@ func TestPluginNewUnmapper(t *testing.T) {
}
// test loaded vol data
if csiUnmapper.driverName != testDriver {
if string(csiUnmapper.driverName) != testDriver {
t.Error("unmapper driverName not set")
}
if csiUnmapper.volumeID != testVol {
@ -424,7 +481,9 @@ func TestPluginNewUnmapper(t *testing.T) {
}
func TestPluginConstructBlockVolumeSpec(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
plug, tmpDir := newTestPlugin(t, nil, nil)
defer os.RemoveAll(tmpDir)
testCases := []struct {
@ -463,6 +522,14 @@ func TestPluginConstructBlockVolumeSpec(t *testing.T) {
continue
}
if spec.PersistentVolume.Spec.VolumeMode == nil {
t.Fatalf("Volume mode has not been set.")
}
if *spec.PersistentVolume.Spec.VolumeMode != api.PersistentVolumeBlock {
t.Errorf("Unexpected volume mode %q", *spec.PersistentVolume.Spec.VolumeMode)
}
volHandle := spec.PersistentVolume.Spec.CSI.VolumeHandle
if volHandle != tc.data[volDataKey.volHandle] {
t.Errorf("expected volID %s, got volID %s", tc.data[volDataKey.volHandle], volHandle)
@ -473,3 +540,433 @@ func TestPluginConstructBlockVolumeSpec(t *testing.T) {
}
}
}
func TestValidatePlugin(t *testing.T) {
testCases := []struct {
pluginName string
endpoint string
versions []string
foundInDeprecatedDir bool
shouldFail bool
}{
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins_registry/myplugin/csi.sock",
versions: []string{"v1.0.0"},
foundInDeprecatedDir: false,
shouldFail: false,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins_registry/myplugin/csi.sock",
versions: []string{"0.3.0"},
foundInDeprecatedDir: false,
shouldFail: false,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins_registry/myplugin/csi.sock",
versions: []string{"0.2.0"},
foundInDeprecatedDir: false,
shouldFail: false,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins/myplugin/csi.sock",
versions: []string{"v1.0.0"},
foundInDeprecatedDir: true,
shouldFail: true,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins/myplugin/csi.sock",
versions: []string{"v0.3.0"},
foundInDeprecatedDir: true,
shouldFail: false,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins/myplugin/csi.sock",
versions: []string{"0.2.0"},
foundInDeprecatedDir: true,
shouldFail: false,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins_registry/myplugin/csi.sock",
versions: []string{"0.2.0", "v0.3.0"},
foundInDeprecatedDir: false,
shouldFail: false,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins/myplugin/csi.sock",
versions: []string{"0.2.0", "v0.3.0"},
foundInDeprecatedDir: true,
shouldFail: false,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins_registry/myplugin/csi.sock",
versions: []string{"0.2.0", "v1.0.0"},
foundInDeprecatedDir: false,
shouldFail: false,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins/myplugin/csi.sock",
versions: []string{"0.2.0", "v1.0.0"},
foundInDeprecatedDir: true,
shouldFail: false,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins_registry/myplugin/csi.sock",
versions: []string{"0.2.0", "v1.2.3"},
foundInDeprecatedDir: false,
shouldFail: false,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins/myplugin/csi.sock",
versions: []string{"0.2.0", "v1.2.3"},
foundInDeprecatedDir: true,
shouldFail: false,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins_registry/myplugin/csi.sock",
versions: []string{"v1.2.3", "v0.3.0"},
foundInDeprecatedDir: false,
shouldFail: false,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins/myplugin/csi.sock",
versions: []string{"v1.2.3", "v0.3.0"},
foundInDeprecatedDir: true,
shouldFail: false,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins_registry/myplugin/csi.sock",
versions: []string{"v1.2.3", "v0.3.0", "2.0.1"},
foundInDeprecatedDir: false,
shouldFail: false,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins/myplugin/csi.sock",
versions: []string{"v1.2.3", "v0.3.0", "2.0.1"},
foundInDeprecatedDir: true,
shouldFail: false,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins/myplugin/csi.sock",
versions: []string{"v0.3.0", "2.0.1"},
foundInDeprecatedDir: true,
shouldFail: false,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins_registry/myplugin/csi.sock",
versions: []string{"v1.2.3", "4.9.12", "v0.3.0", "2.0.1"},
foundInDeprecatedDir: false,
shouldFail: false,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins/myplugin/csi.sock",
versions: []string{"v1.2.3", "4.9.12", "v0.3.0", "2.0.1"},
foundInDeprecatedDir: true,
shouldFail: false,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins_registry/myplugin/csi.sock",
versions: []string{"v1.2.3", "boo", "v0.3.0", "2.0.1"},
foundInDeprecatedDir: false,
shouldFail: false,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins/myplugin/csi.sock",
versions: []string{"v1.2.3", "boo", "v0.3.0", "2.0.1"},
foundInDeprecatedDir: true,
shouldFail: false,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins_registry/myplugin/csi.sock",
versions: []string{"4.9.12", "2.0.1"},
foundInDeprecatedDir: false,
shouldFail: true,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins/myplugin/csi.sock",
versions: []string{"4.9.12", "2.0.1"},
foundInDeprecatedDir: true,
shouldFail: true,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins_registry/myplugin/csi.sock",
versions: []string{},
foundInDeprecatedDir: false,
shouldFail: true,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins/myplugin/csi.sock",
versions: []string{},
foundInDeprecatedDir: true,
shouldFail: true,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins_registry/myplugin/csi.sock",
versions: []string{"var", "boo", "foo"},
foundInDeprecatedDir: false,
shouldFail: true,
},
{
pluginName: "test.plugin",
endpoint: "/var/log/kubelet/plugins/myplugin/csi.sock",
versions: []string{"var", "boo", "foo"},
foundInDeprecatedDir: true,
shouldFail: true,
},
}
for _, tc := range testCases {
// Arrange & Act
err := PluginHandler.ValidatePlugin(tc.pluginName, tc.endpoint, tc.versions, tc.foundInDeprecatedDir)
// Assert
if tc.shouldFail && err == nil {
t.Fatalf("expecting ValidatePlugin to fail, but got nil error for testcase: %#v", tc)
}
if !tc.shouldFail && err != nil {
t.Fatalf("unexpected error during ValidatePlugin for testcase: %#v\r\n err:%v", tc, err)
}
}
}
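// The table above encodes two rules (inferred from the cases, not code copied
// from this commit): a driver must advertise at least one parsable version
// with major number 0 or 1, and a driver discovered in the deprecated plugins
// directory must additionally support some 0.x version; drivers that only
// speak CSI 1.x have to register through the new plugins_registry socket path.
// A minimal sketch of that policy, assuming highestSupportedVersion from this
// package and utilversion "k8s.io/apimachinery/pkg/util/version":
func validateVersionsSketch(versions []string, foundInDeprecatedDir bool) error {
	if _, err := highestSupportedVersion(versions); err != nil {
		return err // nothing parsable in the supported 0.x/1.x range
	}
	if !foundInDeprecatedDir {
		return nil
	}
	for _, v := range versions {
		if parsed, err := utilversion.ParseGeneric(v); err == nil && parsed.Major() == 0 {
			return nil // 0.x support keeps the deprecated directory acceptable
		}
	}
	return fmt.Errorf("CSI 1.x-only drivers must register under plugins_registry")
}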
func TestValidatePluginExistingDriver(t *testing.T) {
testCases := []struct {
pluginName1 string
endpoint1 string
versions1 []string
pluginName2 string
endpoint2 string
versions2 []string
foundInDeprecatedDir2 bool
shouldFail bool
}{
{
pluginName1: "test.plugin",
endpoint1: "/var/log/kubelet/plugins_registry/myplugin/csi.sock",
versions1: []string{"v1.0.0"},
pluginName2: "test.plugin2",
endpoint2: "/var/log/kubelet/plugins_registry/myplugin/csi.sock",
versions2: []string{"v1.0.0"},
foundInDeprecatedDir2: false,
shouldFail: false,
},
{
pluginName1: "test.plugin",
endpoint1: "/var/log/kubelet/plugins_registry/myplugin/csi.sock",
versions1: []string{"v1.0.0"},
pluginName2: "test.plugin2",
endpoint2: "/var/log/kubelet/plugins/myplugin/csi.sock",
versions2: []string{"v1.0.0"},
foundInDeprecatedDir2: true,
shouldFail: true,
},
{
pluginName1: "test.plugin",
endpoint1: "/var/log/kubelet/plugins/myplugin/csi.sock",
versions1: []string{"v1.0.0"},
pluginName2: "test.plugin",
endpoint2: "/var/log/kubelet/plugins_registry/myplugin/csi.sock",
versions2: []string{"v1.0.0"},
foundInDeprecatedDir2: false,
shouldFail: true,
},
{
pluginName1: "test.plugin",
endpoint1: "/var/log/kubelet/plugins_registry/myplugin/csi.sock",
versions1: []string{"v1.0.0"},
pluginName2: "test.plugin",
endpoint2: "/var/log/kubelet/plugins_registry/myplugin/csi.sock",
versions2: []string{"v1.0.0"},
foundInDeprecatedDir2: false,
shouldFail: true,
},
{
pluginName1: "test.plugin",
endpoint1: "/var/log/kubelet/plugins_registry/myplugin/csi.sock",
versions1: []string{"v1.0.0"},
pluginName2: "test.plugin",
endpoint2: "/var/log/kubelet/plugins/myplugin/csi.sock",
versions2: []string{"v1.0.0"},
foundInDeprecatedDir2: true,
shouldFail: true,
},
{
pluginName1: "test.plugin",
endpoint1: "/var/log/kubelet/plugins/myplugin/csi.sock",
versions1: []string{"v0.3.0", "0.2.0"},
pluginName2: "test.plugin",
endpoint2: "/var/log/kubelet/plugins_registry/myplugin/csi.sock",
versions2: []string{"1.0.0"},
foundInDeprecatedDir2: false,
shouldFail: false,
},
{
pluginName1: "test.plugin",
endpoint1: "/var/log/kubelet/plugins/myplugin/csi.sock",
versions1: []string{"v0.3.0", "0.2.0"},
pluginName2: "test.plugin",
endpoint2: "/var/log/kubelet/plugins/myplugin/csi.sock",
versions2: []string{"1.0.0"},
foundInDeprecatedDir2: true,
shouldFail: true,
},
}
for _, tc := range testCases {
// Arrange
csiDrivers = csiDriversStore{driversMap: map[string]csiDriver{}}
highestSupportedVersions1, err := highestSupportedVersion(tc.versions1)
if err != nil {
t.Fatalf("unexpected error parsing version for testcase: %#v", tc)
}
csiDrivers.driversMap[tc.pluginName1] = csiDriver{driverName: tc.pluginName1, driverEndpoint: tc.endpoint1, highestSupportedVersion: highestSupportedVersions1}
// Act
err = PluginHandler.ValidatePlugin(tc.pluginName2, tc.endpoint2, tc.versions2, tc.foundInDeprecatedDir2)
// Assert
if tc.shouldFail && err == nil {
t.Fatalf("expecting ValidatePlugin to fail, but got nil error for testcase: %#v", tc)
}
if !tc.shouldFail && err != nil {
t.Fatalf("unexpected error during ValidatePlugin for testcase: %#v\r\n err:%v", tc, err)
}
}
}
func TestHighestSupportedVersion(t *testing.T) {
testCases := []struct {
versions []string
expectedHighestSupportedVersion string
shouldFail bool
}{
{
versions: []string{"v1.0.0"},
expectedHighestSupportedVersion: "1.0.0",
shouldFail: false,
},
{
versions: []string{"0.3.0"},
expectedHighestSupportedVersion: "0.3.0",
shouldFail: false,
},
{
versions: []string{"0.2.0"},
expectedHighestSupportedVersion: "0.2.0",
shouldFail: false,
},
{
versions: []string{"1.0.0"},
expectedHighestSupportedVersion: "1.0.0",
shouldFail: false,
},
{
versions: []string{"v0.3.0"},
expectedHighestSupportedVersion: "0.3.0",
shouldFail: false,
},
{
versions: []string{"0.2.0"},
expectedHighestSupportedVersion: "0.2.0",
shouldFail: false,
},
{
versions: []string{"0.2.0", "v0.3.0"},
expectedHighestSupportedVersion: "0.3.0",
shouldFail: false,
},
{
versions: []string{"0.2.0", "v1.0.0"},
expectedHighestSupportedVersion: "1.0.0",
shouldFail: false,
},
{
versions: []string{"0.2.0", "v1.2.3"},
expectedHighestSupportedVersion: "1.2.3",
shouldFail: false,
},
{
versions: []string{"v1.2.3", "v0.3.0"},
expectedHighestSupportedVersion: "1.2.3",
shouldFail: false,
},
{
versions: []string{"v1.2.3", "v0.3.0", "2.0.1"},
expectedHighestSupportedVersion: "1.2.3",
shouldFail: false,
},
{
versions: []string{"v1.2.3", "4.9.12", "v0.3.0", "2.0.1"},
expectedHighestSupportedVersion: "1.2.3",
shouldFail: false,
},
{
versions: []string{"4.9.12", "2.0.1"},
expectedHighestSupportedVersion: "",
shouldFail: true,
},
{
versions: []string{"v1.2.3", "boo", "v0.3.0", "2.0.1"},
expectedHighestSupportedVersion: "1.2.3",
shouldFail: false,
},
{
versions: []string{},
expectedHighestSupportedVersion: "",
shouldFail: true,
},
{
versions: []string{"var", "boo", "foo"},
expectedHighestSupportedVersion: "",
shouldFail: true,
},
}
for _, tc := range testCases {
// Arrange & Act
actual, err := highestSupportedVersion(tc.versions)
// Assert
if tc.shouldFail && err == nil {
t.Fatalf("expecting highestSupportedVersion to fail, but got nil error for testcase: %#v", tc)
}
if !tc.shouldFail && err != nil {
t.Fatalf("unexpected error during ValidatePlugin for testcase: %#v\r\n err:%v", tc, err)
}
if tc.expectedHighestSupportedVersion != "" {
result, err := actual.Compare(tc.expectedHighestSupportedVersion)
if err != nil {
t.Fatalf("comparison failed with %v for testcase %#v", err, tc)
}
if result != 0 {
t.Fatalf("expectedHighestSupportedVersion %v, but got %v for tc: %#v", tc.expectedHighestSupportedVersion, actual, tc)
}
}
}
}
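// highestSupportedVersion itself lives in csi_plugin.go; the following is a
// behavior-equivalent sketch consistent with the cases above (an illustration,
// not the vendored implementation): unparsable entries are skipped, only
// majors 0 and 1 are eligible, and the highest eligible version wins.
func highestSupportedVersionSketch(versions []string) (*utilversion.Version, error) {
	var highest *utilversion.Version
	for _, v := range versions {
		parsed, err := utilversion.ParseGeneric(v)
		if err != nil || parsed.Major() > 1 {
			continue // skip garbage like "boo" and unsupported 2.x+ releases
		}
		if highest == nil || highest.LessThan(parsed) {
			highest = parsed
		}
	}
	if highest == nil {
		return nil, fmt.Errorf("no supported versions found in %v", versions)
	}
	return highest, nil
}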


@ -22,19 +22,25 @@ import (
"os"
"path"
"github.com/golang/glog"
api "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/klog"
kstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
"time"
)
const (
testInformerSyncPeriod = 100 * time.Millisecond
testInformerSyncTimeout = 30 * time.Second
)
func getCredentialsFromSecret(k8s kubernetes.Interface, secretRef *api.SecretReference) (map[string]string, error) {
credentials := map[string]string{}
secret, err := k8s.CoreV1().Secrets(secretRef.Namespace).Get(secretRef.Name, meta.GetOptions{})
if err != nil {
glog.Errorf("failed to find the secret %s in the namespace %s with error: %v\n", secretRef.Name, secretRef.Namespace, err)
klog.Errorf("failed to find the secret %s in the namespace %s with error: %v\n", secretRef.Name, secretRef.Namespace, err)
return credentials, err
}
for key, value := range secret.Data {
@ -47,18 +53,18 @@ func getCredentialsFromSecret(k8s kubernetes.Interface, secretRef *api.SecretRef
// saveVolumeData persists parameter data as json file at the provided location
func saveVolumeData(dir string, fileName string, data map[string]string) error {
dataFilePath := path.Join(dir, fileName)
glog.V(4).Info(log("saving volume data file [%s]", dataFilePath))
klog.V(4).Info(log("saving volume data file [%s]", dataFilePath))
file, err := os.Create(dataFilePath)
if err != nil {
glog.Error(log("failed to save volume data file %s: %v", dataFilePath, err))
klog.Error(log("failed to save volume data file %s: %v", dataFilePath, err))
return err
}
defer file.Close()
if err := json.NewEncoder(file).Encode(data); err != nil {
glog.Error(log("failed to save volume data file %s: %v", dataFilePath, err))
klog.Error(log("failed to save volume data file %s: %v", dataFilePath, err))
return err
}
glog.V(4).Info(log("volume data file saved successfully [%s]", dataFilePath))
klog.V(4).Info(log("volume data file saved successfully [%s]", dataFilePath))
return nil
}
@ -66,17 +72,17 @@ func saveVolumeData(dir string, fileName string, data map[string]string) error {
func loadVolumeData(dir string, fileName string) (map[string]string, error) {
// remove /mount at the end
dataFileName := path.Join(dir, fileName)
glog.V(4).Info(log("loading volume data file [%s]", dataFileName))
klog.V(4).Info(log("loading volume data file [%s]", dataFileName))
file, err := os.Open(dataFileName)
if err != nil {
glog.Error(log("failed to open volume data file [%s]: %v", dataFileName, err))
klog.Error(log("failed to open volume data file [%s]: %v", dataFileName, err))
return nil, err
}
defer file.Close()
data := map[string]string{}
if err := json.NewDecoder(file).Decode(&data); err != nil {
glog.Error(log("failed to parse volume data file [%s]: %v", dataFileName, err))
klog.Error(log("failed to parse volume data file [%s]: %v", dataFileName, err))
return nil, err
}
@ -121,3 +127,16 @@ func getVolumeDeviceDataDir(specVolID string, host volume.VolumeHost) string {
sanitizedSpecVolID := kstrings.EscapeQualifiedNameForDisk(specVolID)
return path.Join(host.GetVolumeDevicePluginDir(csiPluginName), sanitizedSpecVolID, "data")
}
// hasReadWriteOnce returns true if modes contains v1.ReadWriteOnce
func hasReadWriteOnce(modes []api.PersistentVolumeAccessMode) bool {
if modes == nil {
return false
}
for _, mode := range modes {
if mode == api.ReadWriteOnce {
return true
}
}
return false
}
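// saveVolumeData and loadVolumeData above form a JSON round trip; a minimal
// usage sketch (hypothetical helper, assuming "io/ioutil" and "fmt" are
// imported; the map keys are illustrative):
func exampleVolumeDataRoundTrip() error {
	dir, err := ioutil.TempDir("", "csi-vol-data")
	if err != nil {
		return err
	}
	defer os.RemoveAll(dir)
	in := map[string]string{"driverName": "test.driver", "volumeHandle": "vol-1"}
	if err := saveVolumeData(dir, "vol_data.json", in); err != nil {
		return err
	}
	out, err := loadVolumeData(dir, "vol_data.json")
	if err != nil {
		return err
	}
	if out["volumeHandle"] != "vol-1" {
		return fmt.Errorf("round trip lost data: %v", out)
	}
	return nil
}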

28 vendor/k8s.io/kubernetes/pkg/volume/csi/csiv0/BUILD generated vendored Normal file

@ -0,0 +1,28 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["csi.pb.go"],
importpath = "k8s.io/kubernetes/pkg/volume/csi/csiv0",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/golang/protobuf/proto:go_default_library",
"//vendor/github.com/golang/protobuf/ptypes/wrappers:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

5007 vendor/k8s.io/kubernetes/pkg/volume/csi/csiv0/csi.pb.go generated vendored Normal file

File diff suppressed because it is too large


@ -2,11 +2,14 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["fake_client.go"],
srcs = [
"fake_client.go",
"fake_closer.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/csi/fake",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/container-storage-interface/spec/lib/go/csi/v0:go_default_library",
"//vendor/github.com/container-storage-interface/spec/lib/go/csi:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
],
)


@ -23,7 +23,7 @@ import (
"google.golang.org/grpc"
csipb "github.com/container-storage-interface/spec/lib/go/csi/v0"
csipb "github.com/container-storage-interface/spec/lib/go/csi"
)
// IdentityClient is a CSI identity client used for testing
@ -56,19 +56,26 @@ func (f *IdentityClient) Probe(ctx context.Context, in *csipb.ProbeRequest, opts
return nil, nil
}
type CSIVolume struct {
VolumeContext map[string]string
Path string
MountFlags []string
}
// NodeClient returns CSI node client
type NodeClient struct {
nodePublishedVolumes map[string]string
nodeStagedVolumes map[string]string
nodePublishedVolumes map[string]CSIVolume
nodeStagedVolumes map[string]CSIVolume
stageUnstageSet bool
nodeGetInfoResp *csipb.NodeGetInfoResponse
nextErr error
}
// NewNodeClient returns fake node client
func NewNodeClient(stageUnstageSet bool) *NodeClient {
return &NodeClient{
nodePublishedVolumes: make(map[string]string),
nodeStagedVolumes: make(map[string]string),
nodePublishedVolumes: make(map[string]CSIVolume),
nodeStagedVolumes: make(map[string]CSIVolume),
stageUnstageSet: stageUnstageSet,
}
}
@ -78,18 +85,25 @@ func (f *NodeClient) SetNextError(err error) {
f.nextErr = err
}
func (f *NodeClient) SetNodeGetInfoResp(resp *csipb.NodeGetInfoResponse) {
f.nodeGetInfoResp = resp
}
// GetNodePublishedVolumes returns node published volumes
func (f *NodeClient) GetNodePublishedVolumes() map[string]string {
func (f *NodeClient) GetNodePublishedVolumes() map[string]CSIVolume {
return f.nodePublishedVolumes
}
// GetNodeStagedVolumes returns node staged volumes
func (f *NodeClient) GetNodeStagedVolumes() map[string]string {
func (f *NodeClient) GetNodeStagedVolumes() map[string]CSIVolume {
return f.nodeStagedVolumes
}
func (f *NodeClient) AddNodeStagedVolume(volID, deviceMountPath string) {
f.nodeStagedVolumes[volID] = deviceMountPath
func (f *NodeClient) AddNodeStagedVolume(volID, deviceMountPath string, volumeContext map[string]string) {
f.nodeStagedVolumes[volID] = CSIVolume{
Path: deviceMountPath,
VolumeContext: volumeContext,
}
}
// NodePublishVolume implements CSI NodePublishVolume
@ -110,7 +124,11 @@ func (f *NodeClient) NodePublishVolume(ctx context.Context, req *csipb.NodePubli
if !strings.Contains(fsTypes, fsType) {
return nil, errors.New("invalid fstype")
}
f.nodePublishedVolumes[req.GetVolumeId()] = req.GetTargetPath()
f.nodePublishedVolumes[req.GetVolumeId()] = CSIVolume{
Path: req.GetTargetPath(),
VolumeContext: req.GetVolumeContext(),
MountFlags: req.GetVolumeCapability().GetMount().MountFlags,
}
return &csipb.NodePublishVolumeResponse{}, nil
}
@ -153,7 +171,10 @@ func (f *NodeClient) NodeStageVolume(ctx context.Context, req *csipb.NodeStageVo
return nil, errors.New("invalid fstype")
}
f.nodeStagedVolumes[req.GetVolumeId()] = req.GetStagingTargetPath()
f.nodeStagedVolumes[req.GetVolumeId()] = CSIVolume{
Path: req.GetStagingTargetPath(),
VolumeContext: req.GetVolumeContext(),
}
return &csipb.NodeStageVolumeResponse{}, nil
}
@ -174,9 +195,12 @@ func (f *NodeClient) NodeUnstageVolume(ctx context.Context, req *csipb.NodeUnsta
return &csipb.NodeUnstageVolumeResponse{}, nil
}
// NodeGetId implements method
func (f *NodeClient) NodeGetId(ctx context.Context, in *csipb.NodeGetIdRequest, opts ...grpc.CallOption) (*csipb.NodeGetIdResponse, error) {
return nil, nil
// NodeGetInfo implements csi method
func (f *NodeClient) NodeGetInfo(ctx context.Context, in *csipb.NodeGetInfoRequest, opts ...grpc.CallOption) (*csipb.NodeGetInfoResponse, error) {
if f.nextErr != nil {
return nil, f.nextErr
}
return f.nodeGetInfoResp, nil
}
// NodeGetCapabilities implements csi method
@ -198,6 +222,11 @@ func (f *NodeClient) NodeGetCapabilities(ctx context.Context, in *csipb.NodeGetC
return nil, nil
}
// NodeGetVolumeStats implements csi method
func (f *NodeClient) NodeGetVolumeStats(ctx context.Context, in *csipb.NodeGetVolumeStatsRequest, opts ...grpc.CallOption) (*csipb.NodeGetVolumeStatsResponse, error) {
return nil, nil
}
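// A usage sketch for the fake above (hypothetical, mirroring how the mounter
// and attacher tests consume it): pre-stage a volume, then assert on the
// recorded CSIVolume entry.
func exampleFakeNodeClient() error {
	client := NewNodeClient(true /* stageUnstageSet */)
	client.AddNodeStagedVolume("vol-1", "/staging/path", map[string]string{"attr": "val"})
	if got := client.GetNodeStagedVolumes()["vol-1"]; got.Path != "/staging/path" {
		return errors.New("unexpected staging path")
	}
	return nil
}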
// ControllerClient represents a CSI Controller client
type ControllerClient struct {
nextCapabilities []*csipb.ControllerServiceCapability


@ -0,0 +1,47 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fake
import (
"testing"
)
func NewCloser(t *testing.T) *Closer {
return &Closer{
t: t,
}
}
type Closer struct {
wasCalled bool
t *testing.T
}
func (c *Closer) Close() error {
c.wasCalled = true
return nil
}
func (c *Closer) Check() *Closer {
c.t.Helper()
if !c.wasCalled {
c.t.Error("expected closer to have been called")
}
return c
}
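// A usage sketch (hypothetical): hand the Closer to code under test wherever
// an io.Closer is expected, run the call that is supposed to close it, then
// assert with Check:
//
//	closer := fake.NewCloser(t)
//	runCodeThatShouldClose(closer) // illustrative helper
//	closer.Check()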


@ -1,30 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["labelmanager.go"],
importpath = "k8s.io/kubernetes/pkg/volume/csi/labelmanager",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/util/retry:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@ -1,251 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package labelmanager includes internal functions used to add/delete labels to
// kubernetes nodes for corresponding CSI drivers
package labelmanager // import "k8s.io/kubernetes/pkg/volume/csi/labelmanager"
import (
"encoding/json"
"fmt"
"github.com/golang/glog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/util/retry"
)
const (
// Name of node annotation that contains JSON map of driver names to node IDs
annotationKey = "csi.volume.kubernetes.io/nodeid"
csiPluginName = "kubernetes.io/csi"
)
// labelManagementStruct is struct of channels used for communication between the driver registration
// code and the go routine responsible for managing the node's labels
type labelManagerStruct struct {
nodeName types.NodeName
k8s kubernetes.Interface
}
// Interface implements an interface for managing labels of a node
type Interface interface {
AddLabels(driverName string) error
}
// NewLabelManager initializes labelManagerStruct and returns available interfaces
func NewLabelManager(nodeName types.NodeName, kubeClient kubernetes.Interface) Interface {
return labelManagerStruct{
nodeName: nodeName,
k8s: kubeClient,
}
}
// nodeLabelManager waits for labeling requests initiated by the driver's registration
// process.
func (lm labelManagerStruct) AddLabels(driverName string) error {
err := verifyAndAddNodeId(string(lm.nodeName), lm.k8s.CoreV1().Nodes(), driverName, string(lm.nodeName))
if err != nil {
return fmt.Errorf("failed to update node %s's annotation with error: %+v", lm.nodeName, err)
}
return nil
}
// Clones the given map and returns a new map with the given key and value added.
// Returns the given map, if annotationKey is empty.
func cloneAndAddAnnotation(
annotations map[string]string,
annotationKey,
annotationValue string) map[string]string {
if annotationKey == "" {
// Don't need to add an annotation.
return annotations
}
// Clone.
newAnnotations := map[string]string{}
for key, value := range annotations {
newAnnotations[key] = value
}
newAnnotations[annotationKey] = annotationValue
return newAnnotations
}
func verifyAndAddNodeId(
k8sNodeName string,
k8sNodesClient corev1.NodeInterface,
csiDriverName string,
csiDriverNodeId string) error {
// Add or update annotation on Node object
retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
// Retrieve the latest version of Node before attempting update, so that
// existing changes are not overwritten. RetryOnConflict uses
// exponential backoff to avoid exhausting the apiserver.
result, getErr := k8sNodesClient.Get(k8sNodeName, metav1.GetOptions{})
if getErr != nil {
glog.Errorf("Failed to get latest version of Node: %v", getErr)
return getErr // do not wrap error
}
var previousAnnotationValue string
if result.ObjectMeta.Annotations != nil {
previousAnnotationValue =
result.ObjectMeta.Annotations[annotationKey]
glog.V(3).Infof(
"previousAnnotationValue=%q", previousAnnotationValue)
}
existingDriverMap := map[string]string{}
if previousAnnotationValue != "" {
// Parse previousAnnotationValue as JSON
if err := json.Unmarshal([]byte(previousAnnotationValue), &existingDriverMap); err != nil {
return fmt.Errorf(
"failed to parse node's %q annotation value (%q) err=%v",
annotationKey,
previousAnnotationValue,
err)
}
}
if val, ok := existingDriverMap[csiDriverName]; ok {
if val == csiDriverNodeId {
// Value already exists in node annotation, nothing more to do
glog.V(1).Infof(
"The key value {%q: %q} alredy eixst in node %q annotation, no need to update: %v",
csiDriverName,
csiDriverNodeId,
annotationKey,
previousAnnotationValue)
return nil
}
}
// Add/update annotation value
existingDriverMap[csiDriverName] = csiDriverNodeId
jsonObj, err := json.Marshal(existingDriverMap)
if err != nil {
return fmt.Errorf(
"failed while trying to add key value {%q: %q} to node %q annotation. Existing value: %v",
csiDriverName,
csiDriverNodeId,
annotationKey,
previousAnnotationValue)
}
result.ObjectMeta.Annotations = cloneAndAddAnnotation(
result.ObjectMeta.Annotations,
annotationKey,
string(jsonObj))
_, updateErr := k8sNodesClient.Update(result)
if updateErr == nil {
fmt.Printf(
"Updated node %q successfully for CSI driver %q and CSI node name %q",
k8sNodeName,
csiDriverName,
csiDriverNodeId)
}
return updateErr // do not wrap error
})
if retryErr != nil {
return fmt.Errorf("node update failed: %v", retryErr)
}
return nil
}
// Fetches Kubernetes node API object corresponding to k8sNodeName.
// If the csiDriverName is present in the node annotation, it is removed.
func verifyAndDeleteNodeId(
k8sNodeName string,
k8sNodesClient corev1.NodeInterface,
csiDriverName string) error {
retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
// Retrieve the latest version of Node before attempting update, so that
// existing changes are not overwritten. RetryOnConflict uses
// exponential backoff to avoid exhausting the apiserver.
result, getErr := k8sNodesClient.Get(k8sNodeName, metav1.GetOptions{})
if getErr != nil {
glog.Errorf("failed to get latest version of Node: %v", getErr)
return getErr // do not wrap error
}
var previousAnnotationValue string
if result.ObjectMeta.Annotations != nil {
previousAnnotationValue =
result.ObjectMeta.Annotations[annotationKey]
glog.V(3).Infof(
"previousAnnotationValue=%q", previousAnnotationValue)
}
existingDriverMap := map[string]string{}
if previousAnnotationValue == "" {
// Value does not exist in node annotation, nothing more to do
glog.V(1).Infof(
"The key %q does not exist in node %q annotation, no need to cleanup.",
csiDriverName,
annotationKey)
return nil
}
// Parse previousAnnotationValue as JSON
if err := json.Unmarshal([]byte(previousAnnotationValue), &existingDriverMap); err != nil {
return fmt.Errorf(
"failed to parse node's %q annotation value (%q) err=%v",
annotationKey,
previousAnnotationValue,
err)
}
if _, ok := existingDriverMap[csiDriverName]; !ok {
// Value does not exist in node annotation, nothing more to do
glog.V(1).Infof(
"The key %q does not exist in node %q annotation, no need to cleanup: %v",
csiDriverName,
annotationKey,
previousAnnotationValue)
return nil
}
// Add/update annotation value
delete(existingDriverMap, csiDriverName)
jsonObj, err := json.Marshal(existingDriverMap)
if err != nil {
return fmt.Errorf(
"failed while trying to remove key %q from node %q annotation. Existing data: %v",
csiDriverName,
annotationKey,
previousAnnotationValue)
}
result.ObjectMeta.Annotations = cloneAndAddAnnotation(
result.ObjectMeta.Annotations,
annotationKey,
string(jsonObj))
_, updateErr := k8sNodesClient.Update(result)
if updateErr == nil {
fmt.Printf(
"Updated node %q annotation to remove CSI driver %q.",
k8sNodeName,
csiDriverName)
}
return updateErr // do not wrap error
})
if retryErr != nil {
return fmt.Errorf("node update failed: %v", retryErr)
}
return nil
}

29 vendor/k8s.io/kubernetes/pkg/volume/csi/main_test.go generated vendored Normal file

@ -0,0 +1,29 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package csi
import (
"testing"
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
_ "k8s.io/kubernetes/pkg/features"
)
func TestMain(m *testing.M) {
utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run)
}
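// VerifyFeatureGatesUnchanged fails the whole run if any test leaves
// DefaultFeatureGate modified, which is why tests in this package toggle
// gates through the self-restoring helper, e.g.:
//
//	defer utilfeaturetesting.SetFeatureGateDuringTest(
//		t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()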


@ -0,0 +1,67 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["nodeinfomanager.go"],
importpath = "k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager",
visibility = ["//visibility:public"],
deps = [
"//pkg/features:go_default_library",
"//pkg/util/node:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/apis/csi/v1alpha1:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
go_test(
name = "go_default_test",
srcs = ["nodeinfomanager_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/apis/core/helper:go_default_library",
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/features:go_default_library",
"//pkg/volume/testing:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/testing:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/apis/csi/v1alpha1:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned/fake:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
],
)


@ -0,0 +1,660 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package nodeinfomanager includes internal functions used to add/delete node
// information (node IDs, topology labels, attach limits and CSINodeInfo
// objects) for corresponding CSI drivers
package nodeinfomanager // import "k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager"
import (
"encoding/json"
"fmt"
"strings"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
csiv1alpha1 "k8s.io/csi-api/pkg/apis/csi/v1alpha1"
csiclientset "k8s.io/csi-api/pkg/client/clientset/versioned"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/features"
nodeutil "k8s.io/kubernetes/pkg/util/node"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
)
const (
// Name of node annotation that contains JSON map of driver names to node IDs
annotationKeyNodeID = "csi.volume.kubernetes.io/nodeid"
)
var (
nodeKind = v1.SchemeGroupVersion.WithKind("Node")
updateBackoff = wait.Backoff{
Steps: 4,
Duration: 10 * time.Millisecond,
Factor: 5.0,
Jitter: 0.1,
}
)
// nodeInfoManager contains necessary common dependencies to update node info on both
// the Node and CSINodeInfo objects.
type nodeInfoManager struct {
nodeName types.NodeName
volumeHost volume.VolumeHost
}
// If no update is needed, the function must return the same Node object as the input.
type nodeUpdateFunc func(*v1.Node) (newNode *v1.Node, updated bool, err error)
// Interface implements an interface for managing labels of a node
type Interface interface {
CreateCSINodeInfo() (*csiv1alpha1.CSINodeInfo, error)
// Record in the cluster the given node information from the CSI driver with the given name.
// Concurrent calls to InstallCSIDriver() are allowed, but they should not be intertwined with calls
// to other methods in this interface.
InstallCSIDriver(driverName string, driverNodeID string, maxVolumeLimit int64, topology map[string]string) error
// Remove from the cluster the node information added by the CSI driver with the given name.
// Concurrent calls to UninstallCSIDriver() are allowed, but they should not be intertwined with calls
// to other methods in this interface.
UninstallCSIDriver(driverName string) error
}
// NewNodeInfoManager initializes nodeInfoManager
func NewNodeInfoManager(
nodeName types.NodeName,
volumeHost volume.VolumeHost) Interface {
return &nodeInfoManager{
nodeName: nodeName,
volumeHost: volumeHost,
}
}
// InstallCSIDriver updates the node ID annotation in the Node object and CSIDrivers field in the
// CSINodeInfo object. If the CSINodeInfo object doesn't yet exist, it will be created.
// If multiple calls to InstallCSIDriver() are made in parallel, some calls might receive Node or
// CSINodeInfo update conflicts, which causes the function to retry the corresponding update.
func (nim *nodeInfoManager) InstallCSIDriver(driverName string, driverNodeID string, maxAttachLimit int64, topology map[string]string) error {
if driverNodeID == "" {
return fmt.Errorf("error adding CSI driver node info: driverNodeID must not be empty")
}
nodeUpdateFuncs := []nodeUpdateFunc{
updateNodeIDInNode(driverName, driverNodeID),
}
if utilfeature.DefaultFeatureGate.Enabled(features.CSINodeInfo) {
nodeUpdateFuncs = append(nodeUpdateFuncs, updateTopologyLabels(topology))
}
if utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) {
nodeUpdateFuncs = append(nodeUpdateFuncs, updateMaxAttachLimit(driverName, maxAttachLimit))
}
err := nim.updateNode(nodeUpdateFuncs...)
if err != nil {
return fmt.Errorf("error updating Node object with CSI driver node info: %v", err)
}
if utilfeature.DefaultFeatureGate.Enabled(features.CSINodeInfo) {
err = nim.updateCSINodeInfo(driverName, driverNodeID, topology)
if err != nil {
return fmt.Errorf("error updating CSINodeInfo object with CSI driver node info: %v", err)
}
}
return nil
}
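// A registration-time usage sketch (assumed call site in the kubelet's CSI
// plugin handler; names are illustrative):
//
//	nim := NewNodeInfoManager(host.GetNodeName(), host)
//	if err := nim.InstallCSIDriver(driverName, nodeID, maxVolumeLimit, topology); err != nil {
//		return fmt.Errorf("registering CSI driver %q: %v", driverName, err)
//	}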
// UninstallCSIDriver removes the node ID annotation from the Node object and CSIDrivers field from the
// CSINodeInfo object. If the CSINodeInfo object contains no CSIDrivers, it will be deleted.
// If multiple calls to UninstallCSIDriver() are made in parallel, some calls might receive Node or
// CSINodeInfo update conflicts, which causes the function to retry the corresponding update.
func (nim *nodeInfoManager) UninstallCSIDriver(driverName string) error {
if utilfeature.DefaultFeatureGate.Enabled(features.CSINodeInfo) {
err := nim.uninstallDriverFromCSINodeInfo(driverName)
if err != nil {
return fmt.Errorf("error uninstalling CSI driver from CSINodeInfo object %v", err)
}
}
err := nim.updateNode(
removeMaxAttachLimit(driverName),
removeNodeIDFromNode(driverName),
)
if err != nil {
return fmt.Errorf("error removing CSI driver node info from Node object %v", err)
}
return nil
}
func (nim *nodeInfoManager) updateNode(updateFuncs ...nodeUpdateFunc) error {
var updateErrs []error
err := wait.ExponentialBackoff(updateBackoff, func() (bool, error) {
if err := nim.tryUpdateNode(updateFuncs...); err != nil {
updateErrs = append(updateErrs, err)
return false, nil
}
return true, nil
})
if err != nil {
return fmt.Errorf("error updating node: %v; caused by: %v", err, utilerrors.NewAggregate(updateErrs))
}
return nil
}
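// With updateBackoff defined above (Steps: 4, Duration: 10ms, Factor: 5.0,
// Jitter: 0.1), wait.ExponentialBackoff makes up to four attempts, sleeping
// roughly 10ms, 50ms and 250ms (each inflated by up to 10% jitter) between
// them before giving up with the aggregated errors.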
// tryUpdateNode makes a single attempt to update the corresponding node object,
// modifying it by applying the given update functions sequentially.
// Because updateFuncs are applied sequentially, later updateFuncs should take into account
// the effects of previous updateFuncs to avoid potential conflicts. For example, if multiple
// functions update the same field, updates in the last function are persisted.
func (nim *nodeInfoManager) tryUpdateNode(updateFuncs ...nodeUpdateFunc) error {
// Retrieve the latest version of Node before attempting update, so that
// existing changes are not overwritten.
kubeClient := nim.volumeHost.GetKubeClient()
if kubeClient == nil {
return fmt.Errorf("error getting kube client")
}
nodeClient := kubeClient.CoreV1().Nodes()
originalNode, err := nodeClient.Get(string(nim.nodeName), metav1.GetOptions{})
if err != nil {
return err
}
node := originalNode.DeepCopy()
needUpdate := false
for _, update := range updateFuncs {
newNode, updated, err := update(node)
if err != nil {
return err
}
node = newNode
needUpdate = needUpdate || updated
}
if needUpdate {
// PatchNodeStatus can update both node's status and labels or annotations
// Updating status by directly updating node does not work
_, _, updateErr := nodeutil.PatchNodeStatus(kubeClient.CoreV1(), types.NodeName(node.Name), originalNode, node)
return updateErr
}
return nil
}
// Guarantees the map is non-nil if no error is returned.
func buildNodeIDMapFromAnnotation(node *v1.Node) (map[string]string, error) {
var previousAnnotationValue string
if node.ObjectMeta.Annotations != nil {
previousAnnotationValue =
node.ObjectMeta.Annotations[annotationKeyNodeID]
}
var existingDriverMap map[string]string
if previousAnnotationValue != "" {
// Parse previousAnnotationValue as JSON
if err := json.Unmarshal([]byte(previousAnnotationValue), &existingDriverMap); err != nil {
return nil, fmt.Errorf(
"failed to parse node's %q annotation value (%q) err=%v",
annotationKeyNodeID,
previousAnnotationValue,
err)
}
}
if existingDriverMap == nil {
return make(map[string]string), nil
}
return existingDriverMap, nil
}
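// The annotation parsed here is a JSON object keyed by driver name; for
// example (illustrative driver and node ID values):
//
//	metadata:
//	  annotations:
//	    csi.volume.kubernetes.io/nodeid: '{"ebs.example.com":"i-0abcd1234"}'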
// updateNodeIDInNode returns a function that updates a Node object with the given
// Node ID information.
func updateNodeIDInNode(
csiDriverName string,
csiDriverNodeID string) nodeUpdateFunc {
return func(node *v1.Node) (*v1.Node, bool, error) {
existingDriverMap, err := buildNodeIDMapFromAnnotation(node)
if err != nil {
return nil, false, err
}
if val, ok := existingDriverMap[csiDriverName]; ok {
if val == csiDriverNodeID {
// Value already exists in node annotation, nothing more to do
return node, false, nil
}
}
// Add/update annotation value
existingDriverMap[csiDriverName] = csiDriverNodeID
jsonObj, err := json.Marshal(existingDriverMap)
if err != nil {
return nil, false, fmt.Errorf(
"error while marshalling node ID map updated with driverName=%q, nodeID=%q: %v",
csiDriverName,
csiDriverNodeID,
err)
}
if node.ObjectMeta.Annotations == nil {
node.ObjectMeta.Annotations = make(map[string]string)
}
node.ObjectMeta.Annotations[annotationKeyNodeID] = string(jsonObj)
return node, true, nil
}
}
// removeNodeIDFromNode returns a function that removes node ID information matching the given
// driver name from a Node object.
func removeNodeIDFromNode(csiDriverName string) nodeUpdateFunc {
return func(node *v1.Node) (*v1.Node, bool, error) {
var previousAnnotationValue string
if node.ObjectMeta.Annotations != nil {
previousAnnotationValue =
node.ObjectMeta.Annotations[annotationKeyNodeID]
}
if previousAnnotationValue == "" {
return node, false, nil
}
// Parse previousAnnotationValue as JSON
existingDriverMap := map[string]string{}
if err := json.Unmarshal([]byte(previousAnnotationValue), &existingDriverMap); err != nil {
return nil, false, fmt.Errorf(
"failed to parse node's %q annotation value (%q) err=%v",
annotationKeyNodeID,
previousAnnotationValue,
err)
}
if _, ok := existingDriverMap[csiDriverName]; !ok {
// Value is already missing in node annotation, nothing more to do
return node, false, nil
}
// Delete annotation value
delete(existingDriverMap, csiDriverName)
if len(existingDriverMap) == 0 {
delete(node.ObjectMeta.Annotations, annotationKeyNodeID)
} else {
jsonObj, err := json.Marshal(existingDriverMap)
if err != nil {
return nil, false, fmt.Errorf(
"failed while trying to remove key %q from node %q annotation. Existing data: %v",
csiDriverName,
annotationKeyNodeID,
previousAnnotationValue)
}
node.ObjectMeta.Annotations[annotationKeyNodeID] = string(jsonObj)
}
return node, true, nil
}
}
// updateTopologyLabels returns a function that updates labels of a Node object with the given
// topology information.
func updateTopologyLabels(topology map[string]string) nodeUpdateFunc {
return func(node *v1.Node) (*v1.Node, bool, error) {
if len(topology) == 0 {
return node, false, nil
}
for k, v := range topology {
if curVal, exists := node.Labels[k]; exists && curVal != v {
return nil, false, fmt.Errorf("detected topology value collision: driver reported %q:%q but existing label is %q:%q", k, v, k, curVal)
}
}
if node.Labels == nil {
node.Labels = make(map[string]string)
}
for k, v := range topology {
node.Labels[k] = v
}
return node, true, nil
}
}
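// Example (hypothetical driver-reported topology): a driver returning
// {"topology.example.com/zone": "zone-a"} gets exactly that label set on the
// Node, while a pre-existing label with the same key but a different value
// aborts the update with the collision error above.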
func (nim *nodeInfoManager) updateCSINodeInfo(
driverName string,
driverNodeID string,
topology map[string]string) error {
csiKubeClient := nim.volumeHost.GetCSIClient()
if csiKubeClient == nil {
return fmt.Errorf("error getting CSI client")
}
var updateErrs []error
err := wait.ExponentialBackoff(updateBackoff, func() (bool, error) {
if err := nim.tryUpdateCSINodeInfo(csiKubeClient, driverName, driverNodeID, topology); err != nil {
updateErrs = append(updateErrs, err)
return false, nil
}
return true, nil
})
if err != nil {
return fmt.Errorf("error updating CSINodeInfo: %v; caused by: %v", err, utilerrors.NewAggregate(updateErrs))
}
return nil
}
func (nim *nodeInfoManager) tryUpdateCSINodeInfo(
csiKubeClient csiclientset.Interface,
driverName string,
driverNodeID string,
topology map[string]string) error {
nodeInfo, err := csiKubeClient.CsiV1alpha1().CSINodeInfos().Get(string(nim.nodeName), metav1.GetOptions{})
if nodeInfo == nil || errors.IsNotFound(err) {
nodeInfo, err = nim.CreateCSINodeInfo()
}
if err != nil {
return err
}
return nim.installDriverToCSINodeInfo(nodeInfo, driverName, driverNodeID, topology)
}
func (nim *nodeInfoManager) CreateCSINodeInfo() (*csiv1alpha1.CSINodeInfo, error) {
kubeClient := nim.volumeHost.GetKubeClient()
if kubeClient == nil {
return nil, fmt.Errorf("error getting kube client")
}
csiKubeClient := nim.volumeHost.GetCSIClient()
if csiKubeClient == nil {
return nil, fmt.Errorf("error getting CSI client")
}
node, err := kubeClient.CoreV1().Nodes().Get(string(nim.nodeName), metav1.GetOptions{})
if err != nil {
return nil, err
}
nodeInfo := &csiv1alpha1.CSINodeInfo{
ObjectMeta: metav1.ObjectMeta{
Name: string(nim.nodeName),
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: nodeKind.Version,
Kind: nodeKind.Kind,
Name: node.Name,
UID: node.UID,
},
},
},
Spec: csiv1alpha1.CSINodeInfoSpec{
Drivers: []csiv1alpha1.CSIDriverInfoSpec{},
},
Status: csiv1alpha1.CSINodeInfoStatus{
Drivers: []csiv1alpha1.CSIDriverInfoStatus{},
},
}
return csiKubeClient.CsiV1alpha1().CSINodeInfos().Create(nodeInfo)
}
func (nim *nodeInfoManager) installDriverToCSINodeInfo(
nodeInfo *csiv1alpha1.CSINodeInfo,
driverName string,
driverNodeID string,
topology map[string]string) error {
csiKubeClient := nim.volumeHost.GetCSIClient()
if csiKubeClient == nil {
return fmt.Errorf("error getting CSI client")
}
topologyKeys := make(sets.String)
for k := range topology {
topologyKeys.Insert(k)
}
specModified := true
statusModified := true
// Clone driver list, omitting the driver that matches the given driverName
newDriverSpecs := []csiv1alpha1.CSIDriverInfoSpec{}
for _, driverInfoSpec := range nodeInfo.Spec.Drivers {
if driverInfoSpec.Name == driverName {
if driverInfoSpec.NodeID == driverNodeID &&
sets.NewString(driverInfoSpec.TopologyKeys...).Equal(topologyKeys) {
specModified = false
}
} else {
// Omit driverInfoSpec matching given driverName
newDriverSpecs = append(newDriverSpecs, driverInfoSpec)
}
}
newDriverStatuses := []csiv1alpha1.CSIDriverInfoStatus{}
for _, driverInfoStatus := range nodeInfo.Status.Drivers {
if driverInfoStatus.Name == driverName {
if driverInfoStatus.Available &&
/* TODO(https://github.com/kubernetes/enhancements/issues/625): Add actual migration status */
driverInfoStatus.VolumePluginMechanism == csiv1alpha1.VolumePluginMechanismInTree {
statusModified = false
}
} else {
// Omit driverInfoSpec matching given driverName
newDriverStatuses = append(newDriverStatuses, driverInfoStatus)
}
}
if !specModified && !statusModified {
return nil
}
// Append new driver
driverSpec := csiv1alpha1.CSIDriverInfoSpec{
Name: driverName,
NodeID: driverNodeID,
TopologyKeys: topologyKeys.List(),
}
driverStatus := csiv1alpha1.CSIDriverInfoStatus{
Name: driverName,
Available: true,
// TODO(https://github.com/kubernetes/enhancements/issues/625): Add actual migration status
VolumePluginMechanism: csiv1alpha1.VolumePluginMechanismInTree,
}
newDriverSpecs = append(newDriverSpecs, driverSpec)
newDriverStatuses = append(newDriverStatuses, driverStatus)
nodeInfo.Spec.Drivers = newDriverSpecs
nodeInfo.Status.Drivers = newDriverStatuses
err := validateCSINodeInfo(nodeInfo)
if err != nil {
return err
}
_, err = csiKubeClient.CsiV1alpha1().CSINodeInfos().Update(nodeInfo)
return err
}
func (nim *nodeInfoManager) uninstallDriverFromCSINodeInfo(
csiDriverName string) error {
csiKubeClient := nim.volumeHost.GetCSIClient()
if csiKubeClient == nil {
return fmt.Errorf("error getting CSI client")
}
var updateErrs []error
err := wait.ExponentialBackoff(updateBackoff, func() (bool, error) {
if err := nim.tryUninstallDriverFromCSINodeInfo(csiKubeClient, csiDriverName); err != nil {
updateErrs = append(updateErrs, err)
return false, nil
}
return true, nil
})
if err != nil {
return fmt.Errorf("error updating CSINodeInfo: %v; caused by: %v", err, utilerrors.NewAggregate(updateErrs))
}
return nil
}
func (nim *nodeInfoManager) tryUninstallDriverFromCSINodeInfo(
csiKubeClient csiclientset.Interface,
csiDriverName string) error {
nodeInfoClient := csiKubeClient.CsiV1alpha1().CSINodeInfos()
nodeInfo, err := nodeInfoClient.Get(string(nim.nodeName), metav1.GetOptions{})
if err != nil {
return err // do not wrap error
}
hasModified := false
newDriverStatuses := []csiv1alpha1.CSIDriverInfoStatus{}
for _, driverStatus := range nodeInfo.Status.Drivers {
if driverStatus.Name == csiDriverName {
// Uninstall the driver if we find it
hasModified = driverStatus.Available
driverStatus.Available = false
}
newDriverStatuses = append(newDriverStatuses, driverStatus)
}
nodeInfo.Status.Drivers = newDriverStatuses
if !hasModified {
// No changes, don't update
return nil
}
err = validateCSINodeInfo(nodeInfo)
if err != nil {
return err
}
_, updateErr := nodeInfoClient.Update(nodeInfo)
return updateErr // do not wrap error
}
func updateMaxAttachLimit(driverName string, maxLimit int64) nodeUpdateFunc {
return func(node *v1.Node) (*v1.Node, bool, error) {
if maxLimit <= 0 {
klog.V(4).Infof("skipping adding attach limit for %s", driverName)
return node, false, nil
}
if node.Status.Capacity == nil {
node.Status.Capacity = v1.ResourceList{}
}
if node.Status.Allocatable == nil {
node.Status.Allocatable = v1.ResourceList{}
}
limitKeyName := util.GetCSIAttachLimitKey(driverName)
node.Status.Capacity[v1.ResourceName(limitKeyName)] = *resource.NewQuantity(maxLimit, resource.DecimalSI)
node.Status.Allocatable[v1.ResourceName(limitKeyName)] = *resource.NewQuantity(maxLimit, resource.DecimalSI)
return node, true, nil
}
}
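// The resulting Node status patch looks roughly like this (a sketch; the
// exact key comes from util.GetCSIAttachLimitKey, which builds it from the
// attachable-volumes resource prefix and the driver name):
//
//	status:
//	  capacity:
//	    attachable-volumes-csi-<driver>: <maxLimit>
//	  allocatable:
//	    attachable-volumes-csi-<driver>: <maxLimit>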
func removeMaxAttachLimit(driverName string) nodeUpdateFunc {
return func(node *v1.Node) (*v1.Node, bool, error) {
limitKey := v1.ResourceName(util.GetCSIAttachLimitKey(driverName))
capacityExists := false
if node.Status.Capacity != nil {
_, capacityExists = node.Status.Capacity[limitKey]
}
allocatableExists := false
if node.Status.Allocatable != nil {
_, allocatableExists = node.Status.Allocatable[limitKey]
}
if !capacityExists && !allocatableExists {
return node, false, nil
}
delete(node.Status.Capacity, limitKey)
if len(node.Status.Capacity) == 0 {
node.Status.Capacity = nil
}
delete(node.Status.Allocatable, limitKey)
if len(node.Status.Allocatable) == 0 {
node.Status.Allocatable = nil
}
return node, true, nil
}
}
// validateCSINodeInfo ensures members of CSINodeInfo object satisfies map and set semantics.
// Before calling CSINodeInfoInterface.Update(), validateCSINodeInfo() should be invoked to
// make sure the CSINodeInfo is compliant
func validateCSINodeInfo(nodeInfo *csiv1alpha1.CSINodeInfo) error {
if len(nodeInfo.Status.Drivers) < 1 {
return fmt.Errorf("at least one Driver entry is required in driver statuses")
}
if len(nodeInfo.Spec.Drivers) < 1 {
return fmt.Errorf("at least one Driver entry is required in driver specs")
}
if len(nodeInfo.Status.Drivers) != len(nodeInfo.Spec.Drivers) {
return fmt.Errorf("")
}
// check for duplicate entries for the same driver in statuses
var errs []string
driverNamesInStatuses := make(sets.String)
for _, driverInfo := range nodeInfo.Status.Drivers {
if driverNamesInStatuses.Has(driverInfo.Name) {
errs = append(errs, fmt.Sprintf("duplicate entries found for driver: %s in driver statuses", driverInfo.Name))
}
driverNamesInStatuses.Insert(driverInfo.Name)
}
// check for duplicate entries for the same driver in specs
driverNamesInSpecs := make(sets.String)
for _, driverInfo := range nodeInfo.Spec.Drivers {
if driverNamesInSpecs.Has(driverInfo.Name) {
errs = append(errs, fmt.Sprintf("duplicate entries found for driver: %s in driver specs", driverInfo.Name))
}
driverNamesInSpecs.Insert(driverInfo.Name)
topoKeys := make(sets.String)
for _, key := range driverInfo.TopologyKeys {
if topoKeys.Has(key) {
errs = append(errs, fmt.Sprintf("duplicate topology keys %s found for driver %s in driver specs", key, driverInfo.Name))
}
topoKeys.Insert(key)
}
}
// check all entries in specs and status match
if !driverNamesInSpecs.Equal(driverNamesInStatuses) {
errs = append(errs, fmt.Sprintf("list of drivers in specs: %v does not match list of drivers in statuses: %v", driverNamesInSpecs.List(), driverNamesInStatuses.List()))
}
if len(errs) == 0 {
return nil
}
return fmt.Errorf("%s", strings.Join(errs, ", "))
}

File diff suppressed because it is too large