vendor update for CSI 0.3.0

gman
2018-07-18 16:47:22 +02:00
parent 6f484f92fc
commit 8ea659f0d5
6810 changed files with 438061 additions and 193861 deletions

View File

@ -4,6 +4,7 @@ go_library(
name = "go_default_library",
srcs = [
"csi_attacher.go",
"csi_block.go",
"csi_client.go",
"csi_mounter.go",
"csi_plugin.go",
@ -12,13 +13,14 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/volume/csi",
visibility = ["//visibility:public"],
deps = [
"//pkg/features:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/csi/labelmanager:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/github.com/container-storage-interface/spec/lib/go/csi/v0:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/storage/v1beta1:go_default_library",
@ -26,6 +28,7 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)
@ -34,6 +37,7 @@ go_test(
name = "go_default_test",
srcs = [
"csi_attacher_test.go",
"csi_block_test.go",
"csi_client_test.go",
"csi_mounter_test.go",
"csi_plugin_test.go",
@ -43,15 +47,17 @@ go_test(
"//pkg/volume:go_default_library",
"//pkg/volume/csi/fake:go_default_library",
"//pkg/volume/testing:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/github.com/container-storage-interface/spec/lib/go/csi/v0:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/storage/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
@ -70,6 +76,7 @@ filegroup(
srcs = [
":package-srcs",
"//pkg/volume/csi/fake:all-srcs",
"//pkg/volume/csi/labelmanager:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],

View File

@ -17,6 +17,7 @@ limitations under the License.
package csi
import (
"context"
"crypto/sha256"
"errors"
"fmt"
@ -27,7 +28,6 @@ import (
"time"
"github.com/golang/glog"
grpctx "golang.org/x/net/context"
csipb "github.com/container-storage-interface/spec/lib/go/csi/v0"
"k8s.io/api/core/v1"
@ -102,17 +102,11 @@ func (c *csiAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string
glog.V(4).Info(log("attachment [%v] for volume [%v] created successfully", attachID, csiSource.VolumeHandle))
}
// probe for attachment update here
// NOTE: any error from waiting for attachment is logged only. This is because
// the primary intent of the enclosing method is to create VolumeAttachment.
// DO NOT return that error here as it is mitigated in attacher.WaitForAttach.
volAttachmentOK := true
if _, err := c.waitForVolumeAttachment(csiSource.VolumeHandle, attachID, csiTimeout); err != nil {
volAttachmentOK = false
glog.Error(log("attacher.Attach attempted to wait for attachment to be ready, but failed with: %v", err))
return "", err
}
glog.V(4).Info(log("attacher.Attach finished OK with VolumeAttachment verified=%t: attachment object [%s]", volAttachmentOK, attachID))
glog.V(4).Info(log("attacher.Attach finished OK with VolumeAttachment object [%s]", attachID))
return attachID, nil
}
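getAttachmentName, used above to derive attachID, sits outside this hunk; consistent with the new "crypto/sha256" import above and the call sites in this file, it hashes the volume handle, driver name, and node name into a deterministic VolumeAttachment object name. A minimal sketch, not the vendored source verbatim:

func getAttachmentName(volName, csiDriverName, nodeName string) string {
	// hash the triple so the object name is stable, unique, and length-bounded
	result := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", volName, csiDriverName, nodeName)))
	return fmt.Sprintf("csi-%x", result)
}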
@ -251,7 +245,7 @@ func (c *csiAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) {
return deviceMountPath, nil
}
func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) (err error) {
glog.V(4).Infof(log("attacher.MountDevice(%s, %s)", devicePath, deviceMountPath))
mounted, err := isDirMounted(c.plugin, deviceMountPath)
@ -275,31 +269,57 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo
return err
}
if c.csiClient == nil {
if csiSource.Driver == "" {
return fmt.Errorf("attacher.MountDevice failed, driver name is empty")
// Store volume metadata for UnmountDevice. Keep it around even if the
// driver does not support NodeStage, UnmountDevice still needs it.
if err = os.MkdirAll(deviceMountPath, 0750); err != nil {
glog.Error(log("attacher.MountDevice failed to create dir %#v: %v", deviceMountPath, err))
return err
}
glog.V(4).Info(log("created target path successfully [%s]", deviceMountPath))
dataDir := filepath.Dir(deviceMountPath)
data := map[string]string{
volDataKey.volHandle: csiSource.VolumeHandle,
volDataKey.driverName: csiSource.Driver,
}
if err = saveVolumeData(dataDir, volDataFileName, data); err != nil {
glog.Error(log("failed to save volume info data: %v", err))
if cleanerr := os.RemoveAll(dataDir); cleanerr != nil {
glog.Error(log("failed to remove dir after error [%s]: %v", dataDir, cleanerr))
}
addr := fmt.Sprintf(csiAddrTemplate, csiSource.Driver)
c.csiClient = newCsiDriverClient("unix", addr)
return err
}
defer func() {
if err != nil {
// clean up metadata
glog.Errorf(log("attacher.MountDevice failed: %v", err))
if err := removeMountDir(c.plugin, deviceMountPath); err != nil {
glog.Error(log("attacher.MountDevice failed to remove mount dir after errir [%s]: %v", deviceMountPath, err))
}
}
}()
if c.csiClient == nil {
c.csiClient = newCsiDriverClient(csiSource.Driver)
}
csi := c.csiClient
ctx, cancel := grpctx.WithTimeout(grpctx.Background(), csiTimeout)
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
defer cancel()
// Check whether "STAGE_UNSTAGE_VOLUME" is set
stageUnstageSet, err := hasStageUnstageCapability(ctx, csi)
if err != nil {
glog.Error(log("attacher.MountDevice failed to check STAGE_UNSTAGE_VOLUME: %v", err))
return err
}
if !stageUnstageSet {
glog.Infof(log("attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..."))
// defer does *not* remove the metadata file and it's correct - UnmountDevice needs it there.
return nil
}
// Start MountDevice
if deviceMountPath == "" {
return fmt.Errorf("attacher.MountDevice failed, deviceMountPath is empty")
err = fmt.Errorf("attacher.MountDevice failed, deviceMountPath is empty")
return err
}
nodeName := string(c.plugin.host.GetNodeName())
@ -308,22 +328,24 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo
// search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
attachment, err := c.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
if err != nil {
glog.Error(log("attacher.MountDevice failed while getting volume attachment [id=%v]: %v", attachID, err))
return err
return err // This err already has enough context ("VolumeAttachment xyz not found")
}
if attachment == nil {
glog.Error(log("unable to find VolumeAttachment [id=%s]", attachID))
return errors.New("no existing VolumeAttachment found")
err = errors.New("no existing VolumeAttachment found")
return err
}
publishVolumeInfo := attachment.Status.AttachmentMetadata
// create target_dir before call to NodeStageVolume
if err := os.MkdirAll(deviceMountPath, 0750); err != nil {
glog.Error(log("attacher.MountDevice failed to create dir %#v: %v", deviceMountPath, err))
return err
nodeStageSecrets := map[string]string{}
if csiSource.NodeStageSecretRef != nil {
nodeStageSecrets, err = getCredentialsFromSecret(c.k8s, csiSource.NodeStageSecretRef)
if err != nil {
err = fmt.Errorf("fetching NodeStageSecretRef %s/%s failed: %v",
csiSource.NodeStageSecretRef.Namespace, csiSource.NodeStageSecretRef.Name, err)
return err
}
}
glog.V(4).Info(log("created target path successfully [%s]", deviceMountPath))
//TODO (vladimirvivien) implement better AccessModes mapping between k8s and CSI
accessMode := v1.ReadWriteOnce
@ -332,15 +354,6 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo
}
fsType := csiSource.FSType
if len(fsType) == 0 {
fsType = defaultFSType
}
nodeStageSecrets := map[string]string{}
if csiSource.NodeStageSecretRef != nil {
nodeStageSecrets = getCredentialsFromSecret(c.k8s, csiSource.NodeStageSecretRef)
}
err = csi.NodeStageVolume(ctx,
csiSource.VolumeHandle,
publishVolumeInfo,
@ -351,11 +364,6 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo
csiSource.VolumeAttributes)
if err != nil {
glog.Errorf(log("attacher.MountDevice failed: %v", err))
if err := removeMountDir(c.plugin, deviceMountPath); err != nil {
glog.Error(log("attacher.MountDevice failed to remove mount dir after a NodeStageVolume() error [%s]: %v", deviceMountPath, err))
return err
}
return err
}
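saveVolumeData, called near the top of MountDevice, is defined elsewhere in the plugin. A minimal sketch consistent with its use here and with the JSON files the tests below create (encoding/json and path/filepath assumed imported; illustrative, not the vendored source):

func saveVolumeData(dir string, fileName string, data map[string]string) error {
	dataFile := filepath.Join(dir, fileName)
	file, err := os.Create(dataFile)
	if err != nil {
		return fmt.Errorf("failed to save volume data file %s: %v", dataFile, err)
	}
	defer file.Close()
	// persist the driver name and volume handle as a flat JSON map
	if err := json.NewEncoder(file).Encode(data); err != nil {
		return fmt.Errorf("failed to save volume data file %s: %v", dataFile, err)
	}
	return nil
}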
@ -381,6 +389,11 @@ func (c *csiAttacher) Detach(volumeName string, nodeName types.NodeName) error {
volID := parts[1]
attachID := getAttachmentName(volID, driverName, string(nodeName))
if err := c.k8s.StorageV1beta1().VolumeAttachments().Delete(attachID, nil); err != nil {
if apierrs.IsNotFound(err) {
// object deleted or never existed, done
glog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] not found, object deleted", attachID, volID))
return nil
}
glog.Error(log("detacher.Detach failed to delete VolumeAttachment [%s]: %v", attachID, err))
return err
}
@ -463,19 +476,29 @@ func (c *csiAttacher) UnmountDevice(deviceMountPath string) error {
glog.V(4).Info(log("attacher.UnmountDevice(%s)", deviceMountPath))
// Setup
driverName, volID, err := getDriverAndVolNameFromDeviceMountPath(c.k8s, deviceMountPath)
if err != nil {
glog.Errorf(log("attacher.UnmountDevice failed to get driver and volume name from device mount path: %v", err))
return err
var driverName, volID string
dataDir := filepath.Dir(deviceMountPath)
data, err := loadVolumeData(dataDir, volDataFileName)
if err == nil {
driverName = data[volDataKey.driverName]
volID = data[volDataKey.volHandle]
} else {
glog.Error(log("UnmountDevice failed to load volume data file [%s]: %v", dataDir, err))
// The volume might have been mounted by old CSI volume plugin. Fall back to the old behavior: read PV from API server
driverName, volID, err = getDriverAndVolNameFromDeviceMountPath(c.k8s, deviceMountPath)
if err != nil {
glog.Errorf(log("attacher.UnmountDevice failed to get driver and volume name from device mount path: %v", err))
return err
}
}
if c.csiClient == nil {
addr := fmt.Sprintf(csiAddrTemplate, driverName)
c.csiClient = newCsiDriverClient("unix", addr)
c.csiClient = newCsiDriverClient(driverName)
}
csi := c.csiClient
ctx, cancel := grpctx.WithTimeout(grpctx.Background(), csiTimeout)
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
defer cancel()
// Check whether "STAGE_UNSTAGE_VOLUME" is set
stageUnstageSet, err := hasStageUnstageCapability(ctx, csi)
@ -485,6 +508,11 @@ func (c *csiAttacher) UnmountDevice(deviceMountPath string) error {
}
if !stageUnstageSet {
glog.Infof(log("attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice..."))
// Just delete the global directory + json file
if err := removeMountDir(c.plugin, deviceMountPath); err != nil {
return fmt.Errorf("failed to clean up gloubal mount %s: %s", dataDir, err)
}
return nil
}
@ -498,11 +526,16 @@ func (c *csiAttacher) UnmountDevice(deviceMountPath string) error {
return err
}
// Delete the global directory + json file
if err := removeMountDir(c.plugin, deviceMountPath); err != nil {
return fmt.Errorf("failed to clean up gloubal mount %s: %s", dataDir, err)
}
glog.V(4).Infof(log("attacher.UnmountDevice successfully requested NodeStageVolume [%s]", deviceMountPath))
return nil
}
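loadVolumeData is the read-side counterpart of saveVolumeData and is likewise defined elsewhere. A sketch assuming the flat JSON map shown in the tests below (encoding/json and path/filepath assumed imported):

func loadVolumeData(dir string, fileName string) (map[string]string, error) {
	dataFile := filepath.Join(dir, fileName)
	file, err := os.Open(dataFile)
	if err != nil {
		return nil, fmt.Errorf("failed to open volume data file [%s]: %v", dataFile, err)
	}
	defer file.Close()
	// the file holds exactly the map that saveVolumeData wrote
	data := map[string]string{}
	if err := json.NewDecoder(file).Decode(&data); err != nil {
		return nil, fmt.Errorf("failed to parse volume data file [%s]: %v", dataFile, err)
	}
	return data, nil
}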
func hasStageUnstageCapability(ctx grpctx.Context, csi csiClient) (bool, error) {
func hasStageUnstageCapability(ctx context.Context, csi csiClient) (bool, error) {
capabilities, err := csi.NodeGetCapabilities(ctx)
if err != nil {
return false, err
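The remainder of hasStageUnstageCapability falls outside this diff's context lines; a conclusion consistent with its callers would scan the reported capabilities for STAGE_UNSTAGE_VOLUME, roughly:

for _, capability := range capabilities {
	// generated getters are nil-safe, so a capability without an Rpc field is skipped
	if capability.GetRpc().GetType() == csipb.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME {
		return true, nil
	}
}
return false, nil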

View File

@ -18,6 +18,7 @@ package csi
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
@ -26,13 +27,13 @@ import (
storage "k8s.io/api/storage/v1beta1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/watch"
fakeclient "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/csi/fake"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
@ -59,12 +60,13 @@ func makeTestAttachment(attachID, nodeName, pvName string) *storage.VolumeAttach
func TestAttacherAttach(t *testing.T) {
testCases := []struct {
name string
nodeName string
driverName string
volumeName string
attachID string
shouldFail bool
name string
nodeName string
driverName string
volumeName string
attachID string
injectAttacherError bool
shouldFail bool
}{
{
name: "test ok 1",
@ -104,13 +106,22 @@ func TestAttacherAttach(t *testing.T) {
attachID: getAttachmentName("vol02", "driver02", "node02"),
shouldFail: true,
},
{
name: "attacher error",
nodeName: "node02",
driverName: "driver02",
volumeName: "vol02",
attachID: getAttachmentName("vol02", "driver02", "node02"),
injectAttacherError: true,
shouldFail: true,
},
}
// attacher loop
for i, tc := range testCases {
t.Logf("test case: %s", tc.name)
plug, fakeWatcher, tmpDir := newTestWatchPlugin(t)
plug, fakeWatcher, tmpDir, _ := newTestWatchPlugin(t)
defer os.RemoveAll(tmpDir)
attacher, err := plug.NewAttacher()
@ -127,6 +138,9 @@ func TestAttacherAttach(t *testing.T) {
if !fail && err != nil {
t.Errorf("expecting no failure, but got err: %v", err)
}
if fail && err == nil {
t.Errorf("expecting failure, but got no err")
}
if attachID != id && !fail {
t.Errorf("expecting attachID %v, got %v", id, attachID)
}
@ -154,7 +168,14 @@ func TestAttacherAttach(t *testing.T) {
if attach == nil {
t.Logf("attachment not found for id:%v", tc.attachID)
} else {
attach.Status.Attached = true
if tc.injectAttacherError {
attach.Status.Attached = false
attach.Status.AttachError = &storage.VolumeError{
Message: "attacher error",
}
} else {
attach.Status.Attached = true
}
_, err = csiAttacher.k8s.StorageV1beta1().VolumeAttachments().Update(attach)
if err != nil {
t.Error(err)
@ -165,17 +186,7 @@ func TestAttacherAttach(t *testing.T) {
}
func TestAttacherWaitForVolumeAttachment(t *testing.T) {
plug, fakeWatcher, tmpDir := newTestWatchPlugin(t)
defer os.RemoveAll(tmpDir)
attacher, err := plug.NewAttacher()
if err != nil {
t.Fatalf("failed to create new attacher: %v", err)
}
csiAttacher := attacher.(*csiAttacher)
nodeName := "test-node"
testCases := []struct {
name string
initAttached bool
@ -183,21 +194,18 @@ func TestAttacherWaitForVolumeAttachment(t *testing.T) {
trigerWatchEventTime time.Duration
initAttachErr *storage.VolumeError
finalAttachErr *storage.VolumeError
sleepTime time.Duration
timeout time.Duration
shouldFail bool
}{
{
name: "attach success at get",
initAttached: true,
sleepTime: 10 * time.Millisecond,
timeout: 50 * time.Millisecond,
shouldFail: false,
},
{
name: "attachment error ant get",
initAttachErr: &storage.VolumeError{Message: "missing volume"},
sleepTime: 10 * time.Millisecond,
timeout: 30 * time.Millisecond,
shouldFail: true,
},
@ -207,7 +215,6 @@ func TestAttacherWaitForVolumeAttachment(t *testing.T) {
finalAttached: true,
trigerWatchEventTime: 5 * time.Millisecond,
timeout: 50 * time.Millisecond,
sleepTime: 5 * time.Millisecond,
shouldFail: false,
},
{
@ -216,7 +223,6 @@ func TestAttacherWaitForVolumeAttachment(t *testing.T) {
finalAttached: false,
finalAttachErr: &storage.VolumeError{Message: "missing volume"},
trigerWatchEventTime: 5 * time.Millisecond,
sleepTime: 10 * time.Millisecond,
timeout: 30 * time.Millisecond,
shouldFail: true,
},
@ -226,13 +232,19 @@ func TestAttacherWaitForVolumeAttachment(t *testing.T) {
finalAttached: true,
trigerWatchEventTime: 100 * time.Millisecond,
timeout: 50 * time.Millisecond,
sleepTime: 5 * time.Millisecond,
shouldFail: true,
},
}
for i, tc := range testCases {
fakeWatcher.Reset()
plug, fakeWatcher, tmpDir, _ := newTestWatchPlugin(t)
defer os.RemoveAll(tmpDir)
attacher, err := plug.NewAttacher()
if err != nil {
t.Fatalf("failed to create new attacher: %v", err)
}
csiAttacher := attacher.(*csiAttacher)
t.Logf("running test: %v", tc.name)
pvName := fmt.Sprintf("test-pv-%d", i)
volID := fmt.Sprintf("test-vol-%d", i)
@ -240,18 +252,21 @@ func TestAttacherWaitForVolumeAttachment(t *testing.T) {
attachment := makeTestAttachment(attachID, nodeName, pvName)
attachment.Status.Attached = tc.initAttached
attachment.Status.AttachError = tc.initAttachErr
csiAttacher.waitSleepTime = tc.sleepTime
_, err := csiAttacher.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
_, err = csiAttacher.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
if err != nil {
t.Fatalf("failed to attach: %v", err)
}
trigerWatchEventTime := tc.trigerWatchEventTime
finalAttached := tc.finalAttached
finalAttachErr := tc.finalAttachErr
// after timeout, fakeWatcher will be closed by csiAttacher.waitForVolumeAttachment
if tc.trigerWatchEventTime > 0 && tc.trigerWatchEventTime < tc.timeout {
go func() {
time.Sleep(tc.trigerWatchEventTime)
attachment.Status.Attached = tc.finalAttached
attachment.Status.AttachError = tc.finalAttachErr
time.Sleep(trigerWatchEventTime)
attachment := makeTestAttachment(attachID, nodeName, pvName)
attachment.Status.Attached = finalAttached
attachment.Status.AttachError = finalAttachErr
fakeWatcher.Modify(attachment)
}()
}
@ -337,16 +352,33 @@ func TestAttacherDetach(t *testing.T) {
volID string
attachID string
shouldFail bool
reactor func(action core.Action) (handled bool, ret runtime.Object, err error)
}{
{name: "normal test", volID: "vol-001", attachID: getAttachmentName("vol-001", testDriver, nodeName)},
{name: "normal test 2", volID: "vol-002", attachID: getAttachmentName("vol-002", testDriver, nodeName)},
{name: "object not found", volID: "vol-001", attachID: getAttachmentName("vol-002", testDriver, nodeName), shouldFail: true},
{name: "object not found", volID: "vol-non-existing", attachID: getAttachmentName("vol-003", testDriver, nodeName)},
{
name: "API error",
volID: "vol-004",
attachID: getAttachmentName("vol-004", testDriver, nodeName),
shouldFail: true, // All other API errors should be propagated to caller
reactor: func(action core.Action) (handled bool, ret runtime.Object, err error) {
// return Forbidden to all DELETE requests
if action.Matches("delete", "volumeattachments") {
return true, nil, apierrs.NewForbidden(action.GetResource().GroupResource(), action.GetNamespace(), fmt.Errorf("mock error"))
}
return false, nil, nil
},
},
}
for _, tc := range testCases {
t.Logf("running test: %v", tc.name)
plug, fakeWatcher, tmpDir := newTestWatchPlugin(t)
plug, fakeWatcher, tmpDir, client := newTestWatchPlugin(t)
defer os.RemoveAll(tmpDir)
if tc.reactor != nil {
client.PrependReactor("*", "*", tc.reactor)
}
attacher, err0 := plug.NewAttacher()
if err0 != nil {
@ -391,7 +423,7 @@ func TestAttacherDetach(t *testing.T) {
func TestAttacherGetDeviceMountPath(t *testing.T) {
// Setup
// Create a new attacher
plug, _, tmpDir := newTestWatchPlugin(t)
plug, _, tmpDir, _ := newTestWatchPlugin(t)
defer os.RemoveAll(tmpDir)
attacher, err0 := plug.NewAttacher()
if err0 != nil {
@ -474,7 +506,7 @@ func TestAttacherMountDevice(t *testing.T) {
devicePath: "",
deviceMountPath: "path2",
stageUnstageSet: true,
shouldFail: true,
shouldFail: false,
},
{
testName: "no device mount path",
@ -491,10 +523,6 @@ func TestAttacherMountDevice(t *testing.T) {
deviceMountPath: "path2",
stageUnstageSet: false,
},
{
testName: "stage_unstage not set no vars should not fail",
stageUnstageSet: false,
},
}
for _, tc := range testCases {
@ -504,7 +532,7 @@ func TestAttacherMountDevice(t *testing.T) {
// Setup
// Create a new attacher
plug, fakeWatcher, tmpDir := newTestWatchPlugin(t)
plug, fakeWatcher, tmpDir, _ := newTestWatchPlugin(t)
defer os.RemoveAll(tmpDir)
attacher, err0 := plug.NewAttacher()
if err0 != nil {
@ -539,7 +567,7 @@ func TestAttacherMountDevice(t *testing.T) {
if !tc.shouldFail {
t.Errorf("test should not fail, but error occurred: %v", err)
}
return
continue
}
if err == nil && tc.shouldFail {
t.Errorf("test should fail, but no error occurred")
@ -551,8 +579,8 @@ func TestAttacherMountDevice(t *testing.T) {
numStaged = 0
}
cdc := csiAttacher.csiClient.(*csiDriverClient)
staged := cdc.nodeClient.(*fake.NodeClient).GetNodeStagedVolumes()
cdc := csiAttacher.csiClient.(*fakeCsiDriverClient)
staged := cdc.nodeClient.GetNodeStagedVolumes()
if len(staged) != numStaged {
t.Errorf("got wrong number of staged volumes, expecting %v got: %v", numStaged, len(staged))
}
@ -573,42 +601,52 @@ func TestAttacherUnmountDevice(t *testing.T) {
testName string
volID string
deviceMountPath string
jsonFile string
createPV bool
stageUnstageSet bool
shouldFail bool
}{
{
testName: "normal",
testName: "normal, json file exists",
volID: "project/zone/test-vol1",
deviceMountPath: "/tmp/csi-test049507108/plugins/csi/pv/test-pv-name/globalmount",
deviceMountPath: "plugins/csi/pv/test-pv-name/globalmount",
jsonFile: `{"driverName": "csi", "volumeHandle":"project/zone/test-vol1"}`,
createPV: false,
stageUnstageSet: true,
},
{
testName: "no device mount path",
testName: "normal, json file doesn't exist -> use PV",
volID: "project/zone/test-vol1",
deviceMountPath: "",
deviceMountPath: "plugins/csi/pv/test-pv-name/globalmount",
jsonFile: "",
createPV: true,
stageUnstageSet: true,
},
{
testName: "invalid json -> use PV",
volID: "project/zone/test-vol1",
deviceMountPath: "plugins/csi/pv/test-pv-name/globalmount",
jsonFile: `{"driverName"}}`,
createPV: true,
stageUnstageSet: true,
},
{
testName: "no json, no PV.volID",
volID: "",
deviceMountPath: "plugins/csi/pv/test-pv-name/globalmount",
jsonFile: "",
createPV: true,
shouldFail: true,
},
{
testName: "missing part of device mount path",
testName: "no json, no PV",
volID: "project/zone/test-vol1",
deviceMountPath: "/tmp/csi-test049507108/plugins/csi/pv/test-pv-name/globalmount",
deviceMountPath: "plugins/csi/pv/test-pv-name/globalmount",
jsonFile: "",
createPV: false,
stageUnstageSet: true,
shouldFail: true,
},
{
testName: "test volume name mismatch",
volID: "project/zone/test-vol1",
deviceMountPath: "/tmp/csi-test049507108/plugins/csi/pv/test-pv-name/globalmount",
stageUnstageSet: true,
shouldFail: true,
},
{
testName: "stage_unstage not set",
volID: "project/zone/test-vol1",
deviceMountPath: "/tmp/csi-test049507108/plugins/csi/pv/test-pv-name/globalmount",
stageUnstageSet: false,
},
{
testName: "stage_unstage not set no vars should not fail",
stageUnstageSet: false,
@ -619,7 +657,7 @@ func TestAttacherUnmountDevice(t *testing.T) {
t.Logf("Running test case: %s", tc.testName)
// Setup
// Create a new attacher
plug, _, tmpDir := newTestWatchPlugin(t)
plug, _, tmpDir, _ := newTestWatchPlugin(t)
defer os.RemoveAll(tmpDir)
attacher, err0 := plug.NewAttacher()
if err0 != nil {
@ -628,29 +666,45 @@ func TestAttacherUnmountDevice(t *testing.T) {
csiAttacher := attacher.(*csiAttacher)
csiAttacher.csiClient = setupClient(t, tc.stageUnstageSet)
// Add the volume to NodeStagedVolumes
cdc := csiAttacher.csiClient.(*csiDriverClient)
cdc.nodeClient.(*fake.NodeClient).AddNodeStagedVolume(tc.volID, tc.deviceMountPath)
if tc.deviceMountPath != "" {
tc.deviceMountPath = filepath.Join(tmpDir, tc.deviceMountPath)
}
// Make the PV for this object
// Add the volume to NodeStagedVolumes
cdc := csiAttacher.csiClient.(*fakeCsiDriverClient)
cdc.nodeClient.AddNodeStagedVolume(tc.volID, tc.deviceMountPath)
// Make JSON for this object
if tc.deviceMountPath != "" {
if err := os.MkdirAll(tc.deviceMountPath, 0755); err != nil {
t.Fatalf("error creating directory %s: %s", tc.deviceMountPath, err)
}
}
dir := filepath.Dir(tc.deviceMountPath)
// dir is now /var/lib/kubelet/plugins/kubernetes.io/csi/pv/{pvname}
pvName := filepath.Base(dir)
pv := makeTestPV(pvName, 5, "csi", tc.volID)
_, err := csiAttacher.k8s.CoreV1().PersistentVolumes().Create(pv)
if err != nil && !tc.shouldFail {
t.Fatalf("Failed to create PV: %v", err)
if tc.jsonFile != "" {
dataPath := filepath.Join(dir, volDataFileName)
if err := ioutil.WriteFile(dataPath, []byte(tc.jsonFile), 0644); err != nil {
t.Fatalf("error creating %s: %s", dataPath, err)
}
}
if tc.createPV {
// Make the PV for this object
pvName := filepath.Base(dir)
pv := makeTestPV(pvName, 5, "csi", tc.volID)
_, err := csiAttacher.k8s.CoreV1().PersistentVolumes().Create(pv)
if err != nil && !tc.shouldFail {
t.Fatalf("Failed to create PV: %v", err)
}
}
// Run
err = csiAttacher.UnmountDevice(tc.deviceMountPath)
err := csiAttacher.UnmountDevice(tc.deviceMountPath)
// Verify
if err != nil {
if !tc.shouldFail {
t.Errorf("test should not fail, but error occurred: %v", err)
}
return
continue
}
if err == nil && tc.shouldFail {
t.Errorf("test should fail, but no error occurred")
@ -661,7 +715,7 @@ func TestAttacherUnmountDevice(t *testing.T) {
if !tc.stageUnstageSet {
expectedSet = 1
}
staged := cdc.nodeClient.(*fake.NodeClient).GetNodeStagedVolumes()
staged := cdc.nodeClient.GetNodeStagedVolumes()
if len(staged) != expectedSet {
t.Errorf("got wrong number of staged volumes, expecting %v got: %v", expectedSet, len(staged))
}
@ -673,18 +727,30 @@ func TestAttacherUnmountDevice(t *testing.T) {
t.Errorf("could not find expected staged volume: %s", tc.volID)
}
if tc.jsonFile != "" && !tc.shouldFail {
dataPath := filepath.Join(dir, volDataFileName)
if _, err := os.Stat(dataPath); !os.IsNotExist(err) {
if err != nil {
t.Errorf("error checking file %s: %s", dataPath, err)
} else {
t.Errorf("json file %s should not exists, but it does", dataPath)
}
} else {
t.Logf("json file %s was correctly removed", dataPath)
}
}
}
}
// create a plugin mgr to load plugins and setup a fake client
func newTestWatchPlugin(t *testing.T) (*csiPlugin, *watch.FakeWatcher, string) {
func newTestWatchPlugin(t *testing.T) (*csiPlugin, *watch.RaceFreeFakeWatcher, string, *fakeclient.Clientset) {
tmpDir, err := utiltesting.MkTmpdir("csi-test")
if err != nil {
t.Fatalf("can't create temp dir: %v", err)
}
fakeClient := fakeclient.NewSimpleClientset()
fakeWatcher := watch.NewFake()
fakeWatcher := watch.NewRaceFreeFake()
fakeClient.Fake.PrependWatchReactor("*", core.DefaultWatchReactor(fakeWatcher, nil))
fakeClient.Fake.WatchReactionChain = fakeClient.Fake.WatchReactionChain[:1]
host := volumetest.NewFakeVolumeHost(
@ -705,5 +771,5 @@ func newTestWatchPlugin(t *testing.T) (*csiPlugin, *watch.FakeWatcher, string) {
t.Fatalf("cannot assert plugin to be type csiPlugin")
}
return csiPlug, fakeWatcher, tmpDir
return csiPlug, fakeWatcher, tmpDir, fakeClient
}

vendor/k8s.io/kubernetes/pkg/volume/csi/csi_block.go (generated, vendored, new file, 283 lines added)
View File

@ -0,0 +1,283 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package csi
import (
"context"
"errors"
"fmt"
"os"
"path/filepath"
"github.com/golang/glog"
"k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/volume"
)
type csiBlockMapper struct {
k8s kubernetes.Interface
csiClient csiClient
plugin *csiPlugin
driverName string
specName string
volumeID string
readOnly bool
spec *volume.Spec
podUID types.UID
volumeInfo map[string]string
}
var _ volume.BlockVolumeMapper = &csiBlockMapper{}
// GetGlobalMapPath returns a path (on the node) where the devicePath will be symlinked to
// Example: plugins/kubernetes.io/csi/volumeDevices/{volumeID}
func (m *csiBlockMapper) GetGlobalMapPath(spec *volume.Spec) (string, error) {
dir := getVolumeDevicePluginDir(spec.Name(), m.plugin.host)
glog.V(4).Infof(log("blockMapper.GetGlobalMapPath = %s", dir))
return dir, nil
}
// GetPodDeviceMapPath returns pod's device map path and volume name
// path: pods/{podUid}/volumeDevices/kubernetes.io~csi/, {volumeID}
func (m *csiBlockMapper) GetPodDeviceMapPath() (string, string) {
path, specName := m.plugin.host.GetPodVolumeDeviceDir(m.podUID, csiPluginName), m.specName
glog.V(4).Infof(log("blockMapper.GetPodDeviceMapPath = %s", path))
return path, specName
}
// SetUpDevice ensures the device is attached returns path where the device is located.
func (m *csiBlockMapper) SetUpDevice() (string, error) {
if !m.plugin.blockEnabled {
return "", errors.New("CSIBlockVolume feature not enabled")
}
glog.V(4).Infof(log("blockMapper.SetupDevice called"))
if m.spec == nil {
glog.Error(log("blockMapper.Map spec is nil"))
return "", fmt.Errorf("spec is nil")
}
csiSource, err := getCSISourceFromSpec(m.spec)
if err != nil {
glog.Error(log("blockMapper.SetupDevice failed to get CSI persistent source: %v", err))
return "", err
}
globalMapPath, err := m.GetGlobalMapPath(m.spec)
if err != nil {
glog.Error(log("blockMapper.SetupDevice failed to get global map path: %v", err))
return "", err
}
csi := m.csiClient
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
defer cancel()
// Check whether "STAGE_UNSTAGE_VOLUME" is set
stageUnstageSet, err := hasStageUnstageCapability(ctx, csi)
if err != nil {
glog.Error(log("blockMapper.SetupDevice failed to check STAGE_UNSTAGE_VOLUME capability: %v", err))
return "", err
}
if !stageUnstageSet {
glog.Infof(log("blockMapper.SetupDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..."))
return "", nil
}
// Start MountDevice
nodeName := string(m.plugin.host.GetNodeName())
attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, nodeName)
// search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
attachment, err := m.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
if err != nil {
glog.Error(log("blockMapper.SetupDevice failed to get volume attachment [id=%v]: %v", attachID, err))
return "", err
}
if attachment == nil {
glog.Error(log("blockMapper.SetupDevice unable to find VolumeAttachment [id=%s]", attachID))
return "", errors.New("no existing VolumeAttachment found")
}
publishVolumeInfo := attachment.Status.AttachmentMetadata
nodeStageSecrets := map[string]string{}
if csiSource.NodeStageSecretRef != nil {
nodeStageSecrets, err = getCredentialsFromSecret(m.k8s, csiSource.NodeStageSecretRef)
if err != nil {
return "", fmt.Errorf("failed to get NodeStageSecretRef %s/%s: %v",
csiSource.NodeStageSecretRef.Namespace, csiSource.NodeStageSecretRef.Name, err)
}
}
// create globalMapPath before call to NodeStageVolume
if err := os.MkdirAll(globalMapPath, 0750); err != nil {
glog.Error(log("blockMapper.SetupDevice failed to create dir %s: %v", globalMapPath, err))
return "", err
}
glog.V(4).Info(log("blockMapper.SetupDevice created global device map path successfully [%s]", globalMapPath))
//TODO (vladimirvivien) implement better AccessModes mapping between k8s and CSI
accessMode := v1.ReadWriteOnce
if m.spec.PersistentVolume.Spec.AccessModes != nil {
accessMode = m.spec.PersistentVolume.Spec.AccessModes[0]
}
err = csi.NodeStageVolume(ctx,
csiSource.VolumeHandle,
publishVolumeInfo,
globalMapPath,
fsTypeBlockName,
accessMode,
nodeStageSecrets,
csiSource.VolumeAttributes)
if err != nil {
glog.Error(log("blockMapper.SetupDevice failed: %v", err))
if err := os.RemoveAll(globalMapPath); err != nil {
glog.Error(log("blockMapper.SetupDevice failed to remove dir after a NodeStageVolume() error [%s]: %v", globalMapPath, err))
}
return "", err
}
glog.V(4).Infof(log("blockMapper.SetupDevice successfully requested NodeStageVolume [%s]", globalMapPath))
return globalMapPath, nil
}
func (m *csiBlockMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, volumeMapName string, podUID types.UID) error {
if !m.plugin.blockEnabled {
return errors.New("CSIBlockVolume feature not enabled")
}
glog.V(4).Infof(log("blockMapper.MapDevice mapping block device %s", devicePath))
if m.spec == nil {
glog.Error(log("blockMapper.MapDevice spec is nil"))
return fmt.Errorf("spec is nil")
}
csiSource, err := getCSISourceFromSpec(m.spec)
if err != nil {
glog.Error(log("blockMapper.Map failed to get CSI persistent source: %v", err))
return err
}
dir := filepath.Join(volumeMapPath, volumeMapName)
csi := m.csiClient
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
defer cancel()
nodeName := string(m.plugin.host.GetNodeName())
attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, nodeName)
// search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
attachment, err := m.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
if err != nil {
glog.Error(log("blockMapper.MapDevice failed to get volume attachment [id=%v]: %v", attachID, err))
return err
}
if attachment == nil {
glog.Error(log("blockMapper.MapDevice unable to find VolumeAttachment [id=%s]", attachID))
return errors.New("no existing VolumeAttachment found")
}
publishVolumeInfo := attachment.Status.AttachmentMetadata
nodePublishSecrets := map[string]string{}
if csiSource.NodePublishSecretRef != nil {
nodePublishSecrets, err = getCredentialsFromSecret(m.k8s, csiSource.NodePublishSecretRef)
if err != nil {
glog.Errorf("blockMapper.MapDevice failed to get NodePublishSecretRef %s/%s: %v",
csiSource.NodePublishSecretRef.Namespace, csiSource.NodePublishSecretRef.Name, err)
return err
}
}
if err := os.MkdirAll(dir, 0750); err != nil {
glog.Error(log("blockMapper.MapDevice failed to create dir %#v: %v", dir, err))
return err
}
glog.V(4).Info(log("blockMapper.MapDevice created NodePublish path [%s]", dir))
//TODO (vladimirvivien) implement better AccessModes mapping between k8s and CSI
accessMode := v1.ReadWriteOnce
if m.spec.PersistentVolume.Spec.AccessModes != nil {
accessMode = m.spec.PersistentVolume.Spec.AccessModes[0]
}
err = csi.NodePublishVolume(
ctx,
m.volumeID,
m.readOnly,
globalMapPath,
dir,
accessMode,
publishVolumeInfo,
csiSource.VolumeAttributes,
nodePublishSecrets,
fsTypeBlockName,
)
if err != nil {
glog.Errorf(log("blockMapper.MapDevice failed: %v", err))
if err := os.RemoveAll(dir); err != nil {
glog.Error(log("blockMapper.MapDevice failed to remove mount dir after a NodePublish() error [%s]: %v", dir, err))
}
return err
}
return nil
}
var _ volume.BlockVolumeUnmapper = &csiBlockMapper{}
// TearDownDevice removes traces of the SetUpDevice.
func (m *csiBlockMapper) TearDownDevice(globalMapPath, devicePath string) error {
if !m.plugin.blockEnabled {
return errors.New("CSIBlockVolume feature not enabled")
}
glog.V(4).Infof(log("unmapper.TearDownDevice(globalMapPath=%s; devicePath=%s)", globalMapPath, devicePath))
csi := m.csiClient
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
defer cancel()
// unmap global device map path
if err := csi.NodeUnstageVolume(ctx, m.volumeID, globalMapPath); err != nil {
glog.Errorf(log("blockMapper.TearDownDevice failed: %v", err))
return err
}
glog.V(4).Infof(log("blockMapper.TearDownDevice NodeUnstageVolume successfully [%s]", globalMapPath))
// request to remove pod volume map path also
podVolumePath, volumeName := m.GetPodDeviceMapPath()
podVolumeMapPath := filepath.Join(podVolumePath, volumeName)
if err := csi.NodeUnpublishVolume(ctx, m.volumeID, podVolumeMapPath); err != nil {
glog.Error(log("blockMapper.TearDownDevice failed: %v", err))
return err
}
glog.V(4).Infof(log("blockMapper.TearDownDevice NodeUnpublished successfully [%s]", podVolumeMapPath))
return nil
}
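Taken together, the block mapper implements a stage-then-publish lifecycle that TearDownDevice reverses. A condensed sketch of the call order, mirroring the tests in the next file (spec construction and error handling reduced to the minimum; this compresses setup and teardown into one function purely for illustration):

func mapAndUnmapBlockVolume(m *csiBlockMapper, spec *volume.Spec) error {
	// NodeStageVolume: stage the device into the global map path
	devicePath, err := m.SetUpDevice()
	if err != nil {
		return err
	}
	globalMapPath, err := m.GetGlobalMapPath(spec)
	if err != nil {
		return err
	}
	// NodePublishVolume: expose the device under the pod's device map path
	volumeMapPath, volName := m.GetPodDeviceMapPath()
	if err := m.MapDevice(devicePath, globalMapPath, volumeMapPath, volName, m.podUID); err != nil {
		return err
	}
	// NodeUnstageVolume + NodeUnpublishVolume undo both steps
	return m.TearDownDevice(globalMapPath, devicePath)
}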

View File

@ -0,0 +1,264 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package csi
import (
"fmt"
"os"
"path"
"path/filepath"
"testing"
api "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
fakeclient "k8s.io/client-go/kubernetes/fake"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
func TestBlockMapperGetGlobalMapPath(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer os.RemoveAll(tmpDir)
// TODO (vladimirvivien) specName with slashes will not work
testCases := []struct {
name string
specVolumeName string
path string
}{
{
name: "simple specName",
specVolumeName: "spec-0",
path: path.Join(tmpDir, fmt.Sprintf("plugins/kubernetes.io/csi/volumeDevices/%s/%s", "spec-0", "dev")),
},
{
name: "specName with dots",
specVolumeName: "test.spec.1",
path: path.Join(tmpDir, fmt.Sprintf("plugins/kubernetes.io/csi/volumeDevices/%s/%s", "test.spec.1", "dev")),
},
}
for _, tc := range testCases {
t.Logf("test case: %s", tc.name)
pv := makeTestPV(tc.specVolumeName, 10, testDriver, testVol)
spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
mapper, err := plug.NewBlockVolumeMapper(
spec,
&api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
volume.VolumeOptions{},
)
if err != nil {
t.Fatalf("Failed to make a new Mapper: %v", err)
}
csiMapper := mapper.(*csiBlockMapper)
path, err := csiMapper.GetGlobalMapPath(spec)
if err != nil {
t.Errorf("mapper GetGlobalMapPath failed: %v", err)
}
if tc.path != path {
t.Errorf("expecting path %s, got %s", tc.path, path)
}
}
}
func TestBlockMapperSetupDevice(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer os.RemoveAll(tmpDir)
fakeClient := fakeclient.NewSimpleClientset()
host := volumetest.NewFakeVolumeHostWithNodeName(
tmpDir,
fakeClient,
nil,
"fakeNode",
)
plug.host = host
pv := makeTestPV("test-pv", 10, testDriver, testVol)
pvName := pv.GetName()
nodeName := string(plug.host.GetNodeName())
spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
// MapDevice
mapper, err := plug.NewBlockVolumeMapper(
spec,
&api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
volume.VolumeOptions{},
)
if err != nil {
t.Fatalf("failed to create new mapper: %v", err)
}
csiMapper := mapper.(*csiBlockMapper)
csiMapper.csiClient = setupClient(t, true)
attachID := getAttachmentName(csiMapper.volumeID, csiMapper.driverName, string(nodeName))
attachment := makeTestAttachment(attachID, nodeName, pvName)
attachment.Status.Attached = true
_, err = csiMapper.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
if err != nil {
t.Fatalf("failed to setup VolumeAttachment: %v", err)
}
t.Log("created attachement ", attachID)
devicePath, err := csiMapper.SetUpDevice()
if err != nil {
t.Fatalf("mapper failed to SetupDevice: %v", err)
}
globalMapPath, err := csiMapper.GetGlobalMapPath(spec)
if err != nil {
t.Fatalf("mapper failed to GetGlobalMapPath: %v", err)
}
if devicePath != globalMapPath {
t.Fatalf("mapper.SetupDevice returned unexpected path %s instead of %v", devicePath, globalMapPath)
}
vols := csiMapper.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodeStagedVolumes()
if vols[csiMapper.volumeID] != devicePath {
t.Error("csi server may not have received NodePublishVolume call")
}
}
func TestBlockMapperMapDevice(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer os.RemoveAll(tmpDir)
fakeClient := fakeclient.NewSimpleClientset()
host := volumetest.NewFakeVolumeHostWithNodeName(
tmpDir,
fakeClient,
nil,
"fakeNode",
)
plug.host = host
pv := makeTestPV("test-pv", 10, testDriver, testVol)
pvName := pv.GetName()
nodeName := string(plug.host.GetNodeName())
spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
// MapDevice
mapper, err := plug.NewBlockVolumeMapper(
spec,
&api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
volume.VolumeOptions{},
)
if err != nil {
t.Fatalf("failed to create new mapper: %v", err)
}
csiMapper := mapper.(*csiBlockMapper)
csiMapper.csiClient = setupClient(t, true)
attachID := getAttachmentName(csiMapper.volumeID, csiMapper.driverName, string(nodeName))
attachment := makeTestAttachment(attachID, nodeName, pvName)
attachment.Status.Attached = true
_, err = csiMapper.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
if err != nil {
t.Fatalf("failed to setup VolumeAttachment: %v", err)
}
t.Log("created attachement ", attachID)
devicePath, err := csiMapper.SetUpDevice()
if err != nil {
t.Fatalf("mapper failed to SetupDevice: %v", err)
}
globalMapPath, err := csiMapper.GetGlobalMapPath(csiMapper.spec)
if err != nil {
t.Fatalf("mapper failed to GetGlobalMapPath: %v", err)
}
// Map device to global and pod device map path
volumeMapPath, volName := csiMapper.GetPodDeviceMapPath()
err = csiMapper.MapDevice(devicePath, globalMapPath, volumeMapPath, volName, csiMapper.podUID)
if err != nil {
t.Fatalf("mapper failed to GetGlobalMapPath: %v", err)
}
if _, err := os.Stat(filepath.Join(volumeMapPath, volName)); err != nil {
if os.IsNotExist(err) {
t.Errorf("mapper.MapDevice failed, volume path not created: %s", volumeMapPath)
} else {
t.Errorf("mapper.MapDevice failed: %v", err)
}
}
pubs := csiMapper.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodePublishedVolumes()
if pubs[csiMapper.volumeID] != volumeMapPath {
t.Error("csi server may not have received NodePublishVolume call")
}
}
func TestBlockMapperTearDownDevice(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer os.RemoveAll(tmpDir)
fakeClient := fakeclient.NewSimpleClientset()
host := volumetest.NewFakeVolumeHostWithNodeName(
tmpDir,
fakeClient,
nil,
"fakeNode",
)
plug.host = host
pv := makeTestPV("test-pv", 10, testDriver, testVol)
spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
// save volume data
dir := getVolumeDeviceDataDir(pv.ObjectMeta.Name, plug.host)
if err := os.MkdirAll(dir, 0755); err != nil && !os.IsNotExist(err) {
t.Errorf("failed to create dir [%s]: %v", dir, err)
}
if err := saveVolumeData(
dir,
volDataFileName,
map[string]string{
volDataKey.specVolID: pv.ObjectMeta.Name,
volDataKey.driverName: testDriver,
volDataKey.volHandle: testVol,
},
); err != nil {
t.Fatalf("failed to save volume data: %v", err)
}
unmapper, err := plug.NewBlockVolumeUnmapper(pv.ObjectMeta.Name, testPodUID)
if err != nil {
t.Fatalf("failed to make a new Unmapper: %v", err)
}
csiUnmapper := unmapper.(*csiBlockMapper)
csiUnmapper.csiClient = setupClient(t, true)
globalMapPath, err := csiUnmapper.GetGlobalMapPath(spec)
if err != nil {
t.Fatalf("unmapper failed to GetGlobalMapPath: %v", err)
}
err = csiUnmapper.TearDownDevice(globalMapPath, "/dev/test")
if err != nil {
t.Fatal(err)
}
// ensure csi client call and node unstaged
vols := csiUnmapper.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodeStagedVolumes()
if _, ok := vols[csiUnmapper.volumeID]; ok {
t.Error("csi server may not have received NodeUnstageVolume call")
}
// ensure csi client call and node unpublished
pubs := csiUnmapper.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodePublishedVolumes()
if _, ok := pubs[csiUnmapper.volumeID]; ok {
t.Error("csi server may not have received NodeUnpublishVolume call")
}
}

View File

@ -17,20 +17,23 @@ limitations under the License.
package csi
import (
"context"
"errors"
"fmt"
"net"
"time"
csipb "github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/golang/glog"
grpctx "golang.org/x/net/context"
"google.golang.org/grpc"
api "k8s.io/api/core/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features"
)
type csiClient interface {
NodePublishVolume(
ctx grpctx.Context,
ctx context.Context,
volumeid string,
readOnly bool,
stagingTargetPath string,
@ -42,11 +45,11 @@ type csiClient interface {
fsType string,
) error
NodeUnpublishVolume(
ctx grpctx.Context,
ctx context.Context,
volID string,
targetPath string,
) error
NodeStageVolume(ctx grpctx.Context,
NodeStageVolume(ctx context.Context,
volID string,
publishVolumeInfo map[string]string,
stagingTargetPath string,
@ -55,55 +58,25 @@ type csiClient interface {
nodeStageSecrets map[string]string,
volumeAttribs map[string]string,
) error
NodeUnstageVolume(ctx grpctx.Context, volID, stagingTargetPath string) error
NodeGetCapabilities(ctx grpctx.Context) ([]*csipb.NodeServiceCapability, error)
NodeUnstageVolume(ctx context.Context, volID, stagingTargetPath string) error
NodeGetCapabilities(ctx context.Context) ([]*csipb.NodeServiceCapability, error)
}
// csiClient encapsulates all csi-plugin methods
type csiDriverClient struct {
network string
addr string
conn *grpc.ClientConn
idClient csipb.IdentityClient
nodeClient csipb.NodeClient
ctrlClient csipb.ControllerClient
versionAsserted bool
versionSupported bool
publishAsserted bool
publishCapable bool
driverName string
nodeClient csipb.NodeClient
}
func newCsiDriverClient(network, addr string) *csiDriverClient {
return &csiDriverClient{network: network, addr: addr}
}
var _ csiClient = &csiDriverClient{}
// assertConnection ensures a valid connection has been established
// if not, it creates a new connection and associated clients
func (c *csiDriverClient) assertConnection() error {
if c.conn == nil {
conn, err := grpc.Dial(
c.addr,
grpc.WithInsecure(),
grpc.WithDialer(func(target string, timeout time.Duration) (net.Conn, error) {
return net.Dial(c.network, target)
}),
)
if err != nil {
return err
}
c.conn = conn
c.idClient = csipb.NewIdentityClient(conn)
c.nodeClient = csipb.NewNodeClient(conn)
c.ctrlClient = csipb.NewControllerClient(conn)
// set supported version
}
return nil
func newCsiDriverClient(driverName string) *csiDriverClient {
c := &csiDriverClient{driverName: driverName}
return c
}
func (c *csiDriverClient) NodePublishVolume(
ctx grpctx.Context,
ctx context.Context,
volID string,
readOnly bool,
stagingTargetPath string,
@ -121,10 +94,13 @@ func (c *csiDriverClient) NodePublishVolume(
if targetPath == "" {
return errors.New("missing target path")
}
if err := c.assertConnection(); err != nil {
glog.Errorf("%v: failed to assert a connection: %v", csiPluginName, err)
conn, err := newGrpcConn(c.driverName)
if err != nil {
return err
}
defer conn.Close()
nodeClient := csipb.NewNodeClient(conn)
req := &csipb.NodePublishVolumeRequest{
VolumeId: volID,
@ -137,22 +113,29 @@ func (c *csiDriverClient) NodePublishVolume(
AccessMode: &csipb.VolumeCapability_AccessMode{
Mode: asCSIAccessMode(accessMode),
},
AccessType: &csipb.VolumeCapability_Mount{
Mount: &csipb.VolumeCapability_MountVolume{
FsType: fsType,
},
},
},
}
if stagingTargetPath != "" {
req.StagingTargetPath = stagingTargetPath
}
_, err := c.nodeClient.NodePublishVolume(ctx, req)
if fsType == fsTypeBlockName {
req.VolumeCapability.AccessType = &csipb.VolumeCapability_Block{
Block: &csipb.VolumeCapability_BlockVolume{},
}
} else {
req.VolumeCapability.AccessType = &csipb.VolumeCapability_Mount{
Mount: &csipb.VolumeCapability_MountVolume{
FsType: fsType,
},
}
}
_, err = nodeClient.NodePublishVolume(ctx, req)
return err
}
func (c *csiDriverClient) NodeUnpublishVolume(ctx grpctx.Context, volID string, targetPath string) error {
func (c *csiDriverClient) NodeUnpublishVolume(ctx context.Context, volID string, targetPath string) error {
glog.V(4).Info(log("calling NodeUnpublishVolume rpc: [volid=%s, target_path=%s", volID, targetPath))
if volID == "" {
return errors.New("missing volume id")
@ -160,21 +143,24 @@ func (c *csiDriverClient) NodeUnpublishVolume(ctx grpctx.Context, volID string,
if targetPath == "" {
return errors.New("missing target path")
}
if err := c.assertConnection(); err != nil {
glog.Error(log("failed to assert a connection: %v", err))
conn, err := newGrpcConn(c.driverName)
if err != nil {
return err
}
defer conn.Close()
nodeClient := csipb.NewNodeClient(conn)
req := &csipb.NodeUnpublishVolumeRequest{
VolumeId: volID,
TargetPath: targetPath,
}
_, err := c.nodeClient.NodeUnpublishVolume(ctx, req)
_, err = nodeClient.NodeUnpublishVolume(ctx, req)
return err
}
func (c *csiDriverClient) NodeStageVolume(ctx grpctx.Context,
func (c *csiDriverClient) NodeStageVolume(ctx context.Context,
volID string,
publishInfo map[string]string,
stagingTargetPath string,
@ -190,10 +176,13 @@ func (c *csiDriverClient) NodeStageVolume(ctx grpctx.Context,
if stagingTargetPath == "" {
return errors.New("missing staging target path")
}
if err := c.assertConnection(); err != nil {
glog.Errorf("%v: failed to assert a connection: %v", csiPluginName, err)
conn, err := newGrpcConn(c.driverName)
if err != nil {
return err
}
defer conn.Close()
nodeClient := csipb.NewNodeClient(conn)
req := &csipb.NodeStageVolumeRequest{
VolumeId: volID,
@ -203,21 +192,28 @@ func (c *csiDriverClient) NodeStageVolume(ctx grpctx.Context,
AccessMode: &csipb.VolumeCapability_AccessMode{
Mode: asCSIAccessMode(accessMode),
},
AccessType: &csipb.VolumeCapability_Mount{
Mount: &csipb.VolumeCapability_MountVolume{
FsType: fsType,
},
},
},
NodeStageSecrets: nodeStageSecrets,
VolumeAttributes: volumeAttribs,
}
_, err := c.nodeClient.NodeStageVolume(ctx, req)
if fsType == fsTypeBlockName {
req.VolumeCapability.AccessType = &csipb.VolumeCapability_Block{
Block: &csipb.VolumeCapability_BlockVolume{},
}
} else {
req.VolumeCapability.AccessType = &csipb.VolumeCapability_Mount{
Mount: &csipb.VolumeCapability_MountVolume{
FsType: fsType,
},
}
}
_, err = nodeClient.NodeStageVolume(ctx, req)
return err
}
func (c *csiDriverClient) NodeUnstageVolume(ctx grpctx.Context, volID, stagingTargetPath string) error {
func (c *csiDriverClient) NodeUnstageVolume(ctx context.Context, volID, stagingTargetPath string) error {
glog.V(4).Info(log("calling NodeUnstageVolume rpc [volid=%s,staging_target_path=%s]", volID, stagingTargetPath))
if volID == "" {
return errors.New("missing volume id")
@ -225,27 +221,34 @@ func (c *csiDriverClient) NodeUnstageVolume(ctx grpctx.Context, volID, stagingTa
if stagingTargetPath == "" {
return errors.New("missing staging target path")
}
if err := c.assertConnection(); err != nil {
glog.Errorf("%v: failed to assert a connection: %v", csiPluginName, err)
conn, err := newGrpcConn(c.driverName)
if err != nil {
return err
}
defer conn.Close()
nodeClient := csipb.NewNodeClient(conn)
req := &csipb.NodeUnstageVolumeRequest{
VolumeId: volID,
StagingTargetPath: stagingTargetPath,
}
_, err := c.nodeClient.NodeUnstageVolume(ctx, req)
_, err = nodeClient.NodeUnstageVolume(ctx, req)
return err
}
func (c *csiDriverClient) NodeGetCapabilities(ctx grpctx.Context) ([]*csipb.NodeServiceCapability, error) {
func (c *csiDriverClient) NodeGetCapabilities(ctx context.Context) ([]*csipb.NodeServiceCapability, error) {
glog.V(4).Info(log("calling NodeGetCapabilities rpc"))
if err := c.assertConnection(); err != nil {
glog.Errorf("%v: failed to assert a connection: %v", csiPluginName, err)
conn, err := newGrpcConn(c.driverName)
if err != nil {
return nil, err
}
defer conn.Close()
nodeClient := csipb.NewNodeClient(conn)
req := &csipb.NodeGetCapabilitiesRequest{}
resp, err := c.nodeClient.NodeGetCapabilities(ctx, req)
resp, err := nodeClient.NodeGetCapabilities(ctx, req)
if err != nil {
return nil, err
}
@ -263,3 +266,28 @@ func asCSIAccessMode(am api.PersistentVolumeAccessMode) csipb.VolumeCapability_A
}
return csipb.VolumeCapability_AccessMode_UNKNOWN
}
func newGrpcConn(driverName string) (*grpc.ClientConn, error) {
if driverName == "" {
return nil, fmt.Errorf("driver name is empty")
}
addr := fmt.Sprintf(csiAddrTemplate, driverName)
// TODO once KubeletPluginsWatcher graduates to beta, remove FeatureGate check
if utilfeature.DefaultFeatureGate.Enabled(features.KubeletPluginsWatcher) {
driver, ok := csiDrivers.driversMap[driverName]
if !ok {
return nil, fmt.Errorf("driver name %s not found in the list of registered CSI drivers", driverName)
}
addr = driver.driverEndpoint
}
network := "unix"
glog.V(4).Infof(log("creating new gRPC connection for [%s://%s]", network, addr))
return grpc.Dial(
addr,
grpc.WithInsecure(),
grpc.WithDialer(func(target string, timeout time.Duration) (net.Conn, error) {
return net.Dial(network, target)
}),
)
}
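Every RPC wrapper above now follows this dial-per-call pattern instead of caching a connection on the client struct; that keeps csiDriverClient stateless and lets each call pick up an endpoint registered later through the plugin watcher, at the cost of a fresh dial per RPC. A hedged usage sketch, with an illustrative driver name:

conn, err := newGrpcConn("csi-example") // hypothetical driver name
if err != nil {
	return err
}
defer conn.Close()
nodeClient := csipb.NewNodeClient(conn)
// issue exactly one RPC; the deferred Close then tears the connection down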

View File

@ -17,25 +17,128 @@ limitations under the License.
package csi
import (
"context"
"errors"
"testing"
grpctx "golang.org/x/net/context"
"google.golang.org/grpc"
csipb "github.com/container-storage-interface/spec/lib/go/csi/v0"
api "k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/volume/csi/fake"
)
func setupClient(t *testing.T, stageUnstageSet bool) *csiDriverClient {
client := newCsiDriverClient("unix", "/tmp/test.sock")
client.conn = new(grpc.ClientConn) //avoids creating conn object
type fakeCsiDriverClient struct {
t *testing.T
nodeClient *fake.NodeClient
}
// setup mock grpc clients
client.idClient = fake.NewIdentityClient()
client.nodeClient = fake.NewNodeClient(stageUnstageSet)
client.ctrlClient = fake.NewControllerClient()
func newFakeCsiDriverClient(t *testing.T, stagingCapable bool) *fakeCsiDriverClient {
return &fakeCsiDriverClient{
t: t,
nodeClient: fake.NewNodeClient(stagingCapable),
}
}
return client
func (c *fakeCsiDriverClient) NodePublishVolume(
ctx context.Context,
volID string,
readOnly bool,
stagingTargetPath string,
targetPath string,
accessMode api.PersistentVolumeAccessMode,
volumeInfo map[string]string,
volumeAttribs map[string]string,
nodePublishSecrets map[string]string,
fsType string,
) error {
c.t.Log("calling fake.NodePublishVolume...")
req := &csipb.NodePublishVolumeRequest{
VolumeId: volID,
TargetPath: targetPath,
Readonly: readOnly,
PublishInfo: volumeInfo,
VolumeAttributes: volumeAttribs,
NodePublishSecrets: nodePublishSecrets,
VolumeCapability: &csipb.VolumeCapability{
AccessMode: &csipb.VolumeCapability_AccessMode{
Mode: asCSIAccessMode(accessMode),
},
AccessType: &csipb.VolumeCapability_Mount{
Mount: &csipb.VolumeCapability_MountVolume{
FsType: fsType,
},
},
},
}
_, err := c.nodeClient.NodePublishVolume(ctx, req)
return err
}
func (c *fakeCsiDriverClient) NodeUnpublishVolume(ctx context.Context, volID string, targetPath string) error {
c.t.Log("calling fake.NodeUnpublishVolume...")
req := &csipb.NodeUnpublishVolumeRequest{
VolumeId: volID,
TargetPath: targetPath,
}
_, err := c.nodeClient.NodeUnpublishVolume(ctx, req)
return err
}
func (c *fakeCsiDriverClient) NodeStageVolume(ctx context.Context,
volID string,
publishInfo map[string]string,
stagingTargetPath string,
fsType string,
accessMode api.PersistentVolumeAccessMode,
nodeStageSecrets map[string]string,
volumeAttribs map[string]string,
) error {
c.t.Log("calling fake.NodeStageVolume...")
req := &csipb.NodeStageVolumeRequest{
VolumeId: volID,
PublishInfo: publishInfo,
StagingTargetPath: stagingTargetPath,
VolumeCapability: &csipb.VolumeCapability{
AccessMode: &csipb.VolumeCapability_AccessMode{
Mode: asCSIAccessMode(accessMode),
},
AccessType: &csipb.VolumeCapability_Mount{
Mount: &csipb.VolumeCapability_MountVolume{
FsType: fsType,
},
},
},
NodeStageSecrets: nodeStageSecrets,
VolumeAttributes: volumeAttribs,
}
_, err := c.nodeClient.NodeStageVolume(ctx, req)
return err
}
func (c *fakeCsiDriverClient) NodeUnstageVolume(ctx context.Context, volID, stagingTargetPath string) error {
c.t.Log("calling fake.NodeUnstageVolume...")
req := &csipb.NodeUnstageVolumeRequest{
VolumeId: volID,
StagingTargetPath: stagingTargetPath,
}
_, err := c.nodeClient.NodeUnstageVolume(ctx, req)
return err
}
func (c *fakeCsiDriverClient) NodeGetCapabilities(ctx context.Context) ([]*csipb.NodeServiceCapability, error) {
c.t.Log("calling fake.NodeGetCapabilities...")
req := &csipb.NodeGetCapabilitiesRequest{}
resp, err := c.nodeClient.NodeGetCapabilities(ctx, req)
if err != nil {
return nil, err
}
return resp.GetCapabilities(), nil
}
func setupClient(t *testing.T, stageUnstageSet bool) csiClient {
return newFakeCsiDriverClient(t, stageUnstageSet)
}
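For orientation, the tests below drive the fake in a common pattern: obtain a client via setupClient, inject an error with SetNextError, and assert the call surfaces it. A minimal sketch (the test name and error value are illustrative; assumes the errors and context imports):
func TestFakeClientErrorInjectionSketch(t *testing.T) {
	client := setupClient(t, true /* stageUnstageSet */)
	// arrange the next node-client call to fail
	client.(*fakeCsiDriverClient).nodeClient.SetNextError(errors.New("injected failure"))
	err := client.NodeUnpublishVolume(context.Background(), "test-vol", "/path/to/target")
	if err == nil {
		t.Error("expected injected error, got nil")
	}
}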
func TestClientNodePublishVolume(t *testing.T) {
@ -58,9 +161,9 @@ func TestClientNodePublishVolume(t *testing.T) {
for _, tc := range testCases {
t.Logf("test case: %s", tc.name)
client.nodeClient.(*fake.NodeClient).SetNextError(tc.err)
client.(*fakeCsiDriverClient).nodeClient.SetNextError(tc.err)
err := client.NodePublishVolume(
grpctx.Background(),
context.Background(),
tc.volID,
false,
"",
@ -96,8 +199,8 @@ func TestClientNodeUnpublishVolume(t *testing.T) {
for _, tc := range testCases {
t.Logf("test case: %s", tc.name)
client.nodeClient.(*fake.NodeClient).SetNextError(tc.err)
err := client.NodeUnpublishVolume(grpctx.Background(), tc.volID, tc.targetPath)
client.(*fakeCsiDriverClient).nodeClient.SetNextError(tc.err)
err := client.NodeUnpublishVolume(context.Background(), tc.volID, tc.targetPath)
if tc.mustFail && err == nil {
t.Error("test must fail, but err is nil")
}
@ -125,9 +228,9 @@ func TestClientNodeStageVolume(t *testing.T) {
for _, tc := range testCases {
t.Logf("Running test case: %s", tc.name)
client.nodeClient.(*fake.NodeClient).SetNextError(tc.err)
client.(*fakeCsiDriverClient).nodeClient.SetNextError(tc.err)
err := client.NodeStageVolume(
grpctx.Background(),
context.Background(),
tc.volID,
map[string]string{"device": "/dev/null"},
tc.stagingTargetPath,
@ -161,9 +264,9 @@ func TestClientNodeUnstageVolume(t *testing.T) {
for _, tc := range testCases {
t.Logf("Running test case: %s", tc.name)
client.nodeClient.(*fake.NodeClient).SetNextError(tc.err)
client.(*fakeCsiDriverClient).nodeClient.SetNextError(tc.err)
err := client.NodeUnstageVolume(
grpctx.Background(),
context.Background(),
tc.volID, tc.stagingTargetPath,
)
if tc.mustFail && err == nil {

View File

@ -17,14 +17,14 @@ limitations under the License.
package csi
import (
"encoding/json"
"context"
"errors"
"fmt"
"os"
"path"
"github.com/golang/glog"
grpctx "golang.org/x/net/context"
api "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
@ -34,8 +34,6 @@ import (
"k8s.io/kubernetes/pkg/volume/util"
)
const defaultFSType = "ext4"
//TODO (vladimirvivien) move this in a central loc later
var (
volDataKey = struct {
@ -54,8 +52,8 @@ var (
)
type csiMountMgr struct {
k8s kubernetes.Interface
csiClient csiClient
k8s kubernetes.Interface
plugin *csiPlugin
driverName string
volumeID string
@ -118,8 +116,9 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error {
nodeName := string(c.plugin.host.GetNodeName())
attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, nodeName)
ctx, cancel := grpctx.WithTimeout(grpctx.Background(), csiTimeout)
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
defer cancel()
// Check for STAGE_UNSTAGE_VOLUME set and populate deviceMountPath if so
deviceMountPath := ""
stageUnstageSet, err := hasStageUnstageCapability(ctx, csi)
@ -153,6 +152,15 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error {
attribs := csiSource.VolumeAttributes
nodePublishSecrets := map[string]string{}
if csiSource.NodePublishSecretRef != nil {
nodePublishSecrets, err = getCredentialsFromSecret(c.k8s, csiSource.NodePublishSecretRef)
if err != nil {
return fmt.Errorf("fetching NodePublishSecretRef %s/%s failed: %v",
csiSource.NodePublishSecretRef.Namespace, csiSource.NodePublishSecretRef.Name, err)
}
}
// create target_dir before call to NodePublish
if err := os.MkdirAll(dir, 0750); err != nil {
glog.Error(log("mouter.SetUpAt failed to create dir %#v: %v", dir, err))
@ -160,24 +168,6 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error {
}
glog.V(4).Info(log("created target path successfully [%s]", dir))
// persist volume info data for teardown
volData := map[string]string{
volDataKey.specVolID: c.spec.Name(),
volDataKey.volHandle: csiSource.VolumeHandle,
volDataKey.driverName: csiSource.Driver,
volDataKey.nodeName: nodeName,
volDataKey.attachmentID: attachID,
}
if err := saveVolumeData(c.plugin, c.podUID, c.spec.Name(), volData); err != nil {
glog.Error(log("mounter.SetUpAt failed to save volume info data: %v", err))
if err := removeMountDir(c.plugin, dir); err != nil {
glog.Error(log("mounter.SetUpAt failed to remove mount dir after a saveVolumeData() error [%s]: %v", dir, err))
return err
}
return err
}
//TODO (vladimirvivien) implement better AccessModes mapping between k8s and CSI
accessMode := api.ReadWriteOnce
if c.spec.PersistentVolume.Spec.AccessModes != nil {
@ -185,13 +175,6 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error {
}
fsType := csiSource.FSType
if len(fsType) == 0 {
fsType = defaultFSType
}
nodePublishSecrets := map[string]string{}
if csiSource.NodePublishSecretRef != nil {
nodePublishSecrets = getCredentialsFromSecret(c.k8s, csiSource.NodePublishSecretRef)
}
err = csi.NodePublishVolume(
ctx,
c.volumeID,
@ -207,22 +190,57 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error {
if err != nil {
glog.Errorf(log("mounter.SetupAt failed: %v", err))
if err := removeMountDir(c.plugin, dir); err != nil {
glog.Error(log("mounter.SetuAt failed to remove mount dir after a NodePublish() error [%s]: %v", dir, err))
return err
if removeMountDirErr := removeMountDir(c.plugin, dir); removeMountDirErr != nil {
glog.Error(log("mounter.SetupAt failed to remove mount dir after a NodePublish() error [%s]: %v", dir, removeMountDirErr))
}
return err
}
// apply volume ownership
if !c.readOnly && fsGroup != nil {
err := volume.SetVolumeOwnership(c, fsGroup)
if err != nil {
// attempt to rollback mount.
glog.Error(log("mounter.SetupAt failed to set fsgroup volume ownership for [%s]: %v", c.volumeID, err))
glog.V(4).Info(log("mounter.SetupAt attempting to unpublish volume %s due to previous error", c.volumeID))
if unpubErr := csi.NodeUnpublishVolume(ctx, c.volumeID, dir); unpubErr != nil {
glog.Error(log(
"mounter.SetupAt failed to unpublish volume [%s]: %v (caused by previous NodePublish error: %v)",
c.volumeID, unpubErr, err,
))
return fmt.Errorf("%v (caused by %v)", unpubErr, err)
}
if unmountErr := removeMountDir(c.plugin, dir); unmountErr != nil {
glog.Error(log(
"mounter.SetupAt failed to clean mount dir [%s]: %v (caused by previous NodePublish error: %v)",
dir, unmountErr, err,
))
return fmt.Errorf("%v (caused by %v)", unmountErr, err)
}
return err
}
glog.V(4).Info(log("mounter.SetupAt sets fsGroup to [%d] for %s", *fsGroup, c.volumeID))
}
glog.V(4).Infof(log("mounter.SetUp successfully requested NodePublish [%s]", dir))
return nil
}
func (c *csiMountMgr) GetAttributes() volume.Attributes {
mounter := c.plugin.host.GetMounter(c.plugin.GetPluginName())
path := c.GetPath()
supportSelinux, err := mounter.GetSELinuxSupport(path)
if err != nil {
glog.V(2).Info(log("error checking for SELinux support: %s", err))
// Best guess
supportSelinux = false
}
return volume.Attributes{
ReadOnly: c.readOnly,
Managed: !c.readOnly,
SupportsSELinux: false,
SupportsSELinux: supportSelinux,
}
}
@ -249,34 +267,12 @@ func (c *csiMountMgr) TearDownAt(dir string) error {
return nil
}
if err != nil {
glog.Error(log("mounter.TearDownAt failed to get CSI persistent source: %v", err))
return err
}
// load volume info from file
dataDir := path.Dir(dir) // dropoff /mount at end
data, err := loadVolumeData(dataDir, volDataFileName)
if err != nil {
glog.Error(log("unmounter.Teardown failed to load volume data file using dir [%s]: %v", dir, err))
return err
}
volID := data[volDataKey.volHandle]
driverName := data[volDataKey.driverName]
if c.csiClient == nil {
addr := fmt.Sprintf(csiAddrTemplate, driverName)
client := newCsiDriverClient("unix", addr)
glog.V(4).Infof(log("unmounter csiClient setup [volume=%v,driver=%v]", volID, driverName))
c.csiClient = client
}
ctx, cancel := grpctx.WithTimeout(grpctx.Background(), csiTimeout)
defer cancel()
volID := c.volumeID
csi := c.csiClient
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
defer cancel()
if err := csi.NodeUnpublishVolume(ctx, volID, dir); err != nil {
glog.Errorf(log("mounter.TearDownAt failed: %v", err))
return err
@ -292,50 +288,6 @@ func (c *csiMountMgr) TearDownAt(dir string) error {
return nil
}
// saveVolumeData persists parameter data as json file using the location
// generated by /var/lib/kubelet/pods/<podID>/volumes/kubernetes.io~csi/<specVolId>/volume_data.json
func saveVolumeData(p *csiPlugin, podUID types.UID, specVolID string, data map[string]string) error {
dir := getTargetPath(podUID, specVolID, p.host)
dataFilePath := path.Join(dir, volDataFileName)
file, err := os.Create(dataFilePath)
if err != nil {
glog.Error(log("failed to save volume data file %s: %v", dataFilePath, err))
return err
}
defer file.Close()
if err := json.NewEncoder(file).Encode(data); err != nil {
glog.Error(log("failed to save volume data file %s: %v", dataFilePath, err))
return err
}
glog.V(4).Info(log("volume data file saved successfully [%s]", dataFilePath))
return nil
}
// loadVolumeData uses the directory returned by mounter.GetPath with value
// /var/lib/kubelet/pods/<podID>/volumes/kubernetes.io~csi/<specVolumeId>/mount.
// The function extracts specVolumeID and uses it to load the json data file from dir
// /var/lib/kubelet/pods/<podID>/volumes/kubernetes.io~csi/<specVolId>/volume_data.json
func loadVolumeData(dir string, fileName string) (map[string]string, error) {
// remove /mount at the end
dataFileName := path.Join(dir, fileName)
glog.V(4).Info(log("loading volume data file [%s]", dataFileName))
file, err := os.Open(dataFileName)
if err != nil {
glog.Error(log("failed to open volume data file [%s]: %v", dataFileName, err))
return nil, err
}
defer file.Close()
data := map[string]string{}
if err := json.NewDecoder(file).Decode(&data); err != nil {
glog.Error(log("failed to parse volume data file [%s]: %v", dataFileName, err))
return nil, err
}
return data, nil
}
// isDirMounted returns the !notMounted result from IsLikelyNotMountPoint check
func isDirMounted(plug *csiPlugin, dir string) (bool, error) {
mounter := plug.host.GetMounter(plug.GetPluginName())

View File

@ -31,8 +31,8 @@ import (
"k8s.io/apimachinery/pkg/types"
fakeclient "k8s.io/client-go/kubernetes/fake"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/csi/fake"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/kubernetes/pkg/volume/util"
)
var (
@ -78,7 +78,6 @@ func TestMounterGetPath(t *testing.T) {
csiMounter := mounter.(*csiMountMgr)
path := csiMounter.GetPath()
t.Logf("*** GetPath: %s", path)
if tc.path != path {
t.Errorf("expecting path %s, got %s", tc.path, path)
@ -114,7 +113,7 @@ func TestMounterSetUp(t *testing.T) {
}
csiMounter := mounter.(*csiMountMgr)
csiMounter.csiClient = setupClient(t, false)
csiMounter.csiClient = setupClient(t, true)
attachID := getAttachmentName(csiMounter.volumeID, csiMounter.driverName, string(plug.host.GetNodeName()))
@ -141,9 +140,16 @@ func TestMounterSetUp(t *testing.T) {
}
// Mounter.SetUp()
if err := csiMounter.SetUp(nil); err != nil {
fsGroup := int64(2000)
if err := csiMounter.SetUp(&fsGroup); err != nil {
t.Fatalf("mounter.Setup failed: %v", err)
}
// Test that the default filesystem type is not written back into the spec
if len(csiMounter.spec.PersistentVolume.Spec.CSI.FSType) != 0 {
t.Errorf("default value of file system type was overridden by type %s", csiMounter.spec.PersistentVolume.Spec.CSI.FSType)
}
path := csiMounter.GetPath()
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
@ -154,7 +160,7 @@ func TestMounterSetUp(t *testing.T) {
}
// ensure call went all the way
pubs := csiMounter.csiClient.(*csiDriverClient).nodeClient.(*fake.NodeClient).GetNodePublishedVolumes()
pubs := csiMounter.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodePublishedVolumes()
if pubs[csiMounter.volumeID] != csiMounter.GetPath() {
t.Error("csi server may not have received NodePublishVolume call")
}
@ -163,39 +169,46 @@ func TestMounterSetUp(t *testing.T) {
func TestUnmounterTeardown(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer os.RemoveAll(tmpDir)
pv := makeTestPV("test-pv", 10, testDriver, testVol)
// save the data file prior to unmount
dir := path.Join(getTargetPath(testPodUID, pv.ObjectMeta.Name, plug.host), "/mount")
if err := os.MkdirAll(dir, 0755); err != nil && !os.IsNotExist(err) {
t.Errorf("failed to create dir [%s]: %v", dir, err)
}
// do a fake local mount
diskMounter := util.NewSafeFormatAndMountFromHost(plug.GetPluginName(), plug.host)
if err := diskMounter.FormatAndMount("/fake/device", dir, "testfs", nil); err != nil {
t.Errorf("failed to mount dir [%s]: %v", dir, err)
}
if err := saveVolumeData(
path.Dir(dir),
volDataFileName,
map[string]string{
volDataKey.specVolID: pv.ObjectMeta.Name,
volDataKey.driverName: testDriver,
volDataKey.volHandle: testVol,
},
); err != nil {
t.Fatalf("failed to save volume data: %v", err)
}
unmounter, err := plug.NewUnmounter(pv.ObjectMeta.Name, testPodUID)
if err != nil {
t.Fatalf("failed to make a new Unmounter: %v", err)
}
csiUnmounter := unmounter.(*csiMountMgr)
csiUnmounter.csiClient = setupClient(t, false)
dir := csiUnmounter.GetPath()
// save the data file prior to unmount
if err := os.MkdirAll(dir, 0755); err != nil && !os.IsNotExist(err) {
t.Errorf("failed to create dir [%s]: %v", dir, err)
}
if err := saveVolumeData(
plug,
testPodUID,
"test-pv",
map[string]string{volDataKey.specVolID: "test-pv", volDataKey.driverName: "driver", volDataKey.volHandle: "vol-handle"},
); err != nil {
t.Fatalf("failed to save volume data: %v", err)
}
csiUnmounter.csiClient = setupClient(t, true)
err = csiUnmounter.TearDownAt(dir)
if err != nil {
t.Fatal(err)
}
// ensure csi client call
pubs := csiUnmounter.csiClient.(*csiDriverClient).nodeClient.(*fake.NodeClient).GetNodePublishedVolumes()
pubs := csiUnmounter.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodePublishedVolumes()
if _, ok := pubs[csiUnmounter.volumeID]; ok {
t.Error("csi server may not have received NodeUnpublishVolume call")
}
@ -222,7 +235,7 @@ func TestSaveVolumeData(t *testing.T) {
t.Errorf("failed to create dir [%s]: %v", mountDir, err)
}
err := saveVolumeData(plug, testPodUID, specVolID, tc.data)
err := saveVolumeData(path.Dir(mountDir), volDataFileName, tc.data)
if !tc.shouldFail && err != nil {
t.Errorf("unexpected failure: %v", err)

View File

@ -19,14 +19,21 @@ package csi
import (
"errors"
"fmt"
"os"
"path"
"strings"
"sync"
"time"
"github.com/golang/glog"
api "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/csi/labelmanager"
)
const (
@ -40,16 +47,19 @@ const (
csiTimeout = 15 * time.Second
volNameSep = "^"
volDataFileName = "vol_data.json"
fsTypeBlockName = "block"
)
type csiPlugin struct {
host volume.VolumeHost
host volume.VolumeHost
blockEnabled bool
}
// ProbeVolumePlugins returns implemented plugins
func ProbeVolumePlugins() []volume.VolumePlugin {
p := &csiPlugin{
host: nil,
host: nil,
blockEnabled: utilfeature.DefaultFeatureGate.Enabled(features.CSIBlockVolume),
}
return []volume.VolumePlugin{p}
}
@ -57,9 +67,54 @@ func ProbeVolumePlugins() []volume.VolumePlugin {
// volume.VolumePlugin methods
var _ volume.VolumePlugin = &csiPlugin{}
type csiDriver struct {
driverName string
driverEndpoint string
}
type csiDriversStore struct {
driversMap map[string]csiDriver
sync.RWMutex
}
// csiDrivers keeps track of all CSI drivers registered on the node and their
// corresponding sockets
var csiDrivers csiDriversStore
var lm labelmanager.Interface
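Readers of csiDrivers should take the embedded read lock; a minimal lookup sketch (the helper name is illustrative, not part of this change):
func getDriverEndpointSketch(driverName string) (string, bool) {
	csiDrivers.RLock()
	defer csiDrivers.RUnlock()
	driver, ok := csiDrivers.driversMap[driverName]
	return driver.driverEndpoint, ok
}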
// RegistrationCallback is called by kubelet's plugin watcher upon detection
// of a new registration socket opened by CSI Driver registrar side car.
func RegistrationCallback(pluginName string, endpoint string, versions []string, socketPath string) (error, chan bool) {
glog.Infof(log("Callback from kubelet with plugin name: %s endpoint: %s versions: %s socket path: %s",
pluginName, endpoint, strings.Join(versions, ","), socketPath))
if endpoint == "" {
endpoint = socketPath
}
// Calling nodeLabelManager to update label for newly registered CSI driver
err := lm.AddLabels(pluginName)
if err != nil {
return err, nil
}
// Store the endpoint of the newly registered CSI driver in the map, keyed by the
// driver name, so that other CSI components can look up a driver's socket by name.
csiDrivers.Lock()
defer csiDrivers.Unlock()
csiDrivers.driversMap[pluginName] = csiDriver{driverName: pluginName, driverEndpoint: endpoint}
return nil, nil
}
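As a rough illustration, the kubelet's plugin watcher would invoke the callback along these lines (driver name, versions, and paths are hypothetical):
err, _ := RegistrationCallback(
	"csi-hostpath",    // plugin name reported by the registrar sidecar
	"",                // empty endpoint falls back to the socket path
	[]string{"0.3.0"}, // versions advertised by the driver
	"/var/lib/kubelet/plugins/csi-hostpath/csi.sock",
)
if err != nil {
	glog.Errorf("driver registration failed: %v", err)
}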
func (p *csiPlugin) Init(host volume.VolumeHost) error {
glog.Info(log("plugin initializing..."))
p.host = host
// Initialize the csiDrivers map and the node label manager
csiDrivers = csiDriversStore{driversMap: map[string]csiDriver{}}
lm = labelmanager.NewLabelManager(host.GetNodeName(), host.GetKubeClient())
return nil
}
@ -98,11 +153,10 @@ func (p *csiPlugin) NewMounter(
if err != nil {
return nil, err
}
// before it is used in any paths such as socket etc
addr := fmt.Sprintf(csiAddrTemplate, pvSource.Driver)
glog.V(4).Infof(log("setting up mounter for [volume=%v,driver=%v]", pvSource.VolumeHandle, pvSource.Driver))
client := newCsiDriverClient("unix", addr)
readOnly, err := getReadOnlyFromSpec(spec)
if err != nil {
return nil, err
}
k8s := p.host.GetKubeClient()
if k8s == nil {
@ -110,6 +164,8 @@ func (p *csiPlugin) NewMounter(
return nil, errors.New("failed to get a Kubernetes client")
}
csi := newCsiDriverClient(pvSource.Driver)
mounter := &csiMountMgr{
plugin: p,
k8s: k8s,
@ -119,18 +175,66 @@ func (p *csiPlugin) NewMounter(
driverName: pvSource.Driver,
volumeID: pvSource.VolumeHandle,
specVolumeID: spec.Name(),
csiClient: client,
csiClient: csi,
readOnly: readOnly,
}
// Save volume info in pod dir
dir := mounter.GetPath()
dataDir := path.Dir(dir) // dropoff /mount at end
if err := os.MkdirAll(dataDir, 0750); err != nil {
glog.Error(log("failed to create dir %#v: %v", dataDir, err))
return nil, err
}
glog.V(4).Info(log("created path successfully [%s]", dataDir))
// persist volume info data for teardown
node := string(p.host.GetNodeName())
attachID := getAttachmentName(pvSource.VolumeHandle, pvSource.Driver, node)
volData := map[string]string{
volDataKey.specVolID: spec.Name(),
volDataKey.volHandle: pvSource.VolumeHandle,
volDataKey.driverName: pvSource.Driver,
volDataKey.nodeName: node,
volDataKey.attachmentID: attachID,
}
if err := saveVolumeData(dataDir, volDataFileName, volData); err != nil {
glog.Error(log("failed to save volume info data: %v", err))
if err := os.RemoveAll(dataDir); err != nil {
glog.Error(log("failed to remove dir after error [%s]: %v", dataDir, err))
return nil, err
}
return nil, err
}
glog.V(4).Info(log("mounter created successfully"))
return mounter, nil
}
func (p *csiPlugin) NewUnmounter(specName string, podUID types.UID) (volume.Unmounter, error) {
glog.V(4).Infof(log("setting up unmounter for [name=%v, podUID=%v]", specName, podUID))
unmounter := &csiMountMgr{
plugin: p,
podUID: podUID,
specVolumeID: specName,
}
// load volume info from file
dir := unmounter.GetPath()
dataDir := path.Dir(dir) // dropoff /mount at end
data, err := loadVolumeData(dataDir, volDataFileName)
if err != nil {
glog.Error(log("unmounter failed to load volume data file [%s]: %v", dir, err))
return nil, err
}
unmounter.driverName = data[volDataKey.driverName]
unmounter.volumeID = data[volDataKey.volHandle]
unmounter.csiClient = newCsiDriverClient(unmounter.driverName)
return unmounter, nil
}
@ -208,16 +312,132 @@ func (p *csiPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error)
return mount.GetMountRefs(m, deviceMountPath)
}
func getCSISourceFromSpec(spec *volume.Spec) (*api.CSIPersistentVolumeSource, error) {
if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.CSI != nil {
return spec.PersistentVolume.Spec.CSI, nil
// BlockVolumePlugin methods
var _ volume.BlockVolumePlugin = &csiPlugin{}
func (p *csiPlugin) NewBlockVolumeMapper(spec *volume.Spec, podRef *api.Pod, opts volume.VolumeOptions) (volume.BlockVolumeMapper, error) {
if !p.blockEnabled {
return nil, errors.New("CSIBlockVolume feature not enabled")
}
return nil, fmt.Errorf("CSIPersistentVolumeSource not defined in spec")
pvSource, err := getCSISourceFromSpec(spec)
if err != nil {
return nil, err
}
readOnly, err := getReadOnlyFromSpec(spec)
if err != nil {
return nil, err
}
glog.V(4).Info(log("setting up block mapper for [volume=%v,driver=%v]", pvSource.VolumeHandle, pvSource.Driver))
client := newCsiDriverClient(pvSource.Driver)
k8s := p.host.GetKubeClient()
if k8s == nil {
glog.Error(log("failed to get a kubernetes client"))
return nil, errors.New("failed to get a Kubernetes client")
}
mapper := &csiBlockMapper{
csiClient: client,
k8s: k8s,
plugin: p,
volumeID: pvSource.VolumeHandle,
driverName: pvSource.Driver,
readOnly: readOnly,
spec: spec,
podUID: podRef.UID,
}
// Save volume info in pod dir
dataDir := getVolumeDeviceDataDir(spec.Name(), p.host)
if err := os.MkdirAll(dataDir, 0750); err != nil {
glog.Error(log("failed to create data dir %s: %v", dataDir, err))
return nil, err
}
glog.V(4).Info(log("created path successfully [%s]", dataDir))
// persist volume info data for teardown
node := string(p.host.GetNodeName())
attachID := getAttachmentName(pvSource.VolumeHandle, pvSource.Driver, node)
volData := map[string]string{
volDataKey.specVolID: spec.Name(),
volDataKey.volHandle: pvSource.VolumeHandle,
volDataKey.driverName: pvSource.Driver,
volDataKey.nodeName: node,
volDataKey.attachmentID: attachID,
}
if err := saveVolumeData(dataDir, volDataFileName, volData); err != nil {
glog.Error(log("failed to save volume info data: %v", err))
if err := os.RemoveAll(dataDir); err != nil {
glog.Error(log("failed to remove dir after error [%s]: %v", dataDir, err))
return nil, err
}
return nil, err
}
return mapper, nil
}
// log prepends log string with `kubernetes.io/csi`
func log(msg string, parts ...interface{}) string {
return fmt.Sprintf(fmt.Sprintf("%s: %s", csiPluginName, msg), parts...)
func (p *csiPlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (volume.BlockVolumeUnmapper, error) {
if !p.blockEnabled {
return nil, errors.New("CSIBlockVolume feature not enabled")
}
glog.V(4).Infof(log("setting up block unmapper for [Spec=%v, podUID=%v]", volName, podUID))
unmapper := &csiBlockMapper{
plugin: p,
podUID: podUID,
specName: volName,
}
// load volume info from file
dataDir := getVolumeDeviceDataDir(unmapper.specName, p.host)
data, err := loadVolumeData(dataDir, volDataFileName)
if err != nil {
glog.Error(log("unmapper failed to load volume data file [%s]: %v", dataDir, err))
return nil, err
}
unmapper.driverName = data[volDataKey.driverName]
unmapper.volumeID = data[volDataKey.volHandle]
unmapper.csiClient = newCsiDriverClient(unmapper.driverName)
return unmapper, nil
}
func (p *csiPlugin) ConstructBlockVolumeSpec(podUID types.UID, specVolName, mapPath string) (*volume.Spec, error) {
if !p.blockEnabled {
return nil, errors.New("CSIBlockVolume feature not enabled")
}
glog.V(4).Infof("plugin.ConstructBlockVolumeSpec [podUID=%s, specVolName=%s, path=%s]", string(podUID), specVolName, mapPath)
dataDir := getVolumeDeviceDataDir(specVolName, p.host)
volData, err := loadVolumeData(dataDir, volDataFileName)
if err != nil {
glog.Error(log("plugin.ConstructBlockVolumeSpec failed loading volume data using [%s]: %v", mapPath, err))
return nil, err
}
glog.V(4).Info(log("plugin.ConstructBlockVolumeSpec extracted [%#v]", volData))
blockMode := api.PersistentVolumeBlock
pv := &api.PersistentVolume{
ObjectMeta: meta.ObjectMeta{
Name: volData[volDataKey.specVolID],
},
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
CSI: &api.CSIPersistentVolumeSource{
Driver: volData[volDataKey.driverName],
VolumeHandle: volData[volDataKey.volHandle],
},
},
VolumeMode: &blockMode,
},
}
return volume.NewSpecFromPersistentVolume(pv, false), nil
}

View File

@ -20,12 +20,14 @@ import (
"fmt"
"os"
"path"
"path/filepath"
"testing"
api "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
fakeclient "k8s.io/client-go/kubernetes/fake"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/volume"
@ -34,6 +36,11 @@ import (
// create a plugin mgr to load plugins and setup a fake client
func newTestPlugin(t *testing.T) (*csiPlugin, string) {
err := utilfeature.DefaultFeatureGate.Set("CSIBlockVolume=true")
if err != nil {
t.Fatalf("Failed to enable feature gate for CSIBlockVolume: %v", err)
}
tmpDir, err := utiltesting.MkTmpdir("csi-test")
if err != nil {
t.Fatalf("can't create temp dir: %v", err)
@ -160,7 +167,7 @@ func TestPluginConstructVolumeSpec(t *testing.T) {
if err := os.MkdirAll(mountDir, 0755); err != nil && !os.IsNotExist(err) {
t.Errorf("failed to create dir [%s]: %v", mountDir, err)
}
if err := saveVolumeData(plug, testPodUID, tc.specVolID, tc.data); err != nil {
if err := saveVolumeData(path.Dir(mountDir), volDataFileName, tc.data); err != nil {
t.Fatal(err)
}
}
@ -215,7 +222,21 @@ func TestPluginNewMounter(t *testing.T) {
t.Error("mounter pod not set")
}
if csiMounter.podUID == types.UID("") {
t.Error("mounter podUID mot set")
t.Error("mounter podUID not set")
}
if csiMounter.csiClient == nil {
t.Error("mounter csiClient is nil")
}
// ensure data file is created
dataDir := path.Dir(mounter.GetPath())
dataFile := filepath.Join(dataDir, volDataFileName)
if _, err := os.Stat(dataFile); err != nil {
if os.IsNotExist(err) {
t.Errorf("data file not created %s", dataFile)
} else {
t.Fatal(err)
}
}
}
@ -225,6 +246,25 @@ func TestPluginNewUnmounter(t *testing.T) {
pv := makeTestPV("test-pv", 10, testDriver, testVol)
// save the data file to re-create client
dir := path.Join(getTargetPath(testPodUID, pv.ObjectMeta.Name, plug.host), "/mount")
if err := os.MkdirAll(dir, 0755); err != nil && !os.IsNotExist(err) {
t.Errorf("failed to create dir [%s]: %v", dir, err)
}
if err := saveVolumeData(
path.Dir(dir),
volDataFileName,
map[string]string{
volDataKey.specVolID: pv.ObjectMeta.Name,
volDataKey.driverName: testDriver,
volDataKey.volHandle: testVol,
},
); err != nil {
t.Fatalf("failed to save volume data: %v", err)
}
// test unmounter
unmounter, err := plug.NewUnmounter(pv.ObjectMeta.Name, testPodUID)
csiUnmounter := unmounter.(*csiMountMgr)
@ -240,6 +280,9 @@ func TestPluginNewUnmounter(t *testing.T) {
t.Error("podUID not set")
}
if csiUnmounter.csiClient == nil {
t.Error("unmounter csiClient is nil")
}
}
func TestPluginNewAttacher(t *testing.T) {
@ -277,3 +320,156 @@ func TestPluginNewDetacher(t *testing.T) {
t.Error("Kubernetes client not set for detacher")
}
}
func TestPluginNewBlockMapper(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer os.RemoveAll(tmpDir)
pv := makeTestPV("test-block-pv", 10, testDriver, testVol)
mounter, err := plug.NewBlockVolumeMapper(
volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly),
&api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
volume.VolumeOptions{},
)
if err != nil {
t.Fatalf("Failed to make a new BlockMapper: %v", err)
}
if mounter == nil {
t.Fatal("failed to create CSI BlockMapper, mapper is nil")
}
csiMapper := mounter.(*csiBlockMapper)
// validate mapper fields
if csiMapper.driverName != testDriver {
t.Error("CSI block mapper missing driver name")
}
if csiMapper.volumeID != testVol {
t.Error("CSI block mapper missing volumeID")
}
if csiMapper.podUID == types.UID("") {
t.Error("CSI block mapper missing pod.UID")
}
if csiMapper.csiClient == nil {
t.Error("mapper csiClient is nil")
}
// ensure data dir is created
dataDir := getVolumeDeviceDataDir(csiMapper.spec.Name(), plug.host)
if _, err := os.Stat(dataDir); err != nil {
if os.IsNotExist(err) {
t.Errorf("data dir not created %s", dataDir)
} else {
t.Fatal(err)
}
}
}
func TestPluginNewUnmapper(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer os.RemoveAll(tmpDir)
pv := makeTestPV("test-pv", 10, testDriver, testVol)
// save the data file to re-create client
dir := getVolumeDeviceDataDir(pv.ObjectMeta.Name, plug.host)
if err := os.MkdirAll(dir, 0755); err != nil && !os.IsNotExist(err) {
t.Errorf("failed to create dir [%s]: %v", dir, err)
}
if err := saveVolumeData(
dir,
volDataFileName,
map[string]string{
volDataKey.specVolID: pv.ObjectMeta.Name,
volDataKey.driverName: testDriver,
volDataKey.volHandle: testVol,
},
); err != nil {
t.Fatalf("failed to save volume data: %v", err)
}
// test unmapper
unmapper, err := plug.NewBlockVolumeUnmapper(pv.ObjectMeta.Name, testPodUID)
if err != nil {
t.Fatalf("Failed to make a new Unmapper: %v", err)
}
csiUnmapper := unmapper.(*csiBlockMapper)
if csiUnmapper == nil {
t.Fatal("failed to create CSI Unmapper")
}
if csiUnmapper.podUID != testPodUID {
t.Error("podUID not set")
}
if csiUnmapper.specName != pv.ObjectMeta.Name {
t.Error("specName not set")
}
if csiUnmapper.csiClient == nil {
t.Error("unmapper csiClient is nil")
}
// test loaded vol data
if csiUnmapper.driverName != testDriver {
t.Error("unmapper driverName not set")
}
if csiUnmapper.volumeID != testVol {
t.Error("unmapper volumeHandle not set")
}
}
func TestPluginConstructBlockVolumeSpec(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer os.RemoveAll(tmpDir)
testCases := []struct {
name string
specVolID string
data map[string]string
shouldFail bool
}{
{
name: "valid spec name",
specVolID: "test.vol.id",
data: map[string]string{volDataKey.specVolID: "test.vol.id", volDataKey.volHandle: "test-vol0", volDataKey.driverName: "test-driver0"},
},
}
for _, tc := range testCases {
t.Logf("test case: %s", tc.name)
deviceDataDir := getVolumeDeviceDataDir(tc.specVolID, plug.host)
// create data file in csi plugin dir
if tc.data != nil {
if err := os.MkdirAll(deviceDataDir, 0755); err != nil && !os.IsNotExist(err) {
t.Errorf("failed to create dir [%s]: %v", deviceDataDir, err)
}
if err := saveVolumeData(deviceDataDir, volDataFileName, tc.data); err != nil {
t.Fatal(err)
}
}
// rebuild spec
spec, err := plug.ConstructBlockVolumeSpec("test-podUID", tc.specVolID, getVolumeDevicePluginDir(tc.specVolID, plug.host))
if tc.shouldFail {
if err == nil {
t.Fatal("expecting ConstructVolumeSpec to fail, but got nil error")
}
continue
}
volHandle := spec.PersistentVolume.Spec.CSI.VolumeHandle
if volHandle != tc.data[volDataKey.volHandle] {
t.Errorf("expected volID %s, got volID %s", tc.data[volDataKey.volHandle], volHandle)
}
if spec.Name() != tc.specVolID {
t.Errorf("Unexpected spec name %s", spec.Name())
}
}
}

View File

@ -17,22 +17,107 @@ limitations under the License.
package csi
import (
"encoding/json"
"fmt"
"os"
"path"
"github.com/golang/glog"
api "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
kstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
)
func getCredentialsFromSecret(k8s kubernetes.Interface, secretRef *api.SecretReference) map[string]string {
func getCredentialsFromSecret(k8s kubernetes.Interface, secretRef *api.SecretReference) (map[string]string, error) {
credentials := map[string]string{}
secret, err := k8s.CoreV1().Secrets(secretRef.Namespace).Get(secretRef.Name, meta.GetOptions{})
if err != nil {
glog.Warningf("failed to find the secret %s in the namespace %s with error: %v\n", secretRef.Name, secretRef.Namespace, err)
return credentials
glog.Errorf("failed to find the secret %s in the namespace %s with error: %v\n", secretRef.Name, secretRef.Namespace, err)
return credentials, err
}
for key, value := range secret.Data {
credentials[key] = string(value)
}
return credentials
return credentials, nil
}
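A minimal sketch of exercising this helper against a fake clientset (the secret name and data are illustrative; assumes the fakeclient import from k8s.io/client-go/kubernetes/fake):
k8s := fakeclient.NewSimpleClientset(&api.Secret{
	ObjectMeta: meta.ObjectMeta{Name: "publish-secret", Namespace: "default"},
	Data:       map[string][]byte{"user": []byte("admin")},
})
creds, err := getCredentialsFromSecret(k8s, &api.SecretReference{
	Name:      "publish-secret",
	Namespace: "default",
})
// on success, creds == map[string]string{"user": "admin"}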
// saveVolumeData persists parameter data as json file at the provided location
func saveVolumeData(dir string, fileName string, data map[string]string) error {
dataFilePath := path.Join(dir, fileName)
glog.V(4).Info(log("saving volume data file [%s]", dataFilePath))
file, err := os.Create(dataFilePath)
if err != nil {
glog.Error(log("failed to save volume data file %s: %v", dataFilePath, err))
return err
}
defer file.Close()
if err := json.NewEncoder(file).Encode(data); err != nil {
glog.Error(log("failed to save volume data file %s: %v", dataFilePath, err))
return err
}
glog.V(4).Info(log("volume data file saved successfully [%s]", dataFilePath))
return nil
}
// loadVolumeData loads volume info from specified json file/location
func loadVolumeData(dir string, fileName string) (map[string]string, error) {
dataFileName := path.Join(dir, fileName)
glog.V(4).Info(log("loading volume data file [%s]", dataFileName))
file, err := os.Open(dataFileName)
if err != nil {
glog.Error(log("failed to open volume data file [%s]: %v", dataFileName, err))
return nil, err
}
defer file.Close()
data := map[string]string{}
if err := json.NewDecoder(file).Decode(&data); err != nil {
glog.Error(log("failed to parse volume data file [%s]: %v", dataFileName, err))
return nil, err
}
return data, nil
}
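A save/load round trip under a temp dir shows the intended contract; a minimal sketch (assumes the ioutil import):
dir, err := ioutil.TempDir("", "vol-data-test")
if err != nil {
	// handle error
}
defer os.RemoveAll(dir)
data := map[string]string{
	volDataKey.specVolID:  "test-pv",
	volDataKey.volHandle:  "vol-handle-0",
	volDataKey.driverName: "test-driver",
}
if err := saveVolumeData(dir, volDataFileName, data); err != nil {
	// handle error
}
loaded, err := loadVolumeData(dir, volDataFileName)
// on success, loaded contains the same key/value pairs as data,
// stored on disk as a single JSON object in <dir>/vol_data.json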
func getCSISourceFromSpec(spec *volume.Spec) (*api.CSIPersistentVolumeSource, error) {
if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.CSI != nil {
return spec.PersistentVolume.Spec.CSI, nil
}
return nil, fmt.Errorf("CSIPersistentVolumeSource not defined in spec")
}
func getReadOnlyFromSpec(spec *volume.Spec) (bool, error) {
if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.CSI != nil {
return spec.ReadOnly, nil
}
return false, fmt.Errorf("CSIPersistentVolumeSource not defined in spec")
}
// log prepends log string with `kubernetes.io/csi`
func log(msg string, parts ...interface{}) string {
return fmt.Sprintf(fmt.Sprintf("%s: %s", csiPluginName, msg), parts...)
}
// getVolumeDevicePluginDir returns the path where the CSI plugin keeps the
// symlink for a block device associated with a given specVolumeID.
// path: plugins/kubernetes.io/csi/volumeDevices/{specVolumeID}/dev
func getVolumeDevicePluginDir(specVolID string, host volume.VolumeHost) string {
sanitizedSpecVolID := kstrings.EscapeQualifiedNameForDisk(specVolID)
return path.Join(host.GetVolumeDevicePluginDir(csiPluginName), sanitizedSpecVolID, "dev")
}
// getVolumeDeviceDataDir returns the path where the CSI plugin keeps the
// volume data for a block device associated with a given specVolumeID.
// path: plugins/kubernetes.io/csi/volumeDevices/{specVolumeID}/data
func getVolumeDeviceDataDir(specVolID string, host volume.VolumeHost) string {
sanitizedSpecVolID := kstrings.EscapeQualifiedNameForDisk(specVolID)
return path.Join(host.GetVolumeDevicePluginDir(csiPluginName), sanitizedSpecVolID, "data")
}
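For orientation, per the path comments above and assuming a default kubelet root of /var/lib/kubelet (the exact prefix depends on the VolumeHost implementation), the helpers yield paths like:
// getVolumeDevicePluginDir("test.vol.id", host)
//   => /var/lib/kubelet/plugins/kubernetes.io/csi/volumeDevices/test.vol.id/dev
// getVolumeDeviceDataDir("test.vol.id", host)
//   => /var/lib/kubelet/plugins/kubernetes.io/csi/volumeDevices/test.vol.id/data
//
// EscapeQualifiedNameForDisk replaces "/" so a qualified name such as
// "example.com/vol" is stored as "example.com~vol".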

View File

@ -7,7 +7,6 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/container-storage-interface/spec/lib/go/csi/v0:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
],
)

View File

@ -24,7 +24,6 @@ import (
"google.golang.org/grpc"
csipb "github.com/container-storage-interface/spec/lib/go/csi/v0"
grpctx "golang.org/x/net/context"
)
// IdentityClient is a CSI identity client used for testing
@ -94,7 +93,7 @@ func (f *NodeClient) AddNodeStagedVolume(volID, deviceMountPath string) {
}
// NodePublishVolume implements CSI NodePublishVolume
func (f *NodeClient) NodePublishVolume(ctx grpctx.Context, req *csipb.NodePublishVolumeRequest, opts ...grpc.CallOption) (*csipb.NodePublishVolumeResponse, error) {
func (f *NodeClient) NodePublishVolume(ctx context.Context, req *csipb.NodePublishVolumeRequest, opts ...grpc.CallOption) (*csipb.NodePublishVolumeResponse, error) {
if f.nextErr != nil {
return nil, f.nextErr
@ -106,7 +105,7 @@ func (f *NodeClient) NodePublishVolume(ctx grpctx.Context, req *csipb.NodePublis
if req.GetTargetPath() == "" {
return nil, errors.New("missing target path")
}
fsTypes := "ext4|xfs|zfs"
fsTypes := "block|ext4|xfs|zfs"
fsType := req.GetVolumeCapability().GetMount().GetFsType()
if !strings.Contains(fsTypes, fsType) {
return nil, errors.New("invalid fstype")
@ -145,7 +144,7 @@ func (f *NodeClient) NodeStageVolume(ctx context.Context, req *csipb.NodeStageVo
}
fsType := ""
fsTypes := "ext4|xfs|zfs"
fsTypes := "block|ext4|xfs|zfs"
mounted := req.GetVolumeCapability().GetMount()
if mounted != nil {
fsType = mounted.GetFsType()

View File

@ -0,0 +1,30 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["labelmanager.go"],
importpath = "k8s.io/kubernetes/pkg/volume/csi/labelmanager",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/util/retry:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -0,0 +1,251 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package labelmanager includes internal functions used to add or delete labels on
// Kubernetes nodes for registered CSI drivers
package labelmanager // import "k8s.io/kubernetes/pkg/volume/csi/labelmanager"
import (
"encoding/json"
"fmt"
"github.com/golang/glog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/util/retry"
)
const (
// Name of the node annotation that contains a JSON map of driver names to the
// node IDs reported by each driver
annotationKey = "csi.volume.kubernetes.io/nodeid"
csiPluginName = "kubernetes.io/csi"
)
// labelManagerStruct holds the node name and the Kubernetes client used to
// manage the node's annotations for registered CSI drivers
type labelManagerStruct struct {
nodeName types.NodeName
k8s kubernetes.Interface
}
// Interface implements an interface for managing labels of a node
type Interface interface {
AddLabels(driverName string) error
}
// NewLabelManager initializes labelManagerStruct and returns available interfaces
func NewLabelManager(nodeName types.NodeName, kubeClient kubernetes.Interface) Interface {
return labelManagerStruct{
nodeName: nodeName,
k8s: kubeClient,
}
}
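A hypothetical call site, assuming a kubernetes.Interface client named kubeClient is in scope (driver and node names are illustrative):
lm := NewLabelManager(types.NodeName("node-1"), kubeClient)
if err := lm.AddLabels("csi-hostpath"); err != nil {
	glog.Errorf("failed to annotate node: %v", err)
}
// On success the node carries an annotation along the lines of:
//   csi.volume.kubernetes.io/nodeid: {"csi-hostpath":"node-1"}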
// AddLabels updates the node's CSI annotation to record the newly registered
// driver.
func (lm labelManagerStruct) AddLabels(driverName string) error {
err := verifyAndAddNodeId(string(lm.nodeName), lm.k8s.CoreV1().Nodes(), driverName, string(lm.nodeName))
if err != nil {
return fmt.Errorf("failed to update node %s's annotation with error: %+v", lm.nodeName, err)
}
return nil
}
// Clones the given map and returns a new map with the given key and value added.
// Returns the given map unchanged if annotationKey is empty.
func cloneAndAddAnnotation(
annotations map[string]string,
annotationKey,
annotationValue string) map[string]string {
if annotationKey == "" {
// Don't need to add an annotation.
return annotations
}
// Clone.
newAnnotations := map[string]string{}
for key, value := range annotations {
newAnnotations[key] = value
}
newAnnotations[annotationKey] = annotationValue
return newAnnotations
}
func verifyAndAddNodeId(
k8sNodeName string,
k8sNodesClient corev1.NodeInterface,
csiDriverName string,
csiDriverNodeId string) error {
// Add or update annotation on Node object
retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
// Retrieve the latest version of Node before attempting update, so that
// existing changes are not overwritten. RetryOnConflict uses
// exponential backoff to avoid exhausting the apiserver.
result, getErr := k8sNodesClient.Get(k8sNodeName, metav1.GetOptions{})
if getErr != nil {
glog.Errorf("Failed to get latest version of Node: %v", getErr)
return getErr // do not wrap error
}
var previousAnnotationValue string
if result.ObjectMeta.Annotations != nil {
previousAnnotationValue =
result.ObjectMeta.Annotations[annotationKey]
glog.V(3).Infof(
"previousAnnotationValue=%q", previousAnnotationValue)
}
existingDriverMap := map[string]string{}
if previousAnnotationValue != "" {
// Parse previousAnnotationValue as JSON
if err := json.Unmarshal([]byte(previousAnnotationValue), &existingDriverMap); err != nil {
return fmt.Errorf(
"failed to parse node's %q annotation value (%q) err=%v",
annotationKey,
previousAnnotationValue,
err)
}
}
if val, ok := existingDriverMap[csiDriverName]; ok {
if val == csiDriverNodeId {
// Value already exists in node annotation, nothing more to do
glog.V(1).Infof(
"The key value {%q: %q} alredy eixst in node %q annotation, no need to update: %v",
csiDriverName,
csiDriverNodeId,
annotationKey,
previousAnnotationValue)
return nil
}
}
// Add/update annotation value
existingDriverMap[csiDriverName] = csiDriverNodeId
jsonObj, err := json.Marshal(existingDriverMap)
if err != nil {
return fmt.Errorf(
"failed while trying to add key value {%q: %q} to node %q annotation. Existing value: %v, err: %v",
csiDriverName,
csiDriverNodeId,
annotationKey,
previousAnnotationValue,
err)
}
result.ObjectMeta.Annotations = cloneAndAddAnnotation(
result.ObjectMeta.Annotations,
annotationKey,
string(jsonObj))
_, updateErr := k8sNodesClient.Update(result)
if updateErr == nil {
glog.V(1).Infof(
"Updated node %q successfully for CSI driver %q and CSI node name %q",
k8sNodeName,
csiDriverName,
csiDriverNodeId)
}
return updateErr // do not wrap error
})
if retryErr != nil {
return fmt.Errorf("node update failed: %v", retryErr)
}
return nil
}
// Fetches Kubernetes node API object corresponding to k8sNodeName.
// If the csiDriverName is present in the node annotation, it is removed.
func verifyAndDeleteNodeId(
k8sNodeName string,
k8sNodesClient corev1.NodeInterface,
csiDriverName string) error {
retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
// Retrieve the latest version of Node before attempting update, so that
// existing changes are not overwritten. RetryOnConflict uses
// exponential backoff to avoid exhausting the apiserver.
result, getErr := k8sNodesClient.Get(k8sNodeName, metav1.GetOptions{})
if getErr != nil {
glog.Errorf("failed to get latest version of Node: %v", getErr)
return getErr // do not wrap error
}
var previousAnnotationValue string
if result.ObjectMeta.Annotations != nil {
previousAnnotationValue =
result.ObjectMeta.Annotations[annotationKey]
glog.V(3).Infof(
"previousAnnotationValue=%q", previousAnnotationValue)
}
existingDriverMap := map[string]string{}
if previousAnnotationValue == "" {
// Annotation is empty; nothing to clean up
glog.V(1).Infof(
"The key %q does not exist in node %q annotation, no need to cleanup.",
csiDriverName,
annotationKey)
return nil
}
// Parse previousAnnotationValue as JSON
if err := json.Unmarshal([]byte(previousAnnotationValue), &existingDriverMap); err != nil {
return fmt.Errorf(
"failed to parse node's %q annotation value (%q) err=%v",
annotationKey,
previousAnnotationValue,
err)
}
if _, ok := existingDriverMap[csiDriverName]; !ok {
// Driver is not present in the annotation; nothing to clean up
glog.V(1).Infof(
"The key %q does not exist in node %q annotation, no need to cleanup: %v",
csiDriverName,
annotationKey,
previousAnnotationValue)
return nil
}
// Remove the driver entry from the annotation value
delete(existingDriverMap, csiDriverName)
jsonObj, err := json.Marshal(existingDriverMap)
if err != nil {
return fmt.Errorf(
"failed while trying to remove key %q from node %q annotation. Existing data: %v, err: %v",
csiDriverName,
annotationKey,
previousAnnotationValue,
err)
}
result.ObjectMeta.Annotations = cloneAndAddAnnotation(
result.ObjectMeta.Annotations,
annotationKey,
string(jsonObj))
_, updateErr := k8sNodesClient.Update(result)
if updateErr == nil {
glog.V(1).Infof(
"Updated node %q annotation to remove CSI driver %q.",
k8sNodeName,
csiDriverName)
}
return updateErr // do not wrap error
})
if retryErr != nil {
return fmt.Errorf("node update failed: %v", retryErr)
}
return nil
}