Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-13 02:33:34 +00:00)
vendor update for CSI 0.3.0
vendor/k8s.io/kubernetes/pkg/volume/BUILD (generated, vendored): 2 lines changed

@@ -56,6 +56,7 @@ go_library(
"//pkg/volume/util/fs:go_default_library",
"//pkg/volume/util/recyclerclient:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/authentication/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",

@@ -63,6 +64,7 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
],
)
vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/BUILD (generated, vendored): 2 lines changed

@@ -19,6 +19,7 @@ go_library(
deps = [
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/aws:go_default_library",
"//pkg/features:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",

@@ -29,6 +30,7 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
],
)
vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/attacher.go (generated, vendored): 4 lines changed

@@ -59,7 +59,7 @@ func (plugin *awsElasticBlockStorePlugin) GetDeviceMountRefs(deviceMountPath str
}
func (attacher *awsElasticBlockStoreAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
volumeSource, readOnly, err := getVolumeSource(spec)
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return "", err
}

@@ -68,7 +68,7 @@ func (attacher *awsElasticBlockStoreAttacher) Attach(spec *volume.Spec, nodeName
// awsCloud.AttachDisk checks if disk is already attached to node and
// succeeds in that case, so no need to do that separately.
devicePath, err := attacher.awsVolumes.AttachDisk(volumeID, nodeName, readOnly)
devicePath, err := attacher.awsVolumes.AttachDisk(volumeID, nodeName)
if err != nil {
glog.Errorf("Error attaching volume %q to node %q: %+v", volumeID, nodeName, err)
return "", err
vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/attacher_test.go (generated, vendored): 17 lines changed

@@ -76,15 +76,14 @@ type testcase struct {
func TestAttachDetach(t *testing.T) {
diskName := aws.KubernetesVolumeID("disk")
nodeName := types.NodeName("instance")
readOnly := false
spec := createVolSpec(diskName, readOnly)
spec := createVolSpec(diskName, false)
attachError := errors.New("Fake attach error")
detachError := errors.New("Fake detach error")
tests := []testcase{
// Successful Attach call
{
name: "Attach_Positive",
attach: attachCall{diskName, nodeName, readOnly, "/dev/sda", nil},
attach: attachCall{diskName, nodeName, "/dev/sda", nil},
test: func(testcase *testcase) (string, error) {
attacher := newAttacher(testcase)
return attacher.Attach(spec, nodeName)

@@ -95,7 +94,7 @@ func TestAttachDetach(t *testing.T) {
// Attach call fails
{
name: "Attach_Negative",
attach: attachCall{diskName, nodeName, readOnly, "", attachError},
attach: attachCall{diskName, nodeName, "", attachError},
test: func(testcase *testcase) (string, error) {
attacher := newAttacher(testcase)
return attacher.Attach(spec, nodeName)

@@ -195,7 +194,6 @@ func createPVSpec(name aws.KubernetesVolumeID, readOnly bool) *volume.Spec {
type attachCall struct {
diskName aws.KubernetesVolumeID
nodeName types.NodeName
readOnly bool
retDeviceName string
ret error
}

@@ -214,7 +212,7 @@ type diskIsAttachedCall struct {
ret error
}
func (testcase *testcase) AttachDisk(diskName aws.KubernetesVolumeID, nodeName types.NodeName, readOnly bool) (string, error) {
func (testcase *testcase) AttachDisk(diskName aws.KubernetesVolumeID, nodeName types.NodeName) (string, error) {
expected := &testcase.attach
if expected.diskName == "" && expected.nodeName == "" {

@@ -234,12 +232,7 @@ func (testcase *testcase) AttachDisk(diskName aws.KubernetesVolumeID, nodeName t
return "", errors.New("Unexpected AttachDisk call: wrong nodeName")
}
if expected.readOnly != readOnly {
testcase.t.Errorf("Unexpected AttachDisk call: expected readOnly %v, got %v", expected.readOnly, readOnly)
return "", errors.New("Unexpected AttachDisk call: wrong readOnly")
}
glog.V(4).Infof("AttachDisk call: %s, %s, %v, returning %q, %v", diskName, nodeName, readOnly, expected.retDeviceName, expected.ret)
glog.V(4).Infof("AttachDisk call: %s, %s, returning %q, %v", diskName, nodeName, expected.retDeviceName, expected.ret)
return expected.retDeviceName, expected.ret
}
vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_ebs.go (generated, vendored): 57 lines changed

@@ -17,10 +17,11 @@ limitations under the License.
package aws_ebs
import (
"context"
"fmt"
"os"
"path"
"path/filepath"
"regexp"
"strconv"
"strings"

@@ -29,7 +30,9 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/util/mount"
kstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"

@@ -94,6 +97,47 @@ func (plugin *awsElasticBlockStorePlugin) SupportsBulkVolumeVerification() bool
return true
}
func (plugin *awsElasticBlockStorePlugin) GetVolumeLimits() (map[string]int64, error) {
volumeLimits := map[string]int64{
util.EBSVolumeLimitKey: 39,
}
cloud := plugin.host.GetCloudProvider()
// if we can't fetch cloudprovider we return an error
// hoping external CCM or admin can set it. Returning
// default values from here will mean, no one can
// override them.
if cloud == nil {
return nil, fmt.Errorf("No cloudprovider present")
}
if cloud.ProviderName() != aws.ProviderName {
return nil, fmt.Errorf("Expected aws cloud, found %s", cloud.ProviderName())
}
instances, ok := cloud.Instances()
if !ok {
glog.V(3).Infof("Failed to get instances from cloud provider")
return volumeLimits, nil
}
instanceType, err := instances.InstanceType(context.TODO(), plugin.host.GetNodeName())
if err != nil {
glog.Errorf("Failed to get instance type from AWS cloud provider")
return volumeLimits, nil
}
if ok, _ := regexp.MatchString("^[cm]5.*", instanceType); ok {
volumeLimits[util.EBSVolumeLimitKey] = 25
}
return volumeLimits, nil
}
func (plugin *awsElasticBlockStorePlugin) VolumeLimitKey(spec *volume.Spec) string {
return util.EBSVolumeLimitKey
}
func (plugin *awsElasticBlockStorePlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
return []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,

@@ -266,6 +310,7 @@ func (plugin *awsElasticBlockStorePlugin) ExpandVolumeDevice(
}
var _ volume.ExpandableVolumePlugin = &awsElasticBlockStorePlugin{}
var _ volume.VolumePluginWithAttachLimits = &awsElasticBlockStorePlugin{}
// Abstract interface to PD operations.
type ebsManager interface {

@@ -387,12 +432,12 @@ func makeGlobalPDPath(host volume.VolumeHost, volumeID aws.KubernetesVolumeID) s
// Clean up the URI to be more fs-friendly
name := string(volumeID)
name = strings.Replace(name, "://", "/", -1)
return path.Join(host.GetPluginDir(awsElasticBlockStorePluginName), mount.MountsInGlobalPDPath, name)
return filepath.Join(host.GetPluginDir(awsElasticBlockStorePluginName), mount.MountsInGlobalPDPath, name)
}
// Reverses the mapping done in makeGlobalPDPath
func getVolumeIDFromGlobalMount(host volume.VolumeHost, globalPath string) (string, error) {
basePath := path.Join(host.GetPluginDir(awsElasticBlockStorePluginName), mount.MountsInGlobalPDPath)
basePath := filepath.Join(host.GetPluginDir(awsElasticBlockStorePluginName), mount.MountsInGlobalPDPath)
rel, err := filepath.Rel(basePath, globalPath)
if err != nil {
glog.Errorf("Failed to get volume id from global mount %s - %v", globalPath, err)

@@ -454,7 +499,7 @@ type awsElasticBlockStoreProvisioner struct {
var _ volume.Provisioner = &awsElasticBlockStoreProvisioner{}
func (c *awsElasticBlockStoreProvisioner) Provision() (*v1.PersistentVolume, error) {
func (c *awsElasticBlockStoreProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
if !util.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
}

@@ -508,5 +553,9 @@ func (c *awsElasticBlockStoreProvisioner) Provision() (*v1.PersistentVolume, err
}
}
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
pv.Spec.VolumeMode = c.options.PVC.Spec.VolumeMode
}
return pv, nil
}
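Review note: the new GetVolumeLimits above defaults the EBS attach limit to 39 and drops it to 25 when the reported instance type matches `^[cm]5.*`. A minimal, stdlib-only sketch of that check (the instance-type strings below are illustrative, not taken from the plugin):

```go
package main

import (
	"fmt"
	"regexp"
)

// ebsLimitForInstanceType mirrors the pattern used in GetVolumeLimits:
// default to 39 attachable EBS volumes, 25 for c5/m5 instance families.
func ebsLimitForInstanceType(instanceType string) int64 {
	if ok, _ := regexp.MatchString("^[cm]5.*", instanceType); ok {
		return 25
	}
	return 39
}

func main() {
	for _, it := range []string{"m4.large", "c5.xlarge", "m5.2xlarge"} {
		fmt.Printf("%s -> %d volumes\n", it, ebsLimitForInstanceType(it))
	}
}
```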
vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_ebs_block.go (generated, vendored): 8 lines changed

@@ -18,7 +18,6 @@ package aws_ebs
import (
"fmt"
"path"
"path/filepath"
"strconv"
"strings"

@@ -30,6 +29,7 @@ import (
"k8s.io/kubernetes/pkg/util/mount"
kstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
)

@@ -156,6 +156,10 @@ func (b *awsElasticBlockStoreMapper) SetUpDevice() (string, error) {
return "", nil
}
func (b *awsElasticBlockStoreMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, volumeMapName string, podUID types.UID) error {
return util.MapBlockVolume(devicePath, globalMapPath, volumeMapPath, volumeMapName, podUID)
}
// GetGlobalMapPath returns global map path and error
// path: plugins/kubernetes.io/{PluginName}/volumeDevices/volumeID
// plugins/kubernetes.io/aws-ebs/volumeDevices/vol-XXXXXX

@@ -164,7 +168,7 @@ func (ebs *awsElasticBlockStore) GetGlobalMapPath(spec *volume.Spec) (string, er
if err != nil {
return "", err
}
return path.Join(ebs.plugin.host.GetVolumeDevicePluginDir(awsElasticBlockStorePluginName), string(volumeSource.VolumeID)), nil
return filepath.Join(ebs.plugin.host.GetVolumeDevicePluginDir(awsElasticBlockStorePluginName), string(volumeSource.VolumeID)), nil
}
// GetPodDeviceMapPath returns pod device map path and volume name
vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_ebs_block_test.go (generated, vendored): 8 lines changed

@@ -18,7 +18,7 @@ package aws_ebs
import (
"os"
"path"
"path/filepath"
"testing"
"k8s.io/api/core/v1"

@@ -47,7 +47,7 @@ func TestGetVolumeSpecFromGlobalMapPath(t *testing.T) {
//deferred clean up
defer os.RemoveAll(tmpVDir)
expectedGlobalPath := path.Join(tmpVDir, testGlobalPath)
expectedGlobalPath := filepath.Join(tmpVDir, testGlobalPath)
//Bad Path
badspec, err := getVolumeSpecFromGlobalMapPath("")

@@ -102,8 +102,8 @@ func TestGetPodAndPluginMapPaths(t *testing.T) {
//deferred clean up
defer os.RemoveAll(tmpVDir)
expectedGlobalPath := path.Join(tmpVDir, testGlobalPath)
expectedPodPath := path.Join(tmpVDir, testPodPath)
expectedGlobalPath := filepath.Join(tmpVDir, testGlobalPath)
expectedPodPath := filepath.Join(tmpVDir, testPodPath)
spec := getTestVolume(false, true /*isBlock*/)
plugMgr := volume.VolumePluginMgr{}
vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_ebs_test.go (generated, vendored): 6 lines changed

@@ -19,7 +19,7 @@ package aws_ebs
import (
"fmt"
"os"
"path"
"path/filepath"
"testing"
"k8s.io/api/core/v1"

@@ -129,7 +129,7 @@ func TestPlugin(t *testing.T) {
t.Errorf("Got a nil Mounter")
}
volPath := path.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~aws-ebs/vol1")
volPath := filepath.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~aws-ebs/vol1")
path := mounter.GetPath()
if path != volPath {
t.Errorf("Got unexpected path: %s", path)

@@ -173,7 +173,7 @@ func TestPlugin(t *testing.T) {
if err != nil {
t.Errorf("Error creating new provisioner:%v", err)
}
persistentSpec, err := provisioner.Provision()
persistentSpec, err := provisioner.Provision(nil, nil)
if err != nil {
t.Errorf("Provision() failed: %v", err)
}
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/BUILD (generated, vendored): 11 lines changed

@@ -12,6 +12,7 @@ go_library(
"attacher.go",
"azure_common.go",
"azure_dd.go",
"azure_dd_block.go",
"azure_mounter.go",
"azure_provision.go",
] + select({

@@ -55,13 +56,15 @@ go_library(
"//pkg/apis/core:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/azure:go_default_library",
"//pkg/features:go_default_library",
"//pkg/util/keymutex:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/arm/compute:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/arm/storage:go_default_library",
"//pkg/volume/util/volumepathhandler:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",

@@ -69,6 +72,7 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
],
)

@@ -89,6 +93,7 @@ go_test(
name = "go_default_test",
srcs = [
"azure_common_test.go",
"azure_dd_block_test.go",
"azure_dd_test.go",
],
embed = [":go_default_library"],

@@ -97,6 +102,8 @@ go_test(
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
],
)
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/attacher.go (generated, vendored): 43 lines changed

@@ -20,14 +20,14 @@ import (
"context"
"fmt"
"os"
"path"
"path/filepath"
"runtime"
"strconv"
"time"
"github.com/Azure/azure-sdk-for-go/arm/compute"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"

@@ -57,7 +57,7 @@ var getLunMutex = keymutex.NewKeyMutex()
// Attach attaches a volume.Spec to an Azure VM referenced by NodeName, returning the disk's LUN
func (a *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
volumeSource, err := getVolumeSource(spec)
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
glog.Warningf("failed to get azure disk spec (%v)", err)
return "", err

@@ -114,7 +114,7 @@ func (a *azureDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName ty
volumeSpecMap := make(map[string]*volume.Spec)
volumeIDList := []string{}
for _, spec := range specs {
volumeSource, err := getVolumeSource(spec)
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
glog.Errorf("azureDisk - Error getting volume (%q) source : %v", spec.Name(), err)
continue

@@ -150,36 +150,41 @@ func (a *azureDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName ty
func (a *azureDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) {
var err error
lun, err := strconv.Atoi(devicePath)
if err != nil {
return "", fmt.Errorf("azureDisk - Wait for attach expect device path as a lun number, instead got: %s (%v)", devicePath, err)
}
volumeSource, err := getVolumeSource(spec)
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return "", err
}
diskController, err := getDiskController(a.plugin.host)
if err != nil {
return "", err
}
nodeName := types.NodeName(a.plugin.host.GetHostName())
diskName := volumeSource.DiskName
glog.V(5).Infof("azureDisk - WaitForAttach: begin to GetDiskLun by diskName(%s), DataDiskURI(%s), nodeName(%s), devicePath(%s)",
diskName, volumeSource.DataDiskURI, nodeName, devicePath)
lun, err := diskController.GetDiskLun(diskName, volumeSource.DataDiskURI, nodeName)
if err != nil {
return "", err
}
glog.V(5).Infof("azureDisk - WaitForAttach: GetDiskLun succeeded, got lun(%v)", lun)
exec := a.plugin.host.GetExec(a.plugin.GetPluginName())
io := &osIOHandler{}
scsiHostRescan(io, exec)
diskName := volumeSource.DiskName
nodeName := a.plugin.host.GetHostName()
newDevicePath := ""
err = wait.Poll(1*time.Second, timeout, func() (bool, error) {
if newDevicePath, err = findDiskByLun(lun, io, exec); err != nil {
if newDevicePath, err = findDiskByLun(int(lun), io, exec); err != nil {
return false, fmt.Errorf("azureDisk - WaitForAttach ticker failed node (%s) disk (%s) lun(%v) err(%s)", nodeName, diskName, lun, err)
}
// did we find it?
if newDevicePath != "" {
// the current sequence k8s uses for unformated disk (check-disk, mount, fail, mkfs.extX) hangs on
// Azure Managed disk scsi interface. this is a hack and will be replaced once we identify and solve
// the root case on Azure.
formatIfNotFormatted(newDevicePath, *volumeSource.FSType, exec)
return true, nil
}

@@ -194,13 +199,13 @@ func (a *azureDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string,
// this is generalized for both managed and blob disks
// we also prefix the hash with m/b based on disk kind
func (a *azureDiskAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) {
volumeSource, err := getVolumeSource(spec)
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return "", err
}
if volumeSource.Kind == nil { // this spec was constructed from info on the node
pdPath := path.Join(a.plugin.host.GetPluginDir(azureDataDiskPluginName), mount.MountsInGlobalPDPath, volumeSource.DataDiskURI)
pdPath := filepath.Join(a.plugin.host.GetPluginDir(azureDataDiskPluginName), mount.MountsInGlobalPDPath, volumeSource.DataDiskURI)
return pdPath, nil
}

@@ -241,7 +246,7 @@ func (attacher *azureDiskAttacher) MountDevice(spec *volume.Spec, devicePath str
}
}
volumeSource, err := getVolumeSource(spec)
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return err
}
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common.go (generated, vendored): 28 lines changed

@@ -20,10 +20,11 @@ import (
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
libstrings "strings"
storage "github.com/Azure/azure-sdk-for-go/arm/storage"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"

@@ -35,7 +36,6 @@ import (
)
const (
defaultFSType = "ext4"
defaultStorageAccountType = storage.StandardLRS
defaultAzureDiskKind = v1.AzureSharedBlobDisk
defaultAzureDataDiskCachingMode = v1.AzureDataDiskCachingNone

@@ -46,6 +46,7 @@ type dataDisk struct {
volumeName string
diskName string
podUID types.UID
plugin *azureDataDiskPlugin
}
var (

@@ -77,12 +78,12 @@ func makeGlobalPDPath(host volume.VolumeHost, diskUri string, isManaged bool) (s
}
// "{m for managed b for blob}{hashed diskUri or DiskId depending on disk kind }"
diskName := fmt.Sprintf(uniqueDiskNameTemplate, prefix, hashedDiskUri)
pdPath := path.Join(host.GetPluginDir(azureDataDiskPluginName), mount.MountsInGlobalPDPath, diskName)
pdPath := filepath.Join(host.GetPluginDir(azureDataDiskPluginName), mount.MountsInGlobalPDPath, diskName)
return pdPath, nil
}
func makeDataDisk(volumeName string, podUID types.UID, diskName string, host volume.VolumeHost) *dataDisk {
func makeDataDisk(volumeName string, podUID types.UID, diskName string, host volume.VolumeHost, plugin *azureDataDiskPlugin) *dataDisk {
var metricProvider volume.MetricsProvider
if podUID != "" {
metricProvider = volume.NewMetricsStatFS(getPath(podUID, volumeName, host))

@@ -93,27 +94,20 @@ func makeDataDisk(volumeName string, podUID types.UID, diskName string, host vol
volumeName: volumeName,
diskName: diskName,
podUID: podUID,
plugin: plugin,
}
}
func getVolumeSource(spec *volume.Spec) (*v1.AzureDiskVolumeSource, error) {
func getVolumeSource(spec *volume.Spec) (volumeSource *v1.AzureDiskVolumeSource, readOnly bool, err error) {
if spec.Volume != nil && spec.Volume.AzureDisk != nil {
return spec.Volume.AzureDisk, nil
return spec.Volume.AzureDisk, spec.Volume.AzureDisk.ReadOnly != nil && *spec.Volume.AzureDisk.ReadOnly, nil
}
if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AzureDisk != nil {
return spec.PersistentVolume.Spec.AzureDisk, nil
return spec.PersistentVolume.Spec.AzureDisk, spec.ReadOnly, nil
}
return nil, fmt.Errorf("azureDisk - Spec does not reference an Azure disk volume type")
}
func normalizeFsType(fsType string) string {
if fsType == "" {
return defaultFSType
}
return fsType
return nil, false, fmt.Errorf("azureDisk - Spec does not reference an Azure disk volume type")
}
func normalizeKind(kind string) (v1.AzureDataDiskKind, error) {
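Review note: getVolumeSource now returns (source, readOnly, error) instead of (source, error), which is why callers throughout this update discard the second value with `_`. A stand-in sketch of the new call shape (local types only, not the real volume.Spec or v1.AzureDiskVolumeSource, and not the plugin's exact branching):

```go
package main

import (
	"errors"
	"fmt"
)

// Stand-in for v1.AzureDiskVolumeSource.
type azureDiskSource struct {
	DiskName string
	ReadOnly *bool
}

// Stand-in for volume.Spec.
type spec struct {
	AzureDisk *azureDiskSource
}

// getVolumeSource mirrors the new three-value signature.
func getVolumeSource(s *spec) (*azureDiskSource, bool, error) {
	if s.AzureDisk == nil {
		return nil, false, errors.New("spec does not reference an Azure disk volume")
	}
	readOnly := s.AzureDisk.ReadOnly != nil && *s.AzureDisk.ReadOnly
	return s.AzureDisk, readOnly, nil
}

func main() {
	ro := true
	// Callers that only need the source discard the flag, as in the diff above.
	volumeSource, _, err := getVolumeSource(&spec{AzureDisk: &azureDiskSource{DiskName: "disk1", ReadOnly: &ro}})
	if err != nil {
		panic(err)
	}
	fmt.Println(volumeSource.DiskName)
}
```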
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common_linux.go (generated, vendored): 50 lines changed

@@ -20,7 +20,7 @@ package azure_dd
import (
"fmt"
"path"
"path/filepath"
"strconv"
libstrings "strings"

@@ -124,7 +124,7 @@ func findDiskByLunWithConstraint(lun int, io ioHandler, azureDisks []string) (st
if lun == l {
// find the matching LUN
// read vendor and model to ensure it is a VHD disk
vendorPath := path.Join(sys_path, name, "vendor")
vendorPath := filepath.Join(sys_path, name, "vendor")
vendorBytes, err := io.ReadFile(vendorPath)
if err != nil {
glog.Errorf("failed to read device vendor, err: %v", err)

@@ -136,7 +136,7 @@ func findDiskByLunWithConstraint(lun int, io ioHandler, azureDisks []string) (st
continue
}
modelPath := path.Join(sys_path, name, "model")
modelPath := filepath.Join(sys_path, name, "model")
modelBytes, err := io.ReadFile(modelPath)
if err != nil {
glog.Errorf("failed to read device model, err: %v", err)

@@ -149,7 +149,7 @@ func findDiskByLunWithConstraint(lun int, io ioHandler, azureDisks []string) (st
}
// find a disk, validate name
dir := path.Join(sys_path, name, "block")
dir := filepath.Join(sys_path, name, "block")
if dev, err := io.ReadDir(dir); err == nil {
found := false
devName := dev[0].Name()

@@ -178,45 +178,3 @@ func findDiskByLunWithConstraint(lun int, io ioHandler, azureDisks []string) (st
}
return "", err
}
func formatIfNotFormatted(disk string, fstype string, exec mount.Exec) {
notFormatted, err := diskLooksUnformatted(disk, exec)
if err == nil && notFormatted {
args := []string{disk}
// Disk is unformatted so format it.
// Use 'ext4' as the default
if len(fstype) == 0 {
fstype = "ext4"
}
if fstype == "ext4" || fstype == "ext3" {
args = []string{"-E", "lazy_itable_init=0,lazy_journal_init=0", "-F", disk}
}
glog.Infof("azureDisk - Disk %q appears to be unformatted, attempting to format as type: %q with options: %v", disk, fstype, args)
_, err := exec.Run("mkfs."+fstype, args...)
if err == nil {
// the disk has been formatted successfully try to mount it again.
glog.Infof("azureDisk - Disk successfully formatted with 'mkfs.%s %v'", fstype, args)
} else {
glog.Warningf("azureDisk - Error formatting volume with 'mkfs.%s %v': %v", fstype, args, err)
}
} else {
if err != nil {
glog.Warningf("azureDisk - Failed to check if the disk %s formatted with error %s, will attach anyway", disk, err)
} else {
glog.Infof("azureDisk - Disk %s already formatted, will not format", disk)
}
}
}
func diskLooksUnformatted(disk string, exec mount.Exec) (bool, error) {
args := []string{"-nd", "-o", "FSTYPE", disk}
glog.V(4).Infof("Attempting to determine if disk %q is formatted using lsblk with args: (%v)", disk, args)
dataOut, err := exec.Run("lsblk", args...)
if err != nil {
glog.Errorf("Could not determine if disk %q is formatted (%v)", disk, err)
return false, err
}
output := libstrings.TrimSpace(string(dataOut))
return output == "", nil
}
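Review note: the recurring path.Join to filepath.Join switch in this update (here and in the aws_ebs and azure_file files) is about separators: package path always joins with '/', while path/filepath uses the host OS separator, which matters for on-disk plugin paths on Windows. A quick stdlib illustration:

```go
package main

import (
	"fmt"
	"path"
	"path/filepath"
)

func main() {
	// path.Join always uses forward slashes, regardless of OS.
	fmt.Println(path.Join("plugins", "kubernetes.io", "azure-disk", "mounts"))

	// filepath.Join uses the platform separator ('\' on Windows, '/' elsewhere),
	// so it is the right choice for paths handed to the filesystem.
	fmt.Println(filepath.Join("plugins", "kubernetes.io", "azure-disk", "mounts"))
}
```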
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common_unsupported.go (generated, vendored): 3 lines changed

@@ -26,6 +26,3 @@ func scsiHostRescan(io ioHandler, exec mount.Exec) {
func findDiskByLun(lun int, io ioHandler, exec mount.Exec) (string, error) {
return "", nil
}
func formatIfNotFormatted(disk string, fstype string, exec mount.Exec) {
}
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common_windows.go (generated, vendored): 6 lines changed

@@ -103,8 +103,12 @@ func formatIfNotFormatted(disk string, fstype string, exec mount.Exec) {
return
}
if len(fstype) == 0 {
// Use 'NTFS' as the default
fstype = "NTFS"
}
cmd := fmt.Sprintf("Get-Disk -Number %s | Where partitionstyle -eq 'raw' | Initialize-Disk -PartitionStyle MBR -PassThru", disk)
cmd += " | New-Partition -AssignDriveLetter -UseMaximumSize | Format-Volume -FileSystem NTFS -Confirm:$false"
cmd += fmt.Sprintf(" | New-Partition -AssignDriveLetter -UseMaximumSize | Format-Volume -FileSystem %s -Confirm:$false", fstype)
output, err := exec.Run("powershell", "/c", cmd)
if err != nil {
glog.Errorf("azureDisk Mount: Get-Disk failed, error: %v, output: %q", err, string(output))
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd.go (generated, vendored): 74 lines changed

@@ -17,13 +17,18 @@ limitations under the License.
package azure_dd
import (
"github.com/Azure/azure-sdk-for-go/arm/compute"
storage "github.com/Azure/azure-sdk-for-go/arm/storage"
"fmt"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
)
// interface exposed by the cloud provider implementing Disk functionality

@@ -31,7 +36,7 @@ type DiskController interface {
CreateBlobDisk(dataDiskName string, storageAccountType storage.SkuName, sizeGB int) (string, error)
DeleteBlobDisk(diskUri string) error
CreateManagedDisk(diskName string, storageAccountType storage.SkuName, sizeGB int, tags map[string]string) (string, error)
CreateManagedDisk(diskName string, storageAccountType storage.SkuName, resourceGroup string, sizeGB int, tags map[string]string) (string, error)
DeleteManagedDisk(diskURI string) error
// Attaches the disk to the host machine.

@@ -51,6 +56,9 @@ type DiskController interface {
CreateVolume(name, storageAccount, storageAccountType, location string, requestGB int) (string, string, int, error)
// Delete a VHD blob
DeleteVolume(diskURI string) error
// Expand the disk to new size
ResizeDisk(diskURI string, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error)
}
type azureDataDiskPlugin struct {

@@ -62,6 +70,8 @@ var _ volume.PersistentVolumePlugin = &azureDataDiskPlugin{}
var _ volume.DeletableVolumePlugin = &azureDataDiskPlugin{}
var _ volume.ProvisionableVolumePlugin = &azureDataDiskPlugin{}
var _ volume.AttachableVolumePlugin = &azureDataDiskPlugin{}
var _ volume.VolumePluginWithAttachLimits = &azureDataDiskPlugin{}
var _ volume.ExpandableVolumePlugin = &azureDataDiskPlugin{}
const (
azureDataDiskPluginName = "kubernetes.io/azure-disk"

@@ -81,7 +91,7 @@ func (plugin *azureDataDiskPlugin) GetPluginName() string {
}
func (plugin *azureDataDiskPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
volumeSource, err := getVolumeSource(spec)
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return "", err
}

@@ -106,6 +116,32 @@ func (plugin *azureDataDiskPlugin) SupportsBulkVolumeVerification() bool {
return false
}
func (plugin *azureDataDiskPlugin) GetVolumeLimits() (map[string]int64, error) {
volumeLimits := map[string]int64{
util.AzureVolumeLimitKey: 16,
}
cloud := plugin.host.GetCloudProvider()
// if we can't fetch cloudprovider we return an error
// hoping external CCM or admin can set it. Returning
// default values from here will mean, no one can
// override them.
if cloud == nil {
return nil, fmt.Errorf("No cloudprovider present")
}
if cloud.ProviderName() != azure.CloudProviderName {
return nil, fmt.Errorf("Expected Azure cloudprovider, got %s", cloud.ProviderName())
}
return volumeLimits, nil
}
func (plugin *azureDataDiskPlugin) VolumeLimitKey(spec *volume.Spec) string {
return util.AzureVolumeLimitKey
}
func (plugin *azureDataDiskPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
return []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,

@@ -140,12 +176,12 @@ func (plugin *azureDataDiskPlugin) NewDetacher() (volume.Detacher, error) {
}
func (plugin *azureDataDiskPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
volumeSource, err := getVolumeSource(spec)
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return nil, err
}
disk := makeDataDisk(spec.Name(), "", volumeSource.DiskName, plugin.host)
disk := makeDataDisk(spec.Name(), "", volumeSource.DiskName, plugin.host, plugin)
return &azureDiskDeleter{
spec: spec,

@@ -166,11 +202,11 @@ func (plugin *azureDataDiskPlugin) NewProvisioner(options volume.VolumeOptions)
}
func (plugin *azureDataDiskPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, options volume.VolumeOptions) (volume.Mounter, error) {
volumeSource, err := getVolumeSource(spec)
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return nil, err
}
disk := makeDataDisk(spec.Name(), pod.UID, volumeSource.DiskName, plugin.host)
disk := makeDataDisk(spec.Name(), pod.UID, volumeSource.DiskName, plugin.host, plugin)
return &azureDiskMounter{
plugin: plugin,

@@ -181,7 +217,7 @@ func (plugin *azureDataDiskPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, op
}
func (plugin *azureDataDiskPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
disk := makeDataDisk(volName, podUID, "", plugin.host)
disk := makeDataDisk(volName, podUID, "", plugin.host, plugin)
return &azureDiskUnmounter{
plugin: plugin,

@@ -189,6 +225,26 @@ func (plugin *azureDataDiskPlugin) NewUnmounter(volName string, podUID types.UID
}, nil
}
func (plugin *azureDataDiskPlugin) RequiresFSResize() bool {
return true
}
func (plugin *azureDataDiskPlugin) ExpandVolumeDevice(
spec *volume.Spec,
newSize resource.Quantity,
oldSize resource.Quantity) (resource.Quantity, error) {
if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.AzureDisk == nil {
return oldSize, fmt.Errorf("invalid PV spec")
}
diskController, err := getDiskController(plugin.host)
if err != nil {
return oldSize, err
}
return diskController.ResizeDisk(spec.PersistentVolume.Spec.AzureDisk.DataDiskURI, oldSize, newSize)
}
func (plugin *azureDataDiskPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
mounter := plugin.host.GetMounter(plugin.GetPluginName())
pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName())
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd_block.go (generated, vendored, new file): 161 lines added

@@ -0,0 +1,161 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure_dd
import (
"fmt"
"path/filepath"
"github.com/golang/glog"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
kstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
)
var _ volume.VolumePlugin = &azureDataDiskPlugin{}
var _ volume.PersistentVolumePlugin = &azureDataDiskPlugin{}
var _ volume.BlockVolumePlugin = &azureDataDiskPlugin{}
var _ volume.DeletableVolumePlugin = &azureDataDiskPlugin{}
var _ volume.ProvisionableVolumePlugin = &azureDataDiskPlugin{}
func (plugin *azureDataDiskPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, mapPath string) (*volume.Spec, error) {
pluginDir := plugin.host.GetVolumeDevicePluginDir(azureDataDiskPluginName)
blkutil := volumepathhandler.NewBlockVolumePathHandler()
globalMapPathUUID, err := blkutil.FindGlobalMapPathUUIDFromPod(pluginDir, mapPath, podUID)
if err != nil {
return nil, err
}
glog.V(5).Infof("constructing block volume spec from globalMapPathUUID: %s", globalMapPathUUID)
globalMapPath := filepath.Dir(globalMapPathUUID)
if len(globalMapPath) <= 1 {
return nil, fmt.Errorf("failed to get volume plugin information from globalMapPathUUID: %v", globalMapPathUUID)
}
return getVolumeSpecFromGlobalMapPath(globalMapPath, volumeName)
}
func getVolumeSpecFromGlobalMapPath(globalMapPath, volumeName string) (*volume.Spec, error) {
// Get volume spec information from globalMapPath
// globalMapPath example:
// plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumeID}
// plugins/kubernetes.io/azure-disk/volumeDevices/vol-XXXXXX
diskName := filepath.Base(globalMapPath)
if len(diskName) <= 1 {
return nil, fmt.Errorf("failed to get diskName from global path=%s", globalMapPath)
}
glog.V(5).Infof("got diskName(%s) from globalMapPath: %s", globalMapPath, diskName)
block := v1.PersistentVolumeBlock
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: volumeName,
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
AzureDisk: &v1.AzureDiskVolumeSource{
DiskName: diskName,
},
},
VolumeMode: &block,
},
}
return volume.NewSpecFromPersistentVolume(pv, true), nil
}
// NewBlockVolumeMapper creates a new volume.BlockVolumeMapper from an API specification.
func (plugin *azureDataDiskPlugin) NewBlockVolumeMapper(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.BlockVolumeMapper, error) {
// If this is called via GenerateUnmapDeviceFunc(), pod is nil.
// Pass empty string as dummy uid since uid isn't used in the case.
var uid types.UID
if pod != nil {
uid = pod.UID
}
return plugin.newBlockVolumeMapperInternal(spec, uid, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *azureDataDiskPlugin) newBlockVolumeMapperInternal(spec *volume.Spec, podUID types.UID, mounter mount.Interface) (volume.BlockVolumeMapper, error) {
volumeSource, readOnly, err := getVolumeSource(spec)
if err != nil {
return nil, err
}
disk := makeDataDisk(spec.Name(), podUID, volumeSource.DiskName, plugin.host, plugin)
return &azureDataDiskMapper{
dataDisk: disk,
readOnly: readOnly,
}, nil
}
func (plugin *azureDataDiskPlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (volume.BlockVolumeUnmapper, error) {
return plugin.newUnmapperInternal(volName, podUID, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *azureDataDiskPlugin) newUnmapperInternal(volName string, podUID types.UID, mounter mount.Interface) (volume.BlockVolumeUnmapper, error) {
disk := makeDataDisk(volName, podUID, "", plugin.host, plugin)
return &azureDataDiskUnmapper{dataDisk: disk}, nil
}
func (c *azureDataDiskUnmapper) TearDownDevice(mapPath, devicePath string) error {
return nil
}
type azureDataDiskUnmapper struct {
*dataDisk
}
var _ volume.BlockVolumeUnmapper = &azureDataDiskUnmapper{}
type azureDataDiskMapper struct {
*dataDisk
readOnly bool
}
var _ volume.BlockVolumeMapper = &azureDataDiskMapper{}
func (b *azureDataDiskMapper) SetUpDevice() (string, error) {
return "", nil
}
func (b *azureDataDiskMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, volumeMapName string, podUID types.UID) error {
return util.MapBlockVolume(devicePath, globalMapPath, volumeMapPath, volumeMapName, podUID)
}
// GetGlobalMapPath returns global map path and error
// path: plugins/kubernetes.io/{PluginName}/volumeDevices/volumeID
// plugins/kubernetes.io/azure-disk/volumeDevices/vol-XXXXXX
func (disk *dataDisk) GetGlobalMapPath(spec *volume.Spec) (string, error) {
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return "", err
}
return filepath.Join(disk.plugin.host.GetVolumeDevicePluginDir(azureDataDiskPluginName), string(volumeSource.DiskName)), nil
}
// GetPodDeviceMapPath returns pod device map path and volume name
// path: pods/{podUid}/volumeDevices/kubernetes.io~azure
func (disk *dataDisk) GetPodDeviceMapPath() (string, string) {
name := azureDataDiskPluginName
return disk.plugin.host.GetPodVolumeDeviceDir(disk.podUID, kstrings.EscapeQualifiedNameForDisk(name)), disk.volumeName
}
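Review note: the new block-volume code above reconstructs a PV spec from the global map path alone; the disk name is simply the last path element. A stdlib-only sketch of that derivation (the example path is a placeholder modeled on the comment in the file, not output captured from a cluster):

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// e.g. plugins/kubernetes.io/azure-disk/volumeDevices/<diskName>/<podUID>
	globalMapPathUUID := "plugins/kubernetes.io/azure-disk/volumeDevices/disk1/poduid"

	globalMapPath := filepath.Dir(globalMapPathUUID) // parent dir, as ConstructBlockVolumeSpec does
	diskName := filepath.Base(globalMapPath)         // last element, as getVolumeSpecFromGlobalMapPath does

	fmt.Println(globalMapPath, diskName)
}
```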
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd_block_test.go (generated, vendored, new file): 145 lines added

@@ -0,0 +1,145 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure_dd
import (
"os"
"path/filepath"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
const (
testDiskName = "disk1"
testPVName = "pv1"
testGlobalPath = "plugins/kubernetes.io/azure-disk/volumeDevices/disk1"
testPodPath = "pods/poduid/volumeDevices/kubernetes.io~azure-disk"
)
func TestGetVolumeSpecFromGlobalMapPath(t *testing.T) {
// make our test path for fake GlobalMapPath
// /tmp symbolized our pluginDir
// /tmp/testGlobalPathXXXXX/plugins/kubernetes.io/azure-disk/volumeDevices/disk1
tmpVDir, err := utiltesting.MkTmpdir("azureDiskBlockTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
//deferred clean up
defer os.RemoveAll(tmpVDir)
expectedGlobalPath := filepath.Join(tmpVDir, testGlobalPath)
//Bad Path
badspec, err := getVolumeSpecFromGlobalMapPath("", "")
if badspec != nil || err == nil {
t.Errorf("Expected not to get spec from GlobalMapPath but did")
}
// Good Path
spec, err := getVolumeSpecFromGlobalMapPath(expectedGlobalPath, "")
if spec == nil || err != nil {
t.Fatalf("Failed to get spec from GlobalMapPath: %v", err)
}
if spec.PersistentVolume.Spec.AzureDisk.DiskName != testDiskName {
t.Errorf("Invalid pdName from GlobalMapPath spec: %s", spec.PersistentVolume.Spec.AzureDisk.DiskName)
}
block := v1.PersistentVolumeBlock
specMode := spec.PersistentVolume.Spec.VolumeMode
if &specMode == nil {
t.Errorf("Invalid volumeMode from GlobalMapPath spec: %v expected: %v", &specMode, block)
}
if *specMode != block {
t.Errorf("Invalid volumeMode from GlobalMapPath spec: %v expected: %v", *specMode, block)
}
}
func getTestVolume(readOnly bool, path string, isBlock bool) *volume.Spec {
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: testPVName,
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
AzureDisk: &v1.AzureDiskVolumeSource{
DiskName: testDiskName,
},
},
},
}
if isBlock {
blockMode := v1.PersistentVolumeBlock
pv.Spec.VolumeMode = &blockMode
}
return volume.NewSpecFromPersistentVolume(pv, readOnly)
}
func TestGetPodAndPluginMapPaths(t *testing.T) {
tmpVDir, err := utiltesting.MkTmpdir("azureDiskBlockTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
//deferred clean up
defer os.RemoveAll(tmpVDir)
expectedGlobalPath := filepath.Join(tmpVDir, testGlobalPath)
expectedPodPath := filepath.Join(tmpVDir, testPodPath)
spec := getTestVolume(false, tmpVDir, true /*isBlock*/)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpVDir, nil, nil))
plug, err := plugMgr.FindMapperPluginByName(azureDataDiskPluginName)
if err != nil {
os.RemoveAll(tmpVDir)
t.Fatalf("Can't find the plugin by name: %q", azureDataDiskPluginName)
}
if plug.GetPluginName() != azureDataDiskPluginName {
t.Fatalf("Wrong name: %s", plug.GetPluginName())
}
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
mapper, err := plug.NewBlockVolumeMapper(spec, pod, volume.VolumeOptions{})
if err != nil {
t.Fatalf("Failed to make a new Mounter: %v", err)
}
if mapper == nil {
t.Fatalf("Got a nil Mounter")
}
//GetGlobalMapPath
gMapPath, err := mapper.GetGlobalMapPath(spec)
if err != nil || len(gMapPath) == 0 {
t.Fatalf("Invalid GlobalMapPath from spec: %s, error: %v", spec.PersistentVolume.Spec.AzureDisk.DiskName, err)
}
if gMapPath != expectedGlobalPath {
t.Errorf("Failed to get GlobalMapPath: %s, expected %s", gMapPath, expectedGlobalPath)
}
//GetPodDeviceMapPath
gDevicePath, gVolName := mapper.GetPodDeviceMapPath()
if gDevicePath != expectedPodPath {
t.Errorf("Got unexpected pod path: %s, expected %s", gDevicePath, expectedPodPath)
}
if gVolName != testPVName {
t.Errorf("Got unexpected volNamne: %s, expected %s", gVolName, testPVName)
}
}
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_mounter.go (generated, vendored): 4 lines changed

@@ -44,7 +44,7 @@ var _ volume.Mounter = &azureDiskMounter{}
func (m *azureDiskMounter) GetAttributes() volume.Attributes {
readOnly := false
volumeSource, err := getVolumeSource(m.spec)
volumeSource, _, err := getVolumeSource(m.spec)
if err != nil {
glog.Infof("azureDisk - mounter failed to get volume source for spec %s %v", m.spec.Name(), err)
} else if volumeSource.ReadOnly != nil {

@@ -71,7 +71,7 @@ func (m *azureDiskMounter) GetPath() string {
func (m *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
mounter := m.plugin.host.GetMounter(m.plugin.GetPluginName())
volumeSource, err := getVolumeSource(m.spec)
volumeSource, _, err := getVolumeSource(m.spec)
if err != nil {
glog.Infof("azureDisk - mounter failed to get volume source for spec %s", m.spec.Name())
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_provision.go (generated, vendored): 26 lines changed

@@ -17,12 +17,15 @@ limitations under the License.
package azure_dd
import (
"errors"
"fmt"
"strings"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
)

@@ -46,7 +49,7 @@ func (d *azureDiskDeleter) GetPath() string {
}
func (d *azureDiskDeleter) Delete() error {
volumeSource, err := getVolumeSource(d.spec)
volumeSource, _, err := getVolumeSource(d.spec)
if err != nil {
return err
}

@@ -65,7 +68,7 @@ func (d *azureDiskDeleter) Delete() error {
return diskController.DeleteBlobDisk(volumeSource.DataDiskURI)
}
func (p *azureDiskProvisioner) Provision() (*v1.PersistentVolume, error) {
func (p *azureDiskProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
if !util.AccessModesContainedInAll(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) {
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", p.options.PVC.Spec.AccessModes, p.plugin.GetAccessModes())
}

@@ -92,6 +95,7 @@ func (p *azureDiskProvisioner) Provision() (*v1.PersistentVolume, error) {
cachingMode v1.AzureDataDiskCachingMode
strKind string
err error
resourceGroup string
)
// maxLength = 79 - (4 for ".vhd") = 75
name := util.GenerateVolumeName(p.options.ClusterName, p.options.PVName, 75)

@@ -115,13 +119,14 @@ func (p *azureDiskProvisioner) Provision() (*v1.PersistentVolume, error) {
cachingMode = v1.AzureDataDiskCachingMode(v)
case volume.VolumeParameterFSType:
fsType = strings.ToLower(v)
case "resourcegroup":
resourceGroup = v
default:
return nil, fmt.Errorf("AzureDisk - invalid option %s in storage class", k)
}
}
// normalize values
fsType = normalizeFsType(fsType)
skuName, err := normalizeStorageAccountType(storageAccountType)
if err != nil {
return nil, err

@@ -141,10 +146,18 @@ func (p *azureDiskProvisioner) Provision() (*v1.PersistentVolume, error) {
return nil, err
}
if resourceGroup != "" && kind != v1.AzureManagedDisk {
return nil, errors.New("StorageClass option 'resourceGroup' can be used only for managed disks")
}
// create disk
diskURI := ""
if kind == v1.AzureManagedDisk {
diskURI, err = diskController.CreateManagedDisk(name, skuName, requestGB, *(p.options.CloudTags))
tags := make(map[string]string)
if p.options.CloudTags != nil {
tags = *(p.options.CloudTags)
}
diskURI, err = diskController.CreateManagedDisk(name, skuName, resourceGroup, requestGB, tags)
if err != nil {
return nil, err
}

@@ -188,5 +201,10 @@ func (p *azureDiskProvisioner) Provision() (*v1.PersistentVolume, error) {
MountOptions: p.options.MountOptions,
},
}
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
pv.Spec.VolumeMode = p.options.PVC.Spec.VolumeMode
}
return pv, nil
}
17
vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_file.go
generated
vendored
17
vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_file.go
generated
vendored
@ -18,6 +18,7 @@ package azure_file

import (
"fmt"
"io/ioutil"
"os"
"runtime"

@ -149,7 +150,7 @@ func (plugin *azureFilePlugin) ExpandVolumeDevice(
newSize resource.Quantity,
oldSize resource.Quantity) (resource.Quantity, error) {

if spec.PersistentVolume != nil || spec.PersistentVolume.Spec.AzureFile == nil {
if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.AzureFile == nil {
return oldSize, fmt.Errorf("invalid PV spec")
}
shareName := spec.PersistentVolume.Spec.AzureFile.ShareName
@ -241,8 +242,20 @@ func (b *azureFileMounter) SetUpAt(dir string, fsGroup *int64) error {
return err
}
if !notMnt {
return nil
// testing original mount point, make sure the mount link is valid
if _, err := ioutil.ReadDir(dir); err == nil {
glog.V(4).Infof("azureFile - already mounted to target %s", dir)
return nil
}
// mount link is invalid, now unmount and remount later
glog.Warningf("azureFile - ReadDir %s failed with %v, unmount this directory", dir, err)
if err := b.mounter.Unmount(dir); err != nil {
glog.Errorf("azureFile - Unmount directory %s failed with %v", dir, err)
return err
}
notMnt = true
}

var accountKey, accountName string
if accountName, accountKey, err = b.util.GetAzureCredentials(b.plugin.host, b.secretNamespace, b.secretName); err != nil {
return err
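Editor's note: the SetUpAt hunk above changes azure_file from returning early whenever the target is already a mount point to first probing the mount with ReadDir and remounting when the link has gone stale. What follows is a minimal, hedged sketch of that pattern only, not the vendored code; the unmounter interface is a hypothetical stand-in for the plugin's mount.Interface dependency.

package mountcheck

import (
	"fmt"
	"io/ioutil"
)

// unmounter is a hypothetical stand-in for the mounter used by the real plugin.
type unmounter interface {
	Unmount(target string) error
}

// shouldMount reports whether the caller should (re)mount dir: true when nothing
// is mounted yet, or when an existing mount turned out to be unreadable and was
// therefore unmounted so it can be mounted again.
func shouldMount(u unmounter, dir string, notMnt bool) (bool, error) {
	if notMnt {
		return true, nil // nothing mounted yet
	}
	if _, err := ioutil.ReadDir(dir); err == nil {
		return false, nil // existing mount is healthy, keep it
	}
	// The mount link is invalid (e.g. credentials rotated); unmount so it can be remounted.
	if err := u.Unmount(dir); err != nil {
		return false, fmt.Errorf("unmount of stale mount %s failed: %v", dir, err)
	}
	return true, nil
}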
4 vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_file_test.go generated vendored
@ -20,7 +20,7 @@ import (
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"reflect"
"strings"
"testing"
@ -149,7 +149,7 @@ func testPlugin(t *testing.T, tmpDir string, volumeHost volume.VolumeHost) {
if mounter == nil {
t.Errorf("Got a nil Mounter")
}
volPath := path.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~azure-file/vol1")
volPath := filepath.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~azure-file/vol1")
path := mounter.GetPath()
if path != volPath {
t.Errorf("Got unexpected path: %s", path)
5 vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_provision.go generated vendored
@ -131,10 +131,13 @@ type azureFileProvisioner struct {

var _ volume.Provisioner = &azureFileProvisioner{}

func (a *azureFileProvisioner) Provision() (*v1.PersistentVolume, error) {
func (a *azureFileProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
if !util.AccessModesContainedInAll(a.plugin.GetAccessModes(), a.options.PVC.Spec.AccessModes) {
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", a.options.PVC.Spec.AccessModes, a.plugin.GetAccessModes())
}
if util.CheckPersistentVolumeClaimModeBlock(a.options.PVC) {
return nil, fmt.Errorf("%s does not support block volume provisioning", a.plugin.GetPluginName())
}

var sku, location, account string
2 vendor/k8s.io/kubernetes/pkg/volume/cephfs/cephfs.go generated vendored
@ -405,6 +405,8 @@ func (cephfsVolume *cephfs) execFuseMount(mountpoint string) error {
mountArgs = append(mountArgs, mountpoint)
mountArgs = append(mountArgs, "-r")
mountArgs = append(mountArgs, cephfsVolume.path)
mountArgs = append(mountArgs, "--id")
mountArgs = append(mountArgs, cephfsVolume.id)

glog.V(4).Infof("Mounting cmd ceph-fuse with arguments (%s)", mountArgs)
command := exec.Command("ceph-fuse", mountArgs...)
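Editor's note: a short, hedged illustration of how the ceph-fuse argument list assembled above ends up on the command line once the new "-r <path>" pair is appended. The paths and client id below are made-up examples, and the real method also appends keyring and monitor arguments that are not visible in this hunk.

package cephfusesketch

import "os/exec"

// fuseMountCmd mirrors only the argument handling visible in the hunk above.
func fuseMountCmd(mountpoint, subPath, id string) *exec.Cmd {
	args := []string{mountpoint, "-r", subPath, "--id", id}
	// e.g. ceph-fuse /var/lib/kubelet/pods/<uid>/volumes/... -r /volumes/foo --id admin
	return exec.Command("ceph-fuse", args...)
}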
22 vendor/k8s.io/kubernetes/pkg/volume/cinder/attacher.go generated vendored
@ -120,13 +120,11 @@ func (attacher *cinderDiskAttacher) waitDiskAttached(instanceID, volumeID string
}

func (attacher *cinderDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
volumeSource, _, err := getVolumeSource(spec)
volumeID, _, _, err := getVolumeInfo(spec)
if err != nil {
return "", err
}

volumeID := volumeSource.VolumeID

instanceID, err := attacher.nodeInstanceID(nodeName)
if err != nil {
return "", err
@ -175,15 +173,15 @@ func (attacher *cinderDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nod
volumeSpecMap := make(map[string]*volume.Spec)
volumeIDList := []string{}
for _, spec := range specs {
volumeSource, _, err := getVolumeSource(spec)
volumeID, _, _, err := getVolumeInfo(spec)
if err != nil {
glog.Errorf("Error getting volume (%q) source : %v", spec.Name(), err)
continue
}

volumeIDList = append(volumeIDList, volumeSource.VolumeID)
volumeIDList = append(volumeIDList, volumeID)
volumesAttachedCheck[spec] = true
volumeSpecMap[volumeSource.VolumeID] = spec
volumeSpecMap[volumeID] = spec
}

attachedResult, err := attacher.cinderProvider.DisksAreAttachedByName(nodeName, volumeIDList)
@ -207,13 +205,11 @@ func (attacher *cinderDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nod

func (attacher *cinderDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) {
// NOTE: devicePath is is path as reported by Cinder, which may be incorrect and should not be used. See Issue #33128
volumeSource, _, err := getVolumeSource(spec)
volumeID, _, _, err := getVolumeInfo(spec)
if err != nil {
return "", err
}

volumeID := volumeSource.VolumeID

if devicePath == "" {
return "", fmt.Errorf("WaitForAttach failed for Cinder disk %q: devicePath is empty", volumeID)
}
@ -252,12 +248,12 @@ func (attacher *cinderDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath

func (attacher *cinderDiskAttacher) GetDeviceMountPath(
spec *volume.Spec) (string, error) {
volumeSource, _, err := getVolumeSource(spec)
volumeID, _, _, err := getVolumeInfo(spec)
if err != nil {
return "", err
}

return makeGlobalPDName(attacher.host, volumeSource.VolumeID), nil
return makeGlobalPDName(attacher.host, volumeID), nil
}

// FIXME: this method can be further pruned.
@ -275,7 +271,7 @@ func (attacher *cinderDiskAttacher) MountDevice(spec *volume.Spec, devicePath st
}
}

volumeSource, readOnly, err := getVolumeSource(spec)
_, volumeFSType, readOnly, err := getVolumeInfo(spec)
if err != nil {
return err
}
@ -287,7 +283,7 @@ func (attacher *cinderDiskAttacher) MountDevice(spec *volume.Spec, devicePath st
if notMnt {
diskMounter := volumeutil.NewSafeFormatAndMountFromHost(cinderVolumePluginName, attacher.host)
mountOptions := volumeutil.MountOptionFromSpec(spec, options...)
err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions)
err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeFSType, mountOptions)
if err != nil {
os.Remove(deviceMountPath)
return err
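Editor's note: the cinder hunks above replace getVolumeSource, which handed back a *v1.CinderVolumeSource, with getVolumeInfo, which hands back plain (volumeID, fsType, readOnly) values; the helper itself is defined in the cinder.go hunks further below. The sketch that follows only demonstrates the new calling convention with simplified types; the spec struct and mountDeviceSketch function are illustrative placeholders, not Kubernetes APIs.

package cindersketch

import "fmt"

// spec is a toy stand-in for volume.Spec, carrying just the fields the new
// helper returns.
type spec struct {
	volumeID string
	fsType   string
	readOnly bool
}

// getVolumeInfo mirrors the shape of the real helper: identifiers out, no
// source struct leaked to the caller.
func getVolumeInfo(s *spec) (string, string, bool, error) {
	if s == nil {
		return "", "", false, fmt.Errorf("spec does not reference a Cinder volume type")
	}
	return s.volumeID, s.fsType, s.readOnly, nil
}

// mountDeviceSketch shows how a caller such as MountDevice now destructures
// only the values it needs.
func mountDeviceSketch(s *spec) error {
	_, fsType, readOnly, err := getVolumeInfo(s)
	if err != nil {
		return err
	}
	fmt.Printf("would FormatAndMount with fsType=%q readOnly=%v\n", fsType, readOnly)
	return nil
}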
12 vendor/k8s.io/kubernetes/pkg/volume/cinder/attacher_test.go generated vendored
@ -393,7 +393,7 @@ func createPVSpec(name string, readOnly bool) *volume.Spec {
PersistentVolume: &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
Cinder: &v1.CinderVolumeSource{
Cinder: &v1.CinderPersistentVolumeSource{
VolumeID: name,
ReadOnly: readOnly,
},
@ -712,10 +712,6 @@ func (instances *instances) NodeAddressesByProviderID(ctx context.Context, provi
return []v1.NodeAddress{}, errors.New("Not implemented")
}

func (instances *instances) ExternalID(ctx context.Context, name types.NodeName) (string, error) {
return "", errors.New("Not implemented")
}

func (instances *instances) InstanceID(ctx context.Context, name types.NodeName) (string, error) {
return instances.instanceID, nil
}
@ -732,12 +728,16 @@ func (instances *instances) InstanceExistsByProviderID(ctx context.Context, prov
return false, errors.New("unimplemented")
}

func (instances *instances) InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error) {
return false, errors.New("unimplemented")
}

func (instances *instances) List(filter string) ([]types.NodeName, error) {
return []types.NodeName{}, errors.New("Not implemented")
}

func (instances *instances) AddSSHKeyToAllInstances(ctx context.Context, user string, keyData []byte) error {
return errors.New("Not implemented")
return cloudprovider.NotImplemented
}

func (instances *instances) CurrentNodeName(ctx context.Context, hostname string) (types.NodeName, error) {
67 vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder.go generated vendored
@ -79,6 +79,10 @@ const (
|
||||
cinderVolumePluginName = "kubernetes.io/cinder"
|
||||
)
|
||||
|
||||
func getPath(uid types.UID, volName string, host volume.VolumeHost) string {
|
||||
return host.GetPodVolumeDir(uid, kstrings.EscapeQualifiedNameForDisk(cinderVolumePluginName), volName)
|
||||
}
|
||||
|
||||
func (plugin *cinderPlugin) Init(host volume.VolumeHost) error {
|
||||
plugin.host = host
|
||||
plugin.volumeLocks = keymutex.NewKeyMutex()
|
||||
@ -90,12 +94,12 @@ func (plugin *cinderPlugin) GetPluginName() string {
|
||||
}
|
||||
|
||||
func (plugin *cinderPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
|
||||
volumeSource, _, err := getVolumeSource(spec)
|
||||
volumeID, _, _, err := getVolumeInfo(spec)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return volumeSource.VolumeID, nil
|
||||
return volumeID, nil
|
||||
}
|
||||
|
||||
func (plugin *cinderPlugin) CanSupport(spec *volume.Spec) bool {
|
||||
@ -125,22 +129,20 @@ func (plugin *cinderPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.
|
||||
}
|
||||
|
||||
func (plugin *cinderPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager cdManager, mounter mount.Interface) (volume.Mounter, error) {
|
||||
cinder, readOnly, err := getVolumeSource(spec)
|
||||
pdName, fsType, readOnly, err := getVolumeInfo(spec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pdName := cinder.VolumeID
|
||||
fsType := cinder.FSType
|
||||
|
||||
return &cinderVolumeMounter{
|
||||
cinderVolume: &cinderVolume{
|
||||
podUID: podUID,
|
||||
volName: spec.Name(),
|
||||
pdName: pdName,
|
||||
mounter: mounter,
|
||||
manager: manager,
|
||||
plugin: plugin,
|
||||
podUID: podUID,
|
||||
volName: spec.Name(),
|
||||
pdName: pdName,
|
||||
mounter: mounter,
|
||||
manager: manager,
|
||||
plugin: plugin,
|
||||
MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, spec.Name(), plugin.host)),
|
||||
},
|
||||
fsType: fsType,
|
||||
readOnly: readOnly,
|
||||
@ -154,11 +156,12 @@ func (plugin *cinderPlugin) NewUnmounter(volName string, podUID types.UID) (volu
|
||||
func (plugin *cinderPlugin) newUnmounterInternal(volName string, podUID types.UID, manager cdManager, mounter mount.Interface) (volume.Unmounter, error) {
|
||||
return &cinderVolumeUnmounter{
|
||||
&cinderVolume{
|
||||
podUID: podUID,
|
||||
volName: volName,
|
||||
manager: manager,
|
||||
mounter: mounter,
|
||||
plugin: plugin,
|
||||
podUID: podUID,
|
||||
volName: volName,
|
||||
manager: manager,
|
||||
mounter: mounter,
|
||||
plugin: plugin,
|
||||
MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, volName, plugin.host)),
|
||||
}}, nil
|
||||
}
|
||||
|
||||
@ -242,7 +245,7 @@ func (plugin *cinderPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*
|
||||
var _ volume.ExpandableVolumePlugin = &cinderPlugin{}
|
||||
|
||||
func (plugin *cinderPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize resource.Quantity, oldSize resource.Quantity) (resource.Quantity, error) {
|
||||
cinder, _, err := getVolumeSource(spec)
|
||||
volumeID, _, _, err := getVolumeInfo(spec)
|
||||
if err != nil {
|
||||
return oldSize, err
|
||||
}
|
||||
@ -251,12 +254,12 @@ func (plugin *cinderPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize resour
|
||||
return oldSize, err
|
||||
}
|
||||
|
||||
expandedSize, err := cloud.ExpandVolume(cinder.VolumeID, oldSize, newSize)
|
||||
expandedSize, err := cloud.ExpandVolume(volumeID, oldSize, newSize)
|
||||
if err != nil {
|
||||
return oldSize, err
|
||||
}
|
||||
|
||||
glog.V(2).Infof("volume %s expanded to new size %d successfully", cinder.VolumeID, int(newSize.Value()))
|
||||
glog.V(2).Infof("volume %s expanded to new size %d successfully", volumeID, int(newSize.Value()))
|
||||
return expandedSize, nil
|
||||
}
|
||||
|
||||
@ -303,7 +306,7 @@ type cinderVolume struct {
|
||||
// diskMounter provides the interface that is used to mount the actual block device.
|
||||
blockDeviceMounter mount.Interface
|
||||
plugin *cinderPlugin
|
||||
volume.MetricsNil
|
||||
volume.MetricsProvider
|
||||
}
|
||||
|
||||
func (b *cinderVolumeMounter) GetAttributes() volume.Attributes {
|
||||
@ -397,8 +400,7 @@ func makeGlobalPDName(host volume.VolumeHost, devName string) string {
|
||||
}
|
||||
|
||||
func (cd *cinderVolume) GetPath() string {
|
||||
name := cinderVolumePluginName
|
||||
return cd.plugin.host.GetPodVolumeDir(cd.podUID, kstrings.EscapeQualifiedNameForDisk(name), cd.volName)
|
||||
return getPath(cd.podUID, cd.volName, cd.plugin.host)
|
||||
}
|
||||
|
||||
type cinderVolumeUnmounter struct {
|
||||
@ -484,8 +486,7 @@ type cinderVolumeDeleter struct {
|
||||
var _ volume.Deleter = &cinderVolumeDeleter{}
|
||||
|
||||
func (r *cinderVolumeDeleter) GetPath() string {
|
||||
name := cinderVolumePluginName
|
||||
return r.plugin.host.GetPodVolumeDir(r.podUID, kstrings.EscapeQualifiedNameForDisk(name), r.volName)
|
||||
return getPath(r.podUID, r.volName, r.plugin.host)
|
||||
}
|
||||
|
||||
func (r *cinderVolumeDeleter) Delete() error {
|
||||
@ -499,11 +500,15 @@ type cinderVolumeProvisioner struct {
|
||||
|
||||
var _ volume.Provisioner = &cinderVolumeProvisioner{}
|
||||
|
||||
func (c *cinderVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
|
||||
func (c *cinderVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
|
||||
if !util.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
|
||||
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
|
||||
}
|
||||
|
||||
if util.CheckPersistentVolumeClaimModeBlock(c.options.PVC) {
|
||||
return nil, fmt.Errorf("%s does not support block volume provisioning", c.plugin.GetPluginName())
|
||||
}
|
||||
|
||||
volumeID, sizeGB, labels, fstype, err := c.manager.CreateVolume(c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -524,7 +529,7 @@ func (c *cinderVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
|
||||
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
|
||||
},
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
Cinder: &v1.CinderVolumeSource{
|
||||
Cinder: &v1.CinderPersistentVolumeSource{
|
||||
VolumeID: volumeID,
|
||||
FSType: fstype,
|
||||
ReadOnly: false,
|
||||
@ -540,13 +545,13 @@ func (c *cinderVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
|
||||
return pv, nil
|
||||
}
|
||||
|
||||
func getVolumeSource(spec *volume.Spec) (*v1.CinderVolumeSource, bool, error) {
|
||||
func getVolumeInfo(spec *volume.Spec) (string, string, bool, error) {
|
||||
if spec.Volume != nil && spec.Volume.Cinder != nil {
|
||||
return spec.Volume.Cinder, spec.Volume.Cinder.ReadOnly, nil
|
||||
return spec.Volume.Cinder.VolumeID, spec.Volume.Cinder.FSType, spec.Volume.Cinder.ReadOnly, nil
|
||||
} else if spec.PersistentVolume != nil &&
|
||||
spec.PersistentVolume.Spec.Cinder != nil {
|
||||
return spec.PersistentVolume.Spec.Cinder, spec.ReadOnly, nil
|
||||
return spec.PersistentVolume.Spec.Cinder.VolumeID, spec.PersistentVolume.Spec.Cinder.FSType, spec.ReadOnly, nil
|
||||
}
|
||||
|
||||
return nil, false, fmt.Errorf("Spec does not reference a Cinder volume type")
|
||||
return "", "", false, fmt.Errorf("Spec does not reference a Cinder volume type")
|
||||
}
|
||||
|
4 vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_test.go generated vendored
@ -51,7 +51,7 @@ func TestCanSupport(t *testing.T) {
|
||||
t.Errorf("Expected true")
|
||||
}
|
||||
|
||||
if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{Cinder: &v1.CinderVolumeSource{}}}}}) {
|
||||
if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{Cinder: &v1.CinderPersistentVolumeSource{}}}}}) {
|
||||
t.Errorf("Expected true")
|
||||
}
|
||||
}
|
||||
@ -196,7 +196,7 @@ func TestPlugin(t *testing.T) {
|
||||
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
|
||||
}
|
||||
provisioner, err := plug.(*cinderPlugin).newProvisionerInternal(options, &fakePDManager{0})
|
||||
persistentSpec, err := provisioner.Provision()
|
||||
persistentSpec, err := provisioner.Provision(nil, nil)
|
||||
if err != nil {
|
||||
t.Errorf("Provision() failed: %v", err)
|
||||
}
|
||||
|
14 vendor/k8s.io/kubernetes/pkg/volume/configmap/configmap.go generated vendored
@ -191,12 +191,6 @@ func (b *configMapVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := volumeutil.MakeNestedMountpoints(b.volName, dir, b.pod); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
optional := b.source.Optional != nil && *b.source.Optional
|
||||
configMap, err := b.getConfigMap(b.pod.Namespace, b.source.Name)
|
||||
@ -213,6 +207,13 @@ func (b *configMapVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
|
||||
}
|
||||
}
|
||||
|
||||
if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := volumeutil.MakeNestedMountpoints(b.volName, dir, b.pod); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
totalBytes := totalBytes(configMap)
|
||||
glog.V(3).Infof("Received configMap %v/%v containing (%v) pieces of data, %v total bytes",
|
||||
b.pod.Namespace,
|
||||
@ -243,7 +244,6 @@ func (b *configMapVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
|
||||
glog.Errorf("Error applying volume ownership settings for group: %v", fsGroup)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
13 vendor/k8s.io/kubernetes/pkg/volume/csi/BUILD generated vendored
@ -4,6 +4,7 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"csi_attacher.go",
|
||||
"csi_block.go",
|
||||
"csi_client.go",
|
||||
"csi_mounter.go",
|
||||
"csi_plugin.go",
|
||||
@ -12,13 +13,14 @@ go_library(
|
||||
importpath = "k8s.io/kubernetes/pkg/volume/csi",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//pkg/features:go_default_library",
|
||||
"//pkg/util/mount:go_default_library",
|
||||
"//pkg/util/strings:go_default_library",
|
||||
"//pkg/volume:go_default_library",
|
||||
"//pkg/volume/csi/labelmanager:go_default_library",
|
||||
"//pkg/volume/util:go_default_library",
|
||||
"//vendor/github.com/container-storage-interface/spec/lib/go/csi/v0:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/golang.org/x/net/context:go_default_library",
|
||||
"//vendor/google.golang.org/grpc:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/api/storage/v1beta1:go_default_library",
|
||||
@ -26,6 +28,7 @@ go_library(
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
],
|
||||
)
|
||||
@ -34,6 +37,7 @@ go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"csi_attacher_test.go",
|
||||
"csi_block_test.go",
|
||||
"csi_client_test.go",
|
||||
"csi_mounter_test.go",
|
||||
"csi_plugin_test.go",
|
||||
@ -43,15 +47,17 @@ go_test(
|
||||
"//pkg/volume:go_default_library",
|
||||
"//pkg/volume/csi/fake:go_default_library",
|
||||
"//pkg/volume/testing:go_default_library",
|
||||
"//vendor/golang.org/x/net/context:go_default_library",
|
||||
"//vendor/google.golang.org/grpc:go_default_library",
|
||||
"//pkg/volume/util:go_default_library",
|
||||
"//vendor/github.com/container-storage-interface/spec/lib/go/csi/v0:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/api/storage/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
|
||||
"//vendor/k8s.io/client-go/testing:go_default_library",
|
||||
"//vendor/k8s.io/client-go/util/testing:go_default_library",
|
||||
@ -70,6 +76,7 @@ filegroup(
|
||||
srcs = [
|
||||
":package-srcs",
|
||||
"//pkg/volume/csi/fake:all-srcs",
|
||||
"//pkg/volume/csi/labelmanager:all-srcs",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
|
131 vendor/k8s.io/kubernetes/pkg/volume/csi/csi_attacher.go generated vendored
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package csi
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"errors"
|
||||
"fmt"
|
||||
@ -27,7 +28,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
grpctx "golang.org/x/net/context"
|
||||
|
||||
csipb "github.com/container-storage-interface/spec/lib/go/csi/v0"
|
||||
"k8s.io/api/core/v1"
|
||||
@ -102,17 +102,11 @@ func (c *csiAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string
|
||||
glog.V(4).Info(log("attachment [%v] for volume [%v] created successfully", attachID, csiSource.VolumeHandle))
|
||||
}
|
||||
|
||||
// probe for attachment update here
|
||||
// NOTE: any error from waiting for attachment is logged only. This is because
|
||||
// the primary intent of the enclosing method is to create VolumeAttachment.
|
||||
// DONOT return that error here as it is mitigated in attacher.WaitForAttach.
|
||||
volAttachmentOK := true
|
||||
if _, err := c.waitForVolumeAttachment(csiSource.VolumeHandle, attachID, csiTimeout); err != nil {
|
||||
volAttachmentOK = false
|
||||
glog.Error(log("attacher.Attach attempted to wait for attachment to be ready, but failed with: %v", err))
|
||||
return "", err
|
||||
}
|
||||
|
||||
glog.V(4).Info(log("attacher.Attach finished OK with VolumeAttachment verified=%t: attachment object [%s]", volAttachmentOK, attachID))
|
||||
glog.V(4).Info(log("attacher.Attach finished OK with VolumeAttachment object [%s]", attachID))
|
||||
|
||||
return attachID, nil
|
||||
}
|
||||
@ -251,7 +245,7 @@ func (c *csiAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) {
|
||||
return deviceMountPath, nil
|
||||
}
|
||||
|
||||
func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
|
||||
func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) (err error) {
|
||||
glog.V(4).Infof(log("attacher.MountDevice(%s, %s)", devicePath, deviceMountPath))
|
||||
|
||||
mounted, err := isDirMounted(c.plugin, deviceMountPath)
|
||||
@ -275,31 +269,57 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo
|
||||
return err
|
||||
}
|
||||
|
||||
if c.csiClient == nil {
|
||||
if csiSource.Driver == "" {
|
||||
return fmt.Errorf("attacher.MountDevice failed, driver name is empty")
|
||||
// Store volume metadata for UnmountDevice. Keep it around even if the
|
||||
// driver does not support NodeStage, UnmountDevice still needs it.
|
||||
if err = os.MkdirAll(deviceMountPath, 0750); err != nil {
|
||||
glog.Error(log("attacher.MountDevice failed to create dir %#v: %v", deviceMountPath, err))
|
||||
return err
|
||||
}
|
||||
glog.V(4).Info(log("created target path successfully [%s]", deviceMountPath))
|
||||
dataDir := filepath.Dir(deviceMountPath)
|
||||
data := map[string]string{
|
||||
volDataKey.volHandle: csiSource.VolumeHandle,
|
||||
volDataKey.driverName: csiSource.Driver,
|
||||
}
|
||||
if err = saveVolumeData(dataDir, volDataFileName, data); err != nil {
|
||||
glog.Error(log("failed to save volume info data: %v", err))
|
||||
if cleanerr := os.RemoveAll(dataDir); err != nil {
|
||||
glog.Error(log("failed to remove dir after error [%s]: %v", dataDir, cleanerr))
|
||||
}
|
||||
addr := fmt.Sprintf(csiAddrTemplate, csiSource.Driver)
|
||||
c.csiClient = newCsiDriverClient("unix", addr)
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
// clean up metadata
|
||||
glog.Errorf(log("attacher.MountDevice failed: %v", err))
|
||||
if err := removeMountDir(c.plugin, deviceMountPath); err != nil {
|
||||
glog.Error(log("attacher.MountDevice failed to remove mount dir after errir [%s]: %v", deviceMountPath, err))
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
if c.csiClient == nil {
|
||||
c.csiClient = newCsiDriverClient(csiSource.Driver)
|
||||
}
|
||||
csi := c.csiClient
|
||||
|
||||
ctx, cancel := grpctx.WithTimeout(grpctx.Background(), csiTimeout)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
|
||||
defer cancel()
|
||||
// Check whether "STAGE_UNSTAGE_VOLUME" is set
|
||||
stageUnstageSet, err := hasStageUnstageCapability(ctx, csi)
|
||||
if err != nil {
|
||||
glog.Error(log("attacher.MountDevice failed to check STAGE_UNSTAGE_VOLUME: %v", err))
|
||||
return err
|
||||
}
|
||||
if !stageUnstageSet {
|
||||
glog.Infof(log("attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..."))
|
||||
// defer does *not* remove the metadata file and it's correct - UnmountDevice needs it there.
|
||||
return nil
|
||||
}
|
||||
|
||||
// Start MountDevice
|
||||
if deviceMountPath == "" {
|
||||
return fmt.Errorf("attacher.MountDevice failed, deviceMountPath is empty")
|
||||
err = fmt.Errorf("attacher.MountDevice failed, deviceMountPath is empty")
|
||||
return err
|
||||
}
|
||||
|
||||
nodeName := string(c.plugin.host.GetNodeName())
|
||||
@ -308,22 +328,24 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo
|
||||
// search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
|
||||
attachment, err := c.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
|
||||
if err != nil {
|
||||
glog.Error(log("attacher.MountDevice failed while getting volume attachment [id=%v]: %v", attachID, err))
|
||||
return err
|
||||
return err // This err already has enough context ("VolumeAttachment xyz not found")
|
||||
}
|
||||
|
||||
if attachment == nil {
|
||||
glog.Error(log("unable to find VolumeAttachment [id=%s]", attachID))
|
||||
return errors.New("no existing VolumeAttachment found")
|
||||
err = errors.New("no existing VolumeAttachment found")
|
||||
return err
|
||||
}
|
||||
publishVolumeInfo := attachment.Status.AttachmentMetadata
|
||||
|
||||
// create target_dir before call to NodeStageVolume
|
||||
if err := os.MkdirAll(deviceMountPath, 0750); err != nil {
|
||||
glog.Error(log("attacher.MountDevice failed to create dir %#v: %v", deviceMountPath, err))
|
||||
return err
|
||||
nodeStageSecrets := map[string]string{}
|
||||
if csiSource.NodeStageSecretRef != nil {
|
||||
nodeStageSecrets, err = getCredentialsFromSecret(c.k8s, csiSource.NodeStageSecretRef)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("fetching NodeStageSecretRef %s/%s failed: %v",
|
||||
csiSource.NodeStageSecretRef.Namespace, csiSource.NodeStageSecretRef.Name, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
glog.V(4).Info(log("created target path successfully [%s]", deviceMountPath))
|
||||
|
||||
//TODO (vladimirvivien) implement better AccessModes mapping between k8s and CSI
|
||||
accessMode := v1.ReadWriteOnce
|
||||
@ -332,15 +354,6 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo
|
||||
}
|
||||
|
||||
fsType := csiSource.FSType
|
||||
if len(fsType) == 0 {
|
||||
fsType = defaultFSType
|
||||
}
|
||||
|
||||
nodeStageSecrets := map[string]string{}
|
||||
if csiSource.NodeStageSecretRef != nil {
|
||||
nodeStageSecrets = getCredentialsFromSecret(c.k8s, csiSource.NodeStageSecretRef)
|
||||
}
|
||||
|
||||
err = csi.NodeStageVolume(ctx,
|
||||
csiSource.VolumeHandle,
|
||||
publishVolumeInfo,
|
||||
@ -351,11 +364,6 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo
|
||||
csiSource.VolumeAttributes)
|
||||
|
||||
if err != nil {
|
||||
glog.Errorf(log("attacher.MountDevice failed: %v", err))
|
||||
if err := removeMountDir(c.plugin, deviceMountPath); err != nil {
|
||||
glog.Error(log("attacher.MountDevice failed to remove mount dir after a NodeStageVolume() error [%s]: %v", deviceMountPath, err))
|
||||
return err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
@ -381,6 +389,11 @@ func (c *csiAttacher) Detach(volumeName string, nodeName types.NodeName) error {
|
||||
volID := parts[1]
|
||||
attachID := getAttachmentName(volID, driverName, string(nodeName))
|
||||
if err := c.k8s.StorageV1beta1().VolumeAttachments().Delete(attachID, nil); err != nil {
|
||||
if apierrs.IsNotFound(err) {
|
||||
// object deleted or never existed, done
|
||||
glog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] not found, object deleted", attachID, volID))
|
||||
return nil
|
||||
}
|
||||
glog.Error(log("detacher.Detach failed to delete VolumeAttachment [%s]: %v", attachID, err))
|
||||
return err
|
||||
}
|
||||
@ -463,19 +476,29 @@ func (c *csiAttacher) UnmountDevice(deviceMountPath string) error {
|
||||
glog.V(4).Info(log("attacher.UnmountDevice(%s)", deviceMountPath))
|
||||
|
||||
// Setup
|
||||
driverName, volID, err := getDriverAndVolNameFromDeviceMountPath(c.k8s, deviceMountPath)
|
||||
if err != nil {
|
||||
glog.Errorf(log("attacher.UnmountDevice failed to get driver and volume name from device mount path: %v", err))
|
||||
return err
|
||||
var driverName, volID string
|
||||
dataDir := filepath.Dir(deviceMountPath)
|
||||
data, err := loadVolumeData(dataDir, volDataFileName)
|
||||
if err == nil {
|
||||
driverName = data[volDataKey.driverName]
|
||||
volID = data[volDataKey.volHandle]
|
||||
} else {
|
||||
glog.Error(log("UnmountDevice failed to load volume data file [%s]: %v", dataDir, err))
|
||||
|
||||
// The volume might have been mounted by old CSI volume plugin. Fall back to the old behavior: read PV from API server
|
||||
driverName, volID, err = getDriverAndVolNameFromDeviceMountPath(c.k8s, deviceMountPath)
|
||||
if err != nil {
|
||||
glog.Errorf(log("attacher.UnmountDevice failed to get driver and volume name from device mount path: %v", err))
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if c.csiClient == nil {
|
||||
addr := fmt.Sprintf(csiAddrTemplate, driverName)
|
||||
c.csiClient = newCsiDriverClient("unix", addr)
|
||||
c.csiClient = newCsiDriverClient(driverName)
|
||||
}
|
||||
csi := c.csiClient
|
||||
|
||||
ctx, cancel := grpctx.WithTimeout(grpctx.Background(), csiTimeout)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
|
||||
defer cancel()
|
||||
// Check whether "STAGE_UNSTAGE_VOLUME" is set
|
||||
stageUnstageSet, err := hasStageUnstageCapability(ctx, csi)
|
||||
@ -485,6 +508,11 @@ func (c *csiAttacher) UnmountDevice(deviceMountPath string) error {
|
||||
}
|
||||
if !stageUnstageSet {
|
||||
glog.Infof(log("attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice..."))
|
||||
// Just delete the global directory + json file
|
||||
if err := removeMountDir(c.plugin, deviceMountPath); err != nil {
|
||||
return fmt.Errorf("failed to clean up gloubal mount %s: %s", dataDir, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -498,11 +526,16 @@ func (c *csiAttacher) UnmountDevice(deviceMountPath string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// Delete the global directory + json file
|
||||
if err := removeMountDir(c.plugin, deviceMountPath); err != nil {
|
||||
return fmt.Errorf("failed to clean up gloubal mount %s: %s", dataDir, err)
|
||||
}
|
||||
|
||||
glog.V(4).Infof(log("attacher.UnmountDevice successfully requested NodeStageVolume [%s]", deviceMountPath))
|
||||
return nil
|
||||
}
|
||||
|
||||
func hasStageUnstageCapability(ctx grpctx.Context, csi csiClient) (bool, error) {
|
||||
func hasStageUnstageCapability(ctx context.Context, csi csiClient) (bool, error) {
|
||||
capabilities, err := csi.NodeGetCapabilities(ctx)
|
||||
if err != nil {
|
||||
return false, err
|
||||
|
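Editor's note: the MountDevice/UnmountDevice hunks above persist a small JSON metadata file next to the device mount path so UnmountDevice can recover the driver name and volume handle even when the PV is no longer readable from the API server. Below is a minimal, hedged sketch of that save/load idea; the file name, keys, and permissions are illustrative assumptions, not necessarily the values behind volDataFileName and volDataKey in the vendored code.

package csidatasketch

import (
	"encoding/json"
	"io/ioutil"
	"os"
	"path/filepath"
)

// volDataFile is an assumed name; the real constant lives in the CSI plugin.
const volDataFile = "vol_data.json"

// saveVolumeData writes the driver/volume metadata as JSON under dir.
func saveVolumeData(dir string, data map[string]string) error {
	raw, err := json.Marshal(data)
	if err != nil {
		return err
	}
	if err := os.MkdirAll(dir, 0750); err != nil {
		return err
	}
	return ioutil.WriteFile(filepath.Join(dir, volDataFile), raw, 0600)
}

// loadVolumeData reads the metadata back, e.g. during UnmountDevice.
func loadVolumeData(dir string) (map[string]string, error) {
	raw, err := ioutil.ReadFile(filepath.Join(dir, volDataFile))
	if err != nil {
		return nil, err
	}
	data := map[string]string{}
	err = json.Unmarshal(raw, &data)
	return data, err
}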
226 vendor/k8s.io/kubernetes/pkg/volume/csi/csi_attacher_test.go generated vendored
@ -18,6 +18,7 @@ package csi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
@ -26,13 +27,13 @@ import (
|
||||
storage "k8s.io/api/storage/v1beta1"
|
||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
||||
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
fakeclient "k8s.io/client-go/kubernetes/fake"
|
||||
core "k8s.io/client-go/testing"
|
||||
utiltesting "k8s.io/client-go/util/testing"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
"k8s.io/kubernetes/pkg/volume/csi/fake"
|
||||
volumetest "k8s.io/kubernetes/pkg/volume/testing"
|
||||
)
|
||||
|
||||
@ -59,12 +60,13 @@ func makeTestAttachment(attachID, nodeName, pvName string) *storage.VolumeAttach
|
||||
func TestAttacherAttach(t *testing.T) {
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
nodeName string
|
||||
driverName string
|
||||
volumeName string
|
||||
attachID string
|
||||
shouldFail bool
|
||||
name string
|
||||
nodeName string
|
||||
driverName string
|
||||
volumeName string
|
||||
attachID string
|
||||
injectAttacherError bool
|
||||
shouldFail bool
|
||||
}{
|
||||
{
|
||||
name: "test ok 1",
|
||||
@ -104,13 +106,22 @@ func TestAttacherAttach(t *testing.T) {
|
||||
attachID: getAttachmentName("vol02", "driver02", "node02"),
|
||||
shouldFail: true,
|
||||
},
|
||||
{
|
||||
name: "attacher error",
|
||||
nodeName: "node02",
|
||||
driverName: "driver02",
|
||||
volumeName: "vol02",
|
||||
attachID: getAttachmentName("vol02", "driver02", "node02"),
|
||||
injectAttacherError: true,
|
||||
shouldFail: true,
|
||||
},
|
||||
}
|
||||
|
||||
// attacher loop
|
||||
for i, tc := range testCases {
|
||||
t.Logf("test case: %s", tc.name)
|
||||
|
||||
plug, fakeWatcher, tmpDir := newTestWatchPlugin(t)
|
||||
plug, fakeWatcher, tmpDir, _ := newTestWatchPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
attacher, err := plug.NewAttacher()
|
||||
@ -127,6 +138,9 @@ func TestAttacherAttach(t *testing.T) {
|
||||
if !fail && err != nil {
|
||||
t.Errorf("expecting no failure, but got err: %v", err)
|
||||
}
|
||||
if fail && err == nil {
|
||||
t.Errorf("expecting failure, but got no err")
|
||||
}
|
||||
if attachID != id && !fail {
|
||||
t.Errorf("expecting attachID %v, got %v", id, attachID)
|
||||
}
|
||||
@ -154,7 +168,14 @@ func TestAttacherAttach(t *testing.T) {
|
||||
if attach == nil {
|
||||
t.Logf("attachment not found for id:%v", tc.attachID)
|
||||
} else {
|
||||
attach.Status.Attached = true
|
||||
if tc.injectAttacherError {
|
||||
attach.Status.Attached = false
|
||||
attach.Status.AttachError = &storage.VolumeError{
|
||||
Message: "attacher error",
|
||||
}
|
||||
} else {
|
||||
attach.Status.Attached = true
|
||||
}
|
||||
_, err = csiAttacher.k8s.StorageV1beta1().VolumeAttachments().Update(attach)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
@ -165,17 +186,7 @@ func TestAttacherAttach(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAttacherWaitForVolumeAttachment(t *testing.T) {
|
||||
|
||||
plug, fakeWatcher, tmpDir := newTestWatchPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
attacher, err := plug.NewAttacher()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create new attacher: %v", err)
|
||||
}
|
||||
csiAttacher := attacher.(*csiAttacher)
|
||||
nodeName := "test-node"
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
initAttached bool
|
||||
@ -183,21 +194,18 @@ func TestAttacherWaitForVolumeAttachment(t *testing.T) {
|
||||
trigerWatchEventTime time.Duration
|
||||
initAttachErr *storage.VolumeError
|
||||
finalAttachErr *storage.VolumeError
|
||||
sleepTime time.Duration
|
||||
timeout time.Duration
|
||||
shouldFail bool
|
||||
}{
|
||||
{
|
||||
name: "attach success at get",
|
||||
initAttached: true,
|
||||
sleepTime: 10 * time.Millisecond,
|
||||
timeout: 50 * time.Millisecond,
|
||||
shouldFail: false,
|
||||
},
|
||||
{
|
||||
name: "attachment error ant get",
|
||||
initAttachErr: &storage.VolumeError{Message: "missing volume"},
|
||||
sleepTime: 10 * time.Millisecond,
|
||||
timeout: 30 * time.Millisecond,
|
||||
shouldFail: true,
|
||||
},
|
||||
@ -207,7 +215,6 @@ func TestAttacherWaitForVolumeAttachment(t *testing.T) {
|
||||
finalAttached: true,
|
||||
trigerWatchEventTime: 5 * time.Millisecond,
|
||||
timeout: 50 * time.Millisecond,
|
||||
sleepTime: 5 * time.Millisecond,
|
||||
shouldFail: false,
|
||||
},
|
||||
{
|
||||
@ -216,7 +223,6 @@ func TestAttacherWaitForVolumeAttachment(t *testing.T) {
|
||||
finalAttached: false,
|
||||
finalAttachErr: &storage.VolumeError{Message: "missing volume"},
|
||||
trigerWatchEventTime: 5 * time.Millisecond,
|
||||
sleepTime: 10 * time.Millisecond,
|
||||
timeout: 30 * time.Millisecond,
|
||||
shouldFail: true,
|
||||
},
|
||||
@ -226,13 +232,19 @@ func TestAttacherWaitForVolumeAttachment(t *testing.T) {
|
||||
finalAttached: true,
|
||||
trigerWatchEventTime: 100 * time.Millisecond,
|
||||
timeout: 50 * time.Millisecond,
|
||||
sleepTime: 5 * time.Millisecond,
|
||||
shouldFail: true,
|
||||
},
|
||||
}
|
||||
|
||||
for i, tc := range testCases {
|
||||
fakeWatcher.Reset()
|
||||
plug, fakeWatcher, tmpDir, _ := newTestWatchPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
attacher, err := plug.NewAttacher()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create new attacher: %v", err)
|
||||
}
|
||||
csiAttacher := attacher.(*csiAttacher)
|
||||
t.Logf("running test: %v", tc.name)
|
||||
pvName := fmt.Sprintf("test-pv-%d", i)
|
||||
volID := fmt.Sprintf("test-vol-%d", i)
|
||||
@ -240,18 +252,21 @@ func TestAttacherWaitForVolumeAttachment(t *testing.T) {
|
||||
attachment := makeTestAttachment(attachID, nodeName, pvName)
|
||||
attachment.Status.Attached = tc.initAttached
|
||||
attachment.Status.AttachError = tc.initAttachErr
|
||||
csiAttacher.waitSleepTime = tc.sleepTime
|
||||
_, err := csiAttacher.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
|
||||
_, err = csiAttacher.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to attach: %v", err)
|
||||
}
|
||||
|
||||
trigerWatchEventTime := tc.trigerWatchEventTime
|
||||
finalAttached := tc.finalAttached
|
||||
finalAttachErr := tc.finalAttachErr
|
||||
// after timeout, fakeWatcher will be closed by csiAttacher.waitForVolumeAttachment
|
||||
if tc.trigerWatchEventTime > 0 && tc.trigerWatchEventTime < tc.timeout {
|
||||
go func() {
|
||||
time.Sleep(tc.trigerWatchEventTime)
|
||||
attachment.Status.Attached = tc.finalAttached
|
||||
attachment.Status.AttachError = tc.finalAttachErr
|
||||
time.Sleep(trigerWatchEventTime)
|
||||
attachment := makeTestAttachment(attachID, nodeName, pvName)
|
||||
attachment.Status.Attached = finalAttached
|
||||
attachment.Status.AttachError = finalAttachErr
|
||||
fakeWatcher.Modify(attachment)
|
||||
}()
|
||||
}
|
||||
@ -337,16 +352,33 @@ func TestAttacherDetach(t *testing.T) {
|
||||
volID string
|
||||
attachID string
|
||||
shouldFail bool
|
||||
reactor func(action core.Action) (handled bool, ret runtime.Object, err error)
|
||||
}{
|
||||
{name: "normal test", volID: "vol-001", attachID: getAttachmentName("vol-001", testDriver, nodeName)},
|
||||
{name: "normal test 2", volID: "vol-002", attachID: getAttachmentName("vol-002", testDriver, nodeName)},
|
||||
{name: "object not found", volID: "vol-001", attachID: getAttachmentName("vol-002", testDriver, nodeName), shouldFail: true},
|
||||
{name: "object not found", volID: "vol-non-existing", attachID: getAttachmentName("vol-003", testDriver, nodeName)},
|
||||
{
|
||||
name: "API error",
|
||||
volID: "vol-004",
|
||||
attachID: getAttachmentName("vol-004", testDriver, nodeName),
|
||||
shouldFail: true, // All other API errors should be propagated to caller
|
||||
reactor: func(action core.Action) (handled bool, ret runtime.Object, err error) {
|
||||
// return Forbidden to all DELETE requests
|
||||
if action.Matches("delete", "volumeattachments") {
|
||||
return true, nil, apierrs.NewForbidden(action.GetResource().GroupResource(), action.GetNamespace(), fmt.Errorf("mock error"))
|
||||
}
|
||||
return false, nil, nil
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Logf("running test: %v", tc.name)
|
||||
plug, fakeWatcher, tmpDir := newTestWatchPlugin(t)
|
||||
plug, fakeWatcher, tmpDir, client := newTestWatchPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
if tc.reactor != nil {
|
||||
client.PrependReactor("*", "*", tc.reactor)
|
||||
}
|
||||
|
||||
attacher, err0 := plug.NewAttacher()
|
||||
if err0 != nil {
|
||||
@ -391,7 +423,7 @@ func TestAttacherDetach(t *testing.T) {
|
||||
func TestAttacherGetDeviceMountPath(t *testing.T) {
|
||||
// Setup
|
||||
// Create a new attacher
|
||||
plug, _, tmpDir := newTestWatchPlugin(t)
|
||||
plug, _, tmpDir, _ := newTestWatchPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
attacher, err0 := plug.NewAttacher()
|
||||
if err0 != nil {
|
||||
@ -474,7 +506,7 @@ func TestAttacherMountDevice(t *testing.T) {
|
||||
devicePath: "",
|
||||
deviceMountPath: "path2",
|
||||
stageUnstageSet: true,
|
||||
shouldFail: true,
|
||||
shouldFail: false,
|
||||
},
|
||||
{
|
||||
testName: "no device mount path",
|
||||
@ -491,10 +523,6 @@ func TestAttacherMountDevice(t *testing.T) {
|
||||
deviceMountPath: "path2",
|
||||
stageUnstageSet: false,
|
||||
},
|
||||
{
|
||||
testName: "stage_unstage not set no vars should not fail",
|
||||
stageUnstageSet: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
@ -504,7 +532,7 @@ func TestAttacherMountDevice(t *testing.T) {
|
||||
|
||||
// Setup
|
||||
// Create a new attacher
|
||||
plug, fakeWatcher, tmpDir := newTestWatchPlugin(t)
|
||||
plug, fakeWatcher, tmpDir, _ := newTestWatchPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
attacher, err0 := plug.NewAttacher()
|
||||
if err0 != nil {
|
||||
@ -539,7 +567,7 @@ func TestAttacherMountDevice(t *testing.T) {
|
||||
if !tc.shouldFail {
|
||||
t.Errorf("test should not fail, but error occurred: %v", err)
|
||||
}
|
||||
return
|
||||
continue
|
||||
}
|
||||
if err == nil && tc.shouldFail {
|
||||
t.Errorf("test should fail, but no error occurred")
|
||||
@ -551,8 +579,8 @@ func TestAttacherMountDevice(t *testing.T) {
|
||||
numStaged = 0
|
||||
}
|
||||
|
||||
cdc := csiAttacher.csiClient.(*csiDriverClient)
|
||||
staged := cdc.nodeClient.(*fake.NodeClient).GetNodeStagedVolumes()
|
||||
cdc := csiAttacher.csiClient.(*fakeCsiDriverClient)
|
||||
staged := cdc.nodeClient.GetNodeStagedVolumes()
|
||||
if len(staged) != numStaged {
|
||||
t.Errorf("got wrong number of staged volumes, expecting %v got: %v", numStaged, len(staged))
|
||||
}
|
||||
@ -573,42 +601,52 @@ func TestAttacherUnmountDevice(t *testing.T) {
|
||||
testName string
|
||||
volID string
|
||||
deviceMountPath string
|
||||
jsonFile string
|
||||
createPV bool
|
||||
stageUnstageSet bool
|
||||
shouldFail bool
|
||||
}{
|
||||
{
|
||||
testName: "normal",
|
||||
testName: "normal, json file exists",
|
||||
volID: "project/zone/test-vol1",
|
||||
deviceMountPath: "/tmp/csi-test049507108/plugins/csi/pv/test-pv-name/globalmount",
|
||||
deviceMountPath: "plugins/csi/pv/test-pv-name/globalmount",
|
||||
jsonFile: `{"driverName": "csi", "volumeHandle":"project/zone/test-vol1"}`,
|
||||
createPV: false,
|
||||
stageUnstageSet: true,
|
||||
},
|
||||
{
|
||||
testName: "no device mount path",
|
||||
testName: "normal, json file doesn't exist -> use PV",
|
||||
volID: "project/zone/test-vol1",
|
||||
deviceMountPath: "",
|
||||
deviceMountPath: "plugins/csi/pv/test-pv-name/globalmount",
|
||||
jsonFile: "",
|
||||
createPV: true,
|
||||
stageUnstageSet: true,
|
||||
},
|
||||
{
|
||||
testName: "invalid json -> use PV",
|
||||
volID: "project/zone/test-vol1",
|
||||
deviceMountPath: "plugins/csi/pv/test-pv-name/globalmount",
|
||||
jsonFile: `{"driverName"}}`,
|
||||
createPV: true,
|
||||
stageUnstageSet: true,
|
||||
},
|
||||
{
|
||||
testName: "no json, no PV.volID",
|
||||
volID: "",
|
||||
deviceMountPath: "plugins/csi/pv/test-pv-name/globalmount",
|
||||
jsonFile: "",
|
||||
createPV: true,
|
||||
shouldFail: true,
|
||||
},
|
||||
{
|
||||
testName: "missing part of device mount path",
|
||||
testName: "no json, no PV",
|
||||
volID: "project/zone/test-vol1",
|
||||
deviceMountPath: "/tmp/csi-test049507108/plugins/csi/pv/test-pv-name/globalmount",
|
||||
deviceMountPath: "plugins/csi/pv/test-pv-name/globalmount",
|
||||
jsonFile: "",
|
||||
createPV: false,
|
||||
stageUnstageSet: true,
|
||||
shouldFail: true,
|
||||
},
|
||||
{
|
||||
testName: "test volume name mismatch",
|
||||
volID: "project/zone/test-vol1",
|
||||
deviceMountPath: "/tmp/csi-test049507108/plugins/csi/pv/test-pv-name/globalmount",
|
||||
stageUnstageSet: true,
|
||||
shouldFail: true,
|
||||
},
|
||||
{
|
||||
testName: "stage_unstage not set",
|
||||
volID: "project/zone/test-vol1",
|
||||
deviceMountPath: "/tmp/csi-test049507108/plugins/csi/pv/test-pv-name/globalmount",
|
||||
stageUnstageSet: false,
|
||||
},
|
||||
{
|
||||
testName: "stage_unstage not set no vars should not fail",
|
||||
stageUnstageSet: false,
|
||||
@ -619,7 +657,7 @@ func TestAttacherUnmountDevice(t *testing.T) {
|
||||
t.Logf("Running test case: %s", tc.testName)
|
||||
// Setup
|
||||
// Create a new attacher
|
||||
plug, _, tmpDir := newTestWatchPlugin(t)
|
||||
plug, _, tmpDir, _ := newTestWatchPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
attacher, err0 := plug.NewAttacher()
|
||||
if err0 != nil {
|
||||
@ -628,29 +666,45 @@ func TestAttacherUnmountDevice(t *testing.T) {
|
||||
csiAttacher := attacher.(*csiAttacher)
|
||||
csiAttacher.csiClient = setupClient(t, tc.stageUnstageSet)
|
||||
|
||||
// Add the volume to NodeStagedVolumes
|
||||
cdc := csiAttacher.csiClient.(*csiDriverClient)
|
||||
cdc.nodeClient.(*fake.NodeClient).AddNodeStagedVolume(tc.volID, tc.deviceMountPath)
|
||||
if tc.deviceMountPath != "" {
|
||||
tc.deviceMountPath = filepath.Join(tmpDir, tc.deviceMountPath)
|
||||
}
|
||||
|
||||
// Make the PV for this object
|
||||
// Add the volume to NodeStagedVolumes
|
||||
cdc := csiAttacher.csiClient.(*fakeCsiDriverClient)
|
||||
cdc.nodeClient.AddNodeStagedVolume(tc.volID, tc.deviceMountPath)
|
||||
|
||||
// Make JSON for this object
|
||||
if tc.deviceMountPath != "" {
|
||||
if err := os.MkdirAll(tc.deviceMountPath, 0755); err != nil {
|
||||
t.Fatalf("error creating directory %s: %s", tc.deviceMountPath, err)
|
||||
}
|
||||
}
|
||||
dir := filepath.Dir(tc.deviceMountPath)
|
||||
// dir is now /var/lib/kubelet/plugins/kubernetes.io/csi/pv/{pvname}
|
||||
pvName := filepath.Base(dir)
|
||||
pv := makeTestPV(pvName, 5, "csi", tc.volID)
|
||||
_, err := csiAttacher.k8s.CoreV1().PersistentVolumes().Create(pv)
|
||||
if err != nil && !tc.shouldFail {
|
||||
t.Fatalf("Failed to create PV: %v", err)
|
||||
if tc.jsonFile != "" {
|
||||
dataPath := filepath.Join(dir, volDataFileName)
|
||||
if err := ioutil.WriteFile(dataPath, []byte(tc.jsonFile), 0644); err != nil {
|
||||
t.Fatalf("error creating %s: %s", dataPath, err)
|
||||
}
|
||||
}
|
||||
if tc.createPV {
|
||||
// Make the PV for this object
|
||||
pvName := filepath.Base(dir)
|
||||
pv := makeTestPV(pvName, 5, "csi", tc.volID)
|
||||
_, err := csiAttacher.k8s.CoreV1().PersistentVolumes().Create(pv)
|
||||
if err != nil && !tc.shouldFail {
|
||||
t.Fatalf("Failed to create PV: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Run
|
||||
err = csiAttacher.UnmountDevice(tc.deviceMountPath)
|
||||
|
||||
err := csiAttacher.UnmountDevice(tc.deviceMountPath)
|
||||
// Verify
|
||||
if err != nil {
|
||||
if !tc.shouldFail {
|
||||
t.Errorf("test should not fail, but error occurred: %v", err)
|
||||
}
|
||||
return
|
||||
continue
|
||||
}
|
||||
if err == nil && tc.shouldFail {
|
||||
t.Errorf("test should fail, but no error occurred")
|
||||
@ -661,7 +715,7 @@ func TestAttacherUnmountDevice(t *testing.T) {
|
||||
if !tc.stageUnstageSet {
|
||||
expectedSet = 1
|
||||
}
|
||||
staged := cdc.nodeClient.(*fake.NodeClient).GetNodeStagedVolumes()
|
||||
staged := cdc.nodeClient.GetNodeStagedVolumes()
|
||||
if len(staged) != expectedSet {
|
||||
t.Errorf("got wrong number of staged volumes, expecting %v got: %v", expectedSet, len(staged))
|
||||
}
|
||||
@ -673,18 +727,30 @@ func TestAttacherUnmountDevice(t *testing.T) {
|
||||
t.Errorf("could not find expected staged volume: %s", tc.volID)
|
||||
}
|
||||
|
||||
if tc.jsonFile != "" && !tc.shouldFail {
|
||||
dataPath := filepath.Join(dir, volDataFileName)
|
||||
if _, err := os.Stat(dataPath); !os.IsNotExist(err) {
|
||||
if err != nil {
|
||||
t.Errorf("error checking file %s: %s", dataPath, err)
|
||||
} else {
|
||||
t.Errorf("json file %s should not exists, but it does", dataPath)
|
||||
}
|
||||
} else {
|
||||
t.Logf("json file %s was correctly removed", dataPath)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// create a plugin mgr to load plugins and setup a fake client
|
||||
func newTestWatchPlugin(t *testing.T) (*csiPlugin, *watch.FakeWatcher, string) {
|
||||
func newTestWatchPlugin(t *testing.T) (*csiPlugin, *watch.RaceFreeFakeWatcher, string, *fakeclient.Clientset) {
|
||||
tmpDir, err := utiltesting.MkTmpdir("csi-test")
|
||||
if err != nil {
|
||||
t.Fatalf("can't create temp dir: %v", err)
|
||||
}
|
||||
|
||||
fakeClient := fakeclient.NewSimpleClientset()
|
||||
fakeWatcher := watch.NewFake()
|
||||
fakeWatcher := watch.NewRaceFreeFake()
|
||||
fakeClient.Fake.PrependWatchReactor("*", core.DefaultWatchReactor(fakeWatcher, nil))
|
||||
fakeClient.Fake.WatchReactionChain = fakeClient.Fake.WatchReactionChain[:1]
|
||||
host := volumetest.NewFakeVolumeHost(
|
||||
@ -705,5 +771,5 @@ func newTestWatchPlugin(t *testing.T) (*csiPlugin, *watch.FakeWatcher, string) {
|
||||
t.Fatalf("cannot assert plugin to be type csiPlugin")
|
||||
}
|
||||
|
||||
return csiPlug, fakeWatcher, tmpDir
|
||||
return csiPlug, fakeWatcher, tmpDir, fakeClient
|
||||
}
|
||||
|
283 vendor/k8s.io/kubernetes/pkg/volume/csi/csi_block.go generated vendored Normal file
@ -0,0 +1,283 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package csi
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
)
|
||||
|
||||
type csiBlockMapper struct {
|
||||
k8s kubernetes.Interface
|
||||
csiClient csiClient
|
||||
plugin *csiPlugin
|
||||
driverName string
|
||||
specName string
|
||||
volumeID string
|
||||
readOnly bool
|
||||
spec *volume.Spec
|
||||
podUID types.UID
|
||||
volumeInfo map[string]string
|
||||
}
|
||||
|
||||
var _ volume.BlockVolumeMapper = &csiBlockMapper{}
|
||||
|
||||
// GetGlobalMapPath returns a path (on the node) where the devicePath will be symlinked to
|
||||
// Example: plugins/kubernetes.io/csi/volumeDevices/{volumeID}
|
||||
func (m *csiBlockMapper) GetGlobalMapPath(spec *volume.Spec) (string, error) {
|
||||
dir := getVolumeDevicePluginDir(spec.Name(), m.plugin.host)
|
||||
glog.V(4).Infof(log("blockMapper.GetGlobalMapPath = %s", dir))
|
||||
return dir, nil
|
||||
}
|
||||
|
||||
// GetPodDeviceMapPath returns pod's device map path and volume name
|
||||
// path: pods/{podUid}/volumeDevices/kubernetes.io~csi/, {volumeID}
|
||||
func (m *csiBlockMapper) GetPodDeviceMapPath() (string, string) {
|
||||
path, specName := m.plugin.host.GetPodVolumeDeviceDir(m.podUID, csiPluginName), m.specName
|
||||
glog.V(4).Infof(log("blockMapper.GetPodDeviceMapPath = %s", path))
|
||||
return path, specName
|
||||
}
|
||||
|
||||
// SetUpDevice ensures the device is attached returns path where the device is located.
|
||||
func (m *csiBlockMapper) SetUpDevice() (string, error) {
|
||||
if !m.plugin.blockEnabled {
|
||||
return "", errors.New("CSIBlockVolume feature not enabled")
|
||||
}
|
||||
|
||||
glog.V(4).Infof(log("blockMapper.SetupDevice called"))
|
||||
|
||||
if m.spec == nil {
|
||||
glog.Error(log("blockMapper.Map spec is nil"))
|
||||
return "", fmt.Errorf("spec is nil")
|
||||
}
|
||||
csiSource, err := getCSISourceFromSpec(m.spec)
|
||||
if err != nil {
|
||||
glog.Error(log("blockMapper.SetupDevice failed to get CSI persistent source: %v", err))
|
||||
return "", err
|
||||
}
|
||||
|
||||
globalMapPath, err := m.GetGlobalMapPath(m.spec)
|
||||
if err != nil {
|
||||
glog.Error(log("blockMapper.SetupDevice failed to get global map path: %v", err))
|
||||
return "", err
|
||||
}
|
||||
|
||||
csi := m.csiClient
|
||||
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
|
||||
defer cancel()
|
||||
|
||||
// Check whether "STAGE_UNSTAGE_VOLUME" is set
|
||||
stageUnstageSet, err := hasStageUnstageCapability(ctx, csi)
|
||||
if err != nil {
|
||||
glog.Error(log("blockMapper.SetupDevice failed to check STAGE_UNSTAGE_VOLUME capability: %v", err))
|
||||
return "", err
|
||||
}
|
||||
if !stageUnstageSet {
|
||||
glog.Infof(log("blockMapper.SetupDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..."))
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// Start MountDevice
|
||||
nodeName := string(m.plugin.host.GetNodeName())
|
||||
attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, nodeName)
|
||||
|
||||
// search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
|
||||
attachment, err := m.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
|
||||
if err != nil {
|
||||
glog.Error(log("blockMapper.SetupDevice failed to get volume attachment [id=%v]: %v", attachID, err))
|
||||
return "", err
|
||||
}
|
||||
|
||||
if attachment == nil {
|
||||
glog.Error(log("blockMapper.SetupDevice unable to find VolumeAttachment [id=%s]", attachID))
|
||||
return "", errors.New("no existing VolumeAttachment found")
|
||||
}
|
||||
publishVolumeInfo := attachment.Status.AttachmentMetadata
|
||||
|
||||
nodeStageSecrets := map[string]string{}
|
||||
if csiSource.NodeStageSecretRef != nil {
|
||||
nodeStageSecrets, err = getCredentialsFromSecret(m.k8s, csiSource.NodeStageSecretRef)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get NodeStageSecretRef %s/%s: %v",
|
||||
csiSource.NodeStageSecretRef.Namespace, csiSource.NodeStageSecretRef.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
// create globalMapPath before call to NodeStageVolume
|
||||
if err := os.MkdirAll(globalMapPath, 0750); err != nil {
|
||||
glog.Error(log("blockMapper.SetupDevice failed to create dir %s: %v", globalMapPath, err))
|
||||
return "", err
|
||||
}
|
||||
glog.V(4).Info(log("blockMapper.SetupDevice created global device map path successfully [%s]", globalMapPath))
|
||||
|
||||
//TODO (vladimirvivien) implement better AccessModes mapping between k8s and CSI
|
||||
accessMode := v1.ReadWriteOnce
|
||||
if m.spec.PersistentVolume.Spec.AccessModes != nil {
|
||||
accessMode = m.spec.PersistentVolume.Spec.AccessModes[0]
|
||||
}
|
||||
|
||||
err = csi.NodeStageVolume(ctx,
|
||||
csiSource.VolumeHandle,
|
||||
publishVolumeInfo,
|
||||
globalMapPath,
|
||||
fsTypeBlockName,
|
||||
accessMode,
|
||||
nodeStageSecrets,
|
||||
csiSource.VolumeAttributes)
|
||||
|
||||
if err != nil {
|
||||
glog.Error(log("blockMapper.SetupDevice failed: %v", err))
|
||||
if err := os.RemoveAll(globalMapPath); err != nil {
|
||||
glog.Error(log("blockMapper.SetupDevice failed to remove dir after a NodeStageVolume() error [%s]: %v", globalMapPath, err))
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
|
||||
glog.V(4).Infof(log("blockMapper.SetupDevice successfully requested NodeStageVolume [%s]", globalMapPath))
|
||||
return globalMapPath, nil
|
||||
}
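// fetchPublishInfo is an illustrative sketch (not part of the vendored
// change) of the VolumeAttachment lookup performed by SetUpDevice above and
// MapDevice below. It assumes the client-go kubernetes and meta/v1 imports
// this file already uses; attachID is assumed to come from getAttachmentName,
// and the returned AttachmentMetadata is what gets forwarded to
// NodeStageVolume/NodePublishVolume as publish info.
func fetchPublishInfo(k8s kubernetes.Interface, attachID string) (map[string]string, error) {
	attachment, err := k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
	if err != nil {
		return nil, err
	}
	if attachment == nil {
		return nil, errors.New("no existing VolumeAttachment found")
	}
	return attachment.Status.AttachmentMetadata, nil
}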
|
||||
|
||||
func (m *csiBlockMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, volumeMapName string, podUID types.UID) error {
|
||||
if !m.plugin.blockEnabled {
|
||||
return errors.New("CSIBlockVolume feature not enabled")
|
||||
}
|
||||
|
||||
glog.V(4).Infof(log("blockMapper.MapDevice mapping block device %s", devicePath))
|
||||
|
||||
if m.spec == nil {
|
||||
glog.Error(log("blockMapper.MapDevice spec is nil"))
|
||||
return fmt.Errorf("spec is nil")
|
||||
}
|
||||
|
||||
csiSource, err := getCSISourceFromSpec(m.spec)
|
||||
if err != nil {
|
||||
glog.Error(log("blockMapper.Map failed to get CSI persistent source: %v", err))
|
||||
return err
|
||||
}
|
||||
|
||||
dir := filepath.Join(volumeMapPath, volumeMapName)
|
||||
csi := m.csiClient
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
|
||||
defer cancel()
|
||||
|
||||
nodeName := string(m.plugin.host.GetNodeName())
|
||||
attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, nodeName)
|
||||
|
||||
// search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
|
||||
attachment, err := m.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
|
||||
if err != nil {
|
||||
glog.Error(log("blockMapper.MapDevice failed to get volume attachment [id=%v]: %v", attachID, err))
|
||||
return err
|
||||
}
|
||||
|
||||
if attachment == nil {
|
||||
glog.Error(log("blockMapper.MapDevice unable to find VolumeAttachment [id=%s]", attachID))
|
||||
return errors.New("no existing VolumeAttachment found")
|
||||
}
|
||||
publishVolumeInfo := attachment.Status.AttachmentMetadata
|
||||
|
||||
nodePublishSecrets := map[string]string{}
|
||||
if csiSource.NodePublishSecretRef != nil {
|
||||
nodePublishSecrets, err = getCredentialsFromSecret(m.k8s, csiSource.NodePublishSecretRef)
|
||||
if err != nil {
|
||||
glog.Errorf("blockMapper.MapDevice failed to get NodePublishSecretRef %s/%s: %v",
|
||||
csiSource.NodePublishSecretRef.Namespace, csiSource.NodePublishSecretRef.Name, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := os.MkdirAll(dir, 0750); err != nil {
|
||||
glog.Error(log("blockMapper.MapDevice failed to create dir %#v: %v", dir, err))
|
||||
return err
|
||||
}
|
||||
glog.V(4).Info(log("blockMapper.MapDevice created NodePublish path [%s]", dir))
|
||||
|
||||
//TODO (vladimirvivien) implement better AccessModes mapping between k8s and CSI
|
||||
accessMode := v1.ReadWriteOnce
|
||||
if m.spec.PersistentVolume.Spec.AccessModes != nil {
|
||||
accessMode = m.spec.PersistentVolume.Spec.AccessModes[0]
|
||||
}
|
||||
|
||||
err = csi.NodePublishVolume(
|
||||
ctx,
|
||||
m.volumeID,
|
||||
m.readOnly,
|
||||
globalMapPath,
|
||||
dir,
|
||||
accessMode,
|
||||
publishVolumeInfo,
|
||||
csiSource.VolumeAttributes,
|
||||
nodePublishSecrets,
|
||||
fsTypeBlockName,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
glog.Errorf(log("blockMapper.MapDevice failed: %v", err))
|
||||
if err := os.RemoveAll(dir); err != nil {
|
||||
glog.Error(log("blockMapper.MapDevice failed to remove mount dir after a NodePublish() error [%s]: %v", dir, err))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var _ volume.BlockVolumeUnmapper = &csiBlockMapper{}
|
||||
|
||||
// TearDownDevice removes traces of the SetUpDevice.
|
||||
func (m *csiBlockMapper) TearDownDevice(globalMapPath, devicePath string) error {
|
||||
if !m.plugin.blockEnabled {
|
||||
return errors.New("CSIBlockVolume feature not enabled")
|
||||
}
|
||||
|
||||
glog.V(4).Infof(log("unmapper.TearDownDevice(globalMapPath=%s; devicePath=%s)", globalMapPath, devicePath))
|
||||
|
||||
csi := m.csiClient
|
||||
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
|
||||
defer cancel()
|
||||
|
||||
// unmap global device map path
|
||||
if err := csi.NodeUnstageVolume(ctx, m.volumeID, globalMapPath); err != nil {
|
||||
glog.Errorf(log("blockMapper.TearDownDevice failed: %v", err))
|
||||
return err
|
||||
}
|
||||
glog.V(4).Infof(log("blockMapper.TearDownDevice NodeUnstageVolume successfully [%s]", globalMapPath))
|
||||
|
||||
// request to remove pod volume map path also
|
||||
podVolumePath, volumeName := m.GetPodDeviceMapPath()
|
||||
podVolumeMapPath := filepath.Join(podVolumePath, volumeName)
|
||||
if err := csi.NodeUnpublishVolume(ctx, m.volumeID, podVolumeMapPath); err != nil {
|
||||
glog.Error(log("blockMapper.TearDownDevice failed: %v", err))
|
||||
return err
|
||||
}
|
||||
|
||||
glog.V(4).Infof(log("blockMapper.TearDownDevice NodeUnpublished successfully [%s]", podVolumeMapPath))
|
||||
|
||||
return nil
|
||||
}
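// teardownBlockVolume is an illustrative sketch (not part of the vendored
// change) condensing the TearDownDevice sequence above: unstage the device
// from the global map path, then unpublish the per-pod map path derived from
// GetPodDeviceMapPath.
func teardownBlockVolume(ctx context.Context, csi csiClient, volID, globalMapPath, podVolumeMapPath string) error {
	if err := csi.NodeUnstageVolume(ctx, volID, globalMapPath); err != nil {
		return err
	}
	return csi.NodeUnpublishVolume(ctx, volID, podVolumeMapPath)
}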
|
264
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_block_test.go
generated
vendored
Normal file
264
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_block_test.go
generated
vendored
Normal file
@ -0,0 +1,264 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package csi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
api "k8s.io/api/core/v1"
|
||||
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
fakeclient "k8s.io/client-go/kubernetes/fake"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
volumetest "k8s.io/kubernetes/pkg/volume/testing"
|
||||
)
|
||||
|
||||
func TestBlockMapperGetGlobalMapPath(t *testing.T) {
|
||||
plug, tmpDir := newTestPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
// TODO (vladimirvivien) specName with slashes will not work
|
||||
testCases := []struct {
|
||||
name string
|
||||
specVolumeName string
|
||||
path string
|
||||
}{
|
||||
{
|
||||
name: "simple specName",
|
||||
specVolumeName: "spec-0",
|
||||
path: path.Join(tmpDir, fmt.Sprintf("plugins/kubernetes.io/csi/volumeDevices/%s/%s", "spec-0", "dev")),
|
||||
},
|
||||
{
|
||||
name: "specName with dots",
|
||||
specVolumeName: "test.spec.1",
|
||||
path: path.Join(tmpDir, fmt.Sprintf("plugins/kubernetes.io/csi/volumeDevices/%s/%s", "test.spec.1", "dev")),
|
||||
},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
t.Logf("test case: %s", tc.name)
|
||||
pv := makeTestPV(tc.specVolumeName, 10, testDriver, testVol)
|
||||
spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
|
||||
mapper, err := plug.NewBlockVolumeMapper(
|
||||
spec,
|
||||
&api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
|
||||
volume.VolumeOptions{},
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to make a new Mapper: %v", err)
|
||||
}
|
||||
csiMapper := mapper.(*csiBlockMapper)
|
||||
|
||||
path, err := csiMapper.GetGlobalMapPath(spec)
|
||||
if err != nil {
|
||||
t.Errorf("mapper GetGlobalMapPath failed: %v", err)
|
||||
}
|
||||
|
||||
if tc.path != path {
|
||||
t.Errorf("expecting path %s, got %s", tc.path, path)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockMapperSetupDevice(t *testing.T) {
|
||||
plug, tmpDir := newTestPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
fakeClient := fakeclient.NewSimpleClientset()
|
||||
host := volumetest.NewFakeVolumeHostWithNodeName(
|
||||
tmpDir,
|
||||
fakeClient,
|
||||
nil,
|
||||
"fakeNode",
|
||||
)
|
||||
plug.host = host
|
||||
pv := makeTestPV("test-pv", 10, testDriver, testVol)
|
||||
pvName := pv.GetName()
|
||||
nodeName := string(plug.host.GetNodeName())
|
||||
spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
|
||||
|
||||
// MapDevice
|
||||
mapper, err := plug.NewBlockVolumeMapper(
|
||||
spec,
|
||||
&api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
|
||||
volume.VolumeOptions{},
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create new mapper: %v", err)
|
||||
}
|
||||
csiMapper := mapper.(*csiBlockMapper)
|
||||
csiMapper.csiClient = setupClient(t, true)
|
||||
|
||||
attachID := getAttachmentName(csiMapper.volumeID, csiMapper.driverName, string(nodeName))
|
||||
attachment := makeTestAttachment(attachID, nodeName, pvName)
|
||||
attachment.Status.Attached = true
|
||||
_, err = csiMapper.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to setup VolumeAttachment: %v", err)
|
||||
}
|
||||
t.Log("created attachement ", attachID)
|
||||
|
||||
devicePath, err := csiMapper.SetUpDevice()
|
||||
if err != nil {
|
||||
t.Fatalf("mapper failed to SetupDevice: %v", err)
|
||||
}
|
||||
|
||||
globalMapPath, err := csiMapper.GetGlobalMapPath(spec)
|
||||
if err != nil {
|
||||
t.Fatalf("mapper failed to GetGlobalMapPath: %v", err)
|
||||
}
|
||||
|
||||
if devicePath != globalMapPath {
|
||||
t.Fatalf("mapper.SetupDevice returned unexpected path %s instead of %v", devicePath, globalMapPath)
|
||||
}
|
||||
|
||||
vols := csiMapper.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodeStagedVolumes()
|
||||
if vols[csiMapper.volumeID] != devicePath {
|
||||
t.Error("csi server may not have received NodePublishVolume call")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockMapperMapDevice(t *testing.T) {
|
||||
plug, tmpDir := newTestPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
fakeClient := fakeclient.NewSimpleClientset()
|
||||
host := volumetest.NewFakeVolumeHostWithNodeName(
|
||||
tmpDir,
|
||||
fakeClient,
|
||||
nil,
|
||||
"fakeNode",
|
||||
)
|
||||
plug.host = host
|
||||
pv := makeTestPV("test-pv", 10, testDriver, testVol)
|
||||
pvName := pv.GetName()
|
||||
nodeName := string(plug.host.GetNodeName())
|
||||
spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
|
||||
|
||||
// MapDevice
|
||||
mapper, err := plug.NewBlockVolumeMapper(
|
||||
spec,
|
||||
&api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
|
||||
volume.VolumeOptions{},
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create new mapper: %v", err)
|
||||
}
|
||||
csiMapper := mapper.(*csiBlockMapper)
|
||||
csiMapper.csiClient = setupClient(t, true)
|
||||
|
||||
attachID := getAttachmentName(csiMapper.volumeID, csiMapper.driverName, string(nodeName))
|
||||
attachment := makeTestAttachment(attachID, nodeName, pvName)
|
||||
attachment.Status.Attached = true
|
||||
_, err = csiMapper.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to setup VolumeAttachment: %v", err)
|
||||
}
|
||||
t.Log("created attachement ", attachID)
|
||||
|
||||
devicePath, err := csiMapper.SetUpDevice()
|
||||
if err != nil {
|
||||
t.Fatalf("mapper failed to SetupDevice: %v", err)
|
||||
}
|
||||
globalMapPath, err := csiMapper.GetGlobalMapPath(csiMapper.spec)
|
||||
if err != nil {
|
||||
t.Fatalf("mapper failed to GetGlobalMapPath: %v", err)
|
||||
}
|
||||
|
||||
// Map device to global and pod device map path
|
||||
volumeMapPath, volName := csiMapper.GetPodDeviceMapPath()
|
||||
err = csiMapper.MapDevice(devicePath, globalMapPath, volumeMapPath, volName, csiMapper.podUID)
|
||||
if err != nil {
|
||||
t.Fatalf("mapper failed to GetGlobalMapPath: %v", err)
|
||||
}
|
||||
|
||||
if _, err := os.Stat(filepath.Join(volumeMapPath, volName)); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
t.Errorf("mapper.MapDevice failed, volume path not created: %s", volumeMapPath)
|
||||
} else {
|
||||
t.Errorf("mapper.MapDevice failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
pubs := csiMapper.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodePublishedVolumes()
|
||||
if pubs[csiMapper.volumeID] != volumeMapPath {
|
||||
t.Error("csi server may not have received NodePublishVolume call")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockMapperTearDownDevice(t *testing.T) {
|
||||
plug, tmpDir := newTestPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
fakeClient := fakeclient.NewSimpleClientset()
|
||||
host := volumetest.NewFakeVolumeHostWithNodeName(
|
||||
tmpDir,
|
||||
fakeClient,
|
||||
nil,
|
||||
"fakeNode",
|
||||
)
|
||||
plug.host = host
|
||||
pv := makeTestPV("test-pv", 10, testDriver, testVol)
|
||||
spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
|
||||
|
||||
// save volume data
|
||||
dir := getVolumeDeviceDataDir(pv.ObjectMeta.Name, plug.host)
|
||||
if err := os.MkdirAll(dir, 0755); err != nil && !os.IsNotExist(err) {
|
||||
t.Errorf("failed to create dir [%s]: %v", dir, err)
|
||||
}
|
||||
|
||||
if err := saveVolumeData(
|
||||
dir,
|
||||
volDataFileName,
|
||||
map[string]string{
|
||||
volDataKey.specVolID: pv.ObjectMeta.Name,
|
||||
volDataKey.driverName: testDriver,
|
||||
volDataKey.volHandle: testVol,
|
||||
},
|
||||
); err != nil {
|
||||
t.Fatalf("failed to save volume data: %v", err)
|
||||
}
|
||||
|
||||
unmapper, err := plug.NewBlockVolumeUnmapper(pv.ObjectMeta.Name, testPodUID)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to make a new Unmapper: %v", err)
|
||||
}
|
||||
|
||||
csiUnmapper := unmapper.(*csiBlockMapper)
|
||||
csiUnmapper.csiClient = setupClient(t, true)
|
||||
|
||||
globalMapPath, err := csiUnmapper.GetGlobalMapPath(spec)
|
||||
if err != nil {
|
||||
t.Fatalf("unmapper failed to GetGlobalMapPath: %v", err)
|
||||
}
|
||||
|
||||
err = csiUnmapper.TearDownDevice(globalMapPath, "/dev/test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// ensure csi client call and node unstaged
|
||||
vols := csiUnmapper.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodeStagedVolumes()
|
||||
if _, ok := vols[csiUnmapper.volumeID]; ok {
|
||||
t.Error("csi server may not have received NodeUnstageVolume call")
|
||||
}
|
||||
|
||||
// ensure csi client call and node unpublished
|
||||
pubs := csiUnmapper.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodePublishedVolumes()
|
||||
if _, ok := pubs[csiUnmapper.volumeID]; ok {
|
||||
t.Error("csi server may not have received NodeUnpublishVolume call")
|
||||
}
|
||||
}
|
172
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_client.go
generated
vendored
172
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_client.go
generated
vendored
@ -17,20 +17,23 @@ limitations under the License.
|
||||
package csi
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
csipb "github.com/container-storage-interface/spec/lib/go/csi/v0"
|
||||
"github.com/golang/glog"
|
||||
grpctx "golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
api "k8s.io/api/core/v1"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
)
|
||||
|
||||
type csiClient interface {
|
||||
NodePublishVolume(
|
||||
ctx grpctx.Context,
|
||||
ctx context.Context,
|
||||
volumeid string,
|
||||
readOnly bool,
|
||||
stagingTargetPath string,
|
||||
@ -42,11 +45,11 @@ type csiClient interface {
|
||||
fsType string,
|
||||
) error
|
||||
NodeUnpublishVolume(
|
||||
ctx grpctx.Context,
|
||||
ctx context.Context,
|
||||
volID string,
|
||||
targetPath string,
|
||||
) error
|
||||
NodeStageVolume(ctx grpctx.Context,
|
||||
NodeStageVolume(ctx context.Context,
|
||||
volID string,
|
||||
publishVolumeInfo map[string]string,
|
||||
stagingTargetPath string,
|
||||
@ -55,55 +58,25 @@ type csiClient interface {
|
||||
nodeStageSecrets map[string]string,
|
||||
volumeAttribs map[string]string,
|
||||
) error
|
||||
NodeUnstageVolume(ctx grpctx.Context, volID, stagingTargetPath string) error
|
||||
NodeGetCapabilities(ctx grpctx.Context) ([]*csipb.NodeServiceCapability, error)
|
||||
NodeUnstageVolume(ctx context.Context, volID, stagingTargetPath string) error
|
||||
NodeGetCapabilities(ctx context.Context) ([]*csipb.NodeServiceCapability, error)
|
||||
}
|
||||
|
||||
// csiClient encapsulates all csi-plugin methods
|
||||
type csiDriverClient struct {
|
||||
network string
|
||||
addr string
|
||||
conn *grpc.ClientConn
|
||||
idClient csipb.IdentityClient
|
||||
nodeClient csipb.NodeClient
|
||||
ctrlClient csipb.ControllerClient
|
||||
versionAsserted bool
|
||||
versionSupported bool
|
||||
publishAsserted bool
|
||||
publishCapable bool
|
||||
driverName string
|
||||
nodeClient csipb.NodeClient
|
||||
}
|
||||
|
||||
func newCsiDriverClient(network, addr string) *csiDriverClient {
|
||||
return &csiDriverClient{network: network, addr: addr}
|
||||
}
|
||||
var _ csiClient = &csiDriverClient{}
|
||||
|
||||
// assertConnection ensures a valid connection has been established
|
||||
// if not, it creates a new connection and associated clients
|
||||
func (c *csiDriverClient) assertConnection() error {
|
||||
if c.conn == nil {
|
||||
conn, err := grpc.Dial(
|
||||
c.addr,
|
||||
grpc.WithInsecure(),
|
||||
grpc.WithDialer(func(target string, timeout time.Duration) (net.Conn, error) {
|
||||
return net.Dial(c.network, target)
|
||||
}),
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.conn = conn
|
||||
c.idClient = csipb.NewIdentityClient(conn)
|
||||
c.nodeClient = csipb.NewNodeClient(conn)
|
||||
c.ctrlClient = csipb.NewControllerClient(conn)
|
||||
|
||||
// set supported version
|
||||
}
|
||||
|
||||
return nil
|
||||
func newCsiDriverClient(driverName string) *csiDriverClient {
|
||||
c := &csiDriverClient{driverName: driverName}
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *csiDriverClient) NodePublishVolume(
|
||||
ctx grpctx.Context,
|
||||
ctx context.Context,
|
||||
volID string,
|
||||
readOnly bool,
|
||||
stagingTargetPath string,
|
||||
@ -121,10 +94,13 @@ func (c *csiDriverClient) NodePublishVolume(
|
||||
if targetPath == "" {
|
||||
return errors.New("missing target path")
|
||||
}
|
||||
if err := c.assertConnection(); err != nil {
|
||||
glog.Errorf("%v: failed to assert a connection: %v", csiPluginName, err)
|
||||
|
||||
conn, err := newGrpcConn(c.driverName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer conn.Close()
|
||||
nodeClient := csipb.NewNodeClient(conn)
|
||||
|
||||
req := &csipb.NodePublishVolumeRequest{
|
||||
VolumeId: volID,
|
||||
@ -137,22 +113,29 @@ func (c *csiDriverClient) NodePublishVolume(
|
||||
AccessMode: &csipb.VolumeCapability_AccessMode{
|
||||
Mode: asCSIAccessMode(accessMode),
|
||||
},
|
||||
AccessType: &csipb.VolumeCapability_Mount{
|
||||
Mount: &csipb.VolumeCapability_MountVolume{
|
||||
FsType: fsType,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
if stagingTargetPath != "" {
|
||||
req.StagingTargetPath = stagingTargetPath
|
||||
}
|
||||
|
||||
_, err := c.nodeClient.NodePublishVolume(ctx, req)
|
||||
if fsType == fsTypeBlockName {
|
||||
req.VolumeCapability.AccessType = &csipb.VolumeCapability_Block{
|
||||
Block: &csipb.VolumeCapability_BlockVolume{},
|
||||
}
|
||||
} else {
|
||||
req.VolumeCapability.AccessType = &csipb.VolumeCapability_Mount{
|
||||
Mount: &csipb.VolumeCapability_MountVolume{
|
||||
FsType: fsType,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
_, err = nodeClient.NodePublishVolume(ctx, req)
|
||||
return err
|
||||
}
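// blockOrMountCapability is an illustrative sketch (not part of the vendored
// change) isolating the fsType switch used by NodePublishVolume above and
// NodeStageVolume below: the sentinel fsType "block" (fsTypeBlockName)
// selects a raw-block access type, anything else a mount with that filesystem.
func blockOrMountCapability(accessMode api.PersistentVolumeAccessMode, fsType string) *csipb.VolumeCapability {
	vc := &csipb.VolumeCapability{
		AccessMode: &csipb.VolumeCapability_AccessMode{
			Mode: asCSIAccessMode(accessMode),
		},
	}
	if fsType == fsTypeBlockName {
		vc.AccessType = &csipb.VolumeCapability_Block{
			Block: &csipb.VolumeCapability_BlockVolume{},
		}
	} else {
		vc.AccessType = &csipb.VolumeCapability_Mount{
			Mount: &csipb.VolumeCapability_MountVolume{FsType: fsType},
		}
	}
	return vc
}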
|
||||
|
||||
func (c *csiDriverClient) NodeUnpublishVolume(ctx grpctx.Context, volID string, targetPath string) error {
|
||||
func (c *csiDriverClient) NodeUnpublishVolume(ctx context.Context, volID string, targetPath string) error {
|
||||
glog.V(4).Info(log("calling NodeUnpublishVolume rpc: [volid=%s, target_path=%s", volID, targetPath))
|
||||
if volID == "" {
|
||||
return errors.New("missing volume id")
|
||||
@ -160,21 +143,24 @@ func (c *csiDriverClient) NodeUnpublishVolume(ctx grpctx.Context, volID string,
|
||||
if targetPath == "" {
|
||||
return errors.New("missing target path")
|
||||
}
|
||||
if err := c.assertConnection(); err != nil {
|
||||
glog.Error(log("failed to assert a connection: %v", err))
|
||||
|
||||
conn, err := newGrpcConn(c.driverName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer conn.Close()
|
||||
nodeClient := csipb.NewNodeClient(conn)
|
||||
|
||||
req := &csipb.NodeUnpublishVolumeRequest{
|
||||
VolumeId: volID,
|
||||
TargetPath: targetPath,
|
||||
}
|
||||
|
||||
_, err := c.nodeClient.NodeUnpublishVolume(ctx, req)
|
||||
_, err = nodeClient.NodeUnpublishVolume(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *csiDriverClient) NodeStageVolume(ctx grpctx.Context,
|
||||
func (c *csiDriverClient) NodeStageVolume(ctx context.Context,
|
||||
volID string,
|
||||
publishInfo map[string]string,
|
||||
stagingTargetPath string,
|
||||
@ -190,10 +176,13 @@ func (c *csiDriverClient) NodeStageVolume(ctx grpctx.Context,
|
||||
if stagingTargetPath == "" {
|
||||
return errors.New("missing staging target path")
|
||||
}
|
||||
if err := c.assertConnection(); err != nil {
|
||||
glog.Errorf("%v: failed to assert a connection: %v", csiPluginName, err)
|
||||
|
||||
conn, err := newGrpcConn(c.driverName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer conn.Close()
|
||||
nodeClient := csipb.NewNodeClient(conn)
|
||||
|
||||
req := &csipb.NodeStageVolumeRequest{
|
||||
VolumeId: volID,
|
||||
@ -203,21 +192,28 @@ func (c *csiDriverClient) NodeStageVolume(ctx grpctx.Context,
|
||||
AccessMode: &csipb.VolumeCapability_AccessMode{
|
||||
Mode: asCSIAccessMode(accessMode),
|
||||
},
|
||||
AccessType: &csipb.VolumeCapability_Mount{
|
||||
Mount: &csipb.VolumeCapability_MountVolume{
|
||||
FsType: fsType,
|
||||
},
|
||||
},
|
||||
},
|
||||
NodeStageSecrets: nodeStageSecrets,
|
||||
VolumeAttributes: volumeAttribs,
|
||||
}
|
||||
|
||||
_, err := c.nodeClient.NodeStageVolume(ctx, req)
|
||||
if fsType == fsTypeBlockName {
|
||||
req.VolumeCapability.AccessType = &csipb.VolumeCapability_Block{
|
||||
Block: &csipb.VolumeCapability_BlockVolume{},
|
||||
}
|
||||
} else {
|
||||
req.VolumeCapability.AccessType = &csipb.VolumeCapability_Mount{
|
||||
Mount: &csipb.VolumeCapability_MountVolume{
|
||||
FsType: fsType,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
_, err = nodeClient.NodeStageVolume(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *csiDriverClient) NodeUnstageVolume(ctx grpctx.Context, volID, stagingTargetPath string) error {
|
||||
func (c *csiDriverClient) NodeUnstageVolume(ctx context.Context, volID, stagingTargetPath string) error {
|
||||
glog.V(4).Info(log("calling NodeUnstageVolume rpc [volid=%s,staging_target_path=%s]", volID, stagingTargetPath))
|
||||
if volID == "" {
|
||||
return errors.New("missing volume id")
|
||||
@ -225,27 +221,34 @@ func (c *csiDriverClient) NodeUnstageVolume(ctx grpctx.Context, volID, stagingTa
|
||||
if stagingTargetPath == "" {
|
||||
return errors.New("missing staging target path")
|
||||
}
|
||||
if err := c.assertConnection(); err != nil {
|
||||
glog.Errorf("%v: failed to assert a connection: %v", csiPluginName, err)
|
||||
|
||||
conn, err := newGrpcConn(c.driverName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer conn.Close()
|
||||
nodeClient := csipb.NewNodeClient(conn)
|
||||
|
||||
req := &csipb.NodeUnstageVolumeRequest{
|
||||
VolumeId: volID,
|
||||
StagingTargetPath: stagingTargetPath,
|
||||
}
|
||||
_, err := c.nodeClient.NodeUnstageVolume(ctx, req)
|
||||
_, err = nodeClient.NodeUnstageVolume(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *csiDriverClient) NodeGetCapabilities(ctx grpctx.Context) ([]*csipb.NodeServiceCapability, error) {
|
||||
func (c *csiDriverClient) NodeGetCapabilities(ctx context.Context) ([]*csipb.NodeServiceCapability, error) {
|
||||
glog.V(4).Info(log("calling NodeGetCapabilities rpc"))
|
||||
if err := c.assertConnection(); err != nil {
|
||||
glog.Errorf("%v: failed to assert a connection: %v", csiPluginName, err)
|
||||
|
||||
conn, err := newGrpcConn(c.driverName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer conn.Close()
|
||||
nodeClient := csipb.NewNodeClient(conn)
|
||||
|
||||
req := &csipb.NodeGetCapabilitiesRequest{}
|
||||
resp, err := c.nodeClient.NodeGetCapabilities(ctx, req)
|
||||
resp, err := nodeClient.NodeGetCapabilities(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -263,3 +266,28 @@ func asCSIAccessMode(am api.PersistentVolumeAccessMode) csipb.VolumeCapability_A
|
||||
}
|
||||
return csipb.VolumeCapability_AccessMode_UNKNOWN
|
||||
}
|
||||
|
||||
func newGrpcConn(driverName string) (*grpc.ClientConn, error) {
|
||||
if driverName == "" {
|
||||
return nil, fmt.Errorf("driver name is empty")
|
||||
}
|
||||
addr := fmt.Sprintf(csiAddrTemplate, driverName)
|
||||
// TODO once KubeletPluginsWatcher graduates to beta, remove FeatureGate check
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.KubeletPluginsWatcher) {
|
||||
driver, ok := csiDrivers.driversMap[driverName]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("driver name %s not found in the list of registered CSI drivers", driverName)
|
||||
}
|
||||
addr = driver.driverEndpoint
|
||||
}
|
||||
network := "unix"
|
||||
glog.V(4).Infof(log("creating new gRPC connection for [%s://%s]", network, addr))
|
||||
|
||||
return grpc.Dial(
|
||||
addr,
|
||||
grpc.WithInsecure(),
|
||||
grpc.WithDialer(func(target string, timeout time.Duration) (net.Conn, error) {
|
||||
return net.Dial(network, target)
|
||||
}),
|
||||
)
|
||||
}
|
||||
|
139
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_client_test.go
generated
vendored
139
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_client_test.go
generated
vendored
@ -17,25 +17,128 @@ limitations under the License.
|
||||
package csi
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
grpctx "golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
csipb "github.com/container-storage-interface/spec/lib/go/csi/v0"
|
||||
api "k8s.io/api/core/v1"
|
||||
"k8s.io/kubernetes/pkg/volume/csi/fake"
|
||||
)
|
||||
|
||||
func setupClient(t *testing.T, stageUnstageSet bool) *csiDriverClient {
|
||||
client := newCsiDriverClient("unix", "/tmp/test.sock")
|
||||
client.conn = new(grpc.ClientConn) //avoids creating conn object
|
||||
type fakeCsiDriverClient struct {
|
||||
t *testing.T
|
||||
nodeClient *fake.NodeClient
|
||||
}
|
||||
|
||||
// setup mock grpc clients
|
||||
client.idClient = fake.NewIdentityClient()
|
||||
client.nodeClient = fake.NewNodeClient(stageUnstageSet)
|
||||
client.ctrlClient = fake.NewControllerClient()
|
||||
func newFakeCsiDriverClient(t *testing.T, stagingCapable bool) *fakeCsiDriverClient {
|
||||
return &fakeCsiDriverClient{
|
||||
t: t,
|
||||
nodeClient: fake.NewNodeClient(stagingCapable),
|
||||
}
|
||||
}
|
||||
|
||||
return client
|
||||
func (c *fakeCsiDriverClient) NodePublishVolume(
|
||||
ctx context.Context,
|
||||
volID string,
|
||||
readOnly bool,
|
||||
stagingTargetPath string,
|
||||
targetPath string,
|
||||
accessMode api.PersistentVolumeAccessMode,
|
||||
volumeInfo map[string]string,
|
||||
volumeAttribs map[string]string,
|
||||
nodePublishSecrets map[string]string,
|
||||
fsType string,
|
||||
) error {
|
||||
c.t.Log("calling fake.NodePublishVolume...")
|
||||
req := &csipb.NodePublishVolumeRequest{
|
||||
VolumeId: volID,
|
||||
TargetPath: targetPath,
|
||||
Readonly: readOnly,
|
||||
PublishInfo: volumeInfo,
|
||||
VolumeAttributes: volumeAttribs,
|
||||
NodePublishSecrets: nodePublishSecrets,
|
||||
VolumeCapability: &csipb.VolumeCapability{
|
||||
AccessMode: &csipb.VolumeCapability_AccessMode{
|
||||
Mode: asCSIAccessMode(accessMode),
|
||||
},
|
||||
AccessType: &csipb.VolumeCapability_Mount{
|
||||
Mount: &csipb.VolumeCapability_MountVolume{
|
||||
FsType: fsType,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
_, err := c.nodeClient.NodePublishVolume(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *fakeCsiDriverClient) NodeUnpublishVolume(ctx context.Context, volID string, targetPath string) error {
|
||||
c.t.Log("calling fake.NodeUnpublishVolume...")
|
||||
req := &csipb.NodeUnpublishVolumeRequest{
|
||||
VolumeId: volID,
|
||||
TargetPath: targetPath,
|
||||
}
|
||||
|
||||
_, err := c.nodeClient.NodeUnpublishVolume(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *fakeCsiDriverClient) NodeStageVolume(ctx context.Context,
|
||||
volID string,
|
||||
publishInfo map[string]string,
|
||||
stagingTargetPath string,
|
||||
fsType string,
|
||||
accessMode api.PersistentVolumeAccessMode,
|
||||
nodeStageSecrets map[string]string,
|
||||
volumeAttribs map[string]string,
|
||||
) error {
|
||||
c.t.Log("calling fake.NodeStageVolume...")
|
||||
req := &csipb.NodeStageVolumeRequest{
|
||||
VolumeId: volID,
|
||||
PublishInfo: publishInfo,
|
||||
StagingTargetPath: stagingTargetPath,
|
||||
VolumeCapability: &csipb.VolumeCapability{
|
||||
AccessMode: &csipb.VolumeCapability_AccessMode{
|
||||
Mode: asCSIAccessMode(accessMode),
|
||||
},
|
||||
AccessType: &csipb.VolumeCapability_Mount{
|
||||
Mount: &csipb.VolumeCapability_MountVolume{
|
||||
FsType: fsType,
|
||||
},
|
||||
},
|
||||
},
|
||||
NodeStageSecrets: nodeStageSecrets,
|
||||
VolumeAttributes: volumeAttribs,
|
||||
}
|
||||
|
||||
_, err := c.nodeClient.NodeStageVolume(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *fakeCsiDriverClient) NodeUnstageVolume(ctx context.Context, volID, stagingTargetPath string) error {
|
||||
c.t.Log("calling fake.NodeUnstageVolume...")
|
||||
req := &csipb.NodeUnstageVolumeRequest{
|
||||
VolumeId: volID,
|
||||
StagingTargetPath: stagingTargetPath,
|
||||
}
|
||||
_, err := c.nodeClient.NodeUnstageVolume(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *fakeCsiDriverClient) NodeGetCapabilities(ctx context.Context) ([]*csipb.NodeServiceCapability, error) {
|
||||
c.t.Log("calling fake.NodeGetCapabilities...")
|
||||
req := &csipb.NodeGetCapabilitiesRequest{}
|
||||
resp, err := c.nodeClient.NodeGetCapabilities(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp.GetCapabilities(), nil
|
||||
}
|
||||
|
||||
func setupClient(t *testing.T, stageUnstageSet bool) csiClient {
|
||||
return newFakeCsiDriverClient(t, stageUnstageSet)
|
||||
}
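// exampleFakeClientErrorInjection is an illustrative sketch (not part of the
// vendored change) of how the tests below use the fake client: inject an
// error for the next node RPC and verify the wrapper surfaces it. The volume
// ID and target path are placeholder values.
func exampleFakeClientErrorInjection(t *testing.T) {
	client := setupClient(t, true /* stageUnstageSet */)
	client.(*fakeCsiDriverClient).nodeClient.SetNextError(errors.New("grpc error"))
	if err := client.NodeUnpublishVolume(context.Background(), "vol-test", "/tmp/target"); err == nil {
		t.Error("expected the injected error to be returned")
	}
}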
|
||||
|
||||
func TestClientNodePublishVolume(t *testing.T) {
|
||||
@ -58,9 +161,9 @@ func TestClientNodePublishVolume(t *testing.T) {
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Logf("test case: %s", tc.name)
|
||||
client.nodeClient.(*fake.NodeClient).SetNextError(tc.err)
|
||||
client.(*fakeCsiDriverClient).nodeClient.SetNextError(tc.err)
|
||||
err := client.NodePublishVolume(
|
||||
grpctx.Background(),
|
||||
context.Background(),
|
||||
tc.volID,
|
||||
false,
|
||||
"",
|
||||
@ -96,8 +199,8 @@ func TestClientNodeUnpublishVolume(t *testing.T) {
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Logf("test case: %s", tc.name)
|
||||
client.nodeClient.(*fake.NodeClient).SetNextError(tc.err)
|
||||
err := client.NodeUnpublishVolume(grpctx.Background(), tc.volID, tc.targetPath)
|
||||
client.(*fakeCsiDriverClient).nodeClient.SetNextError(tc.err)
|
||||
err := client.NodeUnpublishVolume(context.Background(), tc.volID, tc.targetPath)
|
||||
if tc.mustFail && err == nil {
|
||||
t.Error("test must fail, but err is nil")
|
||||
}
|
||||
@ -125,9 +228,9 @@ func TestClientNodeStageVolume(t *testing.T) {
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Logf("Running test case: %s", tc.name)
|
||||
client.nodeClient.(*fake.NodeClient).SetNextError(tc.err)
|
||||
client.(*fakeCsiDriverClient).nodeClient.SetNextError(tc.err)
|
||||
err := client.NodeStageVolume(
|
||||
grpctx.Background(),
|
||||
context.Background(),
|
||||
tc.volID,
|
||||
map[string]string{"device": "/dev/null"},
|
||||
tc.stagingTargetPath,
|
||||
@ -161,9 +264,9 @@ func TestClientNodeUnstageVolume(t *testing.T) {
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Logf("Running test case: %s", tc.name)
|
||||
client.nodeClient.(*fake.NodeClient).SetNextError(tc.err)
|
||||
client.(*fakeCsiDriverClient).nodeClient.SetNextError(tc.err)
|
||||
err := client.NodeUnstageVolume(
|
||||
grpctx.Background(),
|
||||
context.Background(),
|
||||
tc.volID, tc.stagingTargetPath,
|
||||
)
|
||||
if tc.mustFail && err == nil {
|
||||
|
162
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter.go
generated
vendored
162
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter.go
generated
vendored
@ -17,14 +17,14 @@ limitations under the License.
|
||||
package csi
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
"github.com/golang/glog"
|
||||
grpctx "golang.org/x/net/context"
|
||||
|
||||
api "k8s.io/api/core/v1"
|
||||
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
@ -34,8 +34,6 @@ import (
|
||||
"k8s.io/kubernetes/pkg/volume/util"
|
||||
)
|
||||
|
||||
const defaultFSType = "ext4"
|
||||
|
||||
//TODO (vladimirvivien) move this in a central loc later
|
||||
var (
|
||||
volDataKey = struct {
|
||||
@ -54,8 +52,8 @@ var (
|
||||
)
|
||||
|
||||
type csiMountMgr struct {
|
||||
k8s kubernetes.Interface
|
||||
csiClient csiClient
|
||||
k8s kubernetes.Interface
|
||||
plugin *csiPlugin
|
||||
driverName string
|
||||
volumeID string
|
||||
@ -118,8 +116,9 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error {
|
||||
nodeName := string(c.plugin.host.GetNodeName())
|
||||
attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, nodeName)
|
||||
|
||||
ctx, cancel := grpctx.WithTimeout(grpctx.Background(), csiTimeout)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
|
||||
defer cancel()
|
||||
|
||||
// Check for STAGE_UNSTAGE_VOLUME set and populate deviceMountPath if so
|
||||
deviceMountPath := ""
|
||||
stageUnstageSet, err := hasStageUnstageCapability(ctx, csi)
|
||||
@ -153,6 +152,15 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error {
|
||||
|
||||
attribs := csiSource.VolumeAttributes
|
||||
|
||||
nodePublishSecrets := map[string]string{}
|
||||
if csiSource.NodePublishSecretRef != nil {
|
||||
nodePublishSecrets, err = getCredentialsFromSecret(c.k8s, csiSource.NodePublishSecretRef)
|
||||
if err != nil {
|
||||
return fmt.Errorf("fetching NodePublishSecretRef %s/%s failed: %v",
|
||||
csiSource.NodePublishSecretRef.Namespace, csiSource.NodePublishSecretRef.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
// create target_dir before call to NodePublish
|
||||
if err := os.MkdirAll(dir, 0750); err != nil {
|
||||
glog.Error(log("mouter.SetUpAt failed to create dir %#v: %v", dir, err))
|
||||
@ -160,24 +168,6 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error {
|
||||
}
|
||||
glog.V(4).Info(log("created target path successfully [%s]", dir))
|
||||
|
||||
// persist volume info data for teardown
|
||||
volData := map[string]string{
|
||||
volDataKey.specVolID: c.spec.Name(),
|
||||
volDataKey.volHandle: csiSource.VolumeHandle,
|
||||
volDataKey.driverName: csiSource.Driver,
|
||||
volDataKey.nodeName: nodeName,
|
||||
volDataKey.attachmentID: attachID,
|
||||
}
|
||||
|
||||
if err := saveVolumeData(c.plugin, c.podUID, c.spec.Name(), volData); err != nil {
|
||||
glog.Error(log("mounter.SetUpAt failed to save volume info data: %v", err))
|
||||
if err := removeMountDir(c.plugin, dir); err != nil {
|
||||
glog.Error(log("mounter.SetUpAt failed to remove mount dir after a saveVolumeData() error [%s]: %v", dir, err))
|
||||
return err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
//TODO (vladimirvivien) implement better AccessModes mapping between k8s and CSI
|
||||
accessMode := api.ReadWriteOnce
|
||||
if c.spec.PersistentVolume.Spec.AccessModes != nil {
|
||||
@ -185,13 +175,6 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error {
|
||||
}
|
||||
|
||||
fsType := csiSource.FSType
|
||||
if len(fsType) == 0 {
|
||||
fsType = defaultFSType
|
||||
}
|
||||
nodePublishSecrets := map[string]string{}
|
||||
if csiSource.NodePublishSecretRef != nil {
|
||||
nodePublishSecrets = getCredentialsFromSecret(c.k8s, csiSource.NodePublishSecretRef)
|
||||
}
|
||||
err = csi.NodePublishVolume(
|
||||
ctx,
|
||||
c.volumeID,
|
||||
@ -207,22 +190,57 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error {
|
||||
|
||||
if err != nil {
|
||||
glog.Errorf(log("mounter.SetupAt failed: %v", err))
|
||||
if err := removeMountDir(c.plugin, dir); err != nil {
|
||||
glog.Error(log("mounter.SetuAt failed to remove mount dir after a NodePublish() error [%s]: %v", dir, err))
|
||||
return err
|
||||
if removeMountDirErr := removeMountDir(c.plugin, dir); removeMountDirErr != nil {
|
||||
glog.Error(log("mounter.SetupAt failed to remove mount dir after a NodePublish() error [%s]: %v", dir, removeMountDirErr))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// apply volume ownership
|
||||
if !c.readOnly && fsGroup != nil {
|
||||
err := volume.SetVolumeOwnership(c, fsGroup)
|
||||
if err != nil {
|
||||
// attempt to rollback mount.
|
||||
glog.Error(log("mounter.SetupAt failed to set fsgroup volume ownership for [%s]: %v", c.volumeID, err))
|
||||
glog.V(4).Info(log("mounter.SetupAt attempting to unpublish volume %s due to previous error", c.volumeID))
|
||||
if unpubErr := csi.NodeUnpublishVolume(ctx, c.volumeID, dir); unpubErr != nil {
|
||||
glog.Error(log(
|
||||
"mounter.SetupAt failed to unpublish volume [%s]: %v (caused by previous NodePublish error: %v)",
|
||||
c.volumeID, unpubErr, err,
|
||||
))
|
||||
return fmt.Errorf("%v (caused by %v)", unpubErr, err)
|
||||
}
|
||||
|
||||
if unmountErr := removeMountDir(c.plugin, dir); unmountErr != nil {
|
||||
glog.Error(log(
|
||||
"mounter.SetupAt failed to clean mount dir [%s]: %v (caused by previous NodePublish error: %v)",
|
||||
dir, unmountErr, err,
|
||||
))
|
||||
return fmt.Errorf("%v (caused by %v)", unmountErr, err)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
glog.V(4).Info(log("mounter.SetupAt sets fsGroup to [%d] for %s", *fsGroup, c.volumeID))
|
||||
}
|
||||
|
||||
glog.V(4).Infof(log("mounter.SetUp successfully requested NodePublish [%s]", dir))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *csiMountMgr) GetAttributes() volume.Attributes {
|
||||
mounter := c.plugin.host.GetMounter(c.plugin.GetPluginName())
|
||||
path := c.GetPath()
|
||||
supportSelinux, err := mounter.GetSELinuxSupport(path)
|
||||
if err != nil {
|
||||
glog.V(2).Info(log("error checking for SELinux support: %s", err))
|
||||
// Best guess
|
||||
supportSelinux = false
|
||||
}
|
||||
return volume.Attributes{
|
||||
ReadOnly: c.readOnly,
|
||||
Managed: !c.readOnly,
|
||||
SupportsSELinux: false,
|
||||
SupportsSELinux: supportSelinux,
|
||||
}
|
||||
}
|
||||
|
||||
@ -249,34 +267,12 @@ func (c *csiMountMgr) TearDownAt(dir string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
glog.Error(log("mounter.TearDownAt failed to get CSI persistent source: %v", err))
|
||||
return err
|
||||
}
|
||||
|
||||
// load volume info from file
|
||||
dataDir := path.Dir(dir) // dropoff /mount at end
|
||||
data, err := loadVolumeData(dataDir, volDataFileName)
|
||||
if err != nil {
|
||||
glog.Error(log("unmounter.Teardown failed to load volume data file using dir [%s]: %v", dir, err))
|
||||
return err
|
||||
}
|
||||
|
||||
volID := data[volDataKey.volHandle]
|
||||
driverName := data[volDataKey.driverName]
|
||||
|
||||
if c.csiClient == nil {
|
||||
addr := fmt.Sprintf(csiAddrTemplate, driverName)
|
||||
client := newCsiDriverClient("unix", addr)
|
||||
glog.V(4).Infof(log("unmounter csiClient setup [volume=%v,driver=%v]", volID, driverName))
|
||||
c.csiClient = client
|
||||
}
|
||||
|
||||
ctx, cancel := grpctx.WithTimeout(grpctx.Background(), csiTimeout)
|
||||
defer cancel()
|
||||
|
||||
volID := c.volumeID
|
||||
csi := c.csiClient
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
|
||||
defer cancel()
|
||||
|
||||
if err := csi.NodeUnpublishVolume(ctx, volID, dir); err != nil {
|
||||
glog.Errorf(log("mounter.TearDownAt failed: %v", err))
|
||||
return err
|
||||
@ -292,50 +288,6 @@ func (c *csiMountMgr) TearDownAt(dir string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// saveVolumeData persists parameter data as json file using the location
|
||||
// generated by /var/lib/kubelet/pods/<podID>/volumes/kubernetes.io~csi/<specVolId>/volume_data.json
|
||||
func saveVolumeData(p *csiPlugin, podUID types.UID, specVolID string, data map[string]string) error {
|
||||
dir := getTargetPath(podUID, specVolID, p.host)
|
||||
dataFilePath := path.Join(dir, volDataFileName)
|
||||
|
||||
file, err := os.Create(dataFilePath)
|
||||
if err != nil {
|
||||
glog.Error(log("failed to save volume data file %s: %v", dataFilePath, err))
|
||||
return err
|
||||
}
|
||||
defer file.Close()
|
||||
if err := json.NewEncoder(file).Encode(data); err != nil {
|
||||
glog.Error(log("failed to save volume data file %s: %v", dataFilePath, err))
|
||||
return err
|
||||
}
|
||||
glog.V(4).Info(log("volume data file saved successfully [%s]", dataFilePath))
|
||||
return nil
|
||||
}
|
||||
|
||||
// loadVolumeData uses the directory returned by mounter.GetPath with value
|
||||
// /var/lib/kubelet/pods/<podID>/volumes/kubernetes.io~csi/<specVolumeId>/mount.
|
||||
// The function extracts specVolumeID and uses it to load the json data file from dir
|
||||
// /var/lib/kubelet/pods/<podID>/volumes/kubernetes.io~csi/<specVolId>/volume_data.json
|
||||
func loadVolumeData(dir string, fileName string) (map[string]string, error) {
|
||||
// remove /mount at the end
|
||||
dataFileName := path.Join(dir, fileName)
|
||||
glog.V(4).Info(log("loading volume data file [%s]", dataFileName))
|
||||
|
||||
file, err := os.Open(dataFileName)
|
||||
if err != nil {
|
||||
glog.Error(log("failed to open volume data file [%s]: %v", dataFileName, err))
|
||||
return nil, err
|
||||
}
|
||||
defer file.Close()
|
||||
data := map[string]string{}
|
||||
if err := json.NewDecoder(file).Decode(&data); err != nil {
|
||||
glog.Error(log("failed to parse volume data file [%s]: %v", dataFileName, err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return data, nil
|
||||
}
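// exampleVolumeDataRoundTrip is an illustrative sketch (not part of the
// vendored change) of the vol_data.json persistence as called elsewhere in
// this change by NewMounter/NewBlockVolumeMapper and reloaded by the
// unmounter/unmapper constructors; the directory and values are assumed
// placeholders.
func exampleVolumeDataRoundTrip(dataDir string) (map[string]string, error) {
	volData := map[string]string{
		volDataKey.specVolID:    "my-pv",            // assumed
		volDataKey.volHandle:    "vol-handle-0001",  // assumed
		volDataKey.driverName:   "csi-example",      // assumed
		volDataKey.nodeName:     "node-1",           // assumed
		volDataKey.attachmentID: "csi-attachment-1", // assumed
	}
	if err := saveVolumeData(dataDir, volDataFileName, volData); err != nil {
		return nil, err
	}
	// Teardown paths reload the same file to recover the driver name and handle.
	return loadVolumeData(dataDir, volDataFileName)
}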
|
||||
|
||||
// isDirMounted returns the !notMounted result from IsLikelyNotMountPoint check
|
||||
func isDirMounted(plug *csiPlugin, dir string) (bool, error) {
|
||||
mounter := plug.host.GetMounter(plug.GetPluginName())
|
||||
|
63
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter_test.go
generated
vendored
63
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter_test.go
generated
vendored
@ -31,8 +31,8 @@ import (
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
fakeclient "k8s.io/client-go/kubernetes/fake"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
"k8s.io/kubernetes/pkg/volume/csi/fake"
|
||||
volumetest "k8s.io/kubernetes/pkg/volume/testing"
|
||||
"k8s.io/kubernetes/pkg/volume/util"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -78,7 +78,6 @@ func TestMounterGetPath(t *testing.T) {
|
||||
csiMounter := mounter.(*csiMountMgr)
|
||||
|
||||
path := csiMounter.GetPath()
|
||||
t.Logf("*** GetPath: %s", path)
|
||||
|
||||
if tc.path != path {
|
||||
t.Errorf("expecting path %s, got %s", tc.path, path)
|
||||
@ -114,7 +113,7 @@ func TestMounterSetUp(t *testing.T) {
|
||||
}
|
||||
|
||||
csiMounter := mounter.(*csiMountMgr)
|
||||
csiMounter.csiClient = setupClient(t, false)
|
||||
csiMounter.csiClient = setupClient(t, true)
|
||||
|
||||
attachID := getAttachmentName(csiMounter.volumeID, csiMounter.driverName, string(plug.host.GetNodeName()))
|
||||
|
||||
@ -141,9 +140,16 @@ func TestMounterSetUp(t *testing.T) {
|
||||
}
|
||||
|
||||
// Mounter.SetUp()
|
||||
if err := csiMounter.SetUp(nil); err != nil {
|
||||
fsGroup := int64(2000)
|
||||
if err := csiMounter.SetUp(&fsGroup); err != nil {
|
||||
t.Fatalf("mounter.Setup failed: %v", err)
|
||||
}
|
||||
|
||||
//Test the default value of file system type is not overridden
|
||||
if len(csiMounter.spec.PersistentVolume.Spec.CSI.FSType) != 0 {
|
||||
t.Errorf("default value of file system type was overridden by type %s", csiMounter.spec.PersistentVolume.Spec.CSI.FSType)
|
||||
}
|
||||
|
||||
path := csiMounter.GetPath()
|
||||
if _, err := os.Stat(path); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
@ -154,7 +160,7 @@ func TestMounterSetUp(t *testing.T) {
|
||||
}
|
||||
|
||||
// ensure call went all the way
|
||||
pubs := csiMounter.csiClient.(*csiDriverClient).nodeClient.(*fake.NodeClient).GetNodePublishedVolumes()
|
||||
pubs := csiMounter.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodePublishedVolumes()
|
||||
if pubs[csiMounter.volumeID] != csiMounter.GetPath() {
|
||||
t.Error("csi server may not have received NodePublishVolume call")
|
||||
}
|
||||
@ -163,39 +169,46 @@ func TestMounterSetUp(t *testing.T) {
|
||||
func TestUnmounterTeardown(t *testing.T) {
|
||||
plug, tmpDir := newTestPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
pv := makeTestPV("test-pv", 10, testDriver, testVol)
|
||||
|
||||
// save the data file prior to unmount
|
||||
dir := path.Join(getTargetPath(testPodUID, pv.ObjectMeta.Name, plug.host), "/mount")
|
||||
if err := os.MkdirAll(dir, 0755); err != nil && !os.IsNotExist(err) {
|
||||
t.Errorf("failed to create dir [%s]: %v", dir, err)
|
||||
}
|
||||
|
||||
// do a fake local mount
|
||||
diskMounter := util.NewSafeFormatAndMountFromHost(plug.GetPluginName(), plug.host)
|
||||
if err := diskMounter.FormatAndMount("/fake/device", dir, "testfs", nil); err != nil {
|
||||
t.Errorf("failed to mount dir [%s]: %v", dir, err)
|
||||
}
|
||||
|
||||
if err := saveVolumeData(
|
||||
path.Dir(dir),
|
||||
volDataFileName,
|
||||
map[string]string{
|
||||
volDataKey.specVolID: pv.ObjectMeta.Name,
|
||||
volDataKey.driverName: testDriver,
|
||||
volDataKey.volHandle: testVol,
|
||||
},
|
||||
); err != nil {
|
||||
t.Fatalf("failed to save volume data: %v", err)
|
||||
}
|
||||
|
||||
unmounter, err := plug.NewUnmounter(pv.ObjectMeta.Name, testPodUID)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to make a new Unmounter: %v", err)
|
||||
}
|
||||
|
||||
csiUnmounter := unmounter.(*csiMountMgr)
|
||||
csiUnmounter.csiClient = setupClient(t, false)
|
||||
|
||||
dir := csiUnmounter.GetPath()
|
||||
|
||||
// save the data file prior to unmount
|
||||
if err := os.MkdirAll(dir, 0755); err != nil && !os.IsNotExist(err) {
|
||||
t.Errorf("failed to create dir [%s]: %v", dir, err)
|
||||
}
|
||||
if err := saveVolumeData(
|
||||
plug,
|
||||
testPodUID,
|
||||
"test-pv",
|
||||
map[string]string{volDataKey.specVolID: "test-pv", volDataKey.driverName: "driver", volDataKey.volHandle: "vol-handle"},
|
||||
); err != nil {
|
||||
t.Fatalf("failed to save volume data: %v", err)
|
||||
}
|
||||
|
||||
csiUnmounter.csiClient = setupClient(t, true)
|
||||
err = csiUnmounter.TearDownAt(dir)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// ensure csi client call
|
||||
pubs := csiUnmounter.csiClient.(*csiDriverClient).nodeClient.(*fake.NodeClient).GetNodePublishedVolumes()
|
||||
pubs := csiUnmounter.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodePublishedVolumes()
|
||||
if _, ok := pubs[csiUnmounter.volumeID]; ok {
|
||||
t.Error("csi server may not have received NodeUnpublishVolume call")
|
||||
}
|
||||
@ -222,7 +235,7 @@ func TestSaveVolumeData(t *testing.T) {
|
||||
t.Errorf("failed to create dir [%s]: %v", mountDir, err)
|
||||
}
|
||||
|
||||
err := saveVolumeData(plug, testPodUID, specVolID, tc.data)
|
||||
err := saveVolumeData(path.Dir(mountDir), volDataFileName, tc.data)
|
||||
|
||||
if !tc.shouldFail && err != nil {
|
||||
t.Errorf("unexpected failure: %v", err)
|
||||
|
252
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_plugin.go
generated
vendored
252
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_plugin.go
generated
vendored
@ -19,14 +19,21 @@ package csi
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
api "k8s.io/api/core/v1"
|
||||
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/kubernetes/pkg/util/mount"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
"k8s.io/kubernetes/pkg/volume/csi/labelmanager"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -40,16 +47,19 @@ const (
|
||||
csiTimeout = 15 * time.Second
|
||||
volNameSep = "^"
|
||||
volDataFileName = "vol_data.json"
|
||||
fsTypeBlockName = "block"
|
||||
)
|
||||
|
||||
type csiPlugin struct {
|
||||
host volume.VolumeHost
|
||||
host volume.VolumeHost
|
||||
blockEnabled bool
|
||||
}
|
||||
|
||||
// ProbeVolumePlugins returns implemented plugins
|
||||
func ProbeVolumePlugins() []volume.VolumePlugin {
|
||||
p := &csiPlugin{
|
||||
host: nil,
|
||||
host: nil,
|
||||
blockEnabled: utilfeature.DefaultFeatureGate.Enabled(features.CSIBlockVolume),
|
||||
}
|
||||
return []volume.VolumePlugin{p}
|
||||
}
|
||||
@ -57,9 +67,54 @@ func ProbeVolumePlugins() []volume.VolumePlugin {
|
||||
// volume.VolumePlugin methods
|
||||
var _ volume.VolumePlugin = &csiPlugin{}
|
||||
|
||||
type csiDriver struct {
|
||||
driverName string
|
||||
driverEndpoint string
|
||||
}
|
||||
|
||||
type csiDriversStore struct {
|
||||
driversMap map[string]csiDriver
|
||||
sync.RWMutex
|
||||
}
|
||||
|
||||
// csiDrivers map keep track of all registered CSI drivers on the node and their
|
||||
// corresponding sockets
|
||||
var csiDrivers csiDriversStore
|
||||
|
||||
var lm labelmanager.Interface
|
||||
|
||||
// RegistrationCallback is called by kubelet's plugin watcher upon detection
|
||||
// of a new registration socket opened by CSI Driver registrar side car.
|
||||
func RegistrationCallback(pluginName string, endpoint string, versions []string, socketPath string) (error, chan bool) {
|
||||
|
||||
glog.Infof(log("Callback from kubelet with plugin name: %s endpoint: %s versions: %s socket path: %s",
|
||||
pluginName, endpoint, strings.Join(versions, ","), socketPath))
|
||||
|
||||
if endpoint == "" {
|
||||
endpoint = socketPath
|
||||
}
|
||||
// Calling nodeLabelManager to update label for newly registered CSI driver
|
||||
err := lm.AddLabels(pluginName)
|
||||
if err != nil {
|
||||
return err, nil
|
||||
}
|
||||
// Storing endpoint of newly registered CSI driver into the map, where CSI driver name will be the key
|
||||
// all other CSI components will be able to get the actual socket of CSI drivers by its name.
|
||||
csiDrivers.Lock()
|
||||
defer csiDrivers.Unlock()
|
||||
csiDrivers.driversMap[pluginName] = csiDriver{driverName: pluginName, driverEndpoint: endpoint}
|
||||
|
||||
return nil, nil
|
||||
}
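// exampleRegisterDriver is an illustrative sketch (not part of the vendored
// change), roughly what kubelet's plugin watcher does once a registrar socket
// appears: record the driver's endpoint so newGrpcConn can dial it by name.
// The driver name and socket paths are assumed placeholders, and the plugin
// must have been initialized (Init) so the label manager and csiDrivers map
// exist.
func exampleRegisterDriver() error {
	err, _ := RegistrationCallback(
		"csi-example-driver", // assumed driver name
		"/var/lib/kubelet/plugins/csi-example-driver/csi.sock", // assumed endpoint
		[]string{"0.3.0"},
		"/var/lib/kubelet/plugins/csi-example-driver-reg.sock", // assumed registration socket
	)
	return err
}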
|
||||
|
||||
func (p *csiPlugin) Init(host volume.VolumeHost) error {
|
||||
glog.Info(log("plugin initializing..."))
|
||||
p.host = host
|
||||
|
||||
// Initializing csiDrivers map and label management channels
|
||||
csiDrivers = csiDriversStore{driversMap: map[string]csiDriver{}}
|
||||
lm = labelmanager.NewLabelManager(host.GetNodeName(), host.GetKubeClient())
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -98,11 +153,10 @@ func (p *csiPlugin) NewMounter(
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// before it is used in any paths such as socket etc
|
||||
addr := fmt.Sprintf(csiAddrTemplate, pvSource.Driver)
|
||||
glog.V(4).Infof(log("setting up mounter for [volume=%v,driver=%v]", pvSource.VolumeHandle, pvSource.Driver))
|
||||
client := newCsiDriverClient("unix", addr)
|
||||
readOnly, err := getReadOnlyFromSpec(spec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
k8s := p.host.GetKubeClient()
|
||||
if k8s == nil {
|
||||
@ -110,6 +164,8 @@ func (p *csiPlugin) NewMounter(
|
||||
return nil, errors.New("failed to get a Kubernetes client")
|
||||
}
|
||||
|
||||
csi := newCsiDriverClient(pvSource.Driver)
|
||||
|
||||
mounter := &csiMountMgr{
|
||||
plugin: p,
|
||||
k8s: k8s,
|
||||
@ -119,18 +175,66 @@ func (p *csiPlugin) NewMounter(
|
||||
driverName: pvSource.Driver,
|
||||
volumeID: pvSource.VolumeHandle,
|
||||
specVolumeID: spec.Name(),
|
||||
csiClient: client,
|
||||
csiClient: csi,
|
||||
readOnly: readOnly,
|
||||
}
|
||||
|
||||
// Save volume info in pod dir
|
||||
dir := mounter.GetPath()
|
||||
dataDir := path.Dir(dir) // dropoff /mount at end
|
||||
|
||||
if err := os.MkdirAll(dataDir, 0750); err != nil {
|
||||
glog.Error(log("failed to create dir %#v: %v", dataDir, err))
|
||||
return nil, err
|
||||
}
|
||||
glog.V(4).Info(log("created path successfully [%s]", dataDir))
|
||||
|
||||
// persist volume info data for teardown
|
||||
node := string(p.host.GetNodeName())
|
||||
attachID := getAttachmentName(pvSource.VolumeHandle, pvSource.Driver, node)
|
||||
volData := map[string]string{
|
||||
volDataKey.specVolID: spec.Name(),
|
||||
volDataKey.volHandle: pvSource.VolumeHandle,
|
||||
volDataKey.driverName: pvSource.Driver,
|
||||
volDataKey.nodeName: node,
|
||||
volDataKey.attachmentID: attachID,
|
||||
}
|
||||
|
||||
if err := saveVolumeData(dataDir, volDataFileName, volData); err != nil {
|
||||
glog.Error(log("failed to save volume info data: %v", err))
|
||||
if err := os.RemoveAll(dataDir); err != nil {
|
||||
glog.Error(log("failed to remove dir after error [%s]: %v", dataDir, err))
|
||||
return nil, err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
glog.V(4).Info(log("mounter created successfully"))
|
||||
|
||||
return mounter, nil
|
||||
}
|
||||
|
||||
func (p *csiPlugin) NewUnmounter(specName string, podUID types.UID) (volume.Unmounter, error) {
|
||||
glog.V(4).Infof(log("setting up unmounter for [name=%v, podUID=%v]", specName, podUID))
|
||||
|
||||
unmounter := &csiMountMgr{
|
||||
plugin: p,
|
||||
podUID: podUID,
|
||||
specVolumeID: specName,
|
||||
}
|
||||
|
||||
// load volume info from file
|
||||
dir := unmounter.GetPath()
|
||||
dataDir := path.Dir(dir) // dropoff /mount at end
|
||||
data, err := loadVolumeData(dataDir, volDataFileName)
|
||||
if err != nil {
|
||||
glog.Error(log("unmounter failed to load volume data file [%s]: %v", dir, err))
|
||||
return nil, err
|
||||
}
|
||||
unmounter.driverName = data[volDataKey.driverName]
|
||||
unmounter.volumeID = data[volDataKey.volHandle]
|
||||
unmounter.csiClient = newCsiDriverClient(unmounter.driverName)
|
||||
|
||||
return unmounter, nil
|
||||
}
|
||||
|
||||
@ -208,16 +312,132 @@ func (p *csiPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error)
|
||||
return mount.GetMountRefs(m, deviceMountPath)
|
||||
}
|
||||
|
||||
func getCSISourceFromSpec(spec *volume.Spec) (*api.CSIPersistentVolumeSource, error) {
|
||||
if spec.PersistentVolume != nil &&
|
||||
spec.PersistentVolume.Spec.CSI != nil {
|
||||
return spec.PersistentVolume.Spec.CSI, nil
|
||||
// BlockVolumePlugin methods
|
||||
var _ volume.BlockVolumePlugin = &csiPlugin{}
|
||||
|
||||
func (p *csiPlugin) NewBlockVolumeMapper(spec *volume.Spec, podRef *api.Pod, opts volume.VolumeOptions) (volume.BlockVolumeMapper, error) {
|
||||
if !p.blockEnabled {
|
||||
return nil, errors.New("CSIBlockVolume feature not enabled")
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("CSIPersistentVolumeSource not defined in spec")
|
||||
pvSource, err := getCSISourceFromSpec(spec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
readOnly, err := getReadOnlyFromSpec(spec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
glog.V(4).Info(log("setting up block mapper for [volume=%v,driver=%v]", pvSource.VolumeHandle, pvSource.Driver))
|
||||
client := newCsiDriverClient(pvSource.Driver)
|
||||
|
||||
k8s := p.host.GetKubeClient()
|
||||
if k8s == nil {
|
||||
glog.Error(log("failed to get a kubernetes client"))
|
||||
return nil, errors.New("failed to get a Kubernetes client")
|
||||
}
|
||||
|
||||
mapper := &csiBlockMapper{
|
||||
csiClient: client,
|
||||
k8s: k8s,
|
||||
plugin: p,
|
||||
volumeID: pvSource.VolumeHandle,
|
||||
driverName: pvSource.Driver,
|
||||
readOnly: readOnly,
|
||||
spec: spec,
|
||||
podUID: podRef.UID,
|
||||
}
|
||||
|
||||
// Save volume info in pod dir
|
||||
dataDir := getVolumeDeviceDataDir(spec.Name(), p.host)
|
||||
|
||||
if err := os.MkdirAll(dataDir, 0750); err != nil {
|
||||
glog.Error(log("failed to create data dir %s: %v", dataDir, err))
|
||||
return nil, err
|
||||
}
|
||||
glog.V(4).Info(log("created path successfully [%s]", dataDir))
|
||||
|
||||
// persist volume info data for teardown
|
||||
node := string(p.host.GetNodeName())
|
||||
attachID := getAttachmentName(pvSource.VolumeHandle, pvSource.Driver, node)
|
||||
volData := map[string]string{
|
||||
volDataKey.specVolID: spec.Name(),
|
||||
volDataKey.volHandle: pvSource.VolumeHandle,
|
||||
volDataKey.driverName: pvSource.Driver,
|
||||
volDataKey.nodeName: node,
|
||||
volDataKey.attachmentID: attachID,
|
||||
}
|
||||
|
||||
if err := saveVolumeData(dataDir, volDataFileName, volData); err != nil {
|
||||
glog.Error(log("failed to save volume info data: %v", err))
|
||||
if err := os.RemoveAll(dataDir); err != nil {
|
||||
glog.Error(log("failed to remove dir after error [%s]: %v", dataDir, err))
|
||||
return nil, err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return mapper, nil
|
||||
}
|
||||
|
||||
// log prepends log string with `kubernetes.io/csi`
|
||||
func log(msg string, parts ...interface{}) string {
|
||||
return fmt.Sprintf(fmt.Sprintf("%s: %s", csiPluginName, msg), parts...)
|
||||
func (p *csiPlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (volume.BlockVolumeUnmapper, error) {
|
||||
if !p.blockEnabled {
|
||||
return nil, errors.New("CSIBlockVolume feature not enabled")
|
||||
}
|
||||
|
||||
glog.V(4).Infof(log("setting up block unmapper for [Spec=%v, podUID=%v]", volName, podUID))
|
||||
unmapper := &csiBlockMapper{
|
||||
plugin: p,
|
||||
podUID: podUID,
|
||||
specName: volName,
|
||||
}
|
||||
|
||||
// load volume info from file
|
||||
dataDir := getVolumeDeviceDataDir(unmapper.specName, p.host)
|
||||
data, err := loadVolumeData(dataDir, volDataFileName)
|
||||
if err != nil {
|
||||
glog.Error(log("unmapper failed to load volume data file [%s]: %v", dataDir, err))
|
||||
return nil, err
|
||||
}
|
||||
unmapper.driverName = data[volDataKey.driverName]
|
||||
unmapper.volumeID = data[volDataKey.volHandle]
|
||||
unmapper.csiClient = newCsiDriverClient(unmapper.driverName)
|
||||
|
||||
return unmapper, nil
|
||||
}
|
||||
|
||||
func (p *csiPlugin) ConstructBlockVolumeSpec(podUID types.UID, specVolName, mapPath string) (*volume.Spec, error) {
|
||||
if !p.blockEnabled {
|
||||
return nil, errors.New("CSIBlockVolume feature not enabled")
|
||||
}
|
||||
|
||||
glog.V(4).Infof("plugin.ConstructBlockVolumeSpec [podUID=%s, specVolName=%s, path=%s]", string(podUID), specVolName, mapPath)
|
||||
|
||||
dataDir := getVolumeDeviceDataDir(specVolName, p.host)
|
||||
volData, err := loadVolumeData(dataDir, volDataFileName)
|
||||
if err != nil {
|
||||
glog.Error(log("plugin.ConstructBlockVolumeSpec failed loading volume data using [%s]: %v", mapPath, err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
glog.V(4).Info(log("plugin.ConstructBlockVolumeSpec extracted [%#v]", volData))
|
||||
|
||||
blockMode := api.PersistentVolumeBlock
|
||||
pv := &api.PersistentVolume{
|
||||
ObjectMeta: meta.ObjectMeta{
|
||||
Name: volData[volDataKey.specVolID],
|
||||
},
|
||||
Spec: api.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: api.PersistentVolumeSource{
|
||||
CSI: &api.CSIPersistentVolumeSource{
|
||||
Driver: volData[volDataKey.driverName],
|
||||
VolumeHandle: volData[volDataKey.volHandle],
|
||||
},
|
||||
},
|
||||
VolumeMode: &blockMode,
|
||||
},
|
||||
}
|
||||
|
||||
return volume.NewSpecFromPersistentVolume(pv, false), nil
|
||||
}
|
||||
|
200
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_plugin_test.go
generated
vendored
@ -20,12 +20,14 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
api "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
fakeclient "k8s.io/client-go/kubernetes/fake"
|
||||
utiltesting "k8s.io/client-go/util/testing"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
@ -34,6 +36,11 @@ import (
|
||||
|
||||
// create a plugin mgr to load plugins and setup a fake client
|
||||
func newTestPlugin(t *testing.T) (*csiPlugin, string) {
|
||||
err := utilfeature.DefaultFeatureGate.Set("CSIBlockVolume=true")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to enable feature gate for CSIBlockVolume: %v", err)
|
||||
}
|
||||
|
||||
tmpDir, err := utiltesting.MkTmpdir("csi-test")
|
||||
if err != nil {
|
||||
t.Fatalf("can't create temp dir: %v", err)
|
||||
@ -160,7 +167,7 @@ func TestPluginConstructVolumeSpec(t *testing.T) {
|
||||
if err := os.MkdirAll(mountDir, 0755); err != nil && !os.IsNotExist(err) {
|
||||
t.Errorf("failed to create dir [%s]: %v", mountDir, err)
|
||||
}
|
||||
if err := saveVolumeData(plug, testPodUID, tc.specVolID, tc.data); err != nil {
|
||||
if err := saveVolumeData(path.Dir(mountDir), volDataFileName, tc.data); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
@ -215,7 +222,21 @@ func TestPluginNewMounter(t *testing.T) {
|
||||
t.Error("mounter pod not set")
|
||||
}
|
||||
if csiMounter.podUID == types.UID("") {
|
||||
t.Error("mounter podUID mot set")
|
||||
t.Error("mounter podUID not set")
|
||||
}
|
||||
if csiMounter.csiClient == nil {
|
||||
t.Error("mounter csiClient is nil")
|
||||
}
|
||||
|
||||
// ensure data file is created
|
||||
dataDir := path.Dir(mounter.GetPath())
|
||||
dataFile := filepath.Join(dataDir, volDataFileName)
|
||||
if _, err := os.Stat(dataFile); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
t.Errorf("data file not created %s", dataFile)
|
||||
} else {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -225,6 +246,25 @@ func TestPluginNewUnmounter(t *testing.T) {
|
||||
|
||||
pv := makeTestPV("test-pv", 10, testDriver, testVol)
|
||||
|
||||
// save the data file to re-create client
|
||||
dir := path.Join(getTargetPath(testPodUID, pv.ObjectMeta.Name, plug.host), "/mount")
|
||||
if err := os.MkdirAll(dir, 0755); err != nil && !os.IsNotExist(err) {
|
||||
t.Errorf("failed to create dir [%s]: %v", dir, err)
|
||||
}
|
||||
|
||||
if err := saveVolumeData(
|
||||
path.Dir(dir),
|
||||
volDataFileName,
|
||||
map[string]string{
|
||||
volDataKey.specVolID: pv.ObjectMeta.Name,
|
||||
volDataKey.driverName: testDriver,
|
||||
volDataKey.volHandle: testVol,
|
||||
},
|
||||
); err != nil {
|
||||
t.Fatalf("failed to save volume data: %v", err)
|
||||
}
|
||||
|
||||
// test unmounter
|
||||
unmounter, err := plug.NewUnmounter(pv.ObjectMeta.Name, testPodUID)
|
||||
csiUnmounter := unmounter.(*csiMountMgr)
|
||||
|
||||
@ -240,6 +280,9 @@ func TestPluginNewUnmounter(t *testing.T) {
|
||||
t.Error("podUID not set")
|
||||
}
|
||||
|
||||
if csiUnmounter.csiClient == nil {
|
||||
t.Error("unmounter csiClient is nil")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPluginNewAttacher(t *testing.T) {
|
||||
@ -277,3 +320,156 @@ func TestPluginNewDetacher(t *testing.T) {
|
||||
t.Error("Kubernetes client not set for detacher")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPluginNewBlockMapper(t *testing.T) {
|
||||
plug, tmpDir := newTestPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
pv := makeTestPV("test-block-pv", 10, testDriver, testVol)
|
||||
mounter, err := plug.NewBlockVolumeMapper(
|
||||
volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly),
|
||||
&api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
|
||||
volume.VolumeOptions{},
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to make a new BlockMapper: %v", err)
|
||||
}
|
||||
|
||||
if mounter == nil {
|
||||
t.Fatal("failed to create CSI BlockMapper, mapper is nill")
|
||||
}
|
||||
csiMapper := mounter.(*csiBlockMapper)
|
||||
|
||||
// validate mounter fields
|
||||
if csiMapper.driverName != testDriver {
|
||||
t.Error("CSI block mapper missing driver name")
|
||||
}
|
||||
if csiMapper.volumeID != testVol {
|
||||
t.Error("CSI block mapper missing volumeID")
|
||||
}
|
||||
|
||||
if csiMapper.podUID == types.UID("") {
|
||||
t.Error("CSI block mapper missing pod.UID")
|
||||
}
|
||||
if csiMapper.csiClient == nil {
|
||||
t.Error("mapper csiClient is nil")
|
||||
}
|
||||
|
||||
// ensure data file is created
|
||||
dataFile := getVolumeDeviceDataDir(csiMapper.spec.Name(), plug.host)
|
||||
if _, err := os.Stat(dataFile); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
t.Errorf("data file not created %s", dataFile)
|
||||
} else {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPluginNewUnmapper(t *testing.T) {
|
||||
plug, tmpDir := newTestPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
pv := makeTestPV("test-pv", 10, testDriver, testVol)
|
||||
|
||||
// save the data file to re-create client
|
||||
dir := getVolumeDeviceDataDir(pv.ObjectMeta.Name, plug.host)
|
||||
if err := os.MkdirAll(dir, 0755); err != nil && !os.IsNotExist(err) {
|
||||
t.Errorf("failed to create dir [%s]: %v", dir, err)
|
||||
}
|
||||
|
||||
if err := saveVolumeData(
|
||||
dir,
|
||||
volDataFileName,
|
||||
map[string]string{
|
||||
volDataKey.specVolID: pv.ObjectMeta.Name,
|
||||
volDataKey.driverName: testDriver,
|
||||
volDataKey.volHandle: testVol,
|
||||
},
|
||||
); err != nil {
|
||||
t.Fatalf("failed to save volume data: %v", err)
|
||||
}
|
||||
|
||||
// test unmounter
|
||||
unmapper, err := plug.NewBlockVolumeUnmapper(pv.ObjectMeta.Name, testPodUID)
|
||||
csiUnmapper := unmapper.(*csiBlockMapper)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to make a new Unmounter: %v", err)
|
||||
}
|
||||
|
||||
if csiUnmapper == nil {
|
||||
t.Fatal("failed to create CSI Unmounter")
|
||||
}
|
||||
|
||||
if csiUnmapper.podUID != testPodUID {
|
||||
t.Error("podUID not set")
|
||||
}
|
||||
|
||||
if csiUnmapper.specName != pv.ObjectMeta.Name {
|
||||
t.Error("specName not set")
|
||||
}
|
||||
|
||||
if csiUnmapper.csiClient == nil {
|
||||
t.Error("unmapper csiClient is nil")
|
||||
}
|
||||
|
||||
// test loaded vol data
|
||||
if csiUnmapper.driverName != testDriver {
|
||||
t.Error("unmapper driverName not set")
|
||||
}
|
||||
if csiUnmapper.volumeID != testVol {
|
||||
t.Error("unmapper volumeHandle not set")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPluginConstructBlockVolumeSpec(t *testing.T) {
|
||||
plug, tmpDir := newTestPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
specVolID string
|
||||
data map[string]string
|
||||
shouldFail bool
|
||||
}{
|
||||
{
|
||||
name: "valid spec name",
|
||||
specVolID: "test.vol.id",
|
||||
data: map[string]string{volDataKey.specVolID: "test.vol.id", volDataKey.volHandle: "test-vol0", volDataKey.driverName: "test-driver0"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Logf("test case: %s", tc.name)
|
||||
deviceDataDir := getVolumeDeviceDataDir(tc.specVolID, plug.host)
|
||||
|
||||
// create data file in csi plugin dir
|
||||
if tc.data != nil {
|
||||
if err := os.MkdirAll(deviceDataDir, 0755); err != nil && !os.IsNotExist(err) {
|
||||
t.Errorf("failed to create dir [%s]: %v", deviceDataDir, err)
|
||||
}
|
||||
if err := saveVolumeData(deviceDataDir, volDataFileName, tc.data); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// rebuild spec
|
||||
spec, err := plug.ConstructBlockVolumeSpec("test-podUID", tc.specVolID, getVolumeDevicePluginDir(tc.specVolID, plug.host))
|
||||
if tc.shouldFail {
|
||||
if err == nil {
|
||||
t.Fatal("expecting ConstructVolumeSpec to fail, but got nil error")
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
volHandle := spec.PersistentVolume.Spec.CSI.VolumeHandle
|
||||
if volHandle != tc.data[volDataKey.volHandle] {
|
||||
t.Errorf("expected volID %s, got volID %s", tc.data[volDataKey.volHandle], volHandle)
|
||||
}
|
||||
|
||||
if spec.Name() != tc.specVolID {
|
||||
t.Errorf("Unexpected spec name %s", spec.Name())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
93
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_util.go
generated
vendored
@ -17,22 +17,107 @@ limitations under the License.
package csi

import (
"encoding/json"
"fmt"
"os"
"path"

"github.com/golang/glog"
api "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
kstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
)

func getCredentialsFromSecret(k8s kubernetes.Interface, secretRef *api.SecretReference) map[string]string {
func getCredentialsFromSecret(k8s kubernetes.Interface, secretRef *api.SecretReference) (map[string]string, error) {
credentials := map[string]string{}
secret, err := k8s.CoreV1().Secrets(secretRef.Namespace).Get(secretRef.Name, meta.GetOptions{})
if err != nil {
glog.Warningf("failed to find the secret %s in the namespace %s with error: %v\n", secretRef.Name, secretRef.Namespace, err)
return credentials
glog.Errorf("failed to find the secret %s in the namespace %s with error: %v\n", secretRef.Name, secretRef.Namespace, err)
return credentials, err
}
for key, value := range secret.Data {
credentials[key] = string(value)
}

return credentials
return credentials, nil
}
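With this change the helper surfaces the Secret lookup failure instead of silently returning an empty map, so callers are expected to propagate the error. A hedged usage sketch of the same lookup pattern against a fake clientset (secret name, namespace, and keys are made up for illustration; the plugin gets its client from host.GetKubeClient()):

package main

import (
	"fmt"

	api "k8s.io/api/core/v1"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
	fakeclient "k8s.io/client-go/kubernetes/fake"
)

func main() {
	// Fake clientset pre-loaded with one secret.
	client := fakeclient.NewSimpleClientset(&api.Secret{
		ObjectMeta: meta.ObjectMeta{Name: "csi-creds", Namespace: "default"},
		Data:       map[string][]byte{"user": []byte("admin")},
	})

	secretRef := &api.SecretReference{Name: "csi-creds", Namespace: "default"}

	// Equivalent of the lookup done by getCredentialsFromSecret.
	secret, err := client.CoreV1().Secrets(secretRef.Namespace).Get(secretRef.Name, meta.GetOptions{})
	if err != nil {
		// With the new (map, error) signature the caller sees this failure
		// instead of receiving an empty credentials map.
		fmt.Println("secret lookup failed:", err)
		return
	}

	credentials := map[string]string{}
	for k, v := range secret.Data {
		credentials[k] = string(v)
	}
	fmt.Println(credentials)
}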

// saveVolumeData persists parameter data as json file at the provided location
func saveVolumeData(dir string, fileName string, data map[string]string) error {
dataFilePath := path.Join(dir, fileName)
glog.V(4).Info(log("saving volume data file [%s]", dataFilePath))
file, err := os.Create(dataFilePath)
if err != nil {
glog.Error(log("failed to save volume data file %s: %v", dataFilePath, err))
return err
}
defer file.Close()
if err := json.NewEncoder(file).Encode(data); err != nil {
glog.Error(log("failed to save volume data file %s: %v", dataFilePath, err))
return err
}
glog.V(4).Info(log("volume data file saved successfully [%s]", dataFilePath))
return nil
}

// loadVolumeData loads volume info from specified json file/location
func loadVolumeData(dir string, fileName string) (map[string]string, error) {
// remove /mount at the end
dataFileName := path.Join(dir, fileName)
glog.V(4).Info(log("loading volume data file [%s]", dataFileName))

file, err := os.Open(dataFileName)
if err != nil {
glog.Error(log("failed to open volume data file [%s]: %v", dataFileName, err))
return nil, err
}
defer file.Close()
data := map[string]string{}
if err := json.NewDecoder(file).Decode(&data); err != nil {
glog.Error(log("failed to parse volume data file [%s]: %v", dataFileName, err))
return nil, err
}

return data, nil
}
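These two helpers are a plain JSON round trip over a map[string]string. A self-contained sketch of the same pattern (the file name and map keys are illustrative; the plugin uses its volDataFileName constant and volDataKey fields):

package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"path"
)

func main() {
	// Temp dir stands in for the per-volume data directory created by the plugin.
	dir, err := ioutil.TempDir("", "csi-voldata")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	// Same shape as the volData map persisted by NewMounter / NewBlockVolumeMapper.
	volData := map[string]string{
		"specVolID":    "my-pv",
		"volumeHandle": "vol-0001",
		"driverName":   "example.csi.driver",
	}

	// Mirror of saveVolumeData: encode the map as JSON into <dir>/<file>.
	f, err := os.Create(path.Join(dir, "vol_data.json"))
	if err != nil {
		panic(err)
	}
	if err := json.NewEncoder(f).Encode(volData); err != nil {
		panic(err)
	}
	f.Close()

	// Mirror of loadVolumeData: decode the JSON back into a map for teardown.
	f, err = os.Open(path.Join(dir, "vol_data.json"))
	if err != nil {
		panic(err)
	}
	defer f.Close()
	loaded := map[string]string{}
	if err := json.NewDecoder(f).Decode(&loaded); err != nil {
		panic(err)
	}
	fmt.Println(loaded["driverName"], loaded["volumeHandle"])
}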

func getCSISourceFromSpec(spec *volume.Spec) (*api.CSIPersistentVolumeSource, error) {
if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.CSI != nil {
return spec.PersistentVolume.Spec.CSI, nil
}

return nil, fmt.Errorf("CSIPersistentVolumeSource not defined in spec")
}

func getReadOnlyFromSpec(spec *volume.Spec) (bool, error) {
if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.CSI != nil {
return spec.ReadOnly, nil
}

return false, fmt.Errorf("CSIPersistentVolumeSource not defined in spec")
}

// log prepends log string with `kubernetes.io/csi`
func log(msg string, parts ...interface{}) string {
return fmt.Sprintf(fmt.Sprintf("%s: %s", csiPluginName, msg), parts...)
}

// getVolumeDevicePluginDir returns the path where the CSI plugin keeps the
// symlink for a block device associated with a given specVolumeID.
// path: plugins/kubernetes.io/csi/volumeDevices/{specVolumeID}/dev
func getVolumeDevicePluginDir(specVolID string, host volume.VolumeHost) string {
sanitizedSpecVolID := kstrings.EscapeQualifiedNameForDisk(specVolID)
return path.Join(host.GetVolumeDevicePluginDir(csiPluginName), sanitizedSpecVolID, "dev")
}

// getVolumeDeviceDataDir returns the path where the CSI plugin keeps the
// volume data for a block device associated with a given specVolumeID.
// path: plugins/kubernetes.io/csi/volumeDevices/{specVolumeID}/data
func getVolumeDeviceDataDir(specVolID string, host volume.VolumeHost) string {
sanitizedSpecVolID := kstrings.EscapeQualifiedNameForDisk(specVolID)
return path.Join(host.GetVolumeDevicePluginDir(csiPluginName), sanitizedSpecVolID, "data")
}
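Both helpers hang off the same per-volume directory under the CSI volumeDevices tree and differ only in the final path element. A sketch of the resulting layout (the base directory and volume ID are hypothetical; the real base comes from host.GetVolumeDevicePluginDir and the ID is escaped with EscapeQualifiedNameForDisk):

package main

import (
	"fmt"
	"path"
)

func main() {
	// Hypothetical values; see getVolumeDevicePluginDir / getVolumeDeviceDataDir above.
	base := "/var/lib/kubelet/plugins/kubernetes.io/csi/volumeDevices"
	specVolID := "my-block-pv"

	fmt.Println(path.Join(base, specVolID, "dev"))  // symlink dir for the block device
	fmt.Println(path.Join(base, specVolID, "data")) // persisted volume data dir
}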
|
||||
|
1
vendor/k8s.io/kubernetes/pkg/volume/csi/fake/BUILD
generated
vendored
@ -7,7 +7,6 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/container-storage-interface/spec/lib/go/csi/v0:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
],
)
|
7
vendor/k8s.io/kubernetes/pkg/volume/csi/fake/fake_client.go
generated
vendored
@ -24,7 +24,6 @@ import (
"google.golang.org/grpc"

csipb "github.com/container-storage-interface/spec/lib/go/csi/v0"
grpctx "golang.org/x/net/context"
)

// IdentityClient is a CSI identity client used for testing
@ -94,7 +93,7 @@ func (f *NodeClient) AddNodeStagedVolume(volID, deviceMountPath string) {
}

// NodePublishVolume implements CSI NodePublishVolume
func (f *NodeClient) NodePublishVolume(ctx grpctx.Context, req *csipb.NodePublishVolumeRequest, opts ...grpc.CallOption) (*csipb.NodePublishVolumeResponse, error) {
func (f *NodeClient) NodePublishVolume(ctx context.Context, req *csipb.NodePublishVolumeRequest, opts ...grpc.CallOption) (*csipb.NodePublishVolumeResponse, error) {

if f.nextErr != nil {
return nil, f.nextErr
@ -106,7 +105,7 @@ func (f *NodeClient) NodePublishVolume(ctx grpctx.Context, req *csipb.NodePublis
if req.GetTargetPath() == "" {
return nil, errors.New("missing target path")
}
fsTypes := "ext4|xfs|zfs"
fsTypes := "block|ext4|xfs|zfs"
fsType := req.GetVolumeCapability().GetMount().GetFsType()
if !strings.Contains(fsTypes, fsType) {
return nil, errors.New("invalid fstype")
@ -145,7 +144,7 @@ func (f *NodeClient) NodeStageVolume(ctx context.Context, req *csipb.NodeStageVo
}

fsType := ""
fsTypes := "ext4|xfs|zfs"
fsTypes := "block|ext4|xfs|zfs"
mounted := req.GetVolumeCapability().GetMount()
if mounted != nil {
fsType = mounted.GetFsType()
|
30
vendor/k8s.io/kubernetes/pkg/volume/csi/labelmanager/BUILD
generated
vendored
Normal file
@ -0,0 +1,30 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["labelmanager.go"],
|
||||
importpath = "k8s.io/kubernetes/pkg/volume/csi/labelmanager",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/util/retry:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
251
vendor/k8s.io/kubernetes/pkg/volume/csi/labelmanager/labelmanager.go
generated
vendored
Normal file
@ -0,0 +1,251 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package labelmanager includes internal functions used to add/delete labels to
|
||||
// kubernetes nodes for corresponding CSI drivers
|
||||
package labelmanager // import "k8s.io/kubernetes/pkg/volume/csi/labelmanager"
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/golang/glog"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
"k8s.io/client-go/util/retry"
|
||||
)
|
||||
|
||||
const (
|
||||
// Name of node annotation that contains JSON map of driver names to node
|
||||
// names
|
||||
annotationKey = "csi.volume.kubernetes.io/nodeid"
|
||||
csiPluginName = "kubernetes.io/csi"
|
||||
)
|
||||
|
||||
// labelManagementStruct is struct of channels used for communication between the driver registration
|
||||
// code and the go routine responsible for managing the node's labels
|
||||
type labelManagerStruct struct {
|
||||
nodeName types.NodeName
|
||||
k8s kubernetes.Interface
|
||||
}
|
||||
|
||||
// Interface implements an interface for managing labels of a node
|
||||
type Interface interface {
|
||||
AddLabels(driverName string) error
|
||||
}
|
||||
|
||||
// NewLabelManager initializes labelManagerStruct and returns available interfaces
|
||||
func NewLabelManager(nodeName types.NodeName, kubeClient kubernetes.Interface) Interface {
|
||||
return labelManagerStruct{
|
||||
nodeName: nodeName,
|
||||
k8s: kubeClient,
|
||||
}
|
||||
}
|
||||
|
||||
// nodeLabelManager waits for labeling requests initiated by the driver's registration
|
||||
// process.
|
||||
func (lm labelManagerStruct) AddLabels(driverName string) error {
|
||||
err := verifyAndAddNodeId(string(lm.nodeName), lm.k8s.CoreV1().Nodes(), driverName, string(lm.nodeName))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update node %s's annotation with error: %+v", lm.nodeName, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Clones the given map and returns a new map with the given key and value added.
|
||||
// Returns the given map, if annotationKey is empty.
|
||||
func cloneAndAddAnnotation(
|
||||
annotations map[string]string,
|
||||
annotationKey,
|
||||
annotationValue string) map[string]string {
|
||||
if annotationKey == "" {
|
||||
// Don't need to add an annotation.
|
||||
return annotations
|
||||
}
|
||||
// Clone.
|
||||
newAnnotations := map[string]string{}
|
||||
for key, value := range annotations {
|
||||
newAnnotations[key] = value
|
||||
}
|
||||
newAnnotations[annotationKey] = annotationValue
|
||||
return newAnnotations
|
||||
}
|
||||
|
||||
func verifyAndAddNodeId(
|
||||
k8sNodeName string,
|
||||
k8sNodesClient corev1.NodeInterface,
|
||||
csiDriverName string,
|
||||
csiDriverNodeId string) error {
|
||||
// Add or update annotation on Node object
|
||||
retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
|
||||
// Retrieve the latest version of Node before attempting update, so that
|
||||
// existing changes are not overwritten. RetryOnConflict uses
|
||||
// exponential backoff to avoid exhausting the apiserver.
|
||||
result, getErr := k8sNodesClient.Get(k8sNodeName, metav1.GetOptions{})
|
||||
if getErr != nil {
|
||||
glog.Errorf("Failed to get latest version of Node: %v", getErr)
|
||||
return getErr // do not wrap error
|
||||
}
|
||||
|
||||
var previousAnnotationValue string
|
||||
if result.ObjectMeta.Annotations != nil {
|
||||
previousAnnotationValue =
|
||||
result.ObjectMeta.Annotations[annotationKey]
|
||||
glog.V(3).Infof(
|
||||
"previousAnnotationValue=%q", previousAnnotationValue)
|
||||
}
|
||||
|
||||
existingDriverMap := map[string]string{}
|
||||
if previousAnnotationValue != "" {
|
||||
// Parse previousAnnotationValue as JSON
|
||||
if err := json.Unmarshal([]byte(previousAnnotationValue), &existingDriverMap); err != nil {
|
||||
return fmt.Errorf(
|
||||
"failed to parse node's %q annotation value (%q) err=%v",
|
||||
annotationKey,
|
||||
previousAnnotationValue,
|
||||
err)
|
||||
}
|
||||
}
|
||||
|
||||
if val, ok := existingDriverMap[csiDriverName]; ok {
|
||||
if val == csiDriverNodeId {
|
||||
// Value already exists in node annotation, nothing more to do
|
||||
glog.V(1).Infof(
|
||||
"The key value {%q: %q} alredy eixst in node %q annotation, no need to update: %v",
|
||||
csiDriverName,
|
||||
csiDriverNodeId,
|
||||
annotationKey,
|
||||
previousAnnotationValue)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Add/update annotation value
|
||||
existingDriverMap[csiDriverName] = csiDriverNodeId
|
||||
jsonObj, err := json.Marshal(existingDriverMap)
|
||||
if err != nil {
|
||||
return fmt.Errorf(
|
||||
"failed while trying to add key value {%q: %q} to node %q annotation. Existing value: %v",
|
||||
csiDriverName,
|
||||
csiDriverNodeId,
|
||||
annotationKey,
|
||||
previousAnnotationValue)
|
||||
}
|
||||
|
||||
result.ObjectMeta.Annotations = cloneAndAddAnnotation(
|
||||
result.ObjectMeta.Annotations,
|
||||
annotationKey,
|
||||
string(jsonObj))
|
||||
_, updateErr := k8sNodesClient.Update(result)
|
||||
if updateErr == nil {
|
||||
fmt.Printf(
|
||||
"Updated node %q successfully for CSI driver %q and CSI node name %q",
|
||||
k8sNodeName,
|
||||
csiDriverName,
|
||||
csiDriverNodeId)
|
||||
}
|
||||
return updateErr // do not wrap error
|
||||
})
|
||||
if retryErr != nil {
|
||||
return fmt.Errorf("node update failed: %v", retryErr)
|
||||
}
|
||||
return nil
|
||||
}
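The csi.volume.kubernetes.io/nodeid annotation value is just a JSON object mapping driver names to node IDs, merged on every registration. A standalone sketch of that merge using only encoding/json (the annotation contents are illustrative):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Existing value of the nodeid annotation, if any.
	previous := `{"other.csi.driver":"node-1"}`

	drivers := map[string]string{}
	if previous != "" {
		if err := json.Unmarshal([]byte(previous), &drivers); err != nil {
			panic(err)
		}
	}

	// Register this driver's node ID, mirroring what verifyAndAddNodeId does.
	drivers["example.csi.driver"] = "node-1"

	updated, err := json.Marshal(drivers)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(updated)) // {"example.csi.driver":"node-1","other.csi.driver":"node-1"}
}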
|
||||
|
||||
// Fetches Kubernetes node API object corresponding to k8sNodeName.
|
||||
// If the csiDriverName is present in the node annotation, it is removed.
|
||||
func verifyAndDeleteNodeId(
|
||||
k8sNodeName string,
|
||||
k8sNodesClient corev1.NodeInterface,
|
||||
csiDriverName string) error {
|
||||
retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
|
||||
// Retrieve the latest version of Node before attempting update, so that
|
||||
// existing changes are not overwritten. RetryOnConflict uses
|
||||
// exponential backoff to avoid exhausting the apiserver.
|
||||
result, getErr := k8sNodesClient.Get(k8sNodeName, metav1.GetOptions{})
|
||||
if getErr != nil {
|
||||
glog.Errorf("failed to get latest version of Node: %v", getErr)
|
||||
return getErr // do not wrap error
|
||||
}
|
||||
|
||||
var previousAnnotationValue string
|
||||
if result.ObjectMeta.Annotations != nil {
|
||||
previousAnnotationValue =
|
||||
result.ObjectMeta.Annotations[annotationKey]
|
||||
glog.V(3).Infof(
|
||||
"previousAnnotationValue=%q", previousAnnotationValue)
|
||||
}
|
||||
|
||||
existingDriverMap := map[string]string{}
|
||||
if previousAnnotationValue == "" {
|
||||
// Annotation value is empty, nothing more to do
|
||||
glog.V(1).Infof(
|
||||
"The key %q does not exist in node %q annotation, no need to cleanup.",
|
||||
csiDriverName,
|
||||
annotationKey)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Parse previousAnnotationValue as JSON
|
||||
if err := json.Unmarshal([]byte(previousAnnotationValue), &existingDriverMap); err != nil {
|
||||
return fmt.Errorf(
|
||||
"failed to parse node's %q annotation value (%q) err=%v",
|
||||
annotationKey,
|
||||
previousAnnotationValue,
|
||||
err)
|
||||
}
|
||||
|
||||
if _, ok := existingDriverMap[csiDriverName]; !ok {
|
||||
// Driver key not present in node annotation, nothing more to do
|
||||
glog.V(1).Infof(
|
||||
"The key %q does not eixst in node %q annotation, no need to cleanup: %v",
|
||||
csiDriverName,
|
||||
annotationKey,
|
||||
previousAnnotationValue)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add/update annotation value
|
||||
delete(existingDriverMap, csiDriverName)
|
||||
jsonObj, err := json.Marshal(existingDriverMap)
|
||||
if err != nil {
|
||||
return fmt.Errorf(
|
||||
"failed while trying to remove key %q from node %q annotation. Existing data: %v",
|
||||
csiDriverName,
|
||||
annotationKey,
|
||||
previousAnnotationValue)
|
||||
}
|
||||
|
||||
result.ObjectMeta.Annotations = cloneAndAddAnnotation(
|
||||
result.ObjectMeta.Annotations,
|
||||
annotationKey,
|
||||
string(jsonObj))
|
||||
_, updateErr := k8sNodesClient.Update(result)
|
||||
if updateErr == nil {
|
||||
fmt.Printf(
|
||||
"Updated node %q annotation to remove CSI driver %q.",
|
||||
k8sNodeName,
|
||||
csiDriverName)
|
||||
}
|
||||
return updateErr // do not wrap error
|
||||
})
|
||||
if retryErr != nil {
|
||||
return fmt.Errorf("node update failed: %v", retryErr)
|
||||
}
|
||||
return nil
|
||||
}
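A hedged usage sketch of the exported surface of this package, driving AddLabels against a fake clientset pre-seeded with the node object it will read and update (node name and driver name are illustrative):

package main

import (
	"fmt"

	api "k8s.io/api/core/v1"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	fakeclient "k8s.io/client-go/kubernetes/fake"

	"k8s.io/kubernetes/pkg/volume/csi/labelmanager"
)

func main() {
	// The node must already exist, since AddLabels fetches and updates it via the Nodes client.
	client := fakeclient.NewSimpleClientset(&api.Node{
		ObjectMeta: meta.ObjectMeta{Name: "node-1"},
	})

	lm := labelmanager.NewLabelManager(types.NodeName("node-1"), client)
	if err := lm.AddLabels("example.csi.driver"); err != nil {
		fmt.Println("annotation update failed:", err)
		return
	}

	node, _ := client.CoreV1().Nodes().Get("node-1", meta.GetOptions{})
	fmt.Println(node.Annotations["csi.volume.kubernetes.io/nodeid"])
}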
|
19
vendor/k8s.io/kubernetes/pkg/volume/downwardapi/downwardapi.go
generated
vendored
@ -19,6 +19,7 @@ package downwardapi
|
||||
import (
|
||||
"fmt"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
@ -179,13 +180,6 @@ func (b *downwardAPIVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
|
||||
glog.Errorf("Couldn't setup downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error())
|
||||
return err
|
||||
}
|
||||
if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
|
||||
glog.Errorf("Unable to setup downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error())
|
||||
return err
|
||||
}
|
||||
if err := volumeutil.MakeNestedMountpoints(b.volName, dir, *b.pod); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
data, err := CollectData(b.source.Items, b.pod, b.plugin.host, b.source.DefaultMode)
|
||||
if err != nil {
|
||||
@ -193,6 +187,15 @@ func (b *downwardAPIVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
|
||||
glog.Errorf("Unable to setup downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
if err := volumeutil.MakeNestedMountpoints(b.volName, dir, *b.pod); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
writerContext := fmt.Sprintf("pod %v/%v volume %v", b.pod.Namespace, b.pod.Name, b.volName)
|
||||
writer, err := volumeutil.NewAtomicWriter(dir, writerContext)
|
||||
if err != nil {
|
||||
@ -229,7 +232,7 @@ func CollectData(items []v1.DownwardAPIVolumeFile, pod *v1.Pod, host volume.Volu
|
||||
data := make(map[string]volumeutil.FileProjection)
|
||||
for _, fileInfo := range items {
|
||||
var fileProjection volumeutil.FileProjection
|
||||
fPath := path.Clean(fileInfo.Path)
|
||||
fPath := filepath.Clean(fileInfo.Path)
|
||||
if fileInfo.Mode != nil {
|
||||
fileProjection.Mode = *fileInfo.Mode
|
||||
} else {
|
||||
|
4
vendor/k8s.io/kubernetes/pkg/volume/fc/fc.go
generated
vendored
@ -443,6 +443,10 @@ func (b *fcDiskMapper) SetUpDevice() (string, error) {
return "", nil
}

func (b *fcDiskMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, volumeMapName string, podUID types.UID) error {
return util.MapBlockVolume(devicePath, globalMapPath, volumeMapPath, volumeMapName, podUID)
}

type fcDiskUnmapper struct {
*fcDisk
deviceUtil util.DeviceUtil
|
9
vendor/k8s.io/kubernetes/pkg/volume/fc/fc_test.go
generated
vendored
@ -19,6 +19,7 @@ package fc
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
@ -424,6 +425,9 @@ func Test_getWwnsLunWwidsError(t *testing.T) {
|
||||
}
|
||||
|
||||
func Test_ConstructVolumeSpec(t *testing.T) {
|
||||
if runtime.GOOS == "darwin" {
|
||||
t.Skipf("Test_ConstructVolumeSpec is not supported on GOOS=%s", runtime.GOOS)
|
||||
}
|
||||
fm := &mount.FakeMounter{
|
||||
MountPoints: []mount.MountPoint{
|
||||
{Device: "/dev/sdb", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod1"},
|
||||
@ -437,7 +441,10 @@ func Test_ConstructVolumeSpec(t *testing.T) {
|
||||
"/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod2",
|
||||
}
|
||||
for _, path := range mountPaths {
|
||||
refs, _ := mount.GetMountRefs(fm, path)
|
||||
refs, err := mount.GetMountRefs(fm, path)
|
||||
if err != nil {
|
||||
t.Errorf("couldn't get mountrefs. err: %v", err)
|
||||
}
|
||||
var globalPDPath string
|
||||
for _, ref := range refs {
|
||||
if strings.Contains(ref, "kubernetes.io/fc") {
|
||||
|
6
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/attacher-defaults.go
generated
vendored
@ -30,13 +30,13 @@ type attacherDefaults flexVolumeAttacher
|
||||
|
||||
// Attach is part of the volume.Attacher interface
|
||||
func (a *attacherDefaults) Attach(spec *volume.Spec, hostName types.NodeName) (string, error) {
|
||||
glog.Warning(logPrefix(a.plugin.flexVolumePlugin), "using default Attach for volume ", spec.Name, ", host ", hostName)
|
||||
glog.Warning(logPrefix(a.plugin.flexVolumePlugin), "using default Attach for volume ", spec.Name(), ", host ", hostName)
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// WaitForAttach is part of the volume.Attacher interface
|
||||
func (a *attacherDefaults) WaitForAttach(spec *volume.Spec, devicePath string, timeout time.Duration) (string, error) {
|
||||
glog.Warning(logPrefix(a.plugin.flexVolumePlugin), "using default WaitForAttach for volume ", spec.Name, ", device ", devicePath)
|
||||
glog.Warning(logPrefix(a.plugin.flexVolumePlugin), "using default WaitForAttach for volume ", spec.Name(), ", device ", devicePath)
|
||||
return devicePath, nil
|
||||
}
|
||||
|
||||
@ -47,7 +47,7 @@ func (a *attacherDefaults) GetDeviceMountPath(spec *volume.Spec, mountsDir strin
|
||||
|
||||
// MountDevice is part of the volume.Attacher interface
|
||||
func (a *attacherDefaults) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string, mounter mount.Interface) error {
|
||||
glog.Warning(logPrefix(a.plugin.flexVolumePlugin), "using default MountDevice for volume ", spec.Name, ", device ", devicePath, ", deviceMountPath ", deviceMountPath)
|
||||
glog.Warning(logPrefix(a.plugin.flexVolumePlugin), "using default MountDevice for volume ", spec.Name(), ", device ", devicePath, ", deviceMountPath ", deviceMountPath)
|
||||
|
||||
volSourceFSType, err := getFSType(spec)
|
||||
if err != nil {
|
||||
|
4
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/flexvolume_test.go
generated
vendored
@ -30,7 +30,7 @@ import (
|
||||
volumetest "k8s.io/kubernetes/pkg/volume/testing"
|
||||
)
|
||||
|
||||
const execScriptTempl1 = `#!/bin/bash
|
||||
const execScriptTempl1 = `#!/usr/bin/env bash
|
||||
if [ "$1" == "init" -a $# -eq 1 ]; then
|
||||
echo -n '{
|
||||
"status": "Success"
|
||||
@ -73,7 +73,7 @@ exit 1
|
||||
echo -n $@ &> {{.OutputFile}}
|
||||
`
|
||||
|
||||
const execScriptTempl2 = `#!/bin/bash
|
||||
const execScriptTempl2 = `#!/usr/bin/env bash
|
||||
if [ "$1" == "init" -a $# -eq 1 ]; then
|
||||
echo -n '{
|
||||
"status": "Success"
|
||||
|
185
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/probe.go
generated
vendored
@ -25,31 +25,24 @@ import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/fsnotify/fsnotify"
|
||||
"k8s.io/apimachinery/pkg/util/errors"
|
||||
utilfs "k8s.io/kubernetes/pkg/util/filesystem"
|
||||
utilstrings "k8s.io/kubernetes/pkg/util/strings"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type flexVolumeProber struct {
|
||||
mutex sync.Mutex
|
||||
pluginDir string // Flexvolume driver directory
|
||||
watcher utilfs.FSWatcher
|
||||
probeNeeded bool // Must only read and write this through testAndSetProbeNeeded.
|
||||
lastUpdated time.Time // Last time probeNeeded was updated.
|
||||
watchEventCount int
|
||||
factory PluginFactory
|
||||
fs utilfs.Filesystem
|
||||
mutex sync.Mutex
|
||||
pluginDir string // Flexvolume driver directory
|
||||
watcher utilfs.FSWatcher
|
||||
factory PluginFactory
|
||||
fs utilfs.Filesystem
|
||||
probeAllNeeded bool
|
||||
eventsMap map[string]volume.ProbeOperation // the key is the driver directory path, the value is the corresponding operation
|
||||
}
|
||||
|
||||
const (
|
||||
// TODO (cxing) Tune these params based on test results.
|
||||
// watchEventLimit is the max allowable number of processed watches within watchEventInterval.
|
||||
watchEventInterval = 5 * time.Second
|
||||
watchEventLimit = 20
|
||||
)
|
||||
|
||||
func GetDynamicPluginProber(pluginDir string) volume.DynamicPluginProber {
|
||||
return &flexVolumeProber{
|
||||
pluginDir: pluginDir,
|
||||
@ -60,8 +53,8 @@ func GetDynamicPluginProber(pluginDir string) volume.DynamicPluginProber {
|
||||
}
|
||||
|
||||
func (prober *flexVolumeProber) Init() error {
|
||||
prober.testAndSetProbeNeeded(true)
|
||||
prober.lastUpdated = time.Now()
|
||||
prober.testAndSetProbeAllNeeded(true)
|
||||
prober.eventsMap = map[string]volume.ProbeOperation{}
|
||||
|
||||
if err := prober.createPluginDir(); err != nil {
|
||||
return err
|
||||
@ -73,26 +66,44 @@ func (prober *flexVolumeProber) Init() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Probes for Flexvolume drivers.
|
||||
// If a filesystem update has occurred since the last probe, updated = true
|
||||
// and the list of probed plugins is returned.
|
||||
// Otherwise, update = false and probedPlugins = nil.
|
||||
//
|
||||
// If an error occurs, updated and plugins are set arbitrarily.
|
||||
func (prober *flexVolumeProber) Probe() (updated bool, plugins []volume.VolumePlugin, err error) {
|
||||
probeNeeded := prober.testAndSetProbeNeeded(false)
|
||||
|
||||
if !probeNeeded {
|
||||
return false, nil, nil
|
||||
// If probeAllNeeded is true, probe all pluginDir
|
||||
// else probe events in eventsMap
|
||||
func (prober *flexVolumeProber) Probe() (events []volume.ProbeEvent, err error) {
|
||||
if prober.probeAllNeeded {
|
||||
prober.testAndSetProbeAllNeeded(false)
|
||||
return prober.probeAll()
|
||||
}
|
||||
|
||||
return prober.probeMap()
|
||||
}
|
||||
|
||||
func (prober *flexVolumeProber) probeMap() (events []volume.ProbeEvent, err error) {
|
||||
// TODO use a concurrent map to avoid Locking the entire map
|
||||
prober.mutex.Lock()
|
||||
defer prober.mutex.Unlock()
|
||||
probeEvents := []volume.ProbeEvent{}
|
||||
allErrs := []error{}
|
||||
for driverDirPathAbs, op := range prober.eventsMap {
|
||||
driverDirName := filepath.Base(driverDirPathAbs) // e.g. driverDirName = vendor~cifs
|
||||
probeEvent, pluginErr := prober.newProbeEvent(driverDirName, op)
|
||||
if pluginErr != nil {
|
||||
allErrs = append(allErrs, pluginErr)
|
||||
continue
|
||||
}
|
||||
probeEvents = append(probeEvents, probeEvent)
|
||||
|
||||
delete(prober.eventsMap, driverDirPathAbs)
|
||||
}
|
||||
return probeEvents, errors.NewAggregate(allErrs)
|
||||
}
|
||||
|
||||
func (prober *flexVolumeProber) probeAll() (events []volume.ProbeEvent, err error) {
|
||||
probeEvents := []volume.ProbeEvent{}
|
||||
allErrs := []error{}
|
||||
files, err := prober.fs.ReadDir(prober.pluginDir)
|
||||
if err != nil {
|
||||
return false, nil, fmt.Errorf("Error reading the Flexvolume directory: %s", err)
|
||||
return nil, fmt.Errorf("Error reading the Flexvolume directory: %s", err)
|
||||
}
|
||||
|
||||
plugins = []volume.VolumePlugin{}
|
||||
allErrs := []error{}
|
||||
for _, f := range files {
|
||||
// only directories with names that do not begin with '.' are counted as plugins
|
||||
// and pluginDir/dirname/dirname should be an executable
|
||||
@ -100,20 +111,39 @@ func (prober *flexVolumeProber) Probe() (updated bool, plugins []volume.VolumePl
|
||||
// e.g. dirname = vendor~cifs
|
||||
// then, executable will be pluginDir/dirname/cifs
|
||||
if f.IsDir() && filepath.Base(f.Name())[0] != '.' {
|
||||
plugin, pluginErr := prober.factory.NewFlexVolumePlugin(prober.pluginDir, f.Name())
|
||||
probeEvent, pluginErr := prober.newProbeEvent(f.Name(), volume.ProbeAddOrUpdate)
|
||||
if pluginErr != nil {
|
||||
pluginErr = fmt.Errorf(
|
||||
"Error creating Flexvolume plugin from directory %s, skipping. Error: %s",
|
||||
f.Name(), pluginErr)
|
||||
allErrs = append(allErrs, pluginErr)
|
||||
continue
|
||||
}
|
||||
|
||||
plugins = append(plugins, plugin)
|
||||
probeEvents = append(probeEvents, probeEvent)
|
||||
}
|
||||
}
|
||||
return probeEvents, errors.NewAggregate(allErrs)
|
||||
}
|
||||
|
||||
return true, plugins, errors.NewAggregate(allErrs)
|
||||
func (prober *flexVolumeProber) newProbeEvent(driverDirName string, op volume.ProbeOperation) (volume.ProbeEvent, error) {
|
||||
probeEvent := volume.ProbeEvent{
|
||||
Op: op,
|
||||
}
|
||||
if op == volume.ProbeAddOrUpdate {
|
||||
plugin, pluginErr := prober.factory.NewFlexVolumePlugin(prober.pluginDir, driverDirName)
|
||||
if pluginErr != nil {
|
||||
pluginErr = fmt.Errorf(
|
||||
"Error creating Flexvolume plugin from directory %s, skipping. Error: %s",
|
||||
driverDirName, pluginErr)
|
||||
return probeEvent, pluginErr
|
||||
}
|
||||
probeEvent.Plugin = plugin
|
||||
probeEvent.PluginName = plugin.GetPluginName()
|
||||
} else if op == volume.ProbeRemove {
|
||||
driverName := utilstrings.UnescapePluginName(driverDirName)
|
||||
probeEvent.PluginName = flexVolumePluginNamePrefix + driverName
|
||||
|
||||
} else {
|
||||
return probeEvent, fmt.Errorf("Unknown Operation on directory: %s. ", driverDirName)
|
||||
}
|
||||
return probeEvent, nil
|
||||
}
|
||||
|
||||
func (prober *flexVolumeProber) handleWatchEvent(event fsnotify.Event) error {
|
||||
@ -127,46 +157,67 @@ func (prober *flexVolumeProber) handleWatchEvent(event fsnotify.Event) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
parentPathAbs := filepath.Dir(eventPathAbs)
|
||||
pluginDirAbs, err := filepath.Abs(prober.pluginDir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If the Flexvolume plugin directory is removed, need to recreate it
|
||||
// in order to keep it under watch.
|
||||
if eventOpIs(event, fsnotify.Remove) && eventPathAbs == pluginDirAbs {
|
||||
if err := prober.createPluginDir(); err != nil {
|
||||
return err
|
||||
// event of pluginDirAbs
|
||||
if eventPathAbs == pluginDirAbs {
|
||||
// If the Flexvolume plugin directory is removed, need to recreate it
|
||||
// in order to keep it under watch.
|
||||
if eventOpIs(event, fsnotify.Remove) {
|
||||
if err := prober.createPluginDir(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := prober.addWatchRecursive(pluginDirAbs); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := prober.addWatchRecursive(pluginDirAbs); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if eventOpIs(event, fsnotify.Create) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// watch newly added subdirectories inside a driver directory
|
||||
if eventOpIs(event, fsnotify.Create) {
|
||||
if err := prober.addWatchRecursive(eventPathAbs); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
prober.updateProbeNeeded()
|
||||
eventRelPathToPluginDir, err := filepath.Rel(pluginDirAbs, eventPathAbs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// event inside specific driver dir
|
||||
if len(eventRelPathToPluginDir) > 0 {
|
||||
driverDirName := strings.Split(eventRelPathToPluginDir, string(os.PathSeparator))[0]
|
||||
driverDirAbs := filepath.Join(pluginDirAbs, driverDirName)
|
||||
// executable is removed, will trigger ProbeRemove event
|
||||
if eventOpIs(event, fsnotify.Remove) && (eventRelPathToPluginDir == getExecutablePathRel(driverDirName) || parentPathAbs == pluginDirAbs) {
|
||||
prober.updateEventsMap(driverDirAbs, volume.ProbeRemove)
|
||||
} else {
|
||||
prober.updateEventsMap(driverDirAbs, volume.ProbeAddOrUpdate)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (prober *flexVolumeProber) updateProbeNeeded() {
|
||||
// Within 'watchEventInterval' seconds, a max of 'watchEventLimit' watch events is processed.
|
||||
// The watch event will not be registered if the limit is reached.
|
||||
// This prevents increased disk usage from Probe() being triggered too frequently (either
|
||||
// accidentally or maliciously).
|
||||
if time.Since(prober.lastUpdated) > watchEventInterval {
|
||||
// Update, then reset the timer and watch count.
|
||||
prober.testAndSetProbeNeeded(true)
|
||||
prober.lastUpdated = time.Now()
|
||||
prober.watchEventCount = 1
|
||||
} else if prober.watchEventCount < watchEventLimit {
|
||||
prober.testAndSetProbeNeeded(true)
|
||||
prober.watchEventCount++
|
||||
// getExecutableName returns the executableName of a flex plugin
|
||||
func getExecutablePathRel(driverDirName string) string {
|
||||
parts := strings.Split(driverDirName, "~")
|
||||
return filepath.Join(driverDirName, parts[len(parts)-1])
|
||||
}
|
||||
|
||||
func (prober *flexVolumeProber) updateEventsMap(eventDirAbs string, op volume.ProbeOperation) {
|
||||
prober.mutex.Lock()
|
||||
defer prober.mutex.Unlock()
|
||||
if prober.probeAllNeeded {
|
||||
return
|
||||
}
|
||||
prober.eventsMap[eventDirAbs] = op
|
||||
}
|
||||
|
||||
// Recursively adds to watch all directories inside and including the file specified by the given filename.
|
||||
@ -176,7 +227,7 @@ func (prober *flexVolumeProber) updateProbeNeeded() {
|
||||
// on its parent directory.
|
||||
func (prober *flexVolumeProber) addWatchRecursive(filename string) error {
|
||||
addWatch := func(path string, info os.FileInfo, err error) error {
|
||||
if info.IsDir() {
|
||||
if err == nil && info.IsDir() {
|
||||
if err := prober.watcher.AddWatch(path); err != nil {
|
||||
glog.Errorf("Error recursively adding watch: %v", err)
|
||||
}
|
||||
@ -222,10 +273,10 @@ func (prober *flexVolumeProber) createPluginDir() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (prober *flexVolumeProber) testAndSetProbeNeeded(newval bool) (oldval bool) {
|
||||
func (prober *flexVolumeProber) testAndSetProbeAllNeeded(newval bool) (oldval bool) {
|
||||
prober.mutex.Lock()
|
||||
defer prober.mutex.Unlock()
|
||||
oldval, prober.probeNeeded = prober.probeNeeded, newval
|
||||
oldval, prober.probeAllNeeded = prober.probeAllNeeded, newval
|
||||
return
|
||||
}
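The prober's core idea is to coalesce raw fsnotify events into at most one pending operation per driver directory, then drain that map on the next Probe call. A simplified, self-contained stand-in for that pattern (the local types mirror volume.ProbeOperation / volume.ProbeEvent and are not the real plugin types):

package main

import "fmt"

// Stand-ins for volume.ProbeOperation / volume.ProbeEvent.
type probeOperation int

const (
	probeAddOrUpdate probeOperation = iota
	probeRemove
)

type probeEvent struct {
	pluginName string
	op         probeOperation
}

func main() {
	// Key: driver directory path, value: last pending operation, as in prober.eventsMap.
	eventsMap := map[string]probeOperation{}

	// Several filesystem events against the same driver collapse into a single entry.
	eventsMap["/flexvolume/vendor~cifs"] = probeAddOrUpdate
	eventsMap["/flexvolume/vendor~cifs"] = probeAddOrUpdate
	eventsMap["/flexvolume/vendor~nfs"] = probeRemove

	// Probe drains the map and emits one event per driver directory.
	var events []probeEvent
	for dir, op := range eventsMap {
		events = append(events, probeEvent{pluginName: dir, op: op})
		delete(eventsMap, dir)
	}
	fmt.Println(len(events), "events,", len(eventsMap), "left pending")
}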
|
||||
|
||||
|
187
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/probe_test.go
generated
vendored
@ -38,13 +38,13 @@ func TestProberExistingDriverBeforeInit(t *testing.T) {
|
||||
driverPath, _, watcher, prober := initTestEnvironment(t)
|
||||
|
||||
// Act
|
||||
updated, plugins, err := prober.Probe()
|
||||
events, err := prober.Probe()
|
||||
|
||||
// Assert
|
||||
// Probe occurs, 1 plugin should be returned, and 2 watches (pluginDir and all its
|
||||
// current subdirectories) registered.
|
||||
assert.True(t, updated)
|
||||
assert.Equal(t, 1, len(plugins))
|
||||
assert.Equal(t, 1, len(events))
|
||||
assert.Equal(t, volume.ProbeAddOrUpdate, events[0].Op)
|
||||
assert.Equal(t, pluginDir, watcher.watches[0])
|
||||
assert.Equal(t, driverPath, watcher.watches[1])
|
||||
assert.NoError(t, err)
|
||||
@ -52,67 +52,120 @@ func TestProberExistingDriverBeforeInit(t *testing.T) {
|
||||
// Should no longer probe.
|
||||
|
||||
// Act
|
||||
updated, plugins, err = prober.Probe()
|
||||
events, err = prober.Probe()
|
||||
// Assert
|
||||
assert.False(t, updated)
|
||||
assert.Equal(t, 0, len(plugins))
|
||||
assert.Equal(t, 0, len(events))
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
// Probes newly added drivers after prober is running.
|
||||
func TestProberAddDriver(t *testing.T) {
|
||||
func TestProberAddRemoveDriver(t *testing.T) {
|
||||
// Arrange
|
||||
_, fs, watcher, prober := initTestEnvironment(t)
|
||||
prober.Probe()
|
||||
updated, _, _ := prober.Probe()
|
||||
assert.False(t, updated)
|
||||
events, err := prober.Probe()
|
||||
assert.Equal(t, 0, len(events))
|
||||
|
||||
// Call probe after a file is added. Should return true.
|
||||
// Call probe after a file is added. Should return 1 event.
|
||||
|
||||
// Arrange
|
||||
// add driver
|
||||
const driverName2 = "fake-driver2"
|
||||
driverPath := path.Join(pluginDir, driverName2)
|
||||
executablePath := path.Join(driverPath, driverName2)
|
||||
installDriver(driverName2, fs)
|
||||
watcher.TriggerEvent(fsnotify.Create, driverPath)
|
||||
watcher.TriggerEvent(fsnotify.Create, path.Join(driverPath, driverName2))
|
||||
watcher.TriggerEvent(fsnotify.Create, executablePath)
|
||||
|
||||
// Act
|
||||
updated, plugins, err := prober.Probe()
|
||||
events, err = prober.Probe()
|
||||
|
||||
// Assert
|
||||
assert.True(t, updated)
|
||||
assert.Equal(t, 2, len(plugins)) // 1 existing, 1 newly added
|
||||
assert.Equal(t, 1, len(events))
|
||||
assert.Equal(t, volume.ProbeAddOrUpdate, events[0].Op) // 1 newly added
|
||||
assert.Equal(t, driverPath, watcher.watches[len(watcher.watches)-1]) // Checks most recent watch
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Call probe again, should return false.
|
||||
// Call probe again, should return 0 event.
|
||||
|
||||
// Act
|
||||
updated, _, err = prober.Probe()
|
||||
events, err = prober.Probe()
|
||||
// Assert
|
||||
assert.False(t, updated)
|
||||
assert.Equal(t, 0, len(events))
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Call probe after a non-driver file is added in a subdirectory. Should return true.
|
||||
|
||||
// Arrange
|
||||
// Call probe after a non-driver file is added in a subdirectory. should return 1 event.
|
||||
fp := path.Join(driverPath, "dummyfile")
|
||||
fs.Create(fp)
|
||||
watcher.TriggerEvent(fsnotify.Create, fp)
|
||||
|
||||
// Act
|
||||
updated, plugins, err = prober.Probe()
|
||||
events, err = prober.Probe()
|
||||
|
||||
// Assert
|
||||
assert.True(t, updated)
|
||||
assert.Equal(t, 2, len(plugins)) // Number of plugins should not change.
|
||||
assert.Equal(t, 1, len(events))
|
||||
assert.Equal(t, volume.ProbeAddOrUpdate, events[0].Op)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Call probe again, should return false.
|
||||
// Call probe again, should return 0 event.
|
||||
// Act
|
||||
updated, _, err = prober.Probe()
|
||||
events, err = prober.Probe()
|
||||
// Assert
|
||||
assert.False(t, updated)
|
||||
assert.Equal(t, 0, len(events))
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Call probe after a subdirectory is added in a driver directory. should return 1 event.
|
||||
subdirPath := path.Join(driverPath, "subdir")
|
||||
fs.Create(subdirPath)
|
||||
watcher.TriggerEvent(fsnotify.Create, subdirPath)
|
||||
|
||||
// Act
|
||||
events, err = prober.Probe()
|
||||
|
||||
// Assert
|
||||
assert.Equal(t, 1, len(events))
|
||||
assert.Equal(t, volume.ProbeAddOrUpdate, events[0].Op)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Call probe again, should return 0 event.
|
||||
// Act
|
||||
events, err = prober.Probe()
|
||||
// Assert
|
||||
assert.Equal(t, 0, len(events))
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Call probe after a subdirectory is removed in a driver directory. should return 1 event.
|
||||
fs.Remove(subdirPath)
|
||||
watcher.TriggerEvent(fsnotify.Remove, subdirPath)
|
||||
|
||||
// Act
|
||||
events, err = prober.Probe()
|
||||
|
||||
// Assert
|
||||
assert.Equal(t, 1, len(events))
|
||||
assert.Equal(t, volume.ProbeAddOrUpdate, events[0].Op)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Call probe again, should return 0 event.
|
||||
// Act
|
||||
events, err = prober.Probe()
|
||||
// Assert
|
||||
assert.Equal(t, 0, len(events))
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Call probe after a driver executable and driver directory is remove. should return 1 event.
|
||||
fs.Remove(executablePath)
|
||||
fs.Remove(driverPath)
|
||||
watcher.TriggerEvent(fsnotify.Remove, executablePath)
|
||||
watcher.TriggerEvent(fsnotify.Remove, driverPath)
|
||||
// Act and Assert: 1 ProbeRemove event
|
||||
events, err = prober.Probe()
|
||||
assert.Equal(t, 1, len(events))
|
||||
assert.Equal(t, volume.ProbeRemove, events[0].Op)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Act and Assert: 0 event
|
||||
events, err = prober.Probe()
|
||||
assert.Equal(t, 0, len(events))
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
@ -130,11 +183,10 @@ func TestEmptyPluginDir(t *testing.T) {
|
||||
prober.Init()
|
||||
|
||||
// Act
|
||||
updated, plugins, err := prober.Probe()
|
||||
events, err := prober.Probe()
|
||||
|
||||
// Assert
|
||||
assert.True(t, updated)
|
||||
assert.Equal(t, 0, len(plugins))
|
||||
assert.Equal(t, 0, len(events))
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
@ -154,7 +206,37 @@ func TestRemovePluginDir(t *testing.T) {
	assert.Equal(t, pluginDir, watcher.watches[len(watcher.watches)-1])
}

// Issue multiple events and probe multiple times. Should give true, false, false...
// Issue an event to remove plugindir. New directory should still be watched.
func TestNestedDriverDir(t *testing.T) {
	// Arrange
	_, fs, watcher, _ := initTestEnvironment(t)
	// Assert
	assert.Equal(t, 2, len(watcher.watches)) // 2 from initial setup

	// test add testDriverName
	testDriverName := "testDriverName"
	testDriverPath := path.Join(pluginDir, testDriverName)
	fs.MkdirAll(testDriverPath, 0666)
	watcher.TriggerEvent(fsnotify.Create, testDriverPath)
	// Assert
	assert.Equal(t, 3, len(watcher.watches)) // 2 from initial setup, 1 from new watch.
	assert.Equal(t, testDriverPath, watcher.watches[len(watcher.watches)-1])

	// test add nested subdir inside testDriverName
	basePath := testDriverPath
	for i := 0; i < 10; i++ {
		subdirName := "subdirName"
		subdirPath := path.Join(basePath, subdirName)
		fs.MkdirAll(subdirPath, 0666)
		watcher.TriggerEvent(fsnotify.Create, subdirPath)
		// Assert
		assert.Equal(t, 4+i, len(watcher.watches)) // 3 + newly added
		assert.Equal(t, subdirPath, watcher.watches[len(watcher.watches)-1])
		basePath = subdirPath
	}
}

// Issue multiple events and probe multiple times.
func TestProberMultipleEvents(t *testing.T) {
	const iterations = 5

@ -169,49 +251,20 @@ func TestProberMultipleEvents(t *testing.T) {
	}

	// Act
	updated, _, err := prober.Probe()
	events, err := prober.Probe()

	// Assert
	assert.True(t, updated)
	assert.Equal(t, 2, len(events))
	assert.Equal(t, volume.ProbeAddOrUpdate, events[0].Op)
	assert.Equal(t, volume.ProbeAddOrUpdate, events[1].Op)
	assert.NoError(t, err)
	for i := 0; i < iterations-1; i++ {
		updated, _, err = prober.Probe()
		assert.False(t, updated)
		events, err = prober.Probe()
		assert.Equal(t, 0, len(events))
		assert.NoError(t, err)
	}
}

// When many events are triggered quickly in succession, events should stop triggering a probe update
// after a certain limit.
func TestProberRateLimit(t *testing.T) {
	// Arrange
	driverPath, _, watcher, prober := initTestEnvironment(t)
	for i := 0; i < watchEventLimit; i++ {
		watcher.TriggerEvent(fsnotify.Write, path.Join(driverPath, driverName))
	}

	// Act
	updated, plugins, err := prober.Probe()

	// Assert
	// The probe results should be no different from what they would be if none of
	// the events had been triggered.
	assert.True(t, updated)
	assert.Equal(t, 1, len(plugins))
	assert.NoError(t, err)

	// Arrange
	watcher.TriggerEvent(fsnotify.Write, path.Join(driverPath, driverName))

	// Act
	updated, _, err = prober.Probe()

	// Assert
	// The last event is outside the event limit. Should not trigger a probe.
	assert.False(t, updated)
	assert.NoError(t, err)
}

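One way to obtain the behaviour this test asserts is a simple per-window event budget. The sketch below is an assumption about the mechanism (watchEventLimit is the only name taken from the test), not the actual prober implementation.

package main

import "fmt"

// eventBudget is a hypothetical counter: events up to the limit still force a
// probe update; anything beyond the limit in the same window is dropped.
type eventBudget struct {
	limit int
	seen  int
}

func (b *eventBudget) record()           { b.seen++ }
func (b *eventBudget) withinLimit() bool { return b.seen <= b.limit }

func main() {
	const watchEventLimit = 5 // value is illustrative
	b := eventBudget{limit: watchEventLimit}
	for i := 0; i < watchEventLimit; i++ {
		b.record()
	}
	fmt.Println(b.withinLimit()) // true: the first Probe still sees an update
	b.record()
	fmt.Println(b.withinLimit()) // false: the extra event no longer triggers one
}
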
func TestProberError(t *testing.T) {
	fs := utilfs.NewFakeFs()
	watcher := NewFakeWatcher()
@ -224,7 +277,7 @@ func TestProberError(t *testing.T) {
	installDriver(driverName, fs)
	prober.Init()

	_, _, err := prober.Probe()
	_, err := prober.Probe()
	assert.Error(t, err)
}

6
vendor/k8s.io/kubernetes/pkg/volume/flocker/flocker_volume.go
generated
vendored
@ -54,7 +54,7 @@ type flockerVolumeProvisioner struct {

var _ volume.Provisioner = &flockerVolumeProvisioner{}

func (c *flockerVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
func (c *flockerVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
	if !util.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
		return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
	}
@ -67,6 +67,10 @@ func (c *flockerVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
		return nil, fmt.Errorf("Provisioning failed: Specified unsupported selector")
	}

	if util.CheckPersistentVolumeClaimModeBlock(c.options.PVC) {
		return nil, fmt.Errorf("%s does not support block volume provisioning", c.plugin.GetPluginName())
	}

	datasetUUID, sizeGB, labels, err := c.manager.CreateVolume(c)
	if err != nil {
		return nil, err

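The interface change above is mechanical for plugins without topology-aware provisioning. A minimal stand-alone sketch of the new signature follows; the type name and error text are illustrative, not part of the flocker plugin. Plugins that ignore both new parameters are exactly why the updated tests call Provision(nil, nil).

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
)

// fakeProvisioner is a made-up stand-in showing the updated method signature.
type fakeProvisioner struct{}

// Provision now receives the node selected for delayed binding and the allowed
// topologies from the StorageClass; a non-topology-aware plugin ignores them.
func (p *fakeProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
	return nil, fmt.Errorf("provisioning not implemented in this sketch")
}

func main() {
	_, err := (&fakeProvisioner{}).Provision(nil, nil)
	fmt.Println(err)
}
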
6
vendor/k8s.io/kubernetes/pkg/volume/flocker/flocker_volume_test.go
generated
vendored
@ -57,7 +57,7 @@ func TestProvision(t *testing.T) {
	dir, provisioner := newTestableProvisioner(assert, options)
	defer os.RemoveAll(dir)

	persistentSpec, err := provisioner.Provision()
	persistentSpec, err := provisioner.Provision(nil, nil)
	assert.NoError(err, "Provision() failed: ", err)

	cap := persistentSpec.Spec.Capacity[v1.ResourceStorage]
@ -85,7 +85,7 @@ func TestProvision(t *testing.T) {

	dir, provisioner = newTestableProvisioner(assert, options)
	defer os.RemoveAll(dir)
	persistentSpec, err = provisioner.Provision()
	persistentSpec, err = provisioner.Provision(nil, nil)
	assert.Error(err, "Provision() did not fail with Parameters specified")

	// selectors are not supported
@ -97,6 +97,6 @@ func TestProvision(t *testing.T) {

	dir, provisioner = newTestableProvisioner(assert, options)
	defer os.RemoveAll(dir)
	persistentSpec, err = provisioner.Provision()
	persistentSpec, err = provisioner.Provision(nil, nil)
	assert.Error(err, "Provision() did not fail with Selector specified")
}

76
vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_pd.go
generated
vendored
@ -17,16 +17,21 @@ limitations under the License.
package gce_pd

import (
	"context"
	"fmt"
	"os"
	"path"
	"strconv"
	"strings"

	"github.com/golang/glog"
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
	"k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/pkg/util/mount"
	kstrings "k8s.io/kubernetes/pkg/util/strings"
	"k8s.io/kubernetes/pkg/volume"
@ -47,11 +52,24 @@ var _ volume.PersistentVolumePlugin = &gcePersistentDiskPlugin{}
var _ volume.DeletableVolumePlugin = &gcePersistentDiskPlugin{}
var _ volume.ProvisionableVolumePlugin = &gcePersistentDiskPlugin{}
var _ volume.ExpandableVolumePlugin = &gcePersistentDiskPlugin{}
var _ volume.VolumePluginWithAttachLimits = &gcePersistentDiskPlugin{}

const (
	gcePersistentDiskPluginName = "kubernetes.io/gce-pd"
)

// The constants are used to map from the machine type (number of CPUs) to the limit of
// persistent disks that can be attached to an instance. Please refer to gcloud doc
// https://cloud.google.com/compute/docs/disks/#increased_persistent_disk_limits
const (
	OneCPU         = 1
	EightCPUs      = 8
	VolumeLimit16  = 16
	VolumeLimit32  = 32
	VolumeLimit64  = 64
	VolumeLimit128 = 128
)

func getPath(uid types.UID, volName string, host volume.VolumeHost) string {
	return host.GetPodVolumeDir(uid, kstrings.EscapeQualifiedNameForDisk(gcePersistentDiskPluginName), volName)
}
@ -98,6 +116,58 @@ func (plugin *gcePersistentDiskPlugin) GetAccessModes() []v1.PersistentVolumeAcc
	}
}

func (plugin *gcePersistentDiskPlugin) GetVolumeLimits() (map[string]int64, error) {
	volumeLimits := map[string]int64{
		util.GCEVolumeLimitKey: VolumeLimit16,
	}
	cloud := plugin.host.GetCloudProvider()

	// If the cloud provider cannot be fetched, return an error and hope that an
	// external CCM or the admin sets the limit. Returning default values from
	// here would mean nobody could override them.
	if cloud == nil {
		return nil, fmt.Errorf("No cloudprovider present")
	}

	if cloud.ProviderName() != gcecloud.ProviderName {
		return nil, fmt.Errorf("Expected gce cloud got %s", cloud.ProviderName())
	}

	instances, ok := cloud.Instances()
	if !ok {
		glog.Warning("Failed to get instances from cloud provider")
		return volumeLimits, nil
	}

	instanceType, err := instances.InstanceType(context.TODO(), plugin.host.GetNodeName())
	if err != nil {
		glog.Errorf("Failed to get instance type from GCE cloud provider")
		return volumeLimits, nil
	}
	if strings.HasPrefix(instanceType, "n1-") {
		splits := strings.Split(instanceType, "-")
		if len(splits) < 3 {
			return volumeLimits, nil
		}
		last := splits[2]
		if num, err := strconv.Atoi(last); err == nil {
			if num == OneCPU {
				volumeLimits[util.GCEVolumeLimitKey] = VolumeLimit32
			} else if num < EightCPUs {
				volumeLimits[util.GCEVolumeLimitKey] = VolumeLimit64
			} else {
				volumeLimits[util.GCEVolumeLimitKey] = VolumeLimit128
			}
		}
	}
	return volumeLimits, nil
}

func (plugin *gcePersistentDiskPlugin) VolumeLimitKey(spec *volume.Spec) string {
	return util.GCEVolumeLimitKey
}

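A quick worked example of the mapping implemented above, as a stand-alone sketch. The helper below re-implements the parsing for illustration only and is not part of the plugin; the 16/32/64/128 values correspond to the VolumeLimit constants in this file.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// limitFor mirrors the "n1-<family>-<cpus>" parsing in GetVolumeLimits above.
func limitFor(instanceType string) int64 {
	limit := int64(16) // default: VolumeLimit16
	if strings.HasPrefix(instanceType, "n1-") {
		splits := strings.Split(instanceType, "-")
		if len(splits) < 3 {
			return limit
		}
		if num, err := strconv.Atoi(splits[2]); err == nil {
			switch {
			case num == 1:
				limit = 32 // VolumeLimit32
			case num < 8:
				limit = 64 // VolumeLimit64
			default:
				limit = 128 // VolumeLimit128
			}
		}
	}
	return limit
}

func main() {
	// n1-standard-1 -> 32, n1-standard-4 -> 64, n1-highmem-16 -> 128,
	// anything unrecognized (e.g. custom machine types) keeps the default of 16.
	for _, mt := range []string{"n1-standard-1", "n1-standard-4", "n1-highmem-16", "custom-2-4096"} {
		fmt.Printf("%s -> %d disks\n", mt, limitFor(mt))
	}
}
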
func (plugin *gcePersistentDiskPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
	// Inject real implementations here, test through the internal function.
	return plugin.newMounterInternal(spec, pod.UID, &GCEDiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
@ -396,7 +466,7 @@ type gcePersistentDiskProvisioner struct {

var _ volume.Provisioner = &gcePersistentDiskProvisioner{}

func (c *gcePersistentDiskProvisioner) Provision() (*v1.PersistentVolume, error) {
func (c *gcePersistentDiskProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
	if !util.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
		return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
	}
@ -448,5 +518,9 @@ func (c *gcePersistentDiskProvisioner) Provision() (*v1.PersistentVolume, error)
		}
	}

	if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
		pv.Spec.VolumeMode = c.options.PVC.Spec.VolumeMode
	}

	return pv, nil
}

5
vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_pd_block.go
generated
vendored
5
vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_pd_block.go
generated
vendored
@ -28,6 +28,7 @@ import (
|
||||
"k8s.io/kubernetes/pkg/util/mount"
|
||||
kstrings "k8s.io/kubernetes/pkg/util/strings"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
"k8s.io/kubernetes/pkg/volume/util"
|
||||
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
|
||||
)
|
||||
|
||||
@ -151,6 +152,10 @@ func (b *gcePersistentDiskMapper) SetUpDevice() (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (b *gcePersistentDiskMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, volumeMapName string, podUID types.UID) error {
|
||||
return util.MapBlockVolume(devicePath, globalMapPath, volumeMapPath, volumeMapName, podUID)
|
||||
}
|
||||
|
||||
// GetGlobalMapPath returns global map path and error
|
||||
// path: plugins/kubernetes.io/{PluginName}/volumeDevices/pdName
|
||||
func (pd *gcePersistentDisk) GetGlobalMapPath(spec *volume.Spec) (string, error) {
|
||||
|
2
vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_pd_test.go
generated
vendored
2
vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_pd_test.go
generated
vendored
@ -167,7 +167,7 @@ func TestPlugin(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Errorf("Error creating new provisioner:%v", err)
|
||||
}
|
||||
persistentSpec, err := provisioner.Provision()
|
||||
persistentSpec, err := provisioner.Provision(nil, nil)
|
||||
if err != nil {
|
||||
t.Errorf("Provision() failed: %v", err)
|
||||
}
|
||||
|
5
vendor/k8s.io/kubernetes/pkg/volume/git_repo/BUILD
generated
vendored
5
vendor/k8s.io/kubernetes/pkg/volume/git_repo/BUILD
generated
vendored
@ -14,12 +14,12 @@ go_library(
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/pkg/volume/git_repo",
|
||||
deps = [
|
||||
"//pkg/util/mount:go_default_library",
|
||||
"//pkg/util/strings:go_default_library",
|
||||
"//pkg/volume:go_default_library",
|
||||
"//pkg/volume/util:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/utils/exec:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@ -28,13 +28,14 @@ go_test(
|
||||
srcs = ["git_repo_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//pkg/util/mount:go_default_library",
|
||||
"//pkg/volume:go_default_library",
|
||||
"//pkg/volume/empty_dir:go_default_library",
|
||||
"//pkg/volume/testing:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/utils/exec:go_default_library",
|
||||
"//vendor/k8s.io/utils/exec/testing:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
|
51
vendor/k8s.io/kubernetes/pkg/volume/git_repo/git_repo.go
generated
vendored
51
vendor/k8s.io/kubernetes/pkg/volume/git_repo/git_repo.go
generated
vendored
@ -20,14 +20,15 @@ import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/util/mount"
|
||||
utilstrings "k8s.io/kubernetes/pkg/util/strings"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
volumeutil "k8s.io/kubernetes/pkg/volume/util"
|
||||
"k8s.io/utils/exec"
|
||||
)
|
||||
|
||||
// This is the primary entrypoint for volume plugins.
|
||||
@ -90,6 +91,10 @@ func (plugin *gitRepoPlugin) SupportsBulkVolumeVerification() bool {
|
||||
}
|
||||
|
||||
func (plugin *gitRepoPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
|
||||
if err := validateVolume(spec.Volume.GitRepo); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &gitRepoVolumeMounter{
|
||||
gitRepoVolume: &gitRepoVolume{
|
||||
volName: spec.Name(),
|
||||
@ -100,8 +105,7 @@ func (plugin *gitRepoPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, opts vol
|
||||
source: spec.Volume.GitRepo.Repository,
|
||||
revision: spec.Volume.GitRepo.Revision,
|
||||
target: spec.Volume.GitRepo.Directory,
|
||||
mounter: plugin.host.GetMounter(plugin.GetPluginName()),
|
||||
exec: plugin.host.GetExec(plugin.GetPluginName()),
|
||||
exec: exec.New(),
|
||||
opts: opts,
|
||||
}, nil
|
||||
}
|
||||
@ -150,8 +154,7 @@ type gitRepoVolumeMounter struct {
|
||||
source string
|
||||
revision string
|
||||
target string
|
||||
mounter mount.Interface
|
||||
exec mount.Exec
|
||||
exec exec.Interface
|
||||
opts volume.VolumeOptions
|
||||
}
|
||||
|
||||
@ -192,12 +195,12 @@ func (b *gitRepoVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
|
||||
return err
|
||||
}
|
||||
|
||||
args := []string{"clone", b.source}
|
||||
args := []string{"clone", "--", b.source}
|
||||
|
||||
if len(b.target) != 0 {
|
||||
args = append(args, b.target)
|
||||
}
|
||||
if output, err := b.execGit(args, dir); err != nil {
|
||||
if output, err := b.execCommand("git", args, dir); err != nil {
|
||||
return fmt.Errorf("failed to exec 'git %s': %s: %v",
|
||||
strings.Join(args, " "), output, err)
|
||||
}
|
||||
@ -216,7 +219,7 @@ func (b *gitRepoVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
|
||||
var subdir string
|
||||
|
||||
switch {
|
||||
case b.target == ".":
|
||||
case len(b.target) != 0 && filepath.Clean(b.target) == ".":
|
||||
// if target dir is '.', use the current dir
|
||||
subdir = path.Join(dir)
|
||||
case len(files) == 1:
|
||||
@ -227,10 +230,10 @@ func (b *gitRepoVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
|
||||
return fmt.Errorf("unexpected directory contents: %v", files)
|
||||
}
|
||||
|
||||
if output, err := b.execGit([]string{"checkout", b.revision}, subdir); err != nil {
|
||||
if output, err := b.execCommand("git", []string{"checkout", b.revision}, subdir); err != nil {
|
||||
return fmt.Errorf("failed to exec 'git checkout %s': %s: %v", b.revision, output, err)
|
||||
}
|
||||
if output, err := b.execGit([]string{"reset", "--hard"}, subdir); err != nil {
|
||||
if output, err := b.execCommand("git", []string{"reset", "--hard"}, subdir); err != nil {
|
||||
return fmt.Errorf("failed to exec 'git reset --hard': %s: %v", output, err)
|
||||
}
|
||||
|
||||
@ -244,10 +247,23 @@ func (b *gitRepoVolumeMounter) getMetaDir() string {
|
||||
return path.Join(b.plugin.host.GetPodPluginDir(b.podUID, utilstrings.EscapeQualifiedNameForDisk(gitRepoPluginName)), b.volName)
|
||||
}
|
||||
|
||||
func (b *gitRepoVolumeMounter) execGit(args []string, dir string) ([]byte, error) {
|
||||
// run git -C <dir> <args>
|
||||
fullArgs := append([]string{"-C", dir}, args...)
|
||||
return b.exec.Run("git", fullArgs...)
|
||||
func (b *gitRepoVolumeMounter) execCommand(command string, args []string, dir string) ([]byte, error) {
|
||||
cmd := b.exec.Command(command, args...)
|
||||
cmd.SetDir(dir)
|
||||
return cmd.CombinedOutput()
|
||||
}
|
||||
|
||||
func validateVolume(src *v1.GitRepoVolumeSource) error {
|
||||
if err := validateNonFlagArgument(src.Repository, "repository"); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := validateNonFlagArgument(src.Revision, "revision"); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := validateNonFlagArgument(src.Directory, "directory"); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// gitRepoVolumeUnmounter cleans git repo volumes.
|
||||
@ -278,3 +294,10 @@ func getVolumeSource(spec *volume.Spec) (*v1.GitRepoVolumeSource, bool) {
|
||||
|
||||
return volumeSource, readOnly
|
||||
}
|
||||
|
||||
func validateNonFlagArgument(arg, argName string) error {
	if len(arg) > 0 && arg[0] == '-' {
		return fmt.Errorf("%q is an invalid value for %s", arg, argName)
	}
	return nil
}

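Together with the "--" separator now passed to git clone, this check prevents user-supplied repository, revision, or directory values from being interpreted as git options. A self-contained sketch of the same guard follows; looksLikeFlag and the sample values are illustrative only.

package main

import "fmt"

// looksLikeFlag mirrors the check in validateNonFlagArgument above.
func looksLikeFlag(arg string) bool {
	return len(arg) > 0 && arg[0] == '-'
}

func main() {
	for _, repo := range []string{
		"https://github.com/kubernetes/kubernetes.git", // accepted
		"--upload-pack=/bin/true",                      // rejected: would otherwise be parsed as a git option
	} {
		if looksLikeFlag(repo) {
			fmt.Printf("reject %q: must not start with '-'\n", repo)
			continue
		}
		fmt.Printf("accept %q\n", repo)
	}
}
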
248
vendor/k8s.io/kubernetes/pkg/volume/git_repo/git_repo_test.go
generated
vendored
248
vendor/k8s.io/kubernetes/pkg/volume/git_repo/git_repo_test.go
generated
vendored
@ -28,16 +28,11 @@ import (
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/util/mount"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
"k8s.io/kubernetes/pkg/volume/empty_dir"
|
||||
volumetest "k8s.io/kubernetes/pkg/volume/testing"
|
||||
)
|
||||
|
||||
const (
|
||||
gitUrl = "https://github.com/kubernetes/kubernetes.git"
|
||||
revision = "2a30ce65c5ab586b98916d83385c5983edd353a1"
|
||||
gitRepositoryName = "kubernetes"
|
||||
"k8s.io/utils/exec"
|
||||
fakeexec "k8s.io/utils/exec/testing"
|
||||
)
|
||||
|
||||
func newTestHost(t *testing.T) (string, volume.VolumeHost) {
|
||||
@ -67,18 +62,23 @@ func TestCanSupport(t *testing.T) {
|
||||
}
|
||||
|
||||
// Expected command
|
||||
type expectedCommand []string
|
||||
|
||||
type testScenario struct {
|
||||
name string
|
||||
vol *v1.Volume
|
||||
repositoryDir string
|
||||
expecteds []expectedCommand
|
||||
isExpectedFailure bool
|
||||
type expectedCommand struct {
|
||||
// The git command
|
||||
cmd []string
|
||||
// The dir of git command is executed
|
||||
dir string
|
||||
}
|
||||
|
||||
func TestPlugin(t *testing.T) {
|
||||
scenarios := []testScenario{
|
||||
gitUrl := "https://github.com/kubernetes/kubernetes.git"
|
||||
revision := "2a30ce65c5ab586b98916d83385c5983edd353a1"
|
||||
|
||||
scenarios := []struct {
|
||||
name string
|
||||
vol *v1.Volume
|
||||
expecteds []expectedCommand
|
||||
isExpectedFailure bool
|
||||
}{
|
||||
{
|
||||
name: "target-dir",
|
||||
vol: &v1.Volume{
|
||||
@ -91,11 +91,19 @@ func TestPlugin(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
repositoryDir: "target_dir",
|
||||
expecteds: []expectedCommand{
|
||||
[]string{"git", "-C", "volume-dir", "clone", gitUrl, "target_dir"},
|
||||
[]string{"git", "-C", "volume-dir/target_dir", "checkout", revision},
|
||||
[]string{"git", "-C", "volume-dir/target_dir", "reset", "--hard"},
|
||||
{
|
||||
cmd: []string{"git", "clone", "--", gitUrl, "target_dir"},
|
||||
dir: "",
|
||||
},
|
||||
{
|
||||
cmd: []string{"git", "checkout", revision},
|
||||
dir: "/target_dir",
|
||||
},
|
||||
{
|
||||
cmd: []string{"git", "reset", "--hard"},
|
||||
dir: "/target_dir",
|
||||
},
|
||||
},
|
||||
isExpectedFailure: false,
|
||||
},
|
||||
@ -110,9 +118,11 @@ func TestPlugin(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
repositoryDir: "target_dir",
|
||||
expecteds: []expectedCommand{
|
||||
[]string{"git", "-C", "volume-dir", "clone", gitUrl, "target_dir"},
|
||||
{
|
||||
cmd: []string{"git", "clone", "--", gitUrl, "target_dir"},
|
||||
dir: "",
|
||||
},
|
||||
},
|
||||
isExpectedFailure: false,
|
||||
},
|
||||
@ -126,9 +136,11 @@ func TestPlugin(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
repositoryDir: "kubernetes",
|
||||
expecteds: []expectedCommand{
|
||||
[]string{"git", "-C", "volume-dir", "clone", gitUrl},
|
||||
{
|
||||
cmd: []string{"git", "clone", "--", gitUrl},
|
||||
dir: "",
|
||||
},
|
||||
},
|
||||
isExpectedFailure: false,
|
||||
},
|
||||
@ -144,11 +156,19 @@ func TestPlugin(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
repositoryDir: "kubernetes",
|
||||
expecteds: []expectedCommand{
|
||||
[]string{"git", "-C", "volume-dir", "clone", gitUrl},
|
||||
[]string{"git", "-C", "volume-dir/kubernetes", "checkout", revision},
|
||||
[]string{"git", "-C", "volume-dir/kubernetes", "reset", "--hard"},
|
||||
{
|
||||
cmd: []string{"git", "clone", "--", gitUrl},
|
||||
dir: "",
|
||||
},
|
||||
{
|
||||
cmd: []string{"git", "checkout", revision},
|
||||
dir: "/kubernetes",
|
||||
},
|
||||
{
|
||||
cmd: []string{"git", "reset", "--hard"},
|
||||
dir: "/kubernetes",
|
||||
},
|
||||
},
|
||||
isExpectedFailure: false,
|
||||
},
|
||||
@ -164,14 +184,88 @@ func TestPlugin(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
repositoryDir: "",
|
||||
expecteds: []expectedCommand{
|
||||
[]string{"git", "-C", "volume-dir", "clone", gitUrl, "."},
|
||||
[]string{"git", "-C", "volume-dir", "checkout", revision},
|
||||
[]string{"git", "-C", "volume-dir", "reset", "--hard"},
|
||||
{
|
||||
cmd: []string{"git", "clone", "--", gitUrl, "."},
|
||||
dir: "",
|
||||
},
|
||||
{
|
||||
cmd: []string{"git", "checkout", revision},
|
||||
dir: "",
|
||||
},
|
||||
{
|
||||
cmd: []string{"git", "reset", "--hard"},
|
||||
dir: "",
|
||||
},
|
||||
},
|
||||
isExpectedFailure: false,
|
||||
},
|
||||
{
|
||||
name: "current-dir-mess",
|
||||
vol: &v1.Volume{
|
||||
Name: "vol1",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
GitRepo: &v1.GitRepoVolumeSource{
|
||||
Repository: gitUrl,
|
||||
Revision: revision,
|
||||
Directory: "./.",
|
||||
},
|
||||
},
|
||||
},
|
||||
expecteds: []expectedCommand{
|
||||
{
|
||||
cmd: []string{"git", "clone", "--", gitUrl, "./."},
|
||||
dir: "",
|
||||
},
|
||||
{
|
||||
cmd: []string{"git", "checkout", revision},
|
||||
dir: "",
|
||||
},
|
||||
{
|
||||
cmd: []string{"git", "reset", "--hard"},
|
||||
dir: "",
|
||||
},
|
||||
},
|
||||
isExpectedFailure: false,
|
||||
},
|
||||
{
|
||||
name: "invalid-repository",
|
||||
vol: &v1.Volume{
|
||||
Name: "vol1",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
GitRepo: &v1.GitRepoVolumeSource{
|
||||
Repository: "--foo",
|
||||
},
|
||||
},
|
||||
},
|
||||
isExpectedFailure: true,
|
||||
},
|
||||
{
|
||||
name: "invalid-revision",
|
||||
vol: &v1.Volume{
|
||||
Name: "vol1",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
GitRepo: &v1.GitRepoVolumeSource{
|
||||
Repository: gitUrl,
|
||||
Revision: "--bar",
|
||||
},
|
||||
},
|
||||
},
|
||||
isExpectedFailure: true,
|
||||
},
|
||||
{
|
||||
name: "invalid-directory",
|
||||
vol: &v1.Volume{
|
||||
Name: "vol1",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
GitRepo: &v1.GitRepoVolumeSource{
|
||||
Repository: gitUrl,
|
||||
Directory: "-b",
|
||||
},
|
||||
},
|
||||
},
|
||||
isExpectedFailure: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, scenario := range scenarios {
|
||||
@ -186,7 +280,12 @@ func TestPlugin(t *testing.T) {
|
||||
|
||||
}
|
||||
|
||||
func doTestPlugin(scenario testScenario, t *testing.T) []error {
|
||||
func doTestPlugin(scenario struct {
|
||||
name string
|
||||
vol *v1.Volume
|
||||
expecteds []expectedCommand
|
||||
isExpectedFailure bool
|
||||
}, t *testing.T) []error {
|
||||
allErrs := []error{}
|
||||
|
||||
plugMgr := volume.VolumePluginMgr{}
|
||||
@ -278,42 +377,73 @@ func doTestPlugin(scenario testScenario, t *testing.T) []error {
|
||||
return allErrs
|
||||
}
|
||||
|
||||
func doTestSetUp(scenario testScenario, mounter volume.Mounter) []error {
|
||||
func doTestSetUp(scenario struct {
|
||||
name string
|
||||
vol *v1.Volume
|
||||
expecteds []expectedCommand
|
||||
isExpectedFailure bool
|
||||
}, mounter volume.Mounter) []error {
|
||||
expecteds := scenario.expecteds
|
||||
allErrs := []error{}
|
||||
|
||||
var commandLog []expectedCommand
|
||||
execCallback := func(cmd string, args ...string) ([]byte, error) {
|
||||
if len(args) < 2 {
|
||||
return nil, fmt.Errorf("expected at least 2 arguments, got %q", args)
|
||||
// Construct combined outputs from expected commands
|
||||
var fakeOutputs []fakeexec.FakeCombinedOutputAction
|
||||
var fcmd fakeexec.FakeCmd
|
||||
for _, expected := range expecteds {
|
||||
if expected.cmd[1] == "clone" {
|
||||
fakeOutputs = append(fakeOutputs, func() ([]byte, error) {
|
||||
// git clone, it creates new dir/files
|
||||
os.MkdirAll(path.Join(fcmd.Dirs[0], expected.dir), 0750)
|
||||
return []byte{}, nil
|
||||
})
|
||||
} else {
|
||||
// git checkout || git reset, they create nothing
|
||||
fakeOutputs = append(fakeOutputs, func() ([]byte, error) {
|
||||
return []byte{}, nil
|
||||
})
|
||||
}
|
||||
if args[0] != "-C" {
|
||||
return nil, fmt.Errorf("expected the first argument to be \"-C\", got %q", args[0])
|
||||
}
|
||||
// command is 'git -C <dir> <command> <args>
|
||||
gitDir := args[1]
|
||||
gitCommand := args[2]
|
||||
if gitCommand == "clone" {
|
||||
// Clone creates a directory
|
||||
if scenario.repositoryDir != "" {
|
||||
os.MkdirAll(path.Join(gitDir, scenario.repositoryDir), 0750)
|
||||
}
|
||||
}
|
||||
// add the command to log with de-randomized gitDir
|
||||
args[1] = strings.Replace(gitDir, mounter.GetPath(), "volume-dir", 1)
|
||||
cmdline := append([]string{cmd}, args...)
|
||||
commandLog = append(commandLog, cmdline)
|
||||
return []byte{}, nil
|
||||
}
|
||||
fcmd = fakeexec.FakeCmd{
|
||||
CombinedOutputScript: fakeOutputs,
|
||||
}
|
||||
|
||||
// Construct fake exec outputs from fcmd
|
||||
var fakeAction []fakeexec.FakeCommandAction
|
||||
for i := 0; i < len(expecteds); i++ {
|
||||
fakeAction = append(fakeAction, func(cmd string, args ...string) exec.Cmd {
|
||||
return fakeexec.InitFakeCmd(&fcmd, cmd, args...)
|
||||
})
|
||||
|
||||
}
|
||||
fake := fakeexec.FakeExec{
|
||||
CommandScript: fakeAction,
|
||||
}
|
||||
|
||||
g := mounter.(*gitRepoVolumeMounter)
|
||||
g.mounter = &mount.FakeMounter{}
|
||||
g.exec = mount.NewFakeExec(execCallback)
|
||||
g.exec = &fake
|
||||
|
||||
g.SetUp(nil)
|
||||
|
||||
if !reflect.DeepEqual(expecteds, commandLog) {
|
||||
if fake.CommandCalls != len(expecteds) {
|
||||
allErrs = append(allErrs,
|
||||
fmt.Errorf("unexpected commands: %v, expected: %v", commandLog, expecteds))
|
||||
fmt.Errorf("unexpected command calls in scenario: expected %d, saw: %d", len(expecteds), fake.CommandCalls))
|
||||
}
|
||||
var expectedCmds [][]string
|
||||
for _, expected := range expecteds {
|
||||
expectedCmds = append(expectedCmds, expected.cmd)
|
||||
}
|
||||
if !reflect.DeepEqual(expectedCmds, fcmd.CombinedOutputLog) {
|
||||
allErrs = append(allErrs,
|
||||
fmt.Errorf("unexpected commands: %v, expected: %v", fcmd.CombinedOutputLog, expectedCmds))
|
||||
}
|
||||
|
||||
var expectedPaths []string
|
||||
for _, expected := range expecteds {
|
||||
expectedPaths = append(expectedPaths, g.GetPath()+expected.dir)
|
||||
}
|
||||
if len(fcmd.Dirs) != len(expectedPaths) || !reflect.DeepEqual(expectedPaths, fcmd.Dirs) {
|
||||
allErrs = append(allErrs,
|
||||
fmt.Errorf("unexpected directories: %v, expected: %v", fcmd.Dirs, expectedPaths))
|
||||
}
|
||||
|
||||
return allErrs
|
||||
|
70
vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs.go
generated
vendored
70
vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs.go
generated
vendored
@ -110,12 +110,8 @@ func (plugin *glusterfsPlugin) GetVolumeName(spec *volume.Spec) (string, error)
|
||||
}
|
||||
|
||||
func (plugin *glusterfsPlugin) CanSupport(spec *volume.Spec) bool {
|
||||
if (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.Glusterfs == nil) ||
|
||||
(spec.Volume != nil && spec.Volume.Glusterfs == nil) {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
return (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.Glusterfs != nil) ||
|
||||
(spec.Volume != nil && spec.Volume.Glusterfs != nil)
|
||||
}
|
||||
|
||||
func (plugin *glusterfsPlugin) RequiresRemount() bool {
|
||||
@ -401,18 +397,19 @@ func (plugin *glusterfsPlugin) newProvisionerInternal(options volume.VolumeOptio
|
||||
}
|
||||
|
||||
type provisionerConfig struct {
|
||||
url string
|
||||
user string
|
||||
userKey string
|
||||
secretNamespace string
|
||||
secretName string
|
||||
secretValue string
|
||||
clusterID string
|
||||
gidMin int
|
||||
gidMax int
|
||||
volumeType gapi.VolumeDurabilityInfo
|
||||
volumeOptions []string
|
||||
volumeNamePrefix string
|
||||
url string
|
||||
user string
|
||||
userKey string
|
||||
secretNamespace string
|
||||
secretName string
|
||||
secretValue string
|
||||
clusterID string
|
||||
gidMin int
|
||||
gidMax int
|
||||
volumeType gapi.VolumeDurabilityInfo
|
||||
volumeOptions []string
|
||||
volumeNamePrefix string
|
||||
thinPoolSnapFactor float32
|
||||
}
|
||||
|
||||
type glusterfsVolumeProvisioner struct {
|
||||
@ -667,7 +664,7 @@ func (d *glusterfsVolumeDeleter) Delete() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *glusterfsVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
|
||||
func (p *glusterfsVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
|
||||
if !volutil.AccessModesContainedInAll(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) {
|
||||
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", p.options.PVC.Spec.AccessModes, p.plugin.GetAccessModes())
|
||||
}
|
||||
@ -676,6 +673,11 @@ func (p *glusterfsVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
|
||||
glog.V(4).Infof("not able to parse your claim Selector")
|
||||
return nil, fmt.Errorf("not able to parse your claim Selector")
|
||||
}
|
||||
|
||||
if volutil.CheckPersistentVolumeClaimModeBlock(p.options.PVC) {
|
||||
return nil, fmt.Errorf("%s does not support block volume provisioning", p.plugin.GetPluginName())
|
||||
}
|
||||
|
||||
glog.V(4).Infof("Provision VolumeOptions %v", p.options)
|
||||
scName := v1helper.GetPersistentVolumeClaimClass(p.options.PVC)
|
||||
cfg, err := parseClassParameters(p.options.Parameters, p.plugin.host.GetKubeClient())
|
||||
@ -764,7 +766,15 @@ func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsVolum
|
||||
}
|
||||
|
||||
gid64 := int64(gid)
|
||||
volumeReq := &gapi.VolumeCreateRequest{Size: sz, Name: customVolumeName, Clusters: clusterIDs, Gid: gid64, Durability: p.volumeType, GlusterVolumeOptions: p.volumeOptions}
|
||||
snaps := struct {
|
||||
Enable bool `json:"enable"`
|
||||
Factor float32 `json:"factor"`
|
||||
}{
|
||||
true,
|
||||
p.provisionerConfig.thinPoolSnapFactor,
|
||||
}
|
||||
|
||||
volumeReq := &gapi.VolumeCreateRequest{Size: sz, Name: customVolumeName, Clusters: clusterIDs, Gid: gid64, Durability: p.volumeType, GlusterVolumeOptions: p.volumeOptions, Snapshot: snaps}
|
||||
volume, err := cli.VolumeCreate(volumeReq)
|
||||
if err != nil {
|
||||
glog.Errorf("failed to create volume: %v", err)
|
||||
@ -931,6 +941,10 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa
|
||||
parseVolumeType := ""
|
||||
parseVolumeOptions := ""
|
||||
parseVolumeNamePrefix := ""
|
||||
parseThinPoolSnapFactor := ""
|
||||
|
||||
//thin pool snap factor default to 1.0
|
||||
cfg.thinPoolSnapFactor = float32(1.0)
|
||||
|
||||
for k, v := range params {
|
||||
switch dstrings.ToLower(k) {
|
||||
@ -985,6 +999,11 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa
|
||||
if len(v) != 0 {
|
||||
parseVolumeNamePrefix = v
|
||||
}
|
||||
case "snapfactor":
|
||||
if len(v) != 0 {
|
||||
parseThinPoolSnapFactor = v
|
||||
}
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("invalid option %q for volume plugin %s", k, glusterfsPluginName)
|
||||
}
|
||||
@ -1072,6 +1091,17 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa
|
||||
}
|
||||
cfg.volumeNamePrefix = parseVolumeNamePrefix
|
||||
}
|
||||
|
||||
if len(parseThinPoolSnapFactor) != 0 {
|
||||
thinPoolSnapFactor, err := strconv.ParseFloat(parseThinPoolSnapFactor, 32)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to convert snapfactor %v to float: %v", parseThinPoolSnapFactor, err)
|
||||
}
|
||||
if thinPoolSnapFactor < 1.0 || thinPoolSnapFactor > 100.0 {
|
||||
return nil, fmt.Errorf("invalid snapshot factor %v, the value must be between 1 to 100", thinPoolSnapFactor)
|
||||
}
|
||||
cfg.thinPoolSnapFactor = float32(thinPoolSnapFactor)
|
||||
}
|
||||
return &cfg, nil
|
||||
}
|
||||
|
||||
|
187
vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs_test.go
generated
vendored
187
vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs_test.go
generated
vendored
@ -51,12 +51,24 @@ func TestCanSupport(t *testing.T) {
|
||||
if plug.GetPluginName() != "kubernetes.io/glusterfs" {
|
||||
t.Errorf("Wrong name: %s", plug.GetPluginName())
|
||||
}
|
||||
if plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{}}}}) {
|
||||
if plug.CanSupport(&volume.Spec{}) {
|
||||
t.Errorf("Expected false")
|
||||
}
|
||||
if plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
|
||||
t.Errorf("Expected false")
|
||||
}
|
||||
if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{Glusterfs: &v1.GlusterfsVolumeSource{}}}}) {
|
||||
t.Errorf("Expected true")
|
||||
}
|
||||
if plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{}}}) {
|
||||
t.Errorf("Expected false")
|
||||
}
|
||||
if plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{}}}}) {
|
||||
t.Errorf("Expected false")
|
||||
}
|
||||
if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{Glusterfs: &v1.GlusterfsVolumeSource{}}}}}) {
|
||||
t.Errorf("Expected true")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetAccessModes(t *testing.T) {
|
||||
@ -241,13 +253,14 @@ func TestParseClassParameters(t *testing.T) {
|
||||
nil, // secret
|
||||
false, // expect error
|
||||
&provisionerConfig{
|
||||
url: "https://localhost:8080",
|
||||
user: "admin",
|
||||
userKey: "password",
|
||||
secretValue: "password",
|
||||
gidMin: 2000,
|
||||
gidMax: 2147483647,
|
||||
volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 3}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}},
|
||||
url: "https://localhost:8080",
|
||||
user: "admin",
|
||||
userKey: "password",
|
||||
secretValue: "password",
|
||||
gidMin: 2000,
|
||||
gidMax: 2147483647,
|
||||
volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 3}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}},
|
||||
thinPoolSnapFactor: float32(1.0),
|
||||
},
|
||||
},
|
||||
{
|
||||
@ -261,14 +274,15 @@ func TestParseClassParameters(t *testing.T) {
|
||||
&secret,
|
||||
false, // expect error
|
||||
&provisionerConfig{
|
||||
url: "https://localhost:8080",
|
||||
user: "admin",
|
||||
secretName: "mysecret",
|
||||
secretNamespace: "default",
|
||||
secretValue: "mypassword",
|
||||
gidMin: 2000,
|
||||
gidMax: 2147483647,
|
||||
volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 3}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}},
|
||||
url: "https://localhost:8080",
|
||||
user: "admin",
|
||||
secretName: "mysecret",
|
||||
secretNamespace: "default",
|
||||
secretValue: "mypassword",
|
||||
gidMin: 2000,
|
||||
gidMax: 2147483647,
|
||||
volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 3}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}},
|
||||
thinPoolSnapFactor: float32(1.0),
|
||||
},
|
||||
},
|
||||
{
|
||||
@ -280,10 +294,11 @@ func TestParseClassParameters(t *testing.T) {
|
||||
&secret,
|
||||
false, // expect error
|
||||
&provisionerConfig{
|
||||
url: "https://localhost:8080",
|
||||
gidMin: 2000,
|
||||
gidMax: 2147483647,
|
||||
volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 3}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}},
|
||||
url: "https://localhost:8080",
|
||||
gidMin: 2000,
|
||||
gidMax: 2147483647,
|
||||
volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 3}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}},
|
||||
thinPoolSnapFactor: float32(1.0),
|
||||
},
|
||||
},
|
||||
{
|
||||
@ -417,10 +432,11 @@ func TestParseClassParameters(t *testing.T) {
|
||||
&secret,
|
||||
false, // expect error
|
||||
&provisionerConfig{
|
||||
url: "https://localhost:8080",
|
||||
gidMin: 4000,
|
||||
gidMax: 2147483647,
|
||||
volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 3}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}},
|
||||
url: "https://localhost:8080",
|
||||
gidMin: 4000,
|
||||
gidMax: 2147483647,
|
||||
volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 3}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}},
|
||||
thinPoolSnapFactor: float32(1.0),
|
||||
},
|
||||
},
|
||||
{
|
||||
@ -433,10 +449,11 @@ func TestParseClassParameters(t *testing.T) {
|
||||
&secret,
|
||||
false, // expect error
|
||||
&provisionerConfig{
|
||||
url: "https://localhost:8080",
|
||||
gidMin: 2000,
|
||||
gidMax: 5000,
|
||||
volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 3}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}},
|
||||
url: "https://localhost:8080",
|
||||
gidMin: 2000,
|
||||
gidMax: 5000,
|
||||
volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 3}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}},
|
||||
thinPoolSnapFactor: float32(1.0),
|
||||
},
|
||||
},
|
||||
{
|
||||
@ -450,10 +467,11 @@ func TestParseClassParameters(t *testing.T) {
|
||||
&secret,
|
||||
false, // expect error
|
||||
&provisionerConfig{
|
||||
url: "https://localhost:8080",
|
||||
gidMin: 4000,
|
||||
gidMax: 5000,
|
||||
volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 3}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}},
|
||||
url: "https://localhost:8080",
|
||||
gidMin: 4000,
|
||||
gidMax: 5000,
|
||||
volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 3}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}},
|
||||
thinPoolSnapFactor: float32(1.0),
|
||||
},
|
||||
},
|
||||
|
||||
@ -469,10 +487,11 @@ func TestParseClassParameters(t *testing.T) {
|
||||
&secret,
|
||||
false, // expect error
|
||||
&provisionerConfig{
|
||||
url: "https://localhost:8080",
|
||||
gidMin: 4000,
|
||||
gidMax: 5000,
|
||||
volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 4}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}},
|
||||
url: "https://localhost:8080",
|
||||
gidMin: 4000,
|
||||
gidMax: 5000,
|
||||
volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 4}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}},
|
||||
thinPoolSnapFactor: float32(1.0),
|
||||
},
|
||||
},
|
||||
|
||||
@ -488,10 +507,54 @@ func TestParseClassParameters(t *testing.T) {
|
||||
&secret,
|
||||
false, // expect error
|
||||
&provisionerConfig{
|
||||
url: "https://localhost:8080",
|
||||
gidMin: 4000,
|
||||
gidMax: 5000,
|
||||
volumeType: gapi.VolumeDurabilityInfo{Type: "disperse", Replicate: gapi.ReplicaDurability{Replica: 0}, Disperse: gapi.DisperseDurability{Data: 4, Redundancy: 2}},
|
||||
url: "https://localhost:8080",
|
||||
gidMin: 4000,
|
||||
gidMax: 5000,
|
||||
volumeType: gapi.VolumeDurabilityInfo{Type: "disperse", Replicate: gapi.ReplicaDurability{Replica: 0}, Disperse: gapi.DisperseDurability{Data: 4, Redundancy: 2}},
|
||||
thinPoolSnapFactor: float32(1.0),
|
||||
},
|
||||
},
|
||||
{
|
||||
"valid snapfactor: 50",
|
||||
map[string]string{
|
||||
"resturl": "https://localhost:8080",
|
||||
"restauthenabled": "false",
|
||||
"gidMin": "4000",
|
||||
"gidMax": "5000",
|
||||
"volumetype": "disperse:4:2",
|
||||
"snapfactor": "50",
|
||||
},
|
||||
&secret,
|
||||
false, // expect error
|
||||
&provisionerConfig{
|
||||
url: "https://localhost:8080",
|
||||
gidMin: 4000,
|
||||
gidMax: 5000,
|
||||
volumeType: gapi.VolumeDurabilityInfo{Type: "disperse", Replicate: gapi.ReplicaDurability{Replica: 0}, Disperse: gapi.DisperseDurability{Data: 4, Redundancy: 2}},
|
||||
thinPoolSnapFactor: float32(50),
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
"valid volumenameprefix: dept-dev",
|
||||
map[string]string{
|
||||
"resturl": "https://localhost:8080",
|
||||
"restauthenabled": "false",
|
||||
"gidMin": "4000",
|
||||
"gidMax": "5000",
|
||||
"volumetype": "disperse:4:2",
|
||||
"snapfactor": "50",
|
||||
"volumenameprefix": "dept-dev",
|
||||
},
|
||||
&secret,
|
||||
false, // expect error
|
||||
&provisionerConfig{
|
||||
url: "https://localhost:8080",
|
||||
gidMin: 4000,
|
||||
gidMax: 5000,
|
||||
volumeType: gapi.VolumeDurabilityInfo{Type: "disperse", Replicate: gapi.ReplicaDurability{Replica: 0}, Disperse: gapi.DisperseDurability{Data: 4, Redundancy: 2}},
|
||||
thinPoolSnapFactor: float32(50),
|
||||
volumeNamePrefix: "dept-dev",
|
||||
},
|
||||
},
|
||||
{
|
||||
@ -538,6 +601,50 @@ func TestParseClassParameters(t *testing.T) {
|
||||
true, // expect error
|
||||
nil,
|
||||
},
|
||||
{
|
||||
"invalid thinPoolSnapFactor: value out of range",
|
||||
map[string]string{
|
||||
"resturl": "https://localhost:8080",
|
||||
"restauthenabled": "false",
|
||||
"snapfactor": "0.5",
|
||||
},
|
||||
&secret,
|
||||
true, // expect error
|
||||
nil,
|
||||
},
|
||||
{
|
||||
"invalid volumenameprefix: string starting with '_'",
|
||||
map[string]string{
|
||||
"resturl": "https://localhost:8080",
|
||||
"restauthenabled": "false",
|
||||
"volumenameprefix": "_",
|
||||
},
|
||||
&secret,
|
||||
true, // expect error
|
||||
nil,
|
||||
},
|
||||
{
|
||||
"invalid volumenameprefix: string with '_'",
|
||||
map[string]string{
|
||||
"resturl": "https://localhost:8080",
|
||||
"restauthenabled": "false",
|
||||
"volumenameprefix": "qe_dept",
|
||||
},
|
||||
&secret,
|
||||
true, // expect error
|
||||
nil,
|
||||
},
|
||||
{
|
||||
"invalid thinPoolSnapFactor: value out of range",
|
||||
map[string]string{
|
||||
"resturl": "https://localhost:8080",
|
||||
"restauthenabled": "false",
|
||||
"snapfactor": "120",
|
||||
},
|
||||
&secret,
|
||||
true, // expect error
|
||||
nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
|
9
vendor/k8s.io/kubernetes/pkg/volume/host_path/host_path.go
generated
vendored
9
vendor/k8s.io/kubernetes/pkg/volume/host_path/host_path.go
generated
vendored
@ -265,7 +265,11 @@ type hostPathProvisioner struct {
|
||||
|
||||
// Create for hostPath simply creates a local /tmp/hostpath_pv/%s directory as a new PersistentVolume.
|
||||
// This Provisioner is meant for development and testing only and WILL NOT WORK in a multi-node cluster.
|
||||
func (r *hostPathProvisioner) Provision() (*v1.PersistentVolume, error) {
|
||||
func (r *hostPathProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
|
||||
if util.CheckPersistentVolumeClaimModeBlock(r.options.PVC) {
|
||||
return nil, fmt.Errorf("%s does not support block volume provisioning", r.plugin.GetPluginName())
|
||||
}
|
||||
|
||||
fullpath := fmt.Sprintf("/tmp/hostpath_pv/%s", uuid.NewUUID())
|
||||
|
||||
capacity := r.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
|
||||
@ -350,7 +354,8 @@ type fileTypeChecker struct {
|
||||
}
|
||||
|
||||
func (ftc *fileTypeChecker) Exists() bool {
|
||||
return ftc.mounter.ExistsPath(ftc.path)
|
||||
exists, err := ftc.mounter.ExistsPath(ftc.path)
|
||||
return exists && err == nil
|
||||
}
|
||||
|
||||
func (ftc *fileTypeChecker) IsFile() bool {
|
||||
|
35
vendor/k8s.io/kubernetes/pkg/volume/host_path/host_path_test.go
generated
vendored
35
vendor/k8s.io/kubernetes/pkg/volume/host_path/host_path_test.go
generated
vendored
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package host_path
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
@ -177,7 +178,7 @@ func TestProvisioner(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Errorf("Failed to make a new Provisioner: %v", err)
|
||||
}
|
||||
pv, err := creater.Provision()
|
||||
pv, err := creater.Provision(nil, nil)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error creating volume: %v", err)
|
||||
}
|
||||
@ -368,14 +369,42 @@ func (fftc *fakeFileTypeChecker) MakeDir(pathname string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fftc *fakeFileTypeChecker) ExistsPath(pathname string) bool {
|
||||
return true
|
||||
func (fftc *fakeFileTypeChecker) ExistsPath(pathname string) (bool, error) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (fftc *fakeFileTypeChecker) GetFileType(_ string) (utilmount.FileType, error) {
|
||||
return utilmount.FileType(fftc.desiredType), nil
|
||||
}
|
||||
|
||||
func (fftc *fakeFileTypeChecker) PrepareSafeSubpath(subPath utilmount.Subpath) (newHostPath string, cleanupAction func(), err error) {
|
||||
return "", nil, nil
|
||||
}
|
||||
|
||||
func (fftc *fakeFileTypeChecker) CleanSubPaths(_, _ string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fftc *fakeFileTypeChecker) SafeMakeDir(_, _ string, _ os.FileMode) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fftc *fakeFileTypeChecker) GetMountRefs(pathname string) ([]string, error) {
|
||||
return nil, errors.New("not implemented")
|
||||
}
|
||||
|
||||
func (fftc *fakeFileTypeChecker) GetFSGroup(pathname string) (int64, error) {
|
||||
return -1, errors.New("not implemented")
|
||||
}
|
||||
|
||||
func (fftc *fakeFileTypeChecker) GetSELinuxSupport(pathname string) (bool, error) {
|
||||
return false, errors.New("not implemented")
|
||||
}
|
||||
|
||||
func (fftc *fakeFileTypeChecker) GetMode(pathname string) (os.FileMode, error) {
|
||||
return 0, errors.New("not implemented")
|
||||
}
|
||||
|
||||
func setUp() error {
|
||||
err := os.MkdirAll("/tmp/ExistingFolder", os.FileMode(0755))
|
||||
if err != nil {
|
||||
|
21
vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi.go
generated
vendored
21
vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi.go
generated
vendored
@ -112,6 +112,12 @@ func (plugin *iscsiPlugin) newMounterInternal(spec *volume.Spec, podUID types.UI
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if iscsiDisk != nil {
|
||||
|
||||
//Add volume metrics
|
||||
iscsiDisk.MetricsProvider = volume.NewMetricsStatFS(iscsiDisk.GetPath())
|
||||
}
|
||||
return &iscsiDiskMounter{
|
||||
iscsiDisk: iscsiDisk,
|
||||
fsType: fsType,
|
||||
@ -164,10 +170,11 @@ func (plugin *iscsiPlugin) NewUnmounter(volName string, podUID types.UID) (volum
|
||||
func (plugin *iscsiPlugin) newUnmounterInternal(volName string, podUID types.UID, manager diskManager, mounter mount.Interface, exec mount.Exec) (volume.Unmounter, error) {
|
||||
return &iscsiDiskUnmounter{
|
||||
iscsiDisk: &iscsiDisk{
|
||||
podUID: podUID,
|
||||
VolName: volName,
|
||||
manager: manager,
|
||||
plugin: plugin,
|
||||
podUID: podUID,
|
||||
VolName: volName,
|
||||
manager: manager,
|
||||
plugin: plugin,
|
||||
MetricsProvider: volume.NewMetricsStatFS(plugin.host.GetPodVolumeDir(podUID, utilstrings.EscapeQualifiedNameForDisk(iscsiPluginName), volName)),
|
||||
},
|
||||
mounter: mounter,
|
||||
exec: exec,
|
||||
@ -264,7 +271,7 @@ type iscsiDisk struct {
|
||||
plugin *iscsiPlugin
|
||||
// Utility interface that provides API calls to the provider to attach/detach disks.
|
||||
manager diskManager
|
||||
volume.MetricsNil
|
||||
volume.MetricsProvider
|
||||
}
|
||||
|
||||
func (iscsi *iscsiDisk) GetPath() string {
|
||||
@ -360,6 +367,10 @@ func (b *iscsiDiskMapper) SetUpDevice() (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (b *iscsiDiskMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, volumeMapName string, podUID types.UID) error {
|
||||
return ioutil.MapBlockVolume(devicePath, globalMapPath, volumeMapPath, volumeMapName, podUID)
|
||||
}
|
||||
|
||||
type iscsiDiskUnmapper struct {
|
||||
*iscsiDisk
|
||||
exec mount.Exec
|
||||
|
11
vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi_util.go
generated
vendored
11
vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi_util.go
generated
vendored
@ -396,25 +396,16 @@ func globalPDPathOperation(b iscsiDiskMounter) func(iscsiDiskMounter, string, *I
|
||||
|
||||
// DetachDisk unmounts and detaches a volume from node
|
||||
func (util *ISCSIUtil) DetachDisk(c iscsiDiskUnmounter, mntPath string) error {
|
||||
_, cnt, err := mount.GetDeviceNameFromMount(c.mounter, mntPath)
|
||||
if err != nil {
|
||||
glog.Errorf("iscsi detach disk: failed to get device from mnt: %s\nError: %v", mntPath, err)
|
||||
return err
|
||||
}
|
||||
if pathExists, pathErr := volumeutil.PathExists(mntPath); pathErr != nil {
|
||||
return fmt.Errorf("Error checking if path exists: %v", pathErr)
|
||||
} else if !pathExists {
|
||||
glog.Warningf("Warning: Unmount skipped because path does not exist: %v", mntPath)
|
||||
return nil
|
||||
}
|
||||
if err = c.mounter.Unmount(mntPath); err != nil {
|
||||
if err := c.mounter.Unmount(mntPath); err != nil {
|
||||
glog.Errorf("iscsi detach disk: failed to unmount: %s\nError: %v", mntPath, err)
|
||||
return err
|
||||
}
|
||||
cnt--
|
||||
if cnt != 0 {
|
||||
return nil
|
||||
}
|
||||
// if device is no longer used, see if need to logout the target
|
||||
device, prefix, err := extractDeviceAndPrefix(mntPath)
|
||||
if err != nil {
|
||||
|
13
vendor/k8s.io/kubernetes/pkg/volume/local/BUILD
generated
vendored
13
vendor/k8s.io/kubernetes/pkg/volume/local/BUILD
generated
vendored
@ -20,7 +20,6 @@ go_library(
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/record:go_default_library",
|
||||
],
|
||||
)
|
||||
@ -32,6 +31,10 @@ go_test(
|
||||
"local_test.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:linux": [
|
||||
"local_linux_test.go",
|
||||
"local_test.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:windows": [
|
||||
"local_test.go",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
@ -54,6 +57,14 @@ go_test(
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/client-go/util/testing:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:windows": [
|
||||
"//pkg/volume:go_default_library",
|
||||
"//pkg/volume/testing:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/client-go/util/testing:go_default_library",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
)
|
||||
|
59
vendor/k8s.io/kubernetes/pkg/volume/local/local.go
generated
vendored
59
vendor/k8s.io/kubernetes/pkg/volume/local/local.go
generated
vendored
@ -19,19 +19,20 @@ package local
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/kubernetes/pkg/kubelet/events"
|
||||
"k8s.io/kubernetes/pkg/util/keymutex"
|
||||
"k8s.io/kubernetes/pkg/util/mount"
|
||||
"k8s.io/kubernetes/pkg/util/strings"
|
||||
stringsutil "k8s.io/kubernetes/pkg/util/strings"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
"k8s.io/kubernetes/pkg/volume/util"
|
||||
"k8s.io/kubernetes/pkg/volume/validation"
|
||||
@ -59,10 +60,7 @@ const (
|
||||
func (plugin *localVolumePlugin) Init(host volume.VolumeHost) error {
|
||||
plugin.host = host
|
||||
plugin.volumeLocks = keymutex.NewKeyMutex()
|
||||
eventBroadcaster := record.NewBroadcaster()
|
||||
eventBroadcaster.StartLogging(glog.Infof)
|
||||
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "localvolume"})
|
||||
plugin.recorder = recorder
|
||||
plugin.recorder = host.GetEventRecorder()
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -222,7 +220,7 @@ type localVolume struct {
|
||||
}
|
||||
|
||||
func (l *localVolume) GetPath() string {
|
||||
return l.plugin.host.GetPodVolumeDir(l.podUID, strings.EscapeQualifiedNameForDisk(localVolumePluginName), l.volName)
|
||||
return l.plugin.host.GetPodVolumeDir(l.podUID, stringsutil.EscapeQualifiedNameForDisk(localVolumePluginName), l.volName)
|
||||
}
|
||||
|
||||
type localVolumeMounter struct {
|
||||
@ -276,31 +274,35 @@ func (m *localVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
|
||||
if !notMnt {
|
||||
return nil
|
||||
}
|
||||
refs, err := mount.GetMountRefsByDev(m.mounter, m.globalPath)
|
||||
refs, err := m.mounter.GetMountRefs(m.globalPath)
|
||||
if fsGroup != nil {
|
||||
if err != nil {
|
||||
glog.Errorf("cannot collect mounting information: %s %v", m.globalPath, err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Only count mounts from other pods
|
||||
refs = m.filterPodMounts(refs)
|
||||
if len(refs) > 0 {
|
||||
fsGroupNew := int64(*fsGroup)
|
||||
fsGroupSame, fsGroupOld, err := volume.IsSameFSGroup(m.globalPath, fsGroupNew)
|
||||
fsGroupOld, err := m.mounter.GetFSGroup(m.globalPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check fsGroup for %s (%v)", m.globalPath, err)
|
||||
}
|
||||
if !fsGroupSame {
|
||||
if fsGroupNew != fsGroupOld {
|
||||
m.plugin.recorder.Eventf(m.pod, v1.EventTypeWarning, events.WarnAlreadyMountedVolume, "The requested fsGroup is %d, but the volume %s has GID %d. The volume may not be shareable.", fsGroupNew, m.volName, fsGroupOld)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if err := os.MkdirAll(dir, 0750); err != nil {
|
||||
glog.Errorf("mkdir failed on disk %s (%v)", dir, err)
|
||||
return err
|
||||
if runtime.GOOS != "windows" {
|
||||
// skip below MkdirAll for windows since the "bind mount" logic is implemented differently in mount_wiondows.go
|
||||
if err := os.MkdirAll(dir, 0750); err != nil {
|
||||
glog.Errorf("mkdir failed on disk %s (%v)", dir, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Perform a bind mount to the full path to allow duplicate mounts of the same volume.
|
||||
options := []string{"bind"}
|
||||
if m.readOnly {
|
||||
@ -308,7 +310,8 @@ func (m *localVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
|
||||
}
|
||||
|
||||
glog.V(4).Infof("attempting to mount %s", dir)
|
||||
err = m.mounter.Mount(m.globalPath, dir, "", options)
|
||||
globalPath := util.MakeAbsolutePath(runtime.GOOS, m.globalPath)
|
||||
err = m.mounter.Mount(globalPath, dir, "", options)
|
||||
if err != nil {
|
||||
glog.Errorf("Mount of volume %s failed: %v", dir, err)
|
||||
notMnt, mntErr := m.mounter.IsNotMountPoint(dir)
|
||||
@ -344,6 +347,17 @@ func (m *localVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// filterPodMounts only returns mount paths inside the kubelet pod directory
|
||||
func (m *localVolumeMounter) filterPodMounts(refs []string) []string {
|
||||
filtered := []string{}
|
||||
for _, r := range refs {
|
||||
if strings.HasPrefix(r, m.plugin.host.GetPodsDir()+string(os.PathSeparator)) {
|
||||
filtered = append(filtered, r)
|
||||
}
|
||||
}
|
||||
return filtered
|
||||
}
|
||||
|
||||
type localVolumeUnmounter struct {
|
||||
*localVolume
|
||||
}
|
||||
@ -371,8 +385,13 @@ var _ volume.BlockVolumeMapper = &localVolumeMapper{}
|
||||
|
||||
// SetUpDevice provides physical device path for the local PV.
|
||||
func (m *localVolumeMapper) SetUpDevice() (string, error) {
|
||||
glog.V(4).Infof("SetupDevice returning path %s", m.globalPath)
|
||||
return m.globalPath, nil
|
||||
globalPath := util.MakeAbsolutePath(runtime.GOOS, m.globalPath)
|
||||
glog.V(4).Infof("SetupDevice returning path %s", globalPath)
|
||||
return globalPath, nil
|
||||
}
|
||||
|
||||
func (m *localVolumeMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, volumeMapName string, podUID types.UID) error {
|
||||
return util.MapBlockVolume(devicePath, globalMapPath, volumeMapPath, volumeMapName, podUID)
|
||||
}
|
||||
|
||||
// localVolumeUnmapper implements the BlockVolumeUnmapper interface for local volumes.
|
||||
@ -391,7 +410,7 @@ func (u *localVolumeUnmapper) TearDownDevice(mapPath, devicePath string) error {
|
||||
// GetGlobalMapPath returns global map path and error.
|
||||
// path: plugins/kubernetes.io/local-volume/volumeDevices/{volumeName}
|
||||
func (lv *localVolume) GetGlobalMapPath(spec *volume.Spec) (string, error) {
|
||||
return path.Join(lv.plugin.host.GetVolumeDevicePluginDir(strings.EscapeQualifiedNameForDisk(localVolumePluginName)),
|
||||
return filepath.Join(lv.plugin.host.GetVolumeDevicePluginDir(stringsutil.EscapeQualifiedNameForDisk(localVolumePluginName)),
|
||||
lv.volName), nil
|
||||
}
|
||||
|
||||
@ -400,5 +419,5 @@ func (lv *localVolume) GetGlobalMapPath(spec *volume.Spec) (string, error) {
|
||||
// volName: local-pv-ff0d6d4
|
||||
func (lv *localVolume) GetPodDeviceMapPath() (string, string) {
|
||||
return lv.plugin.host.GetPodVolumeDeviceDir(lv.podUID,
|
||||
strings.EscapeQualifiedNameForDisk(localVolumePluginName)), lv.volName
|
||||
stringsutil.EscapeQualifiedNameForDisk(localVolumePluginName)), lv.volName
|
||||
}
|
||||
|
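The SetUpAt change above no longer fails when another pod has already mounted the volume with a different GID; it compares the requested fsGroup with the GID reported for the global path and records a warning event instead. A minimal sketch of that decision as a standalone helper, with the warning text taken from the diff; the helper, its arguments, and the otherPodRefs count are illustrative stand-ins for the mounter, recorder, and mount-reference plumbing:

package main

import "fmt"

// checkFSGroupConflict mirrors the check in SetUpAt: when another pod already
// mounted the volume with a different GID, the volume keeps its old GID and
// the caller should only record a warning event.
func checkFSGroupConflict(volName string, fsGroupNew, fsGroupOld int64, otherPodRefs int) string {
	if otherPodRefs == 0 {
		// No other pod mounts the volume, so the requested fsGroup can be applied.
		return ""
	}
	if fsGroupNew != fsGroupOld {
		return fmt.Sprintf("The requested fsGroup is %d, but the volume %s has GID %d. The volume may not be shareable.",
			fsGroupNew, volName, fsGroupOld)
	}
	return ""
}

func main() {
	fmt.Println(checkFSGroupConflict("local-pv", 2001, 2000, 1)) // prints the warning
	fmt.Println(checkFSGroupConflict("local-pv", 2000, 2000, 1)) // no conflict, empty string
}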
68
vendor/k8s.io/kubernetes/pkg/volume/local/local_linux_test.go
generated
vendored
Normal file
68
vendor/k8s.io/kubernetes/pkg/volume/local/local_linux_test.go
generated
vendored
Normal file
@ -0,0 +1,68 @@
|
||||
// +build linux darwin
|
||||
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package local
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
"testing"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
|
||||
func TestFSGroupMount(t *testing.T) {
|
||||
tmpDir, plug := getPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
info, err := os.Stat(tmpDir)
|
||||
if err != nil {
|
||||
t.Errorf("Error getting stats for %s (%v)", tmpDir, err)
|
||||
}
|
||||
s := info.Sys().(*syscall.Stat_t)
|
||||
if s == nil {
|
||||
t.Errorf("Error getting stats for %s (%v)", tmpDir, err)
|
||||
}
|
||||
fsGroup1 := int64(s.Gid)
|
||||
fsGroup2 := fsGroup1 + 1
|
||||
pod1 := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
|
||||
pod1.Spec.SecurityContext = &v1.PodSecurityContext{
|
||||
FSGroup: &fsGroup1,
|
||||
}
|
||||
pod2 := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
|
||||
pod2.Spec.SecurityContext = &v1.PodSecurityContext{
|
||||
FSGroup: &fsGroup2,
|
||||
}
|
||||
err = testFSGroupMount(plug, pod1, tmpDir, fsGroup1)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to make a new Mounter: %v", err)
|
||||
}
|
||||
err = testFSGroupMount(plug, pod2, tmpDir, fsGroup2)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to make a new Mounter: %v", err)
|
||||
}
|
||||
// Check that the GID of tmpDir was not changed when the second pod mounted it
|
||||
s = info.Sys().(*syscall.Stat_t)
|
||||
if s == nil {
|
||||
t.Errorf("Error getting stats for %s (%v)", tmpDir, err)
|
||||
}
|
||||
if fsGroup1 != int64(s.Gid) {
|
||||
t.Errorf("Old Gid %d for volume %s got overwritten by new Gid %d", fsGroup1, tmpDir, int64(s.Gid))
|
||||
}
|
||||
}
|
114
vendor/k8s.io/kubernetes/pkg/volume/local/local_test.go
generated
vendored
114
vendor/k8s.io/kubernetes/pkg/volume/local/local_test.go
generated
vendored
@ -1,4 +1,4 @@
|
||||
// +build linux darwin
|
||||
// +build linux darwin windows
|
||||
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
@ -22,7 +22,9 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"syscall"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
@ -199,11 +201,15 @@ func TestMountUnmount(t *testing.T) {
|
||||
if err := mounter.SetUp(nil); err != nil {
|
||||
t.Errorf("Expected success, got: %v", err)
|
||||
}
|
||||
if _, err := os.Stat(path); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
t.Errorf("SetUp() failed, volume path not created: %s", path)
|
||||
} else {
|
||||
t.Errorf("SetUp() failed: %v", err)
|
||||
|
||||
if runtime.GOOS != "windows" {
|
||||
// skip this check on Windows since the "bind mount" logic is implemented differently in mount_windows.go
|
||||
if _, err := os.Stat(path); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
t.Errorf("SetUp() failed, volume path not created: %s", path)
|
||||
} else {
|
||||
t.Errorf("SetUp() failed: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -260,6 +266,7 @@ func TestMapUnmap(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Errorf("Failed to SetUpDevice, err: %v", err)
|
||||
}
|
||||
|
||||
if _, err := os.Stat(devPath); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
t.Errorf("SetUpDevice() failed, volume path not created: %s", devPath)
|
||||
@ -302,45 +309,6 @@ func testFSGroupMount(plug volume.VolumePlugin, pod *v1.Pod, tmpDir string, fsGr
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestFSGroupMount(t *testing.T) {
|
||||
tmpDir, plug := getPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
info, err := os.Stat(tmpDir)
|
||||
if err != nil {
|
||||
t.Errorf("Error getting stats for %s (%v)", tmpDir, err)
|
||||
}
|
||||
s := info.Sys().(*syscall.Stat_t)
|
||||
if s == nil {
|
||||
t.Errorf("Error getting stats for %s (%v)", tmpDir, err)
|
||||
}
|
||||
fsGroup1 := int64(s.Gid)
|
||||
fsGroup2 := fsGroup1 + 1
|
||||
pod1 := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
|
||||
pod1.Spec.SecurityContext = &v1.PodSecurityContext{
|
||||
FSGroup: &fsGroup1,
|
||||
}
|
||||
pod2 := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
|
||||
pod2.Spec.SecurityContext = &v1.PodSecurityContext{
|
||||
FSGroup: &fsGroup2,
|
||||
}
|
||||
err = testFSGroupMount(plug, pod1, tmpDir, fsGroup1)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to make a new Mounter: %v", err)
|
||||
}
|
||||
err = testFSGroupMount(plug, pod2, tmpDir, fsGroup2)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to make a new Mounter: %v", err)
|
||||
}
|
||||
// Check that the GID of tmpDir was not changed when the second pod mounted it
|
||||
s = info.Sys().(*syscall.Stat_t)
|
||||
if s == nil {
|
||||
t.Errorf("Error getting stats for %s (%v)", tmpDir, err)
|
||||
}
|
||||
if fsGroup1 != int64(s.Gid) {
|
||||
t.Errorf("Old Gid %d for volume %s got overwritten by new Gid %d", fsGroup1, tmpDir, int64(s.Gid))
|
||||
}
|
||||
}
|
||||
|
||||
func TestConstructVolumeSpec(t *testing.T) {
|
||||
tmpDir, plug := getPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
@ -481,3 +449,57 @@ func TestUnsupportedPlugins(t *testing.T) {
|
||||
t.Errorf("Provisionable plugin found, expected none")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilterPodMounts(t *testing.T) {
|
||||
tmpDir, plug := getPlugin(t)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
|
||||
mounter, err := plug.NewMounter(getTestVolume(false, tmpDir, false), pod, volume.VolumeOptions{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
lvMounter, ok := mounter.(*localVolumeMounter)
|
||||
if !ok {
|
||||
t.Fatal("mounter is not localVolumeMounter")
|
||||
}
|
||||
|
||||
host := volumetest.NewFakeVolumeHost(tmpDir, nil, nil)
|
||||
podsDir := host.GetPodsDir()
|
||||
|
||||
cases := map[string]struct {
|
||||
input []string
|
||||
expected []string
|
||||
}{
|
||||
"empty": {
|
||||
[]string{},
|
||||
[]string{},
|
||||
},
|
||||
"not-pod-mount": {
|
||||
[]string{"/mnt/outside"},
|
||||
[]string{},
|
||||
},
|
||||
"pod-mount": {
|
||||
[]string{filepath.Join(podsDir, "pod-mount")},
|
||||
[]string{filepath.Join(podsDir, "pod-mount")},
|
||||
},
|
||||
"not-directory-prefix": {
|
||||
[]string{podsDir + "pod-mount"},
|
||||
[]string{},
|
||||
},
|
||||
"mix": {
|
||||
[]string{"/mnt/outside",
|
||||
filepath.Join(podsDir, "pod-mount"),
|
||||
"/another/outside",
|
||||
filepath.Join(podsDir, "pod-mount2")},
|
||||
[]string{filepath.Join(podsDir, "pod-mount"),
|
||||
filepath.Join(podsDir, "pod-mount2")},
|
||||
},
|
||||
}
|
||||
for name, test := range cases {
|
||||
output := lvMounter.filterPodMounts(test.input)
|
||||
if !reflect.DeepEqual(output, test.expected) {
|
||||
t.Errorf("%v failed: output %+v doesn't equal expected %+v", name, output, test.expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
10
vendor/k8s.io/kubernetes/pkg/volume/metrics_du.go
generated
vendored
10
vendor/k8s.io/kubernetes/pkg/volume/metrics_du.go
generated
vendored
@ -25,7 +25,7 @@ import (
|
||||
var _ MetricsProvider = &metricsDu{}
|
||||
|
||||
// metricsDu represents a MetricsProvider that calculates the used and
|
||||
// available Volume space by executing the "du" command and gathering
|
||||
// available Volume space by calling fs.DiskUsage() and gathering
|
||||
// filesystem info for the Volume path.
|
||||
type metricsDu struct {
|
||||
// the directory path the volume is mounted to.
|
||||
@ -46,7 +46,7 @@ func (md *metricsDu) GetMetrics() (*Metrics, error) {
|
||||
return metrics, NewNoPathDefinedError()
|
||||
}
|
||||
|
||||
err := md.runDu(metrics)
|
||||
err := md.runDiskUsage(metrics)
|
||||
if err != nil {
|
||||
return metrics, err
|
||||
}
|
||||
@ -64,9 +64,9 @@ func (md *metricsDu) GetMetrics() (*Metrics, error) {
|
||||
return metrics, nil
|
||||
}
|
||||
|
||||
// runDu executes the "du" command and writes the results to metrics.Used
|
||||
func (md *metricsDu) runDu(metrics *Metrics) error {
|
||||
used, err := fs.Du(md.path)
|
||||
// runDiskUsage gets disk usage of md.path and writes the results to metrics.Used
|
||||
func (md *metricsDu) runDiskUsage(metrics *Metrics) error {
|
||||
used, err := fs.DiskUsage(md.path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
4
vendor/k8s.io/kubernetes/pkg/volume/nfs/nfs.go
generated
vendored
4
vendor/k8s.io/kubernetes/pkg/volume/nfs/nfs.go
generated
vendored
@ -255,7 +255,7 @@ func (b *nfsMounter) SetUpAt(dir string, fsGroup *int64) error {
|
||||
if err != nil {
|
||||
notMnt, mntErr := b.mounter.IsNotMountPoint(dir)
|
||||
if mntErr != nil {
|
||||
glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
|
||||
glog.Errorf("IsNotMountPoint check failed: %v", mntErr)
|
||||
return err
|
||||
}
|
||||
if !notMnt {
|
||||
@ -265,7 +265,7 @@ func (b *nfsMounter) SetUpAt(dir string, fsGroup *int64) error {
|
||||
}
|
||||
notMnt, mntErr := b.mounter.IsNotMountPoint(dir)
|
||||
if mntErr != nil {
|
||||
glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
|
||||
glog.Errorf("IsNotMountPoint check failed: %v", mntErr)
|
||||
return err
|
||||
}
|
||||
if !notMnt {
|
||||
|
6
vendor/k8s.io/kubernetes/pkg/volume/photon_pd/photon_pd.go
generated
vendored
6
vendor/k8s.io/kubernetes/pkg/volume/photon_pd/photon_pd.go
generated
vendored
@ -340,11 +340,15 @@ func (plugin *photonPersistentDiskPlugin) newProvisionerInternal(options volume.
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *photonPersistentDiskProvisioner) Provision() (*v1.PersistentVolume, error) {
|
||||
func (p *photonPersistentDiskProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
|
||||
if !util.AccessModesContainedInAll(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) {
|
||||
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", p.options.PVC.Spec.AccessModes, p.plugin.GetAccessModes())
|
||||
}
|
||||
|
||||
if util.CheckPersistentVolumeClaimModeBlock(p.options.PVC) {
|
||||
return nil, fmt.Errorf("%s does not support block volume provisioning", p.plugin.GetPluginName())
|
||||
}
|
||||
|
||||
pdID, sizeGB, fstype, err := p.manager.CreateVolume(p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
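Provision now takes the node selected by the scheduler and the allowed topologies; plugins that are not topology-aware, like this one, simply ignore both, and callers such as the updated test below pass nil, nil. A small sketch of the call-site shape under that assumption; the type names here are simplified stand-ins for the v1 API types:

package main

import "fmt"

// Node and TopologySelectorTerm stand in for the v1 API types used by the
// real interface; they are placeholders for this sketch only.
type Node struct{ Name string }
type TopologySelectorTerm struct{}

type PersistentVolume struct{ Name string }

// Provisioner mirrors the updated interface shape: topology-aware provisioners
// can use selectedNode and allowedTopologies, others may ignore them.
type Provisioner interface {
	Provision(selectedNode *Node, allowedTopologies []TopologySelectorTerm) (*PersistentVolume, error)
}

type simpleProvisioner struct{}

func (p *simpleProvisioner) Provision(_ *Node, _ []TopologySelectorTerm) (*PersistentVolume, error) {
	return &PersistentVolume{Name: "pv-example"}, nil
}

func main() {
	var prov Provisioner = &simpleProvisioner{}
	pv, err := prov.Provision(nil, nil) // same shape as the updated tests
	fmt.Println(pv, err)
}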
2
vendor/k8s.io/kubernetes/pkg/volume/photon_pd/photon_pd_test.go
generated
vendored
2
vendor/k8s.io/kubernetes/pkg/volume/photon_pd/photon_pd_test.go
generated
vendored
@ -166,7 +166,7 @@ func TestPlugin(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating new provisioner:%v", err)
|
||||
}
|
||||
persistentSpec, err := provisioner.Provision()
|
||||
persistentSpec, err := provisioner.Provision(nil, nil)
|
||||
if err != nil {
|
||||
t.Errorf("Provision() failed: %v", err)
|
||||
}
|
||||
|
107
vendor/k8s.io/kubernetes/pkg/volume/plugins.go
generated
vendored
107
vendor/k8s.io/kubernetes/pkg/volume/plugins.go
generated
vendored
@ -23,6 +23,7 @@ import (
|
||||
"sync"
|
||||
|
||||
"github.com/golang/glog"
|
||||
authenticationv1 "k8s.io/api/authentication/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@ -30,18 +31,29 @@ import (
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/apimachinery/pkg/util/validation"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
"k8s.io/kubernetes/pkg/util/io"
|
||||
"k8s.io/kubernetes/pkg/util/mount"
|
||||
"k8s.io/kubernetes/pkg/volume/util/recyclerclient"
|
||||
)
|
||||
|
||||
type ProbeOperation uint32
|
||||
type ProbeEvent struct {
|
||||
Plugin VolumePlugin // VolumePlugin that was added/updated/removed. If ProbeEvent.Op is 'ProbeRemove', Plugin should be nil.
|
||||
PluginName string
|
||||
Op ProbeOperation // The operation to the plugin
|
||||
}
|
||||
|
||||
const (
|
||||
// Common parameter which can be specified in StorageClass to specify the desired FSType
|
||||
// Provisioners SHOULD implement support for this if they are block device based
|
||||
// Must be a filesystem type supported by the host operating system.
|
||||
// Ex. "ext4", "xfs", "ntfs". Default value depends on the provisioner
|
||||
VolumeParameterFSType = "fstype"
|
||||
|
||||
ProbeAddOrUpdate ProbeOperation = 1 << iota
|
||||
ProbeRemove
|
||||
)
|
||||
|
||||
// VolumeOptions contains option information about a volume.
|
||||
@ -75,12 +87,8 @@ type VolumeOptions struct {
|
||||
type DynamicPluginProber interface {
|
||||
Init() error
|
||||
|
||||
// If an update has occurred since the last probe, updated = true
|
||||
// and the list of probed plugins is returned.
|
||||
// Otherwise, update = false and probedPlugins = nil.
|
||||
//
|
||||
// If an error occurs, updated and probedPlugins are undefined.
|
||||
Probe() (updated bool, probedPlugins []VolumePlugin, err error)
|
||||
// If an error occurs, events are undefined.
|
||||
Probe() (events []ProbeEvent, err error)
|
||||
}
|
||||
|
||||
// VolumePlugin is an interface to volume plugins that can be used on a
|
||||
@ -209,6 +217,32 @@ type ExpandableVolumePlugin interface {
|
||||
RequiresFSResize() bool
|
||||
}
|
||||
|
||||
// VolumePluginWithAttachLimits is an extended interface of VolumePlugin that restricts the number of
|
||||
// volumes that can be attached to a node.
|
||||
type VolumePluginWithAttachLimits interface {
|
||||
VolumePlugin
|
||||
// Return maximum number of volumes that can be attached to a node for this plugin.
|
||||
// The key must be same as string returned by VolumeLimitKey function. The returned
|
||||
// map may look like:
|
||||
// - { "storage-limits-aws-ebs": 39 }
|
||||
// - { "storage-limits-gce-pd": 10 }
|
||||
// A volume plugin may return an error from this function if it cannot be used on the given node or is not
|
||||
// applicable in the given environment (where the environment could be the cloud provider or any other dependency).
|
||||
// For example, calling this function for the EBS volume plugin on a GCE node should
|
||||
// result in an error.
|
||||
// The returned values are stored in node allocatable property and will be used
|
||||
// by scheduler to determine how many pods with volumes can be scheduled on given node.
|
||||
GetVolumeLimits() (map[string]int64, error)
|
||||
// Return volume limit key string to be used in node capacity constraints
|
||||
// The key must start with prefix storage-limits-. For example:
|
||||
// - storage-limits-aws-ebs
|
||||
// - storage-limits-csi-cinder
|
||||
// The key should respect character limit of ResourceName type
|
||||
// This function may be called by kubelet or scheduler to identify node allocatable property
|
||||
// which stores volumes limits.
|
||||
VolumeLimitKey(spec *Spec) string
|
||||
}
|
||||
|
||||
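A minimal sketch of a plugin satisfying the two new methods above; the key and the limit value follow the examples given in the interface comment, while the plugin type itself is hypothetical and the spec argument of VolumeLimitKey is dropped for brevity:

package main

import "fmt"

// fakeLimitedPlugin shows only the two methods added by
// VolumePluginWithAttachLimits; the embedded VolumePlugin methods are omitted.
type fakeLimitedPlugin struct{}

// GetVolumeLimits reports how many volumes of this type a node can attach,
// keyed by the same string returned from VolumeLimitKey.
func (p *fakeLimitedPlugin) GetVolumeLimits() (map[string]int64, error) {
	return map[string]int64{"storage-limits-aws-ebs": 39}, nil
}

// VolumeLimitKey returns the node allocatable key, prefixed with "storage-limits-".
func (p *fakeLimitedPlugin) VolumeLimitKey() string {
	return "storage-limits-aws-ebs"
}

func main() {
	p := &fakeLimitedPlugin{}
	limits, _ := p.GetVolumeLimits()
	fmt.Println(p.VolumeLimitKey(), limits[p.VolumeLimitKey()])
}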
// BlockVolumePlugin is an extended interface of VolumePlugin and is used for block volume support.
|
||||
type BlockVolumePlugin interface {
|
||||
VolumePlugin
|
||||
@ -242,6 +276,10 @@ type VolumeHost interface {
|
||||
// ex. plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/
|
||||
GetVolumeDevicePluginDir(pluginName string) string
|
||||
|
||||
// GetPodsDir returns the absolute path to a directory where all the pods
|
||||
// information is stored
|
||||
GetPodsDir() string
|
||||
|
||||
// GetPodVolumeDir returns the absolute path of a directory which
|
||||
// represents the named volume under the named plugin for the given
|
||||
// pod. If the specified pod does not exist, the result of this call
|
||||
@ -299,6 +337,8 @@ type VolumeHost interface {
|
||||
// Returns a function that returns a configmap.
|
||||
GetConfigMapFunc() func(namespace, name string) (*v1.ConfigMap, error)
|
||||
|
||||
GetServiceAccountTokenFunc() func(namespace, name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error)
|
||||
|
||||
// Returns an interface that should be used to execute any utilities in volume plugins
|
||||
GetExec(pluginName string) mount.Exec
|
||||
|
||||
@ -307,6 +347,9 @@ type VolumeHost interface {
|
||||
|
||||
// Returns the name of the node
|
||||
GetNodeName() types.NodeName
|
||||
|
||||
// Returns the event recorder of kubelet.
|
||||
GetEventRecorder() record.EventRecorder
|
||||
}
|
||||
|
||||
// VolumePluginMgr tracks registered plugins.
|
||||
@ -314,7 +357,7 @@ type VolumePluginMgr struct {
|
||||
mutex sync.Mutex
|
||||
plugins map[string]VolumePlugin
|
||||
prober DynamicPluginProber
|
||||
probedPlugins []VolumePlugin
|
||||
probedPlugins map[string]VolumePlugin
|
||||
Host VolumeHost
|
||||
}
|
||||
|
||||
@ -431,6 +474,9 @@ func (pm *VolumePluginMgr) InitPlugins(plugins []VolumePlugin, prober DynamicPlu
|
||||
if pm.plugins == nil {
|
||||
pm.plugins = map[string]VolumePlugin{}
|
||||
}
|
||||
if pm.probedPlugins == nil {
|
||||
pm.probedPlugins = map[string]VolumePlugin{}
|
||||
}
|
||||
|
||||
allErrs := []error{}
|
||||
for _, plugin := range plugins {
|
||||
@ -544,25 +590,40 @@ func (pm *VolumePluginMgr) FindPluginByName(name string) (VolumePlugin, error) {
|
||||
// Check if probedPlugin cache update is required.
|
||||
// If it is, initialize all probed plugins and replace the cache with them.
|
||||
func (pm *VolumePluginMgr) refreshProbedPlugins() {
|
||||
updated, plugins, err := pm.prober.Probe()
|
||||
events, err := pm.prober.Probe()
|
||||
if err != nil {
|
||||
glog.Errorf("Error dynamically probing plugins: %s", err)
|
||||
return // Use cached plugins upon failure.
|
||||
}
|
||||
|
||||
if updated {
|
||||
pm.probedPlugins = []VolumePlugin{}
|
||||
for _, plugin := range plugins {
|
||||
if err := pm.initProbedPlugin(plugin); err != nil {
|
||||
for _, event := range events {
|
||||
if event.Op == ProbeAddOrUpdate {
|
||||
if err := pm.initProbedPlugin(event.Plugin); err != nil {
|
||||
glog.Errorf("Error initializing dynamically probed plugin %s; error: %s",
|
||||
plugin.GetPluginName(), err)
|
||||
event.Plugin.GetPluginName(), err)
|
||||
continue
|
||||
}
|
||||
pm.probedPlugins = append(pm.probedPlugins, plugin)
|
||||
pm.probedPlugins[event.Plugin.GetPluginName()] = event.Plugin
|
||||
} else if event.Op == ProbeRemove {
|
||||
delete(pm.probedPlugins, event.Plugin.GetPluginName())
|
||||
} else {
|
||||
glog.Errorf("Unknown Operation on PluginName: %s.",
|
||||
event.Plugin.GetPluginName())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ListVolumePluginWithLimits returns plugins that have volume limits on nodes
|
||||
func (pm *VolumePluginMgr) ListVolumePluginWithLimits() []VolumePluginWithAttachLimits {
|
||||
matchedPlugins := []VolumePluginWithAttachLimits{}
|
||||
for _, v := range pm.plugins {
|
||||
if plugin, ok := v.(VolumePluginWithAttachLimits); ok {
|
||||
matchedPlugins = append(matchedPlugins, plugin)
|
||||
}
|
||||
}
|
||||
return matchedPlugins
|
||||
}
|
||||
|
||||
// FindPersistentPluginBySpec looks for a persistent volume plugin that can
|
||||
// support a given volume specification. If no plugin is found, return an
|
||||
// error
|
||||
@ -577,6 +638,20 @@ func (pm *VolumePluginMgr) FindPersistentPluginBySpec(spec *Spec) (PersistentVol
|
||||
return nil, fmt.Errorf("no persistent volume plugin matched")
|
||||
}
|
||||
|
||||
// FindVolumePluginWithLimitsBySpec returns volume plugin that has a limit on how many
|
||||
// of them can be attached to a node
|
||||
func (pm *VolumePluginMgr) FindVolumePluginWithLimitsBySpec(spec *Spec) (VolumePluginWithAttachLimits, error) {
|
||||
volumePlugin, err := pm.FindPluginBySpec(spec)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Could not find volume plugin for spec : %#v", spec)
|
||||
}
|
||||
|
||||
if limitedPlugin, ok := volumePlugin.(VolumePluginWithAttachLimits); ok {
|
||||
return limitedPlugin, nil
|
||||
}
|
||||
return nil, fmt.Errorf("no plugin with limits found")
|
||||
}
|
||||
|
||||
// FindPersistentPluginByName fetches a persistent volume plugin by name. If
|
||||
// no plugin is found, returns error.
|
||||
func (pm *VolumePluginMgr) FindPersistentPluginByName(name string) (PersistentVolumePlugin, error) {
|
||||
@ -803,5 +878,5 @@ func ValidateRecyclerPodTemplate(pod *v1.Pod) error {
|
||||
|
||||
type dummyPluginProber struct{}
|
||||
|
||||
func (*dummyPluginProber) Init() error { return nil }
|
||||
func (*dummyPluginProber) Probe() (bool, []VolumePlugin, error) { return false, nil, nil }
|
||||
func (*dummyPluginProber) Init() error { return nil }
|
||||
func (*dummyPluginProber) Probe() ([]ProbeEvent, error) { return nil, nil }
|
||||
|
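With this change the prober reports incremental ProbeEvents and the plugin manager keys its probed-plugin cache by plugin name so that ProbeRemove can delete entries. A compact sketch of consuming such events with a name-keyed cache; ProbeEvent and the cache are simplified stand-ins for the real types:

package main

import "fmt"

type ProbeOperation uint32

const (
	ProbeAddOrUpdate ProbeOperation = 1 << iota
	ProbeRemove
)

type ProbeEvent struct {
	PluginName string
	Op         ProbeOperation
}

// applyProbeEvents updates a name-keyed cache the same way
// refreshProbedPlugins does: add or update on ProbeAddOrUpdate, delete on ProbeRemove.
func applyProbeEvents(cache map[string]bool, events []ProbeEvent) {
	for _, e := range events {
		switch e.Op {
		case ProbeAddOrUpdate:
			cache[e.PluginName] = true
		case ProbeRemove:
			delete(cache, e.PluginName)
		default:
			fmt.Println("unknown probe operation for plugin", e.PluginName)
		}
	}
}

func main() {
	cache := map[string]bool{}
	applyProbeEvents(cache, []ProbeEvent{
		{PluginName: "flexvolume/foo", Op: ProbeAddOrUpdate},
		{PluginName: "flexvolume/foo", Op: ProbeRemove},
	})
	fmt.Println(len(cache)) // 0
}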
1
vendor/k8s.io/kubernetes/pkg/volume/portworx/BUILD
generated
vendored
1
vendor/k8s.io/kubernetes/pkg/volume/portworx/BUILD
generated
vendored
@ -15,6 +15,7 @@ go_test(
|
||||
"//pkg/volume:go_default_library",
|
||||
"//pkg/volume/testing:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/client-go/util/testing:go_default_library",
|
||||
],
|
||||
|
33
vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx.go
generated
vendored
33
vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx.go
generated
vendored
@ -50,6 +50,7 @@ var _ volume.VolumePlugin = &portworxVolumePlugin{}
|
||||
var _ volume.PersistentVolumePlugin = &portworxVolumePlugin{}
|
||||
var _ volume.DeletableVolumePlugin = &portworxVolumePlugin{}
|
||||
var _ volume.ProvisionableVolumePlugin = &portworxVolumePlugin{}
|
||||
var _ volume.ExpandableVolumePlugin = &portworxVolumePlugin{}
|
||||
|
||||
const (
|
||||
portworxVolumePluginName = "kubernetes.io/portworx-volume"
|
||||
@ -171,6 +172,24 @@ func (plugin *portworxVolumePlugin) newProvisionerInternal(options volume.Volume
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (plugin *portworxVolumePlugin) RequiresFSResize() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (plugin *portworxVolumePlugin) ExpandVolumeDevice(
|
||||
spec *volume.Spec,
|
||||
newSize resource.Quantity,
|
||||
oldSize resource.Quantity) (resource.Quantity, error) {
|
||||
glog.V(4).Infof("Expanding: %s from %v to %v", spec.Name(), oldSize, newSize)
|
||||
err := plugin.util.ResizeVolume(spec, newSize, plugin.host)
|
||||
if err != nil {
|
||||
return oldSize, err
|
||||
}
|
||||
|
||||
glog.V(4).Infof("Successfully resized %s to %v", spec.Name(), newSize)
|
||||
return newSize, nil
|
||||
}
|
||||
|
||||
func (plugin *portworxVolumePlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
|
||||
portworxVolume := &v1.Volume{
|
||||
Name: volumeName,
|
||||
@ -206,7 +225,7 @@ func getVolumeSource(
|
||||
// Abstract interface to PD operations.
|
||||
type portworxManager interface {
|
||||
// Creates a volume
|
||||
CreateVolume(provisioner *portworxVolumeProvisioner) (volumeID string, volumeSizeGB int, labels map[string]string, err error)
|
||||
CreateVolume(provisioner *portworxVolumeProvisioner) (volumeID string, volumeSizeGB int64, labels map[string]string, err error)
|
||||
// Deletes a volume
|
||||
DeleteVolume(deleter *portworxVolumeDeleter) error
|
||||
// Attach a volume
|
||||
@ -217,6 +236,8 @@ type portworxManager interface {
|
||||
MountVolume(mounter *portworxVolumeMounter, mountDir string) error
|
||||
// Unmount a volume
|
||||
UnmountVolume(unmounter *portworxVolumeUnmounter, mountDir string) error
|
||||
// Resize a volume
|
||||
ResizeVolume(spec *volume.Spec, newSize resource.Quantity, host volume.VolumeHost) error
|
||||
}
|
||||
|
||||
// portworxVolume volumes are portworx block devices
|
||||
@ -357,12 +378,16 @@ type portworxVolumeProvisioner struct {
|
||||
|
||||
var _ volume.Provisioner = &portworxVolumeProvisioner{}
|
||||
|
||||
func (c *portworxVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
|
||||
func (c *portworxVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
|
||||
if !util.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
|
||||
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
|
||||
}
|
||||
|
||||
volumeID, sizeGB, labels, err := c.manager.CreateVolume(c)
|
||||
if util.CheckPersistentVolumeClaimModeBlock(c.options.PVC) {
|
||||
return nil, fmt.Errorf("%s does not support block volume provisioning", c.plugin.GetPluginName())
|
||||
}
|
||||
|
||||
volumeID, sizeGiB, labels, err := c.manager.CreateVolume(c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -379,7 +404,7 @@ func (c *portworxVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
|
||||
PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy,
|
||||
AccessModes: c.options.PVC.Spec.AccessModes,
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
|
||||
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGiB)),
|
||||
},
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
PortworxVolume: &v1.PortworxVolumeSource{
|
||||
|
9
vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx_test.go
generated
vendored
9
vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx_test.go
generated
vendored
@ -23,6 +23,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
utiltesting "k8s.io/client-go/util/testing"
|
||||
"k8s.io/kubernetes/pkg/util/mount"
|
||||
@ -106,7 +107,7 @@ func (fake *fakePortworxManager) UnmountVolume(c *portworxVolumeUnmounter, mount
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fake *fakePortworxManager) CreateVolume(c *portworxVolumeProvisioner) (volumeID string, volumeSizeGB int, labels map[string]string, err error) {
|
||||
func (fake *fakePortworxManager) CreateVolume(c *portworxVolumeProvisioner) (volumeID string, volumeSizeGB int64, labels map[string]string, err error) {
|
||||
labels = make(map[string]string)
|
||||
labels["fakeportworxmanager"] = "yes"
|
||||
return PortworxTestVolume, 100, labels, nil
|
||||
@ -119,6 +120,10 @@ func (fake *fakePortworxManager) DeleteVolume(cd *portworxVolumeDeleter) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fake *fakePortworxManager) ResizeVolume(spec *volume.Spec, newSize resource.Quantity, volumeHost volume.VolumeHost) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestPlugin(t *testing.T) {
|
||||
tmpDir, err := utiltesting.MkTmpdir("portworxVolumeTest")
|
||||
if err != nil {
|
||||
@ -199,7 +204,7 @@ func TestPlugin(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Errorf("Error creating a new provisioner:%v", err)
|
||||
}
|
||||
persistentSpec, err := provisioner.Provision()
|
||||
persistentSpec, err := provisioner.Provision(nil, nil)
|
||||
if err != nil {
|
||||
t.Errorf("Provision() failed: %v", err)
|
||||
}
|
||||
|
63
vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx_util.go
generated
vendored
63
vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx_util.go
generated
vendored
@ -17,6 +17,8 @@ limitations under the License.
|
||||
package portworx
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/golang/glog"
|
||||
osdapi "github.com/libopenstorage/openstorage/api"
|
||||
osdclient "github.com/libopenstorage/openstorage/api/client"
|
||||
@ -24,6 +26,7 @@ import (
|
||||
osdspec "github.com/libopenstorage/openstorage/api/spec"
|
||||
volumeapi "github.com/libopenstorage/openstorage/volume"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
@ -45,7 +48,7 @@ type PortworxVolumeUtil struct {
|
||||
}
|
||||
|
||||
// CreateVolume creates a Portworx volume.
|
||||
func (util *PortworxVolumeUtil) CreateVolume(p *portworxVolumeProvisioner) (string, int, map[string]string, error) {
|
||||
func (util *PortworxVolumeUtil) CreateVolume(p *portworxVolumeProvisioner) (string, int64, map[string]string, error) {
|
||||
driver, err := util.getPortworxDriver(p.plugin.host, false /*localOnly*/)
|
||||
if err != nil || driver == nil {
|
||||
glog.Errorf("Failed to get portworx driver. Err: %v", err)
|
||||
@ -55,8 +58,8 @@ func (util *PortworxVolumeUtil) CreateVolume(p *portworxVolumeProvisioner) (stri
|
||||
glog.Infof("Creating Portworx volume for PVC: %v", p.options.PVC.Name)
|
||||
|
||||
capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
|
||||
// Portworx Volumes are specified in GB
|
||||
requestGB := int(volutil.RoundUpSize(capacity.Value(), 1024*1024*1024))
|
||||
// Portworx Volumes are specified in GiB
|
||||
requestGiB := volutil.RoundUpToGiB(capacity)
|
||||
|
||||
// Perform a best-effort parsing of parameters. Portworx 1.2.9 and later parses volume parameters from
|
||||
// spec.VolumeLabels. So even if below SpecFromOpts() fails to parse certain parameters or
|
||||
@ -71,7 +74,8 @@ func (util *PortworxVolumeUtil) CreateVolume(p *portworxVolumeProvisioner) (stri
|
||||
// Pass all parameters as volume labels for Portworx server-side processing.
|
||||
spec.VolumeLabels = p.options.Parameters
|
||||
// Update the requested size in the spec
|
||||
spec.Size = uint64(requestGB * 1024 * 1024 * 1024)
|
||||
spec.Size = uint64(requestGiB * volutil.GIB)
|
||||
|
||||
// Change the Portworx Volume name to PV name
|
||||
if locator == nil {
|
||||
locator = &osdapi.VolumeLocator{
|
||||
@ -99,7 +103,7 @@ func (util *PortworxVolumeUtil) CreateVolume(p *portworxVolumeProvisioner) (stri
|
||||
}
|
||||
|
||||
glog.Infof("Successfully created Portworx volume for PVC: %v", p.options.PVC.Name)
|
||||
return volumeID, requestGB, nil, err
|
||||
return volumeID, requestGiB, nil, err
|
||||
}
|
||||
|
||||
// DeleteVolume deletes a Portworx volume
|
||||
@ -182,6 +186,55 @@ func (util *PortworxVolumeUtil) UnmountVolume(u *portworxVolumeUnmounter, mountP
|
||||
return nil
|
||||
}
|
||||
|
||||
func (util *PortworxVolumeUtil) ResizeVolume(spec *volume.Spec, newSize resource.Quantity, volumeHost volume.VolumeHost) error {
|
||||
driver, err := util.getPortworxDriver(volumeHost, false /*localOnly*/)
|
||||
if err != nil || driver == nil {
|
||||
glog.Errorf("Failed to get portworx driver. Err: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
vols, err := driver.Inspect([]string{spec.Name()})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(vols) != 1 {
|
||||
return fmt.Errorf("failed to inspect Portworx volume: %s. Found: %d volumes", spec.Name(), len(vols))
|
||||
}
|
||||
|
||||
vol := vols[0]
|
||||
newSizeInBytes := uint64(volutil.RoundUpToGiB(newSize) * volutil.GIB)
|
||||
if vol.Spec.Size >= newSizeInBytes {
|
||||
glog.Infof("Portworx volume: %s already at size: %d greater than or equal to new "+
|
||||
"requested size: %d. Skipping resize.", vol.Spec.Size, newSizeInBytes)
|
||||
return nil
|
||||
}
|
||||
|
||||
vol.Spec.Size = newSizeInBytes
|
||||
err = driver.Set(spec.Name(), vol.Locator, vol.Spec)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// check if the volume's size actually got updated
|
||||
vols, err = driver.Inspect([]string{spec.Name()})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(vols) != 1 {
|
||||
return fmt.Errorf("failed to inspect resized Portworx volume: %s. Found: %d volumes", spec.Name(), len(vols))
|
||||
}
|
||||
|
||||
updatedVol := vols[0]
|
||||
if updatedVol.Spec.Size < vol.Spec.Size {
|
||||
return fmt.Errorf("Portworx volume: %s doesn't match expected size after resize. expected:%v actual:%v",
|
||||
spec.Name(), vol.Spec.Size, updatedVol.Spec.Size)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func isClientValid(client *osdclient.Client) (bool, error) {
|
||||
if client == nil {
|
||||
return false, nil
|
||||
|
3
vendor/k8s.io/kubernetes/pkg/volume/projected/BUILD
generated
vendored
3
vendor/k8s.io/kubernetes/pkg/volume/projected/BUILD
generated
vendored
@ -28,6 +28,7 @@ go_library(
|
||||
srcs = ["projected.go"],
|
||||
importpath = "k8s.io/kubernetes/pkg/volume/projected",
|
||||
deps = [
|
||||
"//pkg/features:go_default_library",
|
||||
"//pkg/util/strings:go_default_library",
|
||||
"//pkg/volume:go_default_library",
|
||||
"//pkg/volume/configmap:go_default_library",
|
||||
@ -35,11 +36,13 @@ go_library(
|
||||
"//pkg/volume/secret:go_default_library",
|
||||
"//pkg/volume/util:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/k8s.io/api/authentication/v1:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
|
63
vendor/k8s.io/kubernetes/pkg/volume/projected/projected.go
generated
vendored
63
vendor/k8s.io/kubernetes/pkg/volume/projected/projected.go
generated
vendored
@ -21,18 +21,22 @@ import (
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/glog"
|
||||
authenticationv1 "k8s.io/api/authentication/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
utilstrings "k8s.io/kubernetes/pkg/util/strings"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
"k8s.io/kubernetes/pkg/volume/configmap"
|
||||
"k8s.io/kubernetes/pkg/volume/downwardapi"
|
||||
"k8s.io/kubernetes/pkg/volume/secret"
|
||||
volumeutil "k8s.io/kubernetes/pkg/volume/util"
|
||||
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
// ProbeVolumePlugins is the entry point for plugin detection in a package.
|
||||
@ -45,9 +49,10 @@ const (
|
||||
)
|
||||
|
||||
type projectedPlugin struct {
|
||||
host volume.VolumeHost
|
||||
getSecret func(namespace, name string) (*v1.Secret, error)
|
||||
getConfigMap func(namespace, name string) (*v1.ConfigMap, error)
|
||||
host volume.VolumeHost
|
||||
getSecret func(namespace, name string) (*v1.Secret, error)
|
||||
getConfigMap func(namespace, name string) (*v1.ConfigMap, error)
|
||||
getServiceAccountToken func(namespace, name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error)
|
||||
}
|
||||
|
||||
var _ volume.VolumePlugin = &projectedPlugin{}
|
||||
@ -70,6 +75,7 @@ func (plugin *projectedPlugin) Init(host volume.VolumeHost) error {
|
||||
plugin.host = host
|
||||
plugin.getSecret = host.GetSecretFunc()
|
||||
plugin.getConfigMap = host.GetConfigMapFunc()
|
||||
plugin.getServiceAccountToken = host.GetServiceAccountTokenFunc()
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -188,18 +194,19 @@ func (s *projectedVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := volumeutil.MakeNestedMountpoints(s.volName, dir, *s.pod); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
data, err := s.collectData()
|
||||
if err != nil {
|
||||
glog.Errorf("Error preparing data for projected volume %v for pod %v/%v: %s", s.volName, s.pod.Namespace, s.pod.Name, err.Error())
|
||||
return err
|
||||
}
|
||||
if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := volumeutil.MakeNestedMountpoints(s.volName, dir, *s.pod); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
writerContext := fmt.Sprintf("pod %v/%v volume %v", s.pod.Namespace, s.pod.Name, s.volName)
|
||||
writer, err := volumeutil.NewAtomicWriter(dir, writerContext)
|
||||
@ -219,7 +226,6 @@ func (s *projectedVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
|
||||
glog.Errorf("Error applying volume ownership settings for group: %v", fsGroup)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -236,7 +242,8 @@ func (s *projectedVolumeMounter) collectData() (map[string]volumeutil.FileProjec
|
||||
errlist := []error{}
|
||||
payload := make(map[string]volumeutil.FileProjection)
|
||||
for _, source := range s.source.Sources {
|
||||
if source.Secret != nil {
|
||||
switch {
|
||||
case source.Secret != nil:
|
||||
optional := source.Secret.Optional != nil && *source.Secret.Optional
|
||||
secretapi, err := s.plugin.getSecret(s.pod.Namespace, source.Secret.Name)
|
||||
if err != nil {
|
||||
@ -261,7 +268,7 @@ func (s *projectedVolumeMounter) collectData() (map[string]volumeutil.FileProjec
|
||||
for k, v := range secretPayload {
|
||||
payload[k] = v
|
||||
}
|
||||
} else if source.ConfigMap != nil {
|
||||
case source.ConfigMap != nil:
|
||||
optional := source.ConfigMap.Optional != nil && *source.ConfigMap.Optional
|
||||
configMap, err := s.plugin.getConfigMap(s.pod.Namespace, source.ConfigMap.Name)
|
||||
if err != nil {
|
||||
@ -286,7 +293,7 @@ func (s *projectedVolumeMounter) collectData() (map[string]volumeutil.FileProjec
|
||||
for k, v := range configMapPayload {
|
||||
payload[k] = v
|
||||
}
|
||||
} else if source.DownwardAPI != nil {
|
||||
case source.DownwardAPI != nil:
|
||||
downwardAPIPayload, err := downwardapi.CollectData(source.DownwardAPI.Items, s.pod, s.plugin.host, s.source.DefaultMode)
|
||||
if err != nil {
|
||||
errlist = append(errlist, err)
|
||||
@ -295,6 +302,34 @@ func (s *projectedVolumeMounter) collectData() (map[string]volumeutil.FileProjec
|
||||
for k, v := range downwardAPIPayload {
|
||||
payload[k] = v
|
||||
}
|
||||
case source.ServiceAccountToken != nil:
|
||||
if !utilfeature.DefaultFeatureGate.Enabled(features.TokenRequestProjection) {
|
||||
errlist = append(errlist, fmt.Errorf("pod request ServiceAccountToken projection but the TokenRequestProjection feature was not enabled"))
|
||||
continue
|
||||
}
|
||||
tp := source.ServiceAccountToken
|
||||
tr, err := s.plugin.getServiceAccountToken(s.pod.Namespace, s.pod.Spec.ServiceAccountName, &authenticationv1.TokenRequest{
|
||||
Spec: authenticationv1.TokenRequestSpec{
|
||||
Audiences: []string{
|
||||
tp.Audience,
|
||||
},
|
||||
ExpirationSeconds: tp.ExpirationSeconds,
|
||||
BoundObjectRef: &authenticationv1.BoundObjectReference{
|
||||
APIVersion: "v1",
|
||||
Kind: "Pod",
|
||||
Name: s.pod.Name,
|
||||
UID: s.pod.UID,
|
||||
},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
errlist = append(errlist, err)
|
||||
continue
|
||||
}
|
||||
payload[tp.Path] = volumeutil.FileProjection{
|
||||
Data: []byte(tr.Status.Token),
|
||||
Mode: 0600,
|
||||
}
|
||||
}
|
||||
}
|
||||
return payload, utilerrors.NewAggregate(errlist)
|
||||
|
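The new ServiceAccountToken case turns a projected source into a TokenRequest against the pod's service account and writes the returned token at the requested path with mode 0600. For reference, a hedged sketch of what the corresponding projected volume source looks like when built with the core/v1 types of this release; the audience, expiration, and path values are illustrative:

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
)

func main() {
	expiration := int64(3600)
	// A projected volume that asks the kubelet for a bound service account
	// token; secret, configMap, and downwardAPI sources could be listed alongside it.
	src := v1.ProjectedVolumeSource{
		Sources: []v1.VolumeProjection{
			{
				ServiceAccountToken: &v1.ServiceAccountTokenProjection{
					Audience:          "my-audience",
					ExpirationSeconds: &expiration,
					Path:              "token",
				},
			},
		},
	}
	fmt.Printf("token will be written to %q inside the volume\n", src.Sources[0].ServiceAccountToken.Path)
}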
6
vendor/k8s.io/kubernetes/pkg/volume/quobyte/quobyte.go
generated
vendored
6
vendor/k8s.io/kubernetes/pkg/volume/quobyte/quobyte.go
generated
vendored
@ -354,11 +354,15 @@ type quobyteVolumeProvisioner struct {
|
||||
options volume.VolumeOptions
|
||||
}
|
||||
|
||||
func (provisioner *quobyteVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
|
||||
func (provisioner *quobyteVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
|
||||
if !util.AccessModesContainedInAll(provisioner.plugin.GetAccessModes(), provisioner.options.PVC.Spec.AccessModes) {
|
||||
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", provisioner.options.PVC.Spec.AccessModes, provisioner.plugin.GetAccessModes())
|
||||
}
|
||||
|
||||
if util.CheckPersistentVolumeClaimModeBlock(provisioner.options.PVC) {
|
||||
return nil, fmt.Errorf("%s does not support block volume provisioning", provisioner.plugin.GetPluginName())
|
||||
}
|
||||
|
||||
if provisioner.options.PVC.Spec.Selector != nil {
|
||||
return nil, fmt.Errorf("claim Selector is not supported")
|
||||
}
|
||||
|
5
vendor/k8s.io/kubernetes/pkg/volume/quobyte/quobyte_util.go
generated
vendored
5
vendor/k8s.io/kubernetes/pkg/volume/quobyte/quobyte_util.go
generated
vendored
@ -18,7 +18,8 @@ package quobyte
|
||||
|
||||
import (
|
||||
"net"
|
||||
"path"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
@ -101,7 +102,7 @@ func (mounter *quobyteMounter) pluginDirIsMounted(pluginDir string) (bool, error
|
||||
}
|
||||
|
||||
func (mounter *quobyteMounter) correctTraillingSlash(regStr string) string {
|
||||
return path.Clean(regStr) + "/"
|
||||
return filepath.Clean(regStr) + string(os.PathSeparator)
|
||||
}
|
||||
|
||||
func validateRegistry(registry string) bool {
|
||||
|
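correctTraillingSlash now normalizes through filepath.Clean and appends the OS-specific separator, so registry and mount paths behave consistently on Windows and Linux. A small standalone sketch of that normalization, not the plugin code itself:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// withTrailingSeparator cleans a path and guarantees exactly one trailing
// OS-specific separator, mirroring the corrected helper in the diff.
func withTrailingSeparator(p string) string {
	return filepath.Clean(p) + string(os.PathSeparator)
}

func main() {
	fmt.Println(withTrailingSeparator("/var/lib/quobyte//"))  // "/var/lib/quobyte/" on Linux
	fmt.Println(withTrailingSeparator(`C:\quobyte\mounts\\`)) // cleaned with "\" appended on Windows
}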
2
vendor/k8s.io/kubernetes/pkg/volume/rbd/BUILD
generated
vendored
2
vendor/k8s.io/kubernetes/pkg/volume/rbd/BUILD
generated
vendored
@ -17,6 +17,7 @@ go_library(
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/pkg/volume/rbd",
|
||||
deps = [
|
||||
"//pkg/features:go_default_library",
|
||||
"//pkg/util/file:go_default_library",
|
||||
"//pkg/util/mount:go_default_library",
|
||||
"//pkg/util/node:go_default_library",
|
||||
@ -33,6 +34,7 @@ go_library(
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
],
|
||||
)
|
||||
|
18
vendor/k8s.io/kubernetes/pkg/volume/rbd/attacher.go
generated
vendored
18
vendor/k8s.io/kubernetes/pkg/volume/rbd/attacher.go
generated
vendored
@ -189,21 +189,17 @@ func (detacher *rbdDetacher) UnmountDevice(deviceMountPath string) error {
|
||||
glog.Warningf("Warning: Unmount skipped because path does not exist: %v", deviceMountPath)
|
||||
return nil
|
||||
}
|
||||
devicePath, cnt, err := mount.GetDeviceNameFromMount(detacher.mounter, deviceMountPath)
|
||||
devicePath, _, err := mount.GetDeviceNameFromMount(detacher.mounter, deviceMountPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if cnt > 1 {
|
||||
return fmt.Errorf("rbd: more than 1 reference counts at %s", deviceMountPath)
|
||||
}
|
||||
if cnt == 1 {
|
||||
// Unmount the device from the device mount point.
|
||||
glog.V(4).Infof("rbd: unmouting device mountpoint %s", deviceMountPath)
|
||||
if err = detacher.mounter.Unmount(deviceMountPath); err != nil {
|
||||
return err
|
||||
}
|
||||
glog.V(3).Infof("rbd: successfully umount device mountpath %s", deviceMountPath)
|
||||
// Unmount the device from the device mount point.
|
||||
glog.V(4).Infof("rbd: unmouting device mountpoint %s", deviceMountPath)
|
||||
if err = detacher.mounter.Unmount(deviceMountPath); err != nil {
|
||||
return err
|
||||
}
|
||||
glog.V(3).Infof("rbd: successfully umount device mountpath %s", deviceMountPath)
|
||||
|
||||
glog.V(4).Infof("rbd: detaching device %s", devicePath)
|
||||
err = detacher.manager.DetachDisk(detacher.plugin, deviceMountPath, devicePath)
|
||||
if err != nil {
|
||||
|
77
vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd.go
generated
vendored
77
vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd.go
generated
vendored
@ -30,7 +30,9 @@ import (
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/kubernetes/pkg/util/mount"
|
||||
"k8s.io/kubernetes/pkg/util/strings"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
@ -232,6 +234,10 @@ func (plugin *rbdPlugin) createMounterFromVolumeSpecAndPod(spec *volume.Spec, po
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ams, err := getVolumeAccessModes(spec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
secretName, secretNs, err := getSecretNameAndNamespace(spec, pod.Namespace)
|
||||
if err != nil {
|
||||
@ -255,12 +261,13 @@ func (plugin *rbdPlugin) createMounterFromVolumeSpecAndPod(spec *volume.Spec, po
|
||||
}
|
||||
|
||||
return &rbdMounter{
|
||||
rbd: newRBD("", spec.Name(), img, pool, ro, plugin, &RBDUtil{}),
|
||||
Mon: mon,
|
||||
Id: id,
|
||||
Keyring: keyring,
|
||||
Secret: secret,
|
||||
fsType: fstype,
|
||||
rbd: newRBD("", spec.Name(), img, pool, ro, plugin, &RBDUtil{}),
|
||||
Mon: mon,
|
||||
Id: id,
|
||||
Keyring: keyring,
|
||||
Secret: secret,
|
||||
fsType: fstype,
|
||||
accessModes: ams,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@ -319,6 +326,10 @@ func (plugin *rbdPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID,
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ams, err := getVolumeAccessModes(spec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &rbdMounter{
|
||||
rbd: newRBD(podUID, spec.Name(), img, pool, ro, plugin, manager),
|
||||
@ -328,6 +339,7 @@ func (plugin *rbdPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID,
|
||||
Secret: secret,
|
||||
fsType: fstype,
|
||||
mountOptions: volutil.MountOptionFromSpec(spec),
|
||||
accessModes: ams,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@ -434,7 +446,6 @@ func (plugin *rbdPlugin) NewBlockVolumeMapper(spec *volume.Spec, pod *v1.Pod, _
|
||||
uid = pod.UID
|
||||
}
|
||||
secret := ""
|
||||
// var err error
|
||||
if pod != nil {
|
||||
secretName, secretNs, err := getSecretNameAndNamespace(spec, pod.Namespace)
|
||||
if err != nil {
|
||||
@ -509,7 +520,7 @@ func (plugin *rbdPlugin) newUnmapperInternal(volName string, podUID types.UID, m
|
||||
}
|
||||
|
||||
func (plugin *rbdPlugin) getDeviceNameFromOldMountPath(mounter mount.Interface, mountPath string) (string, error) {
|
||||
refs, err := mount.GetMountRefsByDev(mounter, mountPath)
|
||||
refs, err := mounter.GetMountRefs(mountPath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@ -568,7 +579,7 @@ type rbdVolumeProvisioner struct {
|
||||
|
||||
var _ volume.Provisioner = &rbdVolumeProvisioner{}
|
||||
|
||||
func (r *rbdVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
|
||||
func (r *rbdVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
|
||||
if !volutil.AccessModesContainedInAll(r.plugin.GetAccessModes(), r.options.PVC.Spec.AccessModes) {
|
||||
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", r.options.PVC.Spec.AccessModes, r.plugin.GetAccessModes())
|
||||
}
|
||||
@ -590,9 +601,7 @@ func (r *rbdVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
|
||||
switch dstrings.ToLower(k) {
|
||||
case "monitors":
|
||||
arr := dstrings.Split(v, ",")
|
||||
for _, m := range arr {
|
||||
r.Mon = append(r.Mon, m)
|
||||
}
|
||||
r.Mon = append(r.Mon, arr...)
|
||||
case "adminid":
|
||||
r.adminId = v
|
||||
case "adminsecretname":
|
||||
@ -691,6 +700,11 @@ func (r *rbdVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
|
||||
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dMi", sizeMB)),
|
||||
}
|
||||
pv.Spec.MountOptions = r.options.MountOptions
|
||||
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
|
||||
pv.Spec.VolumeMode = r.options.PVC.Spec.VolumeMode
|
||||
}
|
||||
|
||||
return pv, nil
|
||||
}
|
||||
|
||||
@ -767,6 +781,7 @@ type rbdMounter struct {
|
||||
mountOptions []string
|
||||
imageFormat string
|
||||
imageFeatures []string
|
||||
accessModes []v1.PersistentVolumeAccessMode
|
||||
}
|
||||
|
||||
var _ volume.Mounter = &rbdMounter{}
|
||||
@ -863,8 +878,11 @@ func (rbd *rbdDiskMapper) SetUpDevice() (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (rbd *rbdDiskMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, volumeMapName string, podUID types.UID) error {
|
||||
return volutil.MapBlockVolume(devicePath, globalMapPath, volumeMapPath, volumeMapName, podUID)
|
||||
}
|
||||
|
||||
func (rbd *rbd) rbdGlobalMapPath(spec *volume.Spec) (string, error) {
|
||||
var err error
|
||||
mon, err := getVolumeSourceMonitors(spec)
|
||||
if err != nil {
|
||||
return "", err
|
||||
@ -940,14 +958,20 @@ func (rbd *rbdDiskUnmapper) TearDownDevice(mapPath, _ string) error {
|
||||
blkUtil := volumepathhandler.NewBlockVolumePathHandler()
|
||||
loop, err := volumepathhandler.BlockVolumePathHandler.GetLoopDevice(blkUtil, device)
|
||||
if err != nil {
|
||||
return fmt.Errorf("rbd: failed to get loopback for device: %v, err: %v", device, err)
|
||||
if err.Error() != volumepathhandler.ErrDeviceNotFound {
|
||||
return fmt.Errorf("rbd: failed to get loopback for device: %v, err: %v", device, err)
|
||||
}
|
||||
glog.Warning("rbd: loopback for device: % not found", device)
|
||||
} else {
|
||||
if len(loop) != 0 {
|
||||
// Remove loop device before detaching volume since volume detach operation gets busy if volume is opened by loopback.
|
||||
err = volumepathhandler.BlockVolumePathHandler.RemoveLoopDevice(blkUtil, loop)
|
||||
if err != nil {
|
||||
return fmt.Errorf("rbd: failed to remove loopback :%v, err: %v", loop, err)
|
||||
}
|
||||
glog.V(4).Infof("rbd: successfully removed loop device: %s", loop)
|
||||
}
|
||||
}
|
||||
// Remove loop device before detaching volume since volume detach operation gets busy if volume is opened by loopback.
|
||||
err = volumepathhandler.BlockVolumePathHandler.RemoveLoopDevice(blkUtil, loop)
|
||||
if err != nil {
|
||||
return fmt.Errorf("rbd: failed to remove loopback :%v, err: %v", loop, err)
|
||||
}
|
||||
glog.V(4).Infof("rbd: successfully removed loop device: %s", loop)
|
||||
|
||||
err = rbd.manager.DetachBlockDisk(*rbd, mapPath)
|
||||
if err != nil {
|
||||
@ -1043,6 +1067,19 @@ func getVolumeSourceReadOnly(spec *volume.Spec) (bool, error) {
|
||||
return false, fmt.Errorf("Spec does not reference a RBD volume type")
|
||||
}
|
||||
|
||||
func getVolumeAccessModes(spec *volume.Spec) ([]v1.PersistentVolumeAccessMode, error) {
|
||||
// Only PersistentVolumeSpec has AccessModes
|
||||
if spec.PersistentVolume != nil {
|
||||
if spec.PersistentVolume.Spec.RBD != nil {
|
||||
return spec.PersistentVolume.Spec.AccessModes, nil
|
||||
} else {
|
||||
return nil, fmt.Errorf("Spec does not reference a RBD volume type")
|
||||
}
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func parsePodSecret(pod *v1.Pod, secretName string, kubeClient clientset.Interface) (string, error) {
|
||||
secret, err := volutil.GetSecretForPod(pod, secretName, kubeClient)
|
||||
if err != nil {
|
||||
|
5
vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd_test.go
generated
vendored
5
vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd_test.go
generated
vendored
@ -21,6 +21,7 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
@ -357,6 +358,7 @@ func TestPlugin(t *testing.T) {
|
||||
FSType: "ext4",
|
||||
},
|
||||
},
|
||||
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany},
|
||||
},
|
||||
}, false),
|
||||
root: tmpDir,
|
||||
@ -532,6 +534,9 @@ func TestGetDeviceMountPath(t *testing.T) {
|
||||
|
||||
// https://github.com/kubernetes/kubernetes/issues/57744
|
||||
func TestConstructVolumeSpec(t *testing.T) {
|
||||
if runtime.GOOS == "darwin" {
|
||||
t.Skipf("TestConstructVolumeSpec is not supported on GOOS=%s", runtime.GOOS)
|
||||
}
|
||||
tmpDir, err := utiltesting.MkTmpdir("rbd_test")
|
||||
if err != nil {
|
||||
t.Fatalf("error creating temp dir: %v", err)
|
||||
|
16
vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd_util.go
generated
vendored
16
vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd_util.go
generated
vendored
@ -346,6 +346,8 @@ func (util *RBDUtil) rbdUnlock(b rbdMounter) error {
cmd, err = b.exec.Run("rbd", args...)
if err == nil {
glog.V(4).Infof("rbd: successfully remove lock (locker_id: %s) on image: %s/%s with id %s mon %s", lock_id, b.Pool, b.Image, b.Id, mon)
} else {
glog.Warningf("rbd: failed to remove lock (lock_id: %s) on image: %s/%s with id %s mon %s: %v", lock_id, b.Pool, b.Image, b.Id, mon, err)
}
}

@ -372,7 +374,7 @@ func (util *RBDUtil) AttachDisk(b rbdMounter) (string, error) {
nbdToolsFound := false

if !mapped {
nbdToolsFound := checkRbdNbdTools(b.exec)
nbdToolsFound = checkRbdNbdTools(b.exec)
if nbdToolsFound {
devicePath, mapped = waitForPath(b.Pool, b.Image, 1 /*maxRetries*/, true /*useNbdDriver*/)
}
@ -388,12 +390,22 @@ func (util *RBDUtil) AttachDisk(b rbdMounter) (string, error) {
Factor: rbdImageWatcherFactor,
Steps: rbdImageWatcherSteps,
}
needValidUsed := true
// If accessModes contain ReadOnlyMany, we don't need check rbd status of being used.
if b.accessModes != nil {
for _, v := range b.accessModes {
if v != v1.ReadWriteOnce {
needValidUsed = false
break
}
}
}
err := wait.ExponentialBackoff(backoff, func() (bool, error) {
used, rbdOutput, err := util.rbdStatus(&b)
if err != nil {
return false, fmt.Errorf("fail to check rbd image status with: (%v), rbd output: (%s)", err, rbdOutput)
}
return !used, nil
return !needValidUsed || !used, nil
})
// Return error if rbd image has not become available for the specified timeout.
if err == wait.ErrWaitTimeout {
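The hunk above only enforces the "image already in use" check when every requested access mode is ReadWriteOnce; any shared mode (such as ReadOnlyMany) lets the attach proceed even if the image is mapped elsewhere. A minimal sketch of that predicate in isolation; the function name is mine, not part of the vendored code:

package main

import (
    "fmt"

    "k8s.io/api/core/v1"
)

// needsInUseCheck mirrors the needValidUsed logic introduced above: the
// exclusive-use watcher check is only required when all modes are ReadWriteOnce.
func needsInUseCheck(modes []v1.PersistentVolumeAccessMode) bool {
    for _, m := range modes {
        if m != v1.ReadWriteOnce {
            return false
        }
    }
    return true
}

func main() {
    fmt.Println(needsInUseCheck([]v1.PersistentVolumeAccessMode{v1.ReadWriteOnce})) // true
    fmt.Println(needsInUseCheck([]v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}))  // false
    fmt.Println(needsInUseCheck(nil))                                               // true, matching needValidUsed's default
}
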
4
vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_client.go
generated
vendored
@ -417,7 +417,7 @@ func (c *sioClient) WaitForAttachedDevice(token string) (string, error) {
return "", err
}
go func() {
glog.V(4).Infof(log("waiting for volume %s to be mapped/attached", token))
glog.V(4).Info(log("waiting for volume %s to be mapped/attached", token))
}()
if path, ok := devMap[token]; ok {
glog.V(4).Info(log("device %s mapped to vol %s", path, token))
@ -451,7 +451,7 @@ func (c *sioClient) WaitForDetachedDevice(token string) error {
return err
}
go func() {
glog.V(4).Infof(log("waiting for volume %s to be unmapped/detached", token))
glog.V(4).Info(log("waiting for volume %s to be unmapped/detached", token))
}()
// cant find vol id, then ok.
if _, ok := devMap[token]; !ok {
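Both hunks switch glog.V(4).Infof to glog.V(4).Info because the package's log(...) helper already returns a fully formatted string; handing a pre-formatted string to Infof would re-interpret any literal '%' it happens to contain. A small sketch of the pattern, with a stand-in log helper (an assumption, since the real one is unexported):

package main

import (
    "flag"
    "fmt"

    "github.com/golang/glog"
)

// log is a stand-in for the scaleio package's unexported helper; it is
// assumed to prefix and fmt.Sprintf-format the message.
func log(msg string, parts ...interface{}) string {
    return "scaleio: " + fmt.Sprintf(msg, parts...)
}

func main() {
    flag.Parse()
    flag.Set("logtostderr", "true") // send demo output to stderr instead of files
    flag.Set("v", "4")              // enable V(4) output for the demo
    token := "vol-123"
    // Info, not Infof: formatting already happened inside log(), so the
    // outer call should treat the result as an opaque string.
    glog.V(4).Info(log("waiting for volume %s to be mapped/attached", token))
    glog.Flush()
}
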
1
vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_plugin.go
generated
vendored
@ -27,7 +27,6 @@ import (
)

const (
sioName = "scaleio"
sioPluginName = "kubernetes.io/scaleio"
sioConfigFileName = "sioconf.dat"
)
4
vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_util_test.go
generated
vendored
@ -159,11 +159,11 @@ func TestUtilSaveConfig(t *testing.T) {
confKey.sslEnabled: "false",
}
if err := saveConfig(config, data); err != nil {
t.Fatal("failed while saving data", err)
t.Fatalf("failed while saving data: %v", err)
}
file, err := os.Open(config)
if err != nil {
t.Fatal("failed to open conf file: ", file)
t.Fatalf("failed to open conf file %s: %v", config, err)
}
defer file.Close()
dataRcvd := map[string]string{}
6
vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_volume.go
generated
vendored
@ -252,13 +252,17 @@ func (v *sioVolume) Delete() error {
// ************************
var _ volume.Provisioner = &sioVolume{}

func (v *sioVolume) Provision() (*api.PersistentVolume, error) {
func (v *sioVolume) Provision(selectedNode *api.Node, allowedTopologies []api.TopologySelectorTerm) (*api.PersistentVolume, error) {
glog.V(4).Info(log("attempting to dynamically provision pvc %v", v.options.PVC.Name))

if !util.AccessModesContainedInAll(v.plugin.GetAccessModes(), v.options.PVC.Spec.AccessModes) {
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", v.options.PVC.Spec.AccessModes, v.plugin.GetAccessModes())
}

if util.CheckPersistentVolumeClaimModeBlock(v.options.PVC) {
return nil, fmt.Errorf("%s does not support block volume provisioning", v.plugin.GetPluginName())
}

// setup volume attrributes
genName := v.generateName("k8svol", 11)
var oneGig int64 = 1024 * 1024 * 1024
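This is the interface change that recurs through the rest of this diff (scaleio, storageos, the testing fake): Provision now accepts a pre-selected node and a list of allowed topologies, and call sites that do not use topology-aware provisioning simply pass nil, nil, as the updated tests below show. A minimal sketch of a type conforming to the new shape; the nopProvisioner name is mine, not part of the vendored code:

package main

import (
    "fmt"

    "k8s.io/api/core/v1"
)

// nopProvisioner illustrates the updated Provision signature only; real
// plugins also validate access modes and volume mode as in the hunk above.
type nopProvisioner struct{}

func (p *nopProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
    if selectedNode != nil || len(allowedTopologies) > 0 {
        return nil, fmt.Errorf("topology-aware provisioning is not supported by this sketch")
    }
    return &v1.PersistentVolume{}, nil
}

func main() {
    pv, err := (&nopProvisioner{}).Provision(nil, nil) // existing callers just pass nil, nil
    fmt.Println(pv != nil, err)
}
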
6
vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_volume_test.go
generated
vendored
@ -296,7 +296,7 @@ func TestVolumeProvisioner(t *testing.T) {
}
sioVol.sioMgr.client = sio

spec, err := provisioner.Provision()
spec, err := provisioner.Provision(nil, nil)
if err != nil {
t.Fatalf("call to Provision() failed: %v", err)
}
@ -467,7 +467,7 @@ func TestVolumeProvisionerWithZeroCapacity(t *testing.T) {
}
sioVol.sioMgr.client = sio

_, err = provisioner.Provision()
_, err = provisioner.Provision(nil, nil)
if err == nil {
t.Fatalf("call to Provision() should fail with invalid capacity")
}
@ -516,7 +516,7 @@ func TestVolumeProvisionerWithSecretNamespace(t *testing.T) {
}
sioVol.sioMgr.client = sio

spec, err := sioVol.Provision()
spec, err := sioVol.Provision(nil, nil)
if err != nil {
t.Fatalf("call to Provision() failed: %v", err)
}
14
vendor/k8s.io/kubernetes/pkg/volume/secret/secret.go
generated
vendored
@ -190,12 +190,6 @@ func (b *secretVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
if err != nil {
return err
}
if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
return err
}
if err := volumeutil.MakeNestedMountpoints(b.volName, dir, b.pod); err != nil {
return err
}

optional := b.source.Optional != nil && *b.source.Optional
secret, err := b.getSecret(b.pod.Namespace, b.source.SecretName)
@ -212,6 +206,13 @@ func (b *secretVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
}
}

if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
return err
}
if err := volumeutil.MakeNestedMountpoints(b.volName, dir, b.pod); err != nil {
return err
}

totalBytes := totalSecretBytes(secret)
glog.V(3).Infof("Received secret %v/%v containing (%v) pieces of data, %v total bytes",
b.pod.Namespace,
@ -242,7 +243,6 @@ func (b *secretVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
glog.Errorf("Error applying volume ownership settings for group: %v", fsGroup)
return err
}

return nil
}
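The two hunks above move the wrapped emptyDir setup and nested-mountpoint creation to after the secret lookup, so a missing non-optional secret fails the mount before anything is prepared on disk. A condensed sketch of the resulting ordering; getSecret, setUpWrappedVolume, and makeNestedMountpoints are placeholders of mine, not the vendored functions:

package main

import (
    "errors"
    "fmt"
)

var errNotFound = errors.New("secret not found")

// Placeholders standing in for the real kubelet plumbing.
func getSecret(namespace, name string) (map[string][]byte, error) { return nil, errNotFound }
func setUpWrappedVolume(dir string) error                         { return nil }
func makeNestedMountpoints(volName, dir string) error             { return nil }

// setUpAt sketches the reordered flow: resolve the secret first (honouring
// `optional`), and only then prepare the wrapper volume and nested mounts.
func setUpAt(dir string, optional bool) error {
    secret, err := getSecret("default", "my-secret")
    if err != nil {
        if !(errors.Is(err, errNotFound) && optional) {
            return fmt.Errorf("couldn't get secret: %v", err)
        }
        secret = map[string][]byte{}
    }
    if err := setUpWrappedVolume(dir); err != nil {
        return err
    }
    if err := makeNestedMountpoints("vol", dir); err != nil {
        return err
    }
    _ = secret // the payload would be projected into dir here
    return nil
}

func main() {
    fmt.Println(setUpAt("/tmp/demo", false)) // fails before any setup work
    fmt.Println(setUpAt("/tmp/demo", true))  // optional secret: proceeds with an empty payload
}
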
5
vendor/k8s.io/kubernetes/pkg/volume/storageos/storageos.go
generated
vendored
@ -560,10 +560,13 @@ type storageosProvisioner struct {

var _ volume.Provisioner = &storageosProvisioner{}

func (c *storageosProvisioner) Provision() (*v1.PersistentVolume, error) {
func (c *storageosProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
if !util.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
}
if util.CheckPersistentVolumeClaimModeBlock(c.options.PVC) {
return nil, fmt.Errorf("%s does not support block volume provisioning", c.plugin.GetPluginName())
}

var adminSecretName, adminSecretNamespace string
2
vendor/k8s.io/kubernetes/pkg/volume/storageos/storageos_test.go
generated
vendored
@ -269,7 +269,7 @@ func TestPlugin(t *testing.T) {
t.Errorf("newProvisionerInternal() failed: %v", err)
}

persistentSpec, err := provisioner.Provision()
persistentSpec, err := provisioner.Provision(nil, nil)
if err != nil {
t.Fatalf("Provision() failed: %v", err)
}
6
vendor/k8s.io/kubernetes/pkg/volume/storageos/storageos_util_test.go
generated
vendored
@ -184,9 +184,9 @@ func TestCreateVolume(t *testing.T) {
if len(vol.Labels) == 0 {
t.Error("CreateVolume() Labels are empty")
} else {
var val string
var ok bool
for k, v := range labels {
var val string
var ok bool
if val, ok = vol.Labels[k]; !ok {
t.Errorf("CreateVolume() Label %s not set", k)
}
@ -194,8 +194,6 @@ func TestCreateVolume(t *testing.T) {
t.Errorf("CreateVolume() returned unexpected Label value %s", val)
}
}
var val string
var ok bool
if val, ok = vol.Labels["labelfromapi"]; !ok {
t.Error("CreateVolume() Label from api not set")
}
2
vendor/k8s.io/kubernetes/pkg/volume/testing/BUILD
generated
vendored
@ -22,12 +22,14 @@ go_library(
"//pkg/volume/util/recyclerclient:go_default_library",
"//pkg/volume/util/volumepathhandler:go_default_library",
"//vendor/github.com/stretchr/testify/mock:go_default_library",
"//vendor/k8s.io/api/authentication/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
],
)
73
vendor/k8s.io/kubernetes/pkg/volume/testing/testing.go
generated
vendored
@ -27,12 +27,14 @@ import (
"testing"
"time"

authenticationv1 "k8s.io/api/authentication/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/record"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/util/io"
@ -94,6 +96,10 @@ func (f *fakeVolumeHost) GetVolumeDevicePluginDir(pluginName string) string {
return path.Join(f.rootDir, "plugins", pluginName, "volumeDevices")
}

func (f *fakeVolumeHost) GetPodsDir() string {
return path.Join(f.rootDir, "pods")
}

func (f *fakeVolumeHost) GetPodVolumeDir(podUID types.UID, pluginName, volumeName string) string {
return path.Join(f.rootDir, "pods", string(podUID), "volumes", pluginName, volumeName)
}
@ -178,6 +184,12 @@ func (f *fakeVolumeHost) GetConfigMapFunc() func(namespace, name string) (*v1.Co
}
}

func (f *fakeVolumeHost) GetServiceAccountTokenFunc() func(string, string, *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
return func(namespace, name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
return f.kubeClient.CoreV1().ServiceAccounts(namespace).CreateToken(name, tr)
}
}

func (f *fakeVolumeHost) GetNodeLabels() (map[string]string, error) {
if f.nodeLabels == nil {
f.nodeLabels = map[string]string{"test-label": "test-value"}
@ -189,6 +201,10 @@ func (f *fakeVolumeHost) GetNodeName() types.NodeName {
return types.NodeName(f.nodeName)
}

func (f *fakeVolumeHost) GetEventRecorder() record.EventRecorder {
return nil
}

func ProbeVolumePlugins(config VolumeConfig) []VolumePlugin {
if _, ok := config.OtherAttributes["fake-property"]; ok {
return []VolumePlugin{
@ -250,7 +266,17 @@ func (plugin *FakeVolumePlugin) GetPluginName() string {
}

func (plugin *FakeVolumePlugin) GetVolumeName(spec *Spec) (string, error) {
return spec.Name(), nil
var volumeName string
if spec.Volume != nil && spec.Volume.GCEPersistentDisk != nil {
volumeName = spec.Volume.GCEPersistentDisk.PDName
} else if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.GCEPersistentDisk != nil {
volumeName = spec.PersistentVolume.Spec.GCEPersistentDisk.PDName
}
if volumeName == "" {
volumeName = spec.Name()
}
return volumeName, nil
}

func (plugin *FakeVolumePlugin) CanSupport(spec *Spec) bool {
@ -420,6 +446,15 @@ func (plugin *FakeVolumePlugin) GetDeviceMountRefs(deviceMountPath string) ([]st
return []string{}, nil
}

// Expandable volume support
func (plugin *FakeVolumePlugin) ExpandVolumeDevice(spec *Spec, newSize resource.Quantity, oldSize resource.Quantity) (resource.Quantity, error) {
return resource.Quantity{}, nil
}

func (plugin *FakeVolumePlugin) RequiresFSResize() bool {
return true
}

type FakeFileVolumePlugin struct {
}

@ -484,6 +519,7 @@ type FakeVolume struct {
GetDeviceMountPathCallCount int
SetUpDeviceCallCount int
TearDownDeviceCallCount int
MapDeviceCallCount int
GlobalMapPathCallCount int
PodDeviceMapPathCallCount int
}
@ -614,6 +650,21 @@ func (fv *FakeVolume) GetTearDownDeviceCallCount() int {
return fv.TearDownDeviceCallCount
}

// Block volume support
func (fv *FakeVolume) MapDevice(devicePath, globalMapPath, volumeMapPath, volumeMapName string, pod types.UID) error {
fv.Lock()
defer fv.Unlock()
fv.MapDeviceCallCount++
return nil
}

// Block volume support
func (fv *FakeVolume) GetMapDeviceCallCount() int {
fv.RLock()
defer fv.RUnlock()
return fv.MapDeviceCallCount
}

func (fv *FakeVolume) Attach(spec *Spec, nodeName types.NodeName) (string, error) {
fv.Lock()
defer fv.Unlock()
@ -705,7 +756,7 @@ type FakeProvisioner struct {
Host VolumeHost
}

func (fc *FakeProvisioner) Provision() (*v1.PersistentVolume, error) {
func (fc *FakeProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
fullpath := fmt.Sprintf("/tmp/hostpath_pv/%s", uuid.NewUUID())

pv := &v1.PersistentVolume{
@ -1093,6 +1144,24 @@ func VerifyGetPodDeviceMapPathCallCount(
expectedPodDeviceMapPathCallCount)
}

// VerifyGetMapDeviceCallCount ensures that at least one of the Mappers for this
// plugin has the expectedMapDeviceCallCount number of calls. Otherwise it
// returns an error.
func VerifyGetMapDeviceCallCount(
expectedMapDeviceCallCount int,
fakeVolumePlugin *FakeVolumePlugin) error {
for _, mapper := range fakeVolumePlugin.GetBlockVolumeMapper() {
actualCallCount := mapper.GetMapDeviceCallCount()
if actualCallCount >= expectedMapDeviceCallCount {
return nil
}
}

return fmt.Errorf(
"No Mapper have expected MapdDeviceCallCount. Expected: <%v>.",
expectedMapDeviceCallCount)
}

// GetTestVolumePluginMgr creates, initializes, and returns a test volume plugin
// manager and fake volume plugin using a fake volume host.
func GetTestVolumePluginMgr(
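The new GetServiceAccountTokenFunc on the fake volume host forwards straight to the clientset's TokenRequest subresource call, so token-requesting volume code can be exercised in unit tests. A minimal sketch of that same call driven by the client-go fake clientset; the namespace, account name, and audience are made-up example values, and without a reactor registered for the token subresource the fake may simply return an error, which the sketch just prints:

package main

import (
    "fmt"

    authenticationv1 "k8s.io/api/authentication/v1"
    "k8s.io/client-go/kubernetes/fake"
)

func main() {
    // Fake clientset: no cluster needed, mirroring what fakeVolumeHost.kubeClient
    // typically is in the volume tests.
    client := fake.NewSimpleClientset()

    tr := &authenticationv1.TokenRequest{
        Spec: authenticationv1.TokenRequestSpec{
            Audiences: []string{"api"},
        },
    }
    // Same call the new GetServiceAccountTokenFunc closure makes (client-go of
    // this vintage; newer releases add context and options arguments).
    out, err := client.CoreV1().ServiceAccounts("default").CreateToken("builder", tr)
    fmt.Println(out != nil, err)
}
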
3
vendor/k8s.io/kubernetes/pkg/volume/util/BUILD
generated
vendored
@ -4,6 +4,7 @@ go_library(
name = "go_default_library",
srcs = [
"atomic_writer.go",
"attach_limit.go",
"device_util.go",
"doc.go",
"error.go",
@ -59,6 +60,7 @@ go_library(
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util/types:go_default_library",
"//pkg/volume/util/volumepathhandler:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
@ -91,7 +93,6 @@ go_test(
embed = [":go_default_library"],
deps = [
"//pkg/apis/core/install:go_default_library",
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/slice:go_default_library",
"//pkg/volume:go_default_library",
2
vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer.go
generated
vendored
@ -230,7 +230,7 @@ func validatePayload(payload map[string]FileProjection) (map[string]FileProjecti
return nil, err
}

cleanPayload[path.Clean(k)] = content
cleanPayload[filepath.Clean(k)] = content
}

return cleanPayload, nil
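The atomic writer now keys its payload on filepath.Clean instead of path.Clean: path.Clean only understands forward slashes, while filepath.Clean applies the host OS separator rules, which matters once projected keys can contain OS-specific paths (for example on Windows). A small illustration of the difference:

package main

import (
    "fmt"
    "path"
    "path/filepath"
)

func main() {
    k := `a\b\..\c`
    // path.Clean treats the whole string as a single slash-separated element,
    // so it comes back unchanged on every platform.
    fmt.Println(path.Clean(k)) // a\b\..\c
    // filepath.Clean applies the host OS rules: on Windows the `..` is
    // resolved (a\c); on Linux backslashes are not separators, so the result
    // matches path.Clean.
    fmt.Println(filepath.Clean(k))
}
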
Some files were not shown because too many files have changed in this diff