vendor update for CSI 0.3.0

vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/BUILD (generated, vendored): 2 lines changed

@@ -19,6 +19,7 @@ go_library(
     deps = [
         "//pkg/cloudprovider:go_default_library",
         "//pkg/cloudprovider/providers/aws:go_default_library",
+        "//pkg/features:go_default_library",
         "//pkg/util/mount:go_default_library",
         "//pkg/util/strings:go_default_library",
         "//pkg/volume:go_default_library",
@@ -29,6 +30,7 @@ go_library(
         "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
+        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
     ],
 )

vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/attacher.go (generated, vendored): 4 lines changed

@@ -59,7 +59,7 @@ func (plugin *awsElasticBlockStorePlugin) GetDeviceMountRefs(deviceMountPath str
 }

 func (attacher *awsElasticBlockStoreAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
-    volumeSource, readOnly, err := getVolumeSource(spec)
+    volumeSource, _, err := getVolumeSource(spec)
     if err != nil {
         return "", err
     }
@@ -68,7 +68,7 @@ func (attacher *awsElasticBlockStoreAttacher) Attach(spec *volume.Spec, nodeName

     // awsCloud.AttachDisk checks if disk is already attached to node and
     // succeeds in that case, so no need to do that separately.
-    devicePath, err := attacher.awsVolumes.AttachDisk(volumeID, nodeName, readOnly)
+    devicePath, err := attacher.awsVolumes.AttachDisk(volumeID, nodeName)
     if err != nil {
         glog.Errorf("Error attaching volume %q to node %q: %+v", volumeID, nodeName, err)
         return "", err
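
Note on the two hunks above: the AWS cloud provider's AttachDisk no longer takes a readOnly flag, presumably because read-only handling is enforced at mount time rather than at attach time, so the attacher now discards the value returned by getVolumeSource. Below is a self-contained sketch of the narrowed call shape; the interface and the fake are illustrative stand-ins, not the vendored aws.Volumes types.

package main

import "fmt"

// Stand-ins for aws.KubernetesVolumeID and types.NodeName so the sketch
// compiles on its own.
type kubernetesVolumeID string
type nodeName string

// attachDisker captures the AttachDisk shape implied by the hunks above:
// the trailing readOnly bool parameter is gone.
type attachDisker interface {
    AttachDisk(diskName kubernetesVolumeID, node nodeName) (devicePath string, err error)
}

// fakeVolumes plays the same role as the fake in attacher_test.go.
type fakeVolumes struct{}

func (fakeVolumes) AttachDisk(diskName kubernetesVolumeID, node nodeName) (string, error) {
    return "/dev/xvdba", nil
}

func main() {
    var v attachDisker = fakeVolumes{}
    dev, err := v.AttachDisk("vol-0123456789abcdef0", "ip-10-0-0-1")
    fmt.Println(dev, err) // /dev/xvdba <nil>
}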

vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/attacher_test.go (generated, vendored): 17 lines changed

@@ -76,15 +76,14 @@ type testcase struct {
 func TestAttachDetach(t *testing.T) {
     diskName := aws.KubernetesVolumeID("disk")
     nodeName := types.NodeName("instance")
-    readOnly := false
-    spec := createVolSpec(diskName, readOnly)
+    spec := createVolSpec(diskName, false)
     attachError := errors.New("Fake attach error")
     detachError := errors.New("Fake detach error")
     tests := []testcase{
         // Successful Attach call
         {
             name: "Attach_Positive",
-            attach: attachCall{diskName, nodeName, readOnly, "/dev/sda", nil},
+            attach: attachCall{diskName, nodeName, "/dev/sda", nil},
             test: func(testcase *testcase) (string, error) {
                 attacher := newAttacher(testcase)
                 return attacher.Attach(spec, nodeName)
@@ -95,7 +94,7 @@ func TestAttachDetach(t *testing.T) {
         // Attach call fails
         {
             name: "Attach_Negative",
-            attach: attachCall{diskName, nodeName, readOnly, "", attachError},
+            attach: attachCall{diskName, nodeName, "", attachError},
             test: func(testcase *testcase) (string, error) {
                 attacher := newAttacher(testcase)
                 return attacher.Attach(spec, nodeName)
@@ -195,7 +194,6 @@ func createPVSpec(name aws.KubernetesVolumeID, readOnly bool) *volume.Spec {
 type attachCall struct {
     diskName aws.KubernetesVolumeID
     nodeName types.NodeName
-    readOnly bool
     retDeviceName string
     ret error
 }
@@ -214,7 +212,7 @@ type diskIsAttachedCall struct {
     ret error
 }

-func (testcase *testcase) AttachDisk(diskName aws.KubernetesVolumeID, nodeName types.NodeName, readOnly bool) (string, error) {
+func (testcase *testcase) AttachDisk(diskName aws.KubernetesVolumeID, nodeName types.NodeName) (string, error) {
     expected := &testcase.attach

     if expected.diskName == "" && expected.nodeName == "" {
@@ -234,12 +232,7 @@ func (testcase *testcase) AttachDisk(diskName aws.KubernetesVolumeID, nodeName t
         return "", errors.New("Unexpected AttachDisk call: wrong nodeName")
     }

-    if expected.readOnly != readOnly {
-        testcase.t.Errorf("Unexpected AttachDisk call: expected readOnly %v, got %v", expected.readOnly, readOnly)
-        return "", errors.New("Unexpected AttachDisk call: wrong readOnly")
-    }
-
-    glog.V(4).Infof("AttachDisk call: %s, %s, %v, returning %q, %v", diskName, nodeName, readOnly, expected.retDeviceName, expected.ret)
+    glog.V(4).Infof("AttachDisk call: %s, %s, returning %q, %v", diskName, nodeName, expected.retDeviceName, expected.ret)

     return expected.retDeviceName, expected.ret
 }

vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_ebs.go (generated, vendored): 57 lines changed

@@ -17,10 +17,11 @@ limitations under the License.
 package aws_ebs

 import (
+    "context"
     "fmt"
     "os"
-    "path"
     "path/filepath"
+    "regexp"
     "strconv"
     "strings"

@@ -29,7 +30,9 @@ import (
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/types"
+    utilfeature "k8s.io/apiserver/pkg/util/feature"
     "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
+    "k8s.io/kubernetes/pkg/features"
     "k8s.io/kubernetes/pkg/util/mount"
     kstrings "k8s.io/kubernetes/pkg/util/strings"
     "k8s.io/kubernetes/pkg/volume"
@@ -94,6 +97,47 @@ func (plugin *awsElasticBlockStorePlugin) SupportsBulkVolumeVerification() bool
     return true
 }

+func (plugin *awsElasticBlockStorePlugin) GetVolumeLimits() (map[string]int64, error) {
+    volumeLimits := map[string]int64{
+        util.EBSVolumeLimitKey: 39,
+    }
+    cloud := plugin.host.GetCloudProvider()
+
+    // if we can't fetch cloudprovider we return an error
+    // hoping external CCM or admin can set it. Returning
+    // default values from here will mean, no one can
+    // override them.
+    if cloud == nil {
+        return nil, fmt.Errorf("No cloudprovider present")
+    }
+
+    if cloud.ProviderName() != aws.ProviderName {
+        return nil, fmt.Errorf("Expected aws cloud, found %s", cloud.ProviderName())
+    }
+
+    instances, ok := cloud.Instances()
+    if !ok {
+        glog.V(3).Infof("Failed to get instances from cloud provider")
+        return volumeLimits, nil
+    }
+
+    instanceType, err := instances.InstanceType(context.TODO(), plugin.host.GetNodeName())
+    if err != nil {
+        glog.Errorf("Failed to get instance type from AWS cloud provider")
+        return volumeLimits, nil
+    }
+
+    if ok, _ := regexp.MatchString("^[cm]5.*", instanceType); ok {
+        volumeLimits[util.EBSVolumeLimitKey] = 25
+    }
+
+    return volumeLimits, nil
+}
+
+func (plugin *awsElasticBlockStorePlugin) VolumeLimitKey(spec *volume.Spec) string {
+    return util.EBSVolumeLimitKey
+}
+
 func (plugin *awsElasticBlockStorePlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
     return []v1.PersistentVolumeAccessMode{
         v1.ReadWriteOnce,
@@ -266,6 +310,7 @@ func (plugin *awsElasticBlockStorePlugin) ExpandVolumeDevice(
 }

 var _ volume.ExpandableVolumePlugin = &awsElasticBlockStorePlugin{}
+var _ volume.VolumePluginWithAttachLimits = &awsElasticBlockStorePlugin{}

 // Abstract interface to PD operations.
 type ebsManager interface {
@@ -387,12 +432,12 @@ func makeGlobalPDPath(host volume.VolumeHost, volumeID aws.KubernetesVolumeID) s
     // Clean up the URI to be more fs-friendly
     name := string(volumeID)
     name = strings.Replace(name, "://", "/", -1)
-    return path.Join(host.GetPluginDir(awsElasticBlockStorePluginName), mount.MountsInGlobalPDPath, name)
+    return filepath.Join(host.GetPluginDir(awsElasticBlockStorePluginName), mount.MountsInGlobalPDPath, name)
 }

 // Reverses the mapping done in makeGlobalPDPath
 func getVolumeIDFromGlobalMount(host volume.VolumeHost, globalPath string) (string, error) {
-    basePath := path.Join(host.GetPluginDir(awsElasticBlockStorePluginName), mount.MountsInGlobalPDPath)
+    basePath := filepath.Join(host.GetPluginDir(awsElasticBlockStorePluginName), mount.MountsInGlobalPDPath)
     rel, err := filepath.Rel(basePath, globalPath)
     if err != nil {
         glog.Errorf("Failed to get volume id from global mount %s - %v", globalPath, err)
@@ -454,7 +499,7 @@ type awsElasticBlockStoreProvisioner struct {

 var _ volume.Provisioner = &awsElasticBlockStoreProvisioner{}

-func (c *awsElasticBlockStoreProvisioner) Provision() (*v1.PersistentVolume, error) {
+func (c *awsElasticBlockStoreProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
     if !util.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
         return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
     }
@@ -508,5 +553,9 @@ func (c *awsElasticBlockStoreProvisioner) Provision() (*v1.PersistentVolume, err
         }
     }

+    if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
+        pv.Spec.VolumeMode = c.options.PVC.Spec.VolumeMode
+    }
+
     return pv, nil
 }
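
The bulk of this file's change is the new GetVolumeLimits and VolumeLimitKey methods, which let the plugin report how many EBS volumes a node can take: 39 by default, 25 on c5/m5 instance families, keyed by util.EBSVolumeLimitKey. The new var _ volume.VolumePluginWithAttachLimits assertion is presumably what wires these methods into the attach-limit checks. A minimal self-contained sketch of the limit decision follows; the key string below is an assumption for the sketch, not necessarily the real constant.

package main

import (
    "fmt"
    "regexp"
)

// ebsLimitKey plays the role of util.EBSVolumeLimitKey from the hunk above;
// the literal value here is assumed for this sketch.
const ebsLimitKey = "attachable-volumes-aws-ebs"

// volumeLimits mirrors the decision in GetVolumeLimits: default to 39
// attachable EBS volumes, drop to 25 for c5/m5 instance types.
func volumeLimits(instanceType string) map[string]int64 {
    limits := map[string]int64{ebsLimitKey: 39}
    if ok, _ := regexp.MatchString("^[cm]5.*", instanceType); ok {
        limits[ebsLimitKey] = 25
    }
    return limits
}

func main() {
    fmt.Println(volumeLimits("m4.xlarge")) // 39
    fmt.Println(volumeLimits("c5.large"))  // 25
}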

vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_ebs_block.go (generated, vendored): 8 lines changed

@@ -18,7 +18,6 @@ package aws_ebs

 import (
     "fmt"
-    "path"
     "path/filepath"
     "strconv"
     "strings"
@@ -30,6 +29,7 @@ import (
     "k8s.io/kubernetes/pkg/util/mount"
     kstrings "k8s.io/kubernetes/pkg/util/strings"
     "k8s.io/kubernetes/pkg/volume"
+    "k8s.io/kubernetes/pkg/volume/util"
     "k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
 )

@@ -156,6 +156,10 @@ func (b *awsElasticBlockStoreMapper) SetUpDevice() (string, error) {
     return "", nil
 }

+func (b *awsElasticBlockStoreMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, volumeMapName string, podUID types.UID) error {
+    return util.MapBlockVolume(devicePath, globalMapPath, volumeMapPath, volumeMapName, podUID)
+}
+
 // GetGlobalMapPath returns global map path and error
 // path: plugins/kubernetes.io/{PluginName}/volumeDevices/volumeID
 //       plugins/kubernetes.io/aws-ebs/volumeDevices/vol-XXXXXX
@@ -164,7 +168,7 @@ func (ebs *awsElasticBlockStore) GetGlobalMapPath(spec *volume.Spec) (string, er
     if err != nil {
         return "", err
     }
-    return path.Join(ebs.plugin.host.GetVolumeDevicePluginDir(awsElasticBlockStorePluginName), string(volumeSource.VolumeID)), nil
+    return filepath.Join(ebs.plugin.host.GetVolumeDevicePluginDir(awsElasticBlockStorePluginName), string(volumeSource.VolumeID)), nil
 }

 // GetPodDeviceMapPath returns pod device map path and volume name
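
The new MapDevice method simply delegates to util.MapBlockVolume. As a rough mental model (an assumption for illustration, not the vendored helper's actual code), mapping a raw block volume means recording the consuming pod under the plugin's global volumeDevices path and exposing the attached device inside the pod's own volumeDevices directory, for example via symlinks:

package main

import (
    "fmt"
    "os"
    "path/filepath"
)

// mapBlockVolume is an illustrative stand-in for util.MapBlockVolume; the
// symlink layout is assumed for this sketch and may differ from the real helper.
func mapBlockVolume(devicePath, globalMapPath, volumeMapPath, volumeMapName, podUID string) error {
    for _, dir := range []string{globalMapPath, volumeMapPath} {
        if err := os.MkdirAll(dir, 0750); err != nil {
            return err
        }
    }
    // Record which pod uses the device under the plugin-global map path ...
    if err := os.Symlink(devicePath, filepath.Join(globalMapPath, podUID)); err != nil && !os.IsExist(err) {
        return err
    }
    // ... and give the pod its device path under its own volumeDevices dir.
    if err := os.Symlink(devicePath, filepath.Join(volumeMapPath, volumeMapName)); err != nil && !os.IsExist(err) {
        return err
    }
    return nil
}

func main() {
    base, err := os.MkdirTemp("", "blockmap")
    if err != nil {
        panic(err)
    }
    defer os.RemoveAll(base)
    // Directory names mirror the layout described in the GetGlobalMapPath
    // comment above: plugins/kubernetes.io/aws-ebs/volumeDevices/<volumeID>.
    err = mapBlockVolume("/dev/null",
        filepath.Join(base, "plugins/kubernetes.io/aws-ebs/volumeDevices/vol-0abc"),
        filepath.Join(base, "pods/poduid/volumeDevices/kubernetes.io~aws-ebs"),
        "vol1",
        "poduid")
    fmt.Println("mapped:", err)
}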

vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_ebs_block_test.go (generated, vendored): 8 lines changed

@@ -18,7 +18,7 @@ package aws_ebs

 import (
     "os"
-    "path"
+    "path/filepath"
     "testing"

     "k8s.io/api/core/v1"
@@ -47,7 +47,7 @@ func TestGetVolumeSpecFromGlobalMapPath(t *testing.T) {
     //deferred clean up
     defer os.RemoveAll(tmpVDir)

-    expectedGlobalPath := path.Join(tmpVDir, testGlobalPath)
+    expectedGlobalPath := filepath.Join(tmpVDir, testGlobalPath)

     //Bad Path
     badspec, err := getVolumeSpecFromGlobalMapPath("")
@@ -102,8 +102,8 @@ func TestGetPodAndPluginMapPaths(t *testing.T) {
     //deferred clean up
     defer os.RemoveAll(tmpVDir)

-    expectedGlobalPath := path.Join(tmpVDir, testGlobalPath)
-    expectedPodPath := path.Join(tmpVDir, testPodPath)
+    expectedGlobalPath := filepath.Join(tmpVDir, testGlobalPath)
+    expectedPodPath := filepath.Join(tmpVDir, testPodPath)

     spec := getTestVolume(false, true /*isBlock*/)
     plugMgr := volume.VolumePluginMgr{}
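
Most of the churn in this and the previous test file is the switch from path.Join to filepath.Join. The difference only shows up off Linux: package path always joins with forward slashes, while path/filepath uses the host operating system's separator, which matters once these paths are built on Windows nodes. A quick illustration:

package main

import (
    "fmt"
    "path"
    "path/filepath"
)

func main() {
    // path.Join is purely slash-separated, regardless of OS.
    fmt.Println(path.Join("plugins", "kubernetes.io", "aws-ebs"))
    // filepath.Join uses the OS separator: the same string on Linux,
    // but backslash-separated on Windows.
    fmt.Println(filepath.Join("plugins", "kubernetes.io", "aws-ebs"))
}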

vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_ebs_test.go (generated, vendored): 6 lines changed

@@ -19,7 +19,7 @@ package aws_ebs
 import (
     "fmt"
     "os"
-    "path"
+    "path/filepath"
     "testing"

     "k8s.io/api/core/v1"
@@ -129,7 +129,7 @@ func TestPlugin(t *testing.T) {
         t.Errorf("Got a nil Mounter")
     }

-    volPath := path.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~aws-ebs/vol1")
+    volPath := filepath.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~aws-ebs/vol1")
     path := mounter.GetPath()
     if path != volPath {
         t.Errorf("Got unexpected path: %s", path)
@@ -173,7 +173,7 @@ func TestPlugin(t *testing.T) {
     if err != nil {
         t.Errorf("Error creating new provisioner:%v", err)
     }
-    persistentSpec, err := provisioner.Provision()
+    persistentSpec, err := provisioner.Provision(nil, nil)
     if err != nil {
         t.Errorf("Provision() failed: %v", err)
     }
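
The last hunk tracks the widened volume.Provisioner interface: Provision now receives the node selected by the scheduler (for delayed volume binding) and any allowed topologies from the StorageClass, and the updated test passes nil for both. A compilable sketch of that shape, using a stub in place of the vendored plugin and assuming the k8s.io/api module is available:

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
)

// provisioner mirrors the updated Provision signature from the diff above.
type provisioner interface {
    Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error)
}

// stubProvisioner is an illustrative stand-in for the AWS EBS provisioner.
type stubProvisioner struct{}

func (s *stubProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
    if selectedNode != nil {
        // A topology-aware provisioner would place the volume near this node.
        fmt.Println("provisioning for node", selectedNode.Name)
    }
    return &v1.PersistentVolume{}, nil
}

func main() {
    var p provisioner = &stubProvisioner{}
    // nil, nil is exactly what the updated test does: no node has been
    // selected and no topology constraints were supplied.
    if _, err := p.Provision(nil, nil); err != nil {
        panic(err)
    }
    fmt.Println("provisioned")
}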