vendor update for CSI 0.3.0

gman
2018-07-18 16:47:22 +02:00
parent 6f484f92fc
commit 8ea659f0d5
6810 changed files with 438061 additions and 193861 deletions

View File

@@ -17,16 +17,21 @@ limitations under the License.
package gce_pd
import (
"context"
"fmt"
"os"
"path"
"strconv"
"strings"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/util/mount"
kstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
@@ -47,11 +52,24 @@ var _ volume.PersistentVolumePlugin = &gcePersistentDiskPlugin{}
var _ volume.DeletableVolumePlugin = &gcePersistentDiskPlugin{}
var _ volume.ProvisionableVolumePlugin = &gcePersistentDiskPlugin{}
var _ volume.ExpandableVolumePlugin = &gcePersistentDiskPlugin{}
var _ volume.VolumePluginWithAttachLimits = &gcePersistentDiskPlugin{}
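The new VolumePluginWithAttachLimits assertion is what obliges the plugin to implement the GetVolumeLimits and VolumeLimitKey methods added below. For orientation, the interface in pkg/volume/plugins.go has roughly this shape (a sketch of the interface from the same tree, not part of this diff):

type VolumePluginWithAttachLimits interface {
	VolumePlugin
	// GetVolumeLimits returns a map of resource names to attach limits.
	GetVolumeLimits() (map[string]int64, error)
	// VolumeLimitKey returns the resource key under which the limit is reported.
	VolumeLimitKey(spec *Spec) string
}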
const (
gcePersistentDiskPluginName = "kubernetes.io/gce-pd"
)
// These constants map the machine type (number of CPUs) to the limit of
// persistent disks that can be attached to an instance. See the GCE docs:
// https://cloud.google.com/compute/docs/disks/#increased_persistent_disk_limits
const (
OneCPU = 1
EightCPUs = 8
VolumeLimit16 = 16
VolumeLimit32 = 32
VolumeLimit64 = 64
VolumeLimit128 = 128
)
func getPath(uid types.UID, volName string, host volume.VolumeHost) string {
return host.GetPodVolumeDir(uid, kstrings.EscapeQualifiedNameForDisk(gcePersistentDiskPluginName), volName)
}
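As an aside, because the plugin name is escaped for use as a directory name, getPath resolves to the usual per-pod volumes directory; a hypothetical example (pod UID and volume name made up):

// getPath("abc-123", "my-pd", host) yields something like:
//   /var/lib/kubelet/pods/abc-123/volumes/kubernetes.io~gce-pd/my-pd
// (EscapeQualifiedNameForDisk rewrites "kubernetes.io/gce-pd" as "kubernetes.io~gce-pd")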
@@ -98,6 +116,58 @@ func (plugin *gcePersistentDiskPlugin) GetAccessModes() []v1.PersistentVolumeAcc
}
}
func (plugin *gcePersistentDiskPlugin) GetVolumeLimits() (map[string]int64, error) {
volumeLimits := map[string]int64{
util.GCEVolumeLimitKey: VolumeLimit16,
}
cloud := plugin.host.GetCloudProvider()
// If we can't fetch the cloud provider, return an error so that an
// external CCM or an admin can set the limit instead; returning the
// default values from here would mean no one could override them.
if cloud == nil {
return nil, fmt.Errorf("no cloudprovider present")
}
if cloud.ProviderName() != gcecloud.ProviderName {
return nil, fmt.Errorf("expected gce cloud, got %s", cloud.ProviderName())
}
instances, ok := cloud.Instances()
if !ok {
glog.Warning("Failed to get instances from cloud provider")
return volumeLimits, nil
}
instanceType, err := instances.InstanceType(context.TODO(), plugin.host.GetNodeName())
if err != nil {
glog.Errorf("Failed to get instance type from GCE cloud provider")
return volumeLimits, nil
}
if strings.HasPrefix(instanceType, "n1-") {
splits := strings.Split(instanceType, "-")
if len(splits) < 3 {
return volumeLimits, nil
}
last := splits[2]
if num, err := strconv.Atoi(last); err == nil {
if num == OneCPU {
volumeLimits[util.GCEVolumeLimitKey] = VolumeLimit32
} else if num < EightCPUs {
volumeLimits[util.GCEVolumeLimitKey] = VolumeLimit64
} else {
volumeLimits[util.GCEVolumeLimitKey] = VolumeLimit128
}
}
}
return volumeLimits, nil
}
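To make the mapping concrete, here is a self-contained sketch (not part of the commit) that mirrors the n1 parsing above, with the limits it yields for a few sample machine types:

// pdLimitForInstanceType is a hypothetical helper that mirrors the
// n1-* parsing in GetVolumeLimits, for illustration only.
func pdLimitForInstanceType(instanceType string) int64 {
	if !strings.HasPrefix(instanceType, "n1-") {
		return VolumeLimit16 // non-n1 machine types keep the default
	}
	splits := strings.Split(instanceType, "-") // "n1-standard-4" -> ["n1", "standard", "4"]
	if len(splits) < 3 {
		return VolumeLimit16
	}
	num, err := strconv.Atoi(splits[2])
	if err != nil {
		return VolumeLimit16
	}
	switch {
	case num == OneCPU:
		return VolumeLimit32 // 1 CPU: up to 32 disks
	case num < EightCPUs:
		return VolumeLimit64 // 2-7 CPUs: up to 64 disks
	default:
		return VolumeLimit128 // 8+ CPUs: up to 128 disks
	}
}

// pdLimitForInstanceType("n1-standard-1")  == VolumeLimit32
// pdLimitForInstanceType("n1-standard-16") == VolumeLimit128
// pdLimitForInstanceType("custom-2-4096")  == VolumeLimit16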
func (plugin *gcePersistentDiskPlugin) VolumeLimitKey(spec *volume.Spec) string {
return util.GCEVolumeLimitKey
}
func (plugin *gcePersistentDiskPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
// Inject real implementations here, test through the internal function.
return plugin.newMounterInternal(spec, pod.UID, &GCEDiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
@@ -396,7 +466,7 @@ type gcePersistentDiskProvisioner struct
var _ volume.Provisioner = &gcePersistentDiskProvisioner{}
-func (c *gcePersistentDiskProvisioner) Provision() (*v1.PersistentVolume, error) {
+func (c *gcePersistentDiskProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
if !util.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
}
@@ -448,5 +518,9 @@ func (c *gcePersistentDiskProvisioner) Provision() (*v1.PersistentVolume, error)
}
}
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
pv.Spec.VolumeMode = c.options.PVC.Spec.VolumeMode
}
return pv, nil
}
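Two things changed in Provision: the signature gained selectedNode and allowedTopologies parameters for topology-aware provisioning, and the new gate check copies the claim's VolumeMode onto the PV when the BlockVolume feature is enabled. A minimal sketch of the latter effect (the surrounding wiring is hypothetical):

// Hypothetical illustration of the VolumeMode propagation above:
//
//   block := v1.PersistentVolumeBlock
//   claim.Spec.VolumeMode = &block        // PVC asks for a raw block device
//   pv, _ := provisioner.Provision(nil, nil)
//   // with features.BlockVolume enabled: *pv.Spec.VolumeMode == v1.PersistentVolumeBlock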

View File

@@ -28,6 +28,7 @@ import (
"k8s.io/kubernetes/pkg/util/mount"
kstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
)
@@ -151,6 +152,10 @@ func (b *gcePersistentDiskMapper) SetUpDevice() (string, error) {
return "", nil
}
func (b *gcePersistentDiskMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, volumeMapName string, podUID types.UID) error {
return util.MapBlockVolume(devicePath, globalMapPath, volumeMapPath, volumeMapName, podUID)
}
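MapDevice now delegates to the shared util.MapBlockVolume helper (hence the new util import above), which links the raw device into both the plugin-global and the per-pod device paths. The resulting layout looks roughly like this (paths illustrative, pod UID and disk name made up):

// After MapDevice for a PD named "my-pd" mounted by pod UID "abc-123":
//   globalMapPath: /var/lib/kubelet/plugins/kubernetes.io/gce-pd/volumeDevices/my-pd/abc-123
//   volumeMapPath: /var/lib/kubelet/pods/abc-123/volumeDevices/kubernetes.io~gce-pd/<volName>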
// GetGlobalMapPath returns the global map path and an error.
// The path has the form: plugins/kubernetes.io/{PluginName}/volumeDevices/pdName
func (pd *gcePersistentDisk) GetGlobalMapPath(spec *volume.Spec) (string, error) {

View File

@@ -167,7 +167,7 @@ func TestPlugin(t *testing.T) {
if err != nil {
t.Errorf("Error creating new provisioner:%v", err)
}
-	persistentSpec, err := provisioner.Provision()
+	persistentSpec, err := provisioner.Provision(nil, nil)
if err != nil {
t.Errorf("Provision() failed: %v", err)
}
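For readers of the test: the two nils map to the new Provision parameters, which a hypothetical annotated call makes explicit:

// persistentSpec, err := provisioner.Provision(nil /* selectedNode */, nil /* allowedTopologies */)
// nil/nil preserves the old behavior: provision with no topology constraints.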