vendor update for CSI 0.3.0

gman
2018-07-18 16:47:22 +02:00
parent 6f484f92fc
commit 8ea659f0d5
6810 changed files with 438061 additions and 193861 deletions


@@ -120,13 +120,11 @@ func (attacher *cinderDiskAttacher) waitDiskAttached(instanceID, volumeID string
 }
 
 func (attacher *cinderDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
-	volumeSource, _, err := getVolumeSource(spec)
+	volumeID, _, _, err := getVolumeInfo(spec)
 	if err != nil {
 		return "", err
 	}
 
-	volumeID := volumeSource.VolumeID
-
 	instanceID, err := attacher.nodeInstanceID(nodeName)
 	if err != nil {
 		return "", err
@@ -175,15 +173,15 @@ func (attacher *cinderDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nod
 	volumeSpecMap := make(map[string]*volume.Spec)
 	volumeIDList := []string{}
 	for _, spec := range specs {
-		volumeSource, _, err := getVolumeSource(spec)
+		volumeID, _, _, err := getVolumeInfo(spec)
 		if err != nil {
 			glog.Errorf("Error getting volume (%q) source : %v", spec.Name(), err)
 			continue
 		}
 
-		volumeIDList = append(volumeIDList, volumeSource.VolumeID)
+		volumeIDList = append(volumeIDList, volumeID)
 		volumesAttachedCheck[spec] = true
-		volumeSpecMap[volumeSource.VolumeID] = spec
+		volumeSpecMap[volumeID] = spec
 	}
 
 	attachedResult, err := attacher.cinderProvider.DisksAreAttachedByName(nodeName, volumeIDList)
@@ -207,13 +205,11 @@ func (attacher *cinderDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nod
 
 func (attacher *cinderDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) {
 	// NOTE: devicePath is the path as reported by Cinder, which may be incorrect and should not be used. See Issue #33128
-	volumeSource, _, err := getVolumeSource(spec)
+	volumeID, _, _, err := getVolumeInfo(spec)
 	if err != nil {
 		return "", err
 	}
 
-	volumeID := volumeSource.VolumeID
-
 	if devicePath == "" {
 		return "", fmt.Errorf("WaitForAttach failed for Cinder disk %q: devicePath is empty", volumeID)
 	}
@@ -252,12 +248,12 @@ func (attacher *cinderDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath
 
 func (attacher *cinderDiskAttacher) GetDeviceMountPath(
 	spec *volume.Spec) (string, error) {
-	volumeSource, _, err := getVolumeSource(spec)
+	volumeID, _, _, err := getVolumeInfo(spec)
 	if err != nil {
 		return "", err
 	}
 
-	return makeGlobalPDName(attacher.host, volumeSource.VolumeID), nil
+	return makeGlobalPDName(attacher.host, volumeID), nil
 }
 
 // FIXME: this method can be further pruned.
@@ -275,7 +271,7 @@ func (attacher *cinderDiskAttacher) MountDevice(spec *volume.Spec, devicePath st
 		}
 	}
 
-	volumeSource, readOnly, err := getVolumeSource(spec)
+	_, volumeFSType, readOnly, err := getVolumeInfo(spec)
 	if err != nil {
 		return err
 	}
@@ -287,7 +283,7 @@ func (attacher *cinderDiskAttacher) MountDevice(spec *volume.Spec, devicePath st
 	if notMnt {
 		diskMounter := volumeutil.NewSafeFormatAndMountFromHost(cinderVolumePluginName, attacher.host)
 		mountOptions := volumeutil.MountOptionFromSpec(spec, options...)
-		err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions)
+		err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeFSType, mountOptions)
 		if err != nil {
 			os.Remove(deviceMountPath)
 			return err


@@ -393,7 +393,7 @@ func createPVSpec(name string, readOnly bool) *volume.Spec {
 		PersistentVolume: &v1.PersistentVolume{
 			Spec: v1.PersistentVolumeSpec{
 				PersistentVolumeSource: v1.PersistentVolumeSource{
-					Cinder: &v1.CinderVolumeSource{
+					Cinder: &v1.CinderPersistentVolumeSource{
 						VolumeID: name,
 						ReadOnly: readOnly,
 					},
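
The v1.CinderPersistentVolumeSource type is new in this API revision: PersistentVolume specs now take the persistent-volume variant, while inline pod volumes keep using v1.CinderVolumeSource. A minimal sketch of where the new type sits in a PV spec (the helper name and trimmed field set are illustrative, not part of this change):

package main

import v1 "k8s.io/api/core/v1"

// newCinderPV builds a bare-bones PV backed by a Cinder volume.
func newCinderPV(volumeID string, readOnly bool) *v1.PersistentVolume {
	return &v1.PersistentVolume{
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: v1.PersistentVolumeSource{
				// PVs take CinderPersistentVolumeSource; a pod's inline
				// volume still takes *v1.CinderVolumeSource.
				Cinder: &v1.CinderPersistentVolumeSource{
					VolumeID: volumeID,
					ReadOnly: readOnly,
				},
			},
		},
	}
}
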
@@ -712,10 +712,6 @@ func (instances *instances) NodeAddressesByProviderID(ctx context.Context, provi
 	return []v1.NodeAddress{}, errors.New("Not implemented")
 }
 
-func (instances *instances) ExternalID(ctx context.Context, name types.NodeName) (string, error) {
-	return "", errors.New("Not implemented")
-}
-
 func (instances *instances) InstanceID(ctx context.Context, name types.NodeName) (string, error) {
 	return instances.instanceID, nil
 }
@@ -732,12 +728,16 @@ func (instances *instances) InstanceExistsByProviderID(ctx context.Context, prov
 	return false, errors.New("unimplemented")
 }
 
+func (instances *instances) InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error) {
+	return false, errors.New("unimplemented")
+}
+
 func (instances *instances) List(filter string) ([]types.NodeName, error) {
 	return []types.NodeName{}, errors.New("Not implemented")
 }
 
 func (instances *instances) AddSSHKeyToAllInstances(ctx context.Context, user string, keyData []byte) error {
-	return errors.New("Not implemented")
+	return cloudprovider.NotImplemented
 }
 
 func (instances *instances) CurrentNodeName(ctx context.Context, hostname string) (types.NodeName, error) {
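
cloudprovider.NotImplemented is the package's shared sentinel for optional methods a provider does not support, so callers can compare errors directly instead of matching the ad-hoc "Not implemented" strings this fake returned before. A minimal caller-side sketch, assuming the k8s.io/kubernetes/pkg/cloudprovider import path of this vendor drop:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/cloudprovider"
)

// addSSHKey stands in for any optional provider method.
func addSSHKey() error {
	return cloudprovider.NotImplemented
}

func main() {
	// A plain equality check against the sentinel replaces string matching.
	if err := addSSHKey(); err == cloudprovider.NotImplemented {
		fmt.Println("provider does not support SSH key distribution")
	}
}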


@@ -79,6 +79,10 @@ const (
 	cinderVolumePluginName = "kubernetes.io/cinder"
 )
 
+func getPath(uid types.UID, volName string, host volume.VolumeHost) string {
+	return host.GetPodVolumeDir(uid, kstrings.EscapeQualifiedNameForDisk(cinderVolumePluginName), volName)
+}
+
 func (plugin *cinderPlugin) Init(host volume.VolumeHost) error {
 	plugin.host = host
 	plugin.volumeLocks = keymutex.NewKeyMutex()
@@ -90,12 +94,12 @@ func (plugin *cinderPlugin) GetPluginName() string {
 }
 
 func (plugin *cinderPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
-	volumeSource, _, err := getVolumeSource(spec)
+	volumeID, _, _, err := getVolumeInfo(spec)
 	if err != nil {
 		return "", err
 	}
 
-	return volumeSource.VolumeID, nil
+	return volumeID, nil
 }
 
 func (plugin *cinderPlugin) CanSupport(spec *volume.Spec) bool {
@@ -125,22 +129,20 @@ func (plugin *cinderPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.
 }
 
 func (plugin *cinderPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager cdManager, mounter mount.Interface) (volume.Mounter, error) {
-	cinder, readOnly, err := getVolumeSource(spec)
+	pdName, fsType, readOnly, err := getVolumeInfo(spec)
 	if err != nil {
 		return nil, err
 	}
 
-	pdName := cinder.VolumeID
-	fsType := cinder.FSType
-
 	return &cinderVolumeMounter{
 		cinderVolume: &cinderVolume{
-			podUID:  podUID,
-			volName: spec.Name(),
-			pdName:  pdName,
-			mounter: mounter,
-			manager: manager,
-			plugin:  plugin,
+			podUID:          podUID,
+			volName:         spec.Name(),
+			pdName:          pdName,
+			mounter:         mounter,
+			manager:         manager,
+			plugin:          plugin,
+			MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, spec.Name(), plugin.host)),
 		},
 		fsType:   fsType,
 		readOnly: readOnly,
@@ -154,11 +156,12 @@ func (plugin *cinderPlugin) NewUnmounter(volName string, podUID types.UID) (volu
 func (plugin *cinderPlugin) newUnmounterInternal(volName string, podUID types.UID, manager cdManager, mounter mount.Interface) (volume.Unmounter, error) {
 	return &cinderVolumeUnmounter{
 		&cinderVolume{
-			podUID:  podUID,
-			volName: volName,
-			manager: manager,
-			mounter: mounter,
-			plugin:  plugin,
+			podUID:          podUID,
+			volName:         volName,
+			manager:         manager,
+			mounter:         mounter,
+			plugin:          plugin,
+			MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, volName, plugin.host)),
 		}}, nil
 }
@@ -242,7 +245,7 @@ func (plugin *cinderPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*
 var _ volume.ExpandableVolumePlugin = &cinderPlugin{}
 
 func (plugin *cinderPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize resource.Quantity, oldSize resource.Quantity) (resource.Quantity, error) {
-	cinder, _, err := getVolumeSource(spec)
+	volumeID, _, _, err := getVolumeInfo(spec)
 	if err != nil {
 		return oldSize, err
 	}
@@ -251,12 +254,12 @@ func (plugin *cinderPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize resour
 		return oldSize, err
 	}
 
-	expandedSize, err := cloud.ExpandVolume(cinder.VolumeID, oldSize, newSize)
+	expandedSize, err := cloud.ExpandVolume(volumeID, oldSize, newSize)
 	if err != nil {
 		return oldSize, err
 	}
 
-	glog.V(2).Infof("volume %s expanded to new size %d successfully", cinder.VolumeID, int(newSize.Value()))
+	glog.V(2).Infof("volume %s expanded to new size %d successfully", volumeID, int(newSize.Value()))
 	return expandedSize, nil
 }
@@ -303,7 +306,7 @@ type cinderVolume struct {
 	// diskMounter provides the interface that is used to mount the actual block device.
 	blockDeviceMounter mount.Interface
 	plugin             *cinderPlugin
-	volume.MetricsNil
+	volume.MetricsProvider
 }
 
 func (b *cinderVolumeMounter) GetAttributes() volume.Attributes {
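
Replacing the embedded volume.MetricsNil (whose GetMetrics always reported "not supported") with a volume.MetricsProvider field lets each constructor pick the implementation; the mounter and unmounter above wire in volume.NewMetricsStatFS, which reads capacity and usage from statfs on the volume's path. A trimmed sketch of the pattern inside this package (struct and constructor names are illustrative):

// exampleVolume mirrors the embedding used by cinderVolume above.
type exampleVolume struct {
	podUID  types.UID
	volName string
	// The embedded interface satisfies GetMetrics; each instance is
	// handed a concrete provider at construction time.
	volume.MetricsProvider
}

func newExampleVolume(podUID types.UID, volName string, host volume.VolumeHost) *exampleVolume {
	return &exampleVolume{
		podUID:  podUID,
		volName: volName,
		// MetricsStatFS reports real numbers for the mount path instead
		// of MetricsNil's permanent "unsupported" error.
		MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, volName, host)),
	}
}
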
@@ -397,8 +400,7 @@ func makeGlobalPDName(host volume.VolumeHost, devName string) string {
 }
 
 func (cd *cinderVolume) GetPath() string {
-	name := cinderVolumePluginName
-	return cd.plugin.host.GetPodVolumeDir(cd.podUID, kstrings.EscapeQualifiedNameForDisk(name), cd.volName)
+	return getPath(cd.podUID, cd.volName, cd.plugin.host)
 }
 
 type cinderVolumeUnmounter struct {
@@ -484,8 +486,7 @@ type cinderVolumeDeleter struct {
 var _ volume.Deleter = &cinderVolumeDeleter{}
 
 func (r *cinderVolumeDeleter) GetPath() string {
-	name := cinderVolumePluginName
-	return r.plugin.host.GetPodVolumeDir(r.podUID, kstrings.EscapeQualifiedNameForDisk(name), r.volName)
+	return getPath(r.podUID, r.volName, r.plugin.host)
 }
 
 func (r *cinderVolumeDeleter) Delete() error {
@@ -499,11 +500,15 @@ type cinderVolumeProvisioner struct {
 
 var _ volume.Provisioner = &cinderVolumeProvisioner{}
 
-func (c *cinderVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
+func (c *cinderVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
 	if !util.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
 		return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
 	}
 
+	if util.CheckPersistentVolumeClaimModeBlock(c.options.PVC) {
+		return nil, fmt.Errorf("%s does not support block volume provisioning", c.plugin.GetPluginName())
+	}
+
 	volumeID, sizeGB, labels, fstype, err := c.manager.CreateVolume(c)
 	if err != nil {
 		return nil, err
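
The new Provision parameters come from the updated volume.Provisioner interface, which threads topology-aware provisioning inputs through to plugins: selectedNode is set when the StorageClass uses delayed volume binding, and allowedTopologies mirrors the StorageClass field of the same name. Cinder accepts and ignores both for now, which is why the test change below passes Provision(nil, nil). A simplified sketch of the contract (comments are interpretation, not source text):

import v1 "k8s.io/api/core/v1"

// Provisioner, simplified: plugins that cannot honor the hints are
// expected to ignore them rather than fail.
type Provisioner interface {
	// selectedNode is non-nil only for delayed-binding StorageClasses;
	// allowedTopologies is the StorageClass's AllowedTopologies verbatim.
	Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error)
}
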
@@ -524,7 +529,7 @@ func (c *cinderVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
 				v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
 			},
 			PersistentVolumeSource: v1.PersistentVolumeSource{
-				Cinder: &v1.CinderVolumeSource{
+				Cinder: &v1.CinderPersistentVolumeSource{
					VolumeID: volumeID,
					FSType:   fstype,
					ReadOnly: false,
@@ -540,13 +545,13 @@ func (c *cinderVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
 	return pv, nil
 }
 
-func getVolumeSource(spec *volume.Spec) (*v1.CinderVolumeSource, bool, error) {
+func getVolumeInfo(spec *volume.Spec) (string, string, bool, error) {
 	if spec.Volume != nil && spec.Volume.Cinder != nil {
-		return spec.Volume.Cinder, spec.Volume.Cinder.ReadOnly, nil
+		return spec.Volume.Cinder.VolumeID, spec.Volume.Cinder.FSType, spec.Volume.Cinder.ReadOnly, nil
 	} else if spec.PersistentVolume != nil &&
 		spec.PersistentVolume.Spec.Cinder != nil {
-		return spec.PersistentVolume.Spec.Cinder, spec.ReadOnly, nil
+		return spec.PersistentVolume.Spec.Cinder.VolumeID, spec.PersistentVolume.Spec.Cinder.FSType, spec.ReadOnly, nil
 	}
 
-	return nil, false, fmt.Errorf("Spec does not reference a Cinder volume type")
+	return "", "", false, fmt.Errorf("Spec does not reference a Cinder volume type")
 }
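
getVolumeInfo collapses the old two-step pattern (fetch the *v1.CinderVolumeSource, then read its fields) into one call that works for inline volumes and PVs alike, which is what lets every call site above drop its volumeSource local. A hypothetical caller, just to show the shape:

// logCinderVolume is illustrative only; it is not part of this change.
func logCinderVolume(spec *volume.Spec) error {
	volumeID, fsType, readOnly, err := getVolumeInfo(spec)
	if err != nil {
		return err
	}
	glog.V(4).Infof("cinder volume %s (fstype=%q, readOnly=%v)", volumeID, fsType, readOnly)
	return nil
}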


@@ -51,7 +51,7 @@ func TestCanSupport(t *testing.T) {
 		t.Errorf("Expected true")
 	}
 
-	if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{Cinder: &v1.CinderVolumeSource{}}}}}) {
+	if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{Cinder: &v1.CinderPersistentVolumeSource{}}}}}) {
 		t.Errorf("Expected true")
 	}
 }
@@ -196,7 +196,7 @@ func TestPlugin(t *testing.T) {
 		PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
 	}
 	provisioner, err := plug.(*cinderPlugin).newProvisionerInternal(options, &fakePDManager{0})
-	persistentSpec, err := provisioner.Provision()
+	persistentSpec, err := provisioner.Provision(nil, nil)
 	if err != nil {
 		t.Errorf("Provision() failed: %v", err)
 	}