Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-14 18:53:35 +00:00)
vendor updates
vendor/k8s.io/kubernetes/pkg/volume/cinder/attacher.go (generated, vendored): 80 changed lines
@@ -17,6 +17,7 @@ limitations under the License.
 package cinder
 
 import (
+	"context"
 	"fmt"
 	"os"
 	"path"
@@ -27,16 +28,14 @@ import (
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/kubernetes/pkg/cloudprovider"
 	"k8s.io/kubernetes/pkg/util/mount"
 	"k8s.io/kubernetes/pkg/volume"
 	volumeutil "k8s.io/kubernetes/pkg/volume/util"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
 )
 
 type cinderDiskAttacher struct {
 	host           volume.VolumeHost
-	cinderProvider CinderProvider
+	cinderProvider BlockStorageProvider
 }
 
 var _ volume.Attacher = &cinderDiskAttacher{}
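Note: the attacher now programs against `BlockStorageProvider` instead of the old `CinderProvider`. The interface itself is defined elsewhere in the package and is not part of this diff; the fragment below is a sketch inferred purely from the call sites visible in the hunks that follow, so the return types are assumptions consistent with how the results are used:

```go
package cinder

import "k8s.io/apimachinery/pkg/types"

// Inferred sketch only; the real interface carries more methods
// (attach, detach, create, delete, ...).
type BlockStorageProvider interface {
	// Used by VolumesAreAttached: keyed by node name, so the attacher no
	// longer resolves the Nova instance ID itself.
	DisksAreAttachedByName(nodeName types.NodeName, volumeIDs []string) (map[string]bool, error)
	// Used by Detach: reports attachment state and also returns the
	// instance ID the caller needs for waitDiskDetached.
	DiskIsAttachedByName(nodeName types.NodeName, volumeID string) (bool, string, error)
}
```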
@@ -44,20 +43,21 @@ var _ volume.Attacher = &cinderDiskAttacher{}
 var _ volume.AttachableVolumePlugin = &cinderPlugin{}
 
 const (
-	checkSleepDuration       = 1 * time.Second
-	operationFinishInitDealy = 1 * time.Second
+	probeVolumeInitDelay     = 1 * time.Second
+	probeVolumeFactor        = 2.0
+	operationFinishInitDelay = 1 * time.Second
 	operationFinishFactor    = 1.1
 	operationFinishSteps     = 10
-	diskAttachInitDealy      = 1 * time.Second
+	diskAttachInitDelay      = 1 * time.Second
 	diskAttachFactor         = 1.2
 	diskAttachSteps          = 15
-	diskDetachInitDealy      = 1 * time.Second
+	diskDetachInitDelay      = 1 * time.Second
 	diskDetachFactor         = 1.2
 	diskDetachSteps          = 13
 )
 
 func (plugin *cinderPlugin) NewAttacher() (volume.Attacher, error) {
-	cinder, err := getCloudProvider(plugin.host.GetCloudProvider())
+	cinder, err := plugin.getCloudProvider()
 	if err != nil {
 		return nil, err
 	}
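These renamed constants (the commit fixes the "Dealy" typos) feed the `wait.Backoff` values used throughout this file. As a rough sanity check, a standalone sketch (not part of the diff) of the worst-case total sleep they imply: for initial duration d, factor f, and n steps it is d + d*f + ... + d*f^(n-1) = d*(f^n - 1)/(f - 1):

```go
package main

import (
	"fmt"
	"time"
)

// totalBackoff sums the sleeps a wait.Backoff with these parameters can
// perform: steps terms of a geometric series starting at d.
func totalBackoff(d time.Duration, factor float64, steps int) time.Duration {
	total, cur := time.Duration(0), float64(d)
	for i := 0; i < steps; i++ {
		total += time.Duration(cur)
		cur *= factor
	}
	return total
}

func main() {
	fmt.Println("operationFinish:", totalBackoff(time.Second, 1.1, 10)) // ~16s
	fmt.Println("diskAttach:     ", totalBackoff(time.Second, 1.2, 15)) // ~72s
	fmt.Println("diskDetach:     ", totalBackoff(time.Second, 1.2, 13)) // ~48s
}
```

The exact totals depend on when the poll loop stops sleeping, so treat these as upper bounds.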
@@ -74,7 +74,7 @@ func (plugin *cinderPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string
 
 func (attacher *cinderDiskAttacher) waitOperationFinished(volumeID string) error {
 	backoff := wait.Backoff{
-		Duration: operationFinishInitDealy,
+		Duration: operationFinishInitDelay,
 		Factor:   operationFinishFactor,
 		Steps:    operationFinishSteps,
 	}
@@ -99,7 +99,7 @@ func (attacher *cinderDiskAttacher) waitOperationFinished(volumeID string) error
 
 func (attacher *cinderDiskAttacher) waitDiskAttached(instanceID, volumeID string) error {
 	backoff := wait.Backoff{
-		Duration: diskAttachInitDealy,
+		Duration: diskAttachInitDelay,
 		Factor:   diskAttachFactor,
 		Steps:    diskAttachSteps,
 	}
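The bodies of these wait helpers fall outside the hunks shown, but `wait.Backoff` values like these are normally driven through `wait.ExponentialBackoff` from k8s.io/apimachinery, which re-runs a condition until it returns true, fails hard, or exhausts its steps. A minimal sketch of that pattern, where `isAttached` is a hypothetical stand-in for the real cloud-provider probe:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// isAttached is a stand-in probe; the real helper asks the cloud
// provider whether the volume has reached the desired state.
func isAttached(volumeID string) (bool, error) {
	return true, nil
}

func waitDiskAttached(volumeID string) error {
	// Same shape as the diskAttach* constants above.
	backoff := wait.Backoff{Duration: time.Second, Factor: 1.2, Steps: 15}
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		return isAttached(volumeID)
	})
	if err == wait.ErrWaitTimeout {
		return fmt.Errorf("volume %q still not attached after all retries", volumeID)
	}
	return err
}

func main() {
	fmt.Println(waitDiskAttached("vol-0000"))
}
```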
@@ -186,23 +186,7 @@ func (attacher *cinderDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nod
 		volumeSpecMap[volumeSource.VolumeID] = spec
 	}
 
-	instanceID, err := attacher.nodeInstanceID(nodeName)
-	if err != nil {
-		if err == cloudprovider.InstanceNotFound {
-			// If node doesn't exist, OpenStack Nova will assume the volumes are not attached to it.
-			// Mark the volumes as detached and return false without error.
-			glog.Warningf("VolumesAreAttached: node %q does not exist.", nodeName)
-			for spec := range volumesAttachedCheck {
-				volumesAttachedCheck[spec] = false
-			}
-
-			return volumesAttachedCheck, nil
-		}
-
-		return volumesAttachedCheck, err
-	}
-
-	attachedResult, err := attacher.cinderProvider.DisksAreAttached(instanceID, volumeIDList)
+	attachedResult, err := attacher.cinderProvider.DisksAreAttachedByName(nodeName, volumeIDList)
 	if err != nil {
 		// Log error and continue with attach
 		glog.Errorf(
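The hunk ends mid-function. Presumably the `attachedResult` map (volume ID to attached flag) is then folded back into `volumesAttachedCheck`, which is keyed by spec; a runnable sketch of that fold with stand-in types, since the actual continuation is not shown in this diff:

```go
package main

import "fmt"

// Stand-in for *volume.Spec, just to make the fold runnable.
type spec struct{ name string }

func main() {
	// volumeSpecMap maps volume IDs back to their specs (built in the
	// context lines above); every spec starts out presumed attached.
	volumeSpecMap := map[string]*spec{"vol-1": {"pv-a"}, "vol-2": {"pv-b"}}
	volumesAttachedCheck := map[*spec]bool{}
	for _, s := range volumeSpecMap {
		volumesAttachedCheck[s] = true
	}

	// Shape returned by DisksAreAttachedByName: volume ID -> attached.
	attachedResult := map[string]bool{"vol-1": true, "vol-2": false}

	for volumeID, attached := range attachedResult {
		if !attached {
			volumesAttachedCheck[volumeSpecMap[volumeID]] = false
		}
	}

	for s, ok := range volumesAttachedCheck {
		fmt.Printf("%s attached=%v\n", s.name, ok)
	}
}
```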
@@ -231,14 +215,15 @@ func (attacher *cinderDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath
 	volumeID := volumeSource.VolumeID
 
 	if devicePath == "" {
-		return "", fmt.Errorf("WaitForAttach failed for Cinder disk %q: devicePath is empty.", volumeID)
+		return "", fmt.Errorf("WaitForAttach failed for Cinder disk %q: devicePath is empty", volumeID)
 	}
 
-	ticker := time.NewTicker(checkSleepDuration)
+	ticker := time.NewTicker(probeVolumeInitDelay)
 	defer ticker.Stop()
 	timer := time.NewTimer(timeout)
 	defer timer.Stop()
 
+	duration := probeVolumeInitDelay
 	for {
 		select {
 		case <-ticker.C:
@@ -252,12 +237,15 @@ func (attacher *cinderDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath
 			if exists && err == nil {
 				glog.Infof("Successfully found attached Cinder disk %q at %v.", volumeID, devicePath)
 				return devicePath, nil
-			} else {
-				// Log an error, and continue checking periodically
-				glog.Errorf("Error: could not find attached Cinder disk %q (path: %q): %v", volumeID, devicePath, err)
 			}
+			// Log an error, and continue checking periodically
+			glog.Errorf("Error: could not find attached Cinder disk %q (path: %q): %v", volumeID, devicePath, err)
+			// Using exponential backoff instead of linear
+			ticker.Stop()
+			duration = time.Duration(float64(duration) * probeVolumeFactor)
+			ticker = time.NewTicker(duration)
 		case <-timer.C:
-			return "", fmt.Errorf("Could not find attached Cinder disk %q. Timeout waiting for mount paths to be created.", volumeID)
+			return "", fmt.Errorf("could not find attached Cinder disk %q. Timeout waiting for mount paths to be created", volumeID)
 		}
 	}
 }
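WaitForAttach used to poll at a fixed `checkSleepDuration`; the new loop widens the ticker interval by `probeVolumeFactor` after every miss, so a slow attach costs ever fewer probes. The same pattern in a self-contained, runnable form (`probe` is a stand-in for the device-path check, and the small delays are chosen only to make the demo finish quickly):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	const (
		initDelay = 100 * time.Millisecond
		factor    = 2.0
		timeout   = 2 * time.Second
	)
	probe := func() bool { return false } // stand-in for the device check

	duration := initDelay
	ticker := time.NewTicker(duration)
	defer ticker.Stop()
	timer := time.NewTimer(timeout)
	defer timer.Stop()

	for {
		select {
		case <-ticker.C:
			if probe() {
				fmt.Println("found device")
				return
			}
			// Miss: replace the ticker with a wider interval instead of
			// polling at a fixed rate.
			ticker.Stop()
			duration = time.Duration(float64(duration) * factor)
			ticker = time.NewTicker(duration)
		case <-timer.C:
			fmt.Println("timed out")
			return
		}
	}
}
```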
@@ -297,8 +285,8 @@ func (attacher *cinderDiskAttacher) MountDevice(spec *volume.Spec, devicePath st
 		options = append(options, "ro")
 	}
 	if notMnt {
-		diskMounter := volumehelper.NewSafeFormatAndMountFromHost(cinderVolumePluginName, attacher.host)
-		mountOptions := volume.MountOptionFromSpec(spec, options...)
+		diskMounter := volumeutil.NewSafeFormatAndMountFromHost(cinderVolumePluginName, attacher.host)
+		mountOptions := volumeutil.MountOptionFromSpec(spec, options...)
 		err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions)
 		if err != nil {
 			os.Remove(deviceMountPath)
@@ -310,13 +298,13 @@ func (attacher *cinderDiskAttacher) MountDevice(spec *volume.Spec, devicePath st
 
 type cinderDiskDetacher struct {
 	mounter        mount.Interface
-	cinderProvider CinderProvider
+	cinderProvider BlockStorageProvider
 }
 
 var _ volume.Detacher = &cinderDiskDetacher{}
 
 func (plugin *cinderPlugin) NewDetacher() (volume.Detacher, error) {
-	cinder, err := getCloudProvider(plugin.host.GetCloudProvider())
+	cinder, err := plugin.getCloudProvider()
 	if err != nil {
 		return nil, err
 	}
@@ -328,7 +316,7 @@ func (plugin *cinderPlugin) NewDetacher() (volume.Detacher, error) {
 
 func (detacher *cinderDiskDetacher) waitOperationFinished(volumeID string) error {
 	backoff := wait.Backoff{
-		Duration: operationFinishInitDealy,
+		Duration: operationFinishInitDelay,
 		Factor:   operationFinishFactor,
 		Steps:    operationFinishSteps,
 	}
@@ -353,7 +341,7 @@ func (detacher *cinderDiskDetacher) waitOperationFinished(volumeID string) error
 
 func (detacher *cinderDiskDetacher) waitDiskDetached(instanceID, volumeID string) error {
 	backoff := wait.Backoff{
-		Duration: diskDetachInitDealy,
+		Duration: diskDetachInitDelay,
 		Factor:   diskDetachFactor,
 		Steps:    diskDetachSteps,
 	}
@@ -375,20 +363,10 @@ func (detacher *cinderDiskDetacher) waitDiskDetached(instanceID, volumeID string
 
 func (detacher *cinderDiskDetacher) Detach(volumeName string, nodeName types.NodeName) error {
 	volumeID := path.Base(volumeName)
-	instances, res := detacher.cinderProvider.Instances()
-	if !res {
-		return fmt.Errorf("failed to list openstack instances")
-	}
-	instanceID, err := instances.InstanceID(nodeName)
-	if ind := strings.LastIndex(instanceID, "/"); ind >= 0 {
-		instanceID = instanceID[(ind + 1):]
-	}
-
 	if err := detacher.waitOperationFinished(volumeID); err != nil {
 		return err
 	}
-
-	attached, err := detacher.cinderProvider.DiskIsAttached(instanceID, volumeID)
+	attached, instanceID, err := detacher.cinderProvider.DiskIsAttachedByName(nodeName, volumeID)
 	if err != nil {
 		// Log error and continue with detach
 		glog.Errorf(
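`DiskIsAttachedByName` collapses the removed multi-step lookup (list instances, resolve the instance ID, strip everything before the last "/", then query by ID) into one node-name keyed call that also hands back the instance ID needed by `waitDiskDetached`. A hypothetical caller under the interface sketch given earlier; the real Detach goes on to call the provider's detach operation with the returned `instanceID`:

```go
package cinder

import "k8s.io/apimachinery/pkg/types"

// Hypothetical flow only, reusing the BlockStorageProvider sketch above.
func detachIfAttached(p BlockStorageProvider, nodeName types.NodeName, volumeID string) (string, error) {
	attached, instanceID, err := p.DiskIsAttachedByName(nodeName, volumeID)
	if err != nil {
		return "", err
	}
	if !attached {
		// Idempotent no-op: Nova already sees the volume as detached.
		return "", nil
	}
	return instanceID, nil
}
```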
@@ -423,7 +401,7 @@ func (attacher *cinderDiskAttacher) nodeInstanceID(nodeName types.NodeName) (str
 	if !res {
 		return "", fmt.Errorf("failed to list openstack instances")
 	}
-	instanceID, err := instances.InstanceID(nodeName)
+	instanceID, err := instances.InstanceID(context.TODO(), nodeName)
 	if err != nil {
 		return "", err
 	}