/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cinder

import (
	"context"
	"fmt"
	"os"
	"path"
	"strings"
	"time"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/klog"
	"k8s.io/kubernetes/pkg/util/mount"
	"k8s.io/kubernetes/pkg/volume"
	volumeutil "k8s.io/kubernetes/pkg/volume/util"
)
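
// cinderDiskAttacher implements volume.Attacher and volume.DeviceMounter
// on top of the OpenStack Cinder block-storage provider.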
type cinderDiskAttacher struct {
	host           volume.VolumeHost
	cinderProvider BlockStorageProvider
}

var _ volume.Attacher = &cinderDiskAttacher{}

var _ volume.DeviceMounter = &cinderDiskAttacher{}

var _ volume.AttachableVolumePlugin = &cinderPlugin{}

var _ volume.DeviceMountableVolumePlugin = &cinderPlugin{}
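
// Retry and backoff parameters used when probing for the attached device and
// when waiting for Cinder operations, attaches, and detaches to complete.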
const (
	probeVolumeInitDelay     = 1 * time.Second
	probeVolumeFactor        = 2.0
	operationFinishInitDelay = 1 * time.Second
	operationFinishFactor    = 1.1
	operationFinishSteps     = 10
	diskAttachInitDelay      = 1 * time.Second
	diskAttachFactor         = 1.2
	diskAttachSteps          = 15
	diskDetachInitDelay      = 1 * time.Second
	diskDetachFactor         = 1.2
	diskDetachSteps          = 13
)
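
// NewAttacher creates a cinderDiskAttacher backed by the plugin's cloud provider.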
func (plugin *cinderPlugin) NewAttacher() (volume.Attacher, error) {
	cinder, err := plugin.getCloudProvider()
	if err != nil {
		return nil, err
	}
	return &cinderDiskAttacher{
		host:           plugin.host,
		cinderProvider: cinder,
	}, nil
}
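
// NewDeviceMounter returns the attacher, which also implements volume.DeviceMounter.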
func (plugin *cinderPlugin) NewDeviceMounter() (volume.DeviceMounter, error) {
	return plugin.NewAttacher()
}
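
// GetDeviceMountRefs returns all mount references for the given device mount path.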
func (plugin *cinderPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
	mounter := plugin.host.GetMounter(plugin.GetPluginName())
	return mounter.GetMountRefs(deviceMountPath)
}
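
// waitOperationFinished polls Cinder with exponential backoff until no
// operation is pending on the volume, or the backoff is exhausted.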
func (attacher *cinderDiskAttacher) waitOperationFinished(volumeID string) error {
	backoff := wait.Backoff{
		Duration: operationFinishInitDelay,
		Factor:   operationFinishFactor,
		Steps:    operationFinishSteps,
	}

	var volumeStatus string
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		var pending bool
		var err error
		pending, volumeStatus, err = attacher.cinderProvider.OperationPending(volumeID)
		if err != nil {
			return false, err
		}
		return !pending, nil
	})

	if err == wait.ErrWaitTimeout {
		err = fmt.Errorf("volume %q is %s, can't finish within the allotted time", volumeID, volumeStatus)
	}

	return err
}
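
// waitDiskAttached polls Cinder until the volume reports as attached to the
// given instance, or the backoff is exhausted.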
func (attacher *cinderDiskAttacher) waitDiskAttached(instanceID, volumeID string) error {
	backoff := wait.Backoff{
		Duration: diskAttachInitDelay,
		Factor:   diskAttachFactor,
		Steps:    diskAttachSteps,
	}

	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		attached, err := attacher.cinderProvider.DiskIsAttached(instanceID, volumeID)
		if err != nil {
			return false, err
		}
		return attached, nil
	})

	if err == wait.ErrWaitTimeout {
		err = fmt.Errorf("volume %q failed to be attached within the allotted time", volumeID)
	}

	return err
}
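
// Attach attaches the Cinder volume described by spec to the given node and
// returns the device path reported by Cinder.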
func (attacher *cinderDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
	volumeID, _, _, err := getVolumeInfo(spec)
	if err != nil {
		return "", err
	}

	instanceID, err := attacher.nodeInstanceID(nodeName)
	if err != nil {
		return "", err
	}

	if err := attacher.waitOperationFinished(volumeID); err != nil {
		return "", err
	}

	attached, err := attacher.cinderProvider.DiskIsAttached(instanceID, volumeID)
	if err != nil {
		// Log error and continue with attach
		klog.Warningf(
			"Error checking if volume (%q) is already attached to current instance (%q). Will continue and try attach anyway. err=%v",
			volumeID, instanceID, err)
	}

	if err == nil && attached {
		// Volume is already attached to instance.
		klog.Infof("Attach operation is successful. volume %q is already attached to instance %q.", volumeID, instanceID)
	} else {
		_, err = attacher.cinderProvider.AttachDisk(instanceID, volumeID)
		if err == nil {
			if err = attacher.waitDiskAttached(instanceID, volumeID); err != nil {
				klog.Errorf("Error waiting for volume %q to be attached to node %q: %v", volumeID, nodeName, err)
				return "", err
			}
			klog.Infof("Attach operation successful: volume %q attached to instance %q.", volumeID, instanceID)
		} else {
			klog.Infof("Attaching volume %q to instance %q failed with: %v", volumeID, instanceID, err)
			return "", err
		}
	}

	devicePath, err := attacher.cinderProvider.GetAttachmentDiskPath(instanceID, volumeID)
	if err != nil {
		klog.Infof("Cannot get device path of volume %q attached to instance %q, failed with: %v", volumeID, instanceID, err)
		return "", err
	}

	return devicePath, nil
}
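
// VolumesAreAttached checks whether the given volume specs are attached to the
// node, returning a map keyed by spec.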
func (attacher *cinderDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
	volumesAttachedCheck := make(map[*volume.Spec]bool)
	volumeSpecMap := make(map[string]*volume.Spec)
	volumeIDList := []string{}
	for _, spec := range specs {
		volumeID, _, _, err := getVolumeInfo(spec)
		if err != nil {
			klog.Errorf("Error getting volume (%q) source: %v", spec.Name(), err)
			continue
		}

		volumeIDList = append(volumeIDList, volumeID)
		volumesAttachedCheck[spec] = true
		volumeSpecMap[volumeID] = spec
	}

	attachedResult, err := attacher.cinderProvider.DisksAreAttachedByName(nodeName, volumeIDList)
	if err != nil {
		// Log error and continue with attach
		klog.Errorf(
			"Error checking if volumes (%v) are already attached to current node (%q). Will continue and try attach anyway. err=%v",
			volumeIDList, nodeName, err)
		return volumesAttachedCheck, err
	}

	for volumeID, attached := range attachedResult {
		if !attached {
			spec := volumeSpecMap[volumeID]
			volumesAttachedCheck[spec] = false
			klog.V(2).Infof("VolumesAreAttached: volume %q (specName: %q) is no longer attached", volumeID, spec.Name())
		}
	}
	return volumesAttachedCheck, nil
}
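
// WaitForAttach polls the node until the attached Cinder volume's device path
// shows up, or the timeout expires.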
func (attacher *cinderDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) {
	// NOTE: devicePath is the path as reported by Cinder, which may be incorrect and should not be used. See Issue #33128
	volumeID, _, _, err := getVolumeInfo(spec)
	if err != nil {
		return "", err
	}

	if devicePath == "" {
		return "", fmt.Errorf("WaitForAttach failed for Cinder disk %q: devicePath is empty", volumeID)
	}

	ticker := time.NewTicker(probeVolumeInitDelay)
	defer ticker.Stop()
	timer := time.NewTimer(timeout)
	defer timer.Stop()

	duration := probeVolumeInitDelay
	for {
		select {
		case <-ticker.C:
			klog.V(5).Infof("Checking Cinder disk %q is attached.", volumeID)
			probeAttachedVolume()
			if !attacher.cinderProvider.ShouldTrustDevicePath() {
				// Using the Cinder volume ID, find the real device path (See Issue #33128)
				devicePath = attacher.cinderProvider.GetDevicePath(volumeID)
			}
			exists, err := volumeutil.PathExists(devicePath)
			if exists && err == nil {
				klog.Infof("Successfully found attached Cinder disk %q at %v.", volumeID, devicePath)
				return devicePath, nil
			}
			// Log an error, and continue checking periodically
			klog.Errorf("Error: could not find attached Cinder disk %q (path: %q): %v", volumeID, devicePath, err)
			// Using exponential backoff instead of linear
			ticker.Stop()
			duration = time.Duration(float64(duration) * probeVolumeFactor)
			ticker = time.NewTicker(duration)
		case <-timer.C:
			return "", fmt.Errorf("could not find attached Cinder disk %q. Timeout waiting for mount paths to be created", volumeID)
		}
	}
}
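
// GetDeviceMountPath returns the global mount path for the volume described by spec.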
func (attacher *cinderDiskAttacher) GetDeviceMountPath(
	spec *volume.Spec) (string, error) {
	volumeID, _, _, err := getVolumeInfo(spec)
	if err != nil {
		return "", err
	}

	return makeGlobalPDName(attacher.host, volumeID), nil
}
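
// MountDevice formats the attached device if necessary and mounts it at deviceMountPath.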
// FIXME: this method can be further pruned.
func (attacher *cinderDiskAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
	mounter := attacher.host.GetMounter(cinderVolumePluginName)
	notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
	if err != nil {
		if os.IsNotExist(err) {
			if err := os.MkdirAll(deviceMountPath, 0750); err != nil {
				return err
			}
			notMnt = true
		} else {
			return err
		}
	}

	_, volumeFSType, readOnly, err := getVolumeInfo(spec)
	if err != nil {
		return err
	}

	options := []string{}
	if readOnly {
		options = append(options, "ro")
	}
	if notMnt {
		diskMounter := volumeutil.NewSafeFormatAndMountFromHost(cinderVolumePluginName, attacher.host)
		mountOptions := volumeutil.MountOptionFromSpec(spec, options...)
		err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeFSType, mountOptions)
		if err != nil {
			os.Remove(deviceMountPath)
			return err
		}
	}
	return nil
}
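
// cinderDiskDetacher implements volume.Detacher and volume.DeviceUnmounter
// on top of the OpenStack Cinder block-storage provider.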
type cinderDiskDetacher struct {
	mounter        mount.Interface
	cinderProvider BlockStorageProvider
}

var _ volume.Detacher = &cinderDiskDetacher{}

var _ volume.DeviceUnmounter = &cinderDiskDetacher{}
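
// NewDetacher creates a cinderDiskDetacher backed by the plugin's cloud provider.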
func (plugin *cinderPlugin) NewDetacher() (volume.Detacher, error) {
	cinder, err := plugin.getCloudProvider()
	if err != nil {
		return nil, err
	}
	return &cinderDiskDetacher{
		mounter:        plugin.host.GetMounter(plugin.GetPluginName()),
		cinderProvider: cinder,
	}, nil
}
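
// NewDeviceUnmounter returns the detacher, which also implements volume.DeviceUnmounter.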
func (plugin *cinderPlugin) NewDeviceUnmounter() (volume.DeviceUnmounter, error) {
	return plugin.NewDetacher()
}
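
// waitOperationFinished polls Cinder until no operation is pending on the
// volume before a detach is attempted.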
func (detacher *cinderDiskDetacher) waitOperationFinished(volumeID string) error {
	backoff := wait.Backoff{
		Duration: operationFinishInitDelay,
		Factor:   operationFinishFactor,
		Steps:    operationFinishSteps,
	}

	var volumeStatus string
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		var pending bool
		var err error
		pending, volumeStatus, err = detacher.cinderProvider.OperationPending(volumeID)
		if err != nil {
			return false, err
		}
		return !pending, nil
	})

	if err == wait.ErrWaitTimeout {
		err = fmt.Errorf("volume %q is %s, can't finish within the allotted time", volumeID, volumeStatus)
	}

	return err
}
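
// waitDiskDetached polls Cinder until the volume reports as detached from the
// given instance, or the backoff is exhausted.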
func (detacher *cinderDiskDetacher) waitDiskDetached(instanceID, volumeID string) error {
	backoff := wait.Backoff{
		Duration: diskDetachInitDelay,
		Factor:   diskDetachFactor,
		Steps:    diskDetachSteps,
	}

	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		attached, err := detacher.cinderProvider.DiskIsAttached(instanceID, volumeID)
		if err != nil {
			return false, err
		}
		return !attached, nil
	})

	if err == wait.ErrWaitTimeout {
		err = fmt.Errorf("volume %q failed to detach within the allotted time", volumeID)
	}

	return err
}
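
// Detach detaches the given volume from the node, waiting for Cinder to
// confirm the detachment.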
func (detacher *cinderDiskDetacher) Detach(volumeName string, nodeName types.NodeName) error {
	volumeID := path.Base(volumeName)
	if err := detacher.waitOperationFinished(volumeID); err != nil {
		return err
	}
	attached, instanceID, err := detacher.cinderProvider.DiskIsAttachedByName(nodeName, volumeID)
	if err != nil {
		// Log error and continue with detach
		klog.Errorf(
			"Error checking if volume (%q) is already attached to current node (%q). Will continue and try detach anyway. err=%v",
			volumeID, nodeName, err)
	}

	if err == nil && !attached {
		// Volume is already detached from node.
		klog.Infof("detach operation was successful. volume %q is already detached from node %q.", volumeID, nodeName)
		return nil
	}

	if err = detacher.cinderProvider.DetachDisk(instanceID, volumeID); err != nil {
		klog.Errorf("Error detaching volume %q from node %q: %v", volumeID, nodeName, err)
		return err
	}
	if err = detacher.waitDiskDetached(instanceID, volumeID); err != nil {
		klog.Errorf("Error waiting for volume %q to detach from node %q: %v", volumeID, nodeName, err)
		return err
	}
	klog.Infof("detached volume %q from node %q", volumeID, nodeName)
	return nil
}
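
// UnmountDevice unmounts the global mount of the Cinder volume.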
func (detacher *cinderDiskDetacher) UnmountDevice(deviceMountPath string) error {
	return volumeutil.UnmountPath(deviceMountPath, detacher.mounter)
}
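
// nodeInstanceID resolves the OpenStack instance ID for the given node name.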
func (attacher *cinderDiskAttacher) nodeInstanceID(nodeName types.NodeName) (string, error) {
	instances, res := attacher.cinderProvider.Instances()
	if !res {
		return "", fmt.Errorf("failed to list openstack instances")
	}
	instanceID, err := instances.InstanceID(context.TODO(), nodeName)
	if err != nil {
		return "", err
	}
	if ind := strings.LastIndex(instanceID, "/"); ind >= 0 {
		instanceID = instanceID[(ind + 1):]
	}
	return instanceID, nil
}