vendor files

Serguei Bezverkhi
2018-01-09 13:57:14 -05:00
parent 558bc6c02a
commit 7b24313bd6
16547 changed files with 4527373 additions and 0 deletions

vendor/k8s.io/kubernetes/pkg/volume/azure_dd/BUILD generated vendored Normal file

@@ -0,0 +1,78 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"attacher.go",
"azure_common.go",
"azure_common_unsupported.go",
"azure_dd.go",
"azure_mounter.go",
"azure_provision.go",
] + select({
"@io_bazel_rules_go//go/platform:linux_amd64": [
"azure_common_linux.go",
],
"@io_bazel_rules_go//go/platform:windows_amd64": [
"azure_common_windows.go",
],
"//conditions:default": [],
}),
importpath = "k8s.io/kubernetes/pkg/volume/azure_dd",
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/azure:go_default_library",
"//pkg/util/keymutex:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/arm/compute:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/arm/storage:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)
go_test(
name = "go_default_test",
srcs = [
"azure_common_test.go",
"azure_dd_test.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/azure_dd",
library = ":go_default_library",
deps = [
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
],
)

vendor/k8s.io/kubernetes/pkg/volume/azure_dd/OWNERS generated vendored Executable file

@@ -0,0 +1,11 @@
approvers:
- brendandburns
- rootfs
reviewers:
- rootfs
- brendandburns
- saad-ali
- jsafrane
- jingxu97
- msau42
- andyzhangx

vendor/k8s.io/kubernetes/pkg/volume/azure_dd/attacher.go generated vendored Normal file

@@ -0,0 +1,294 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure_dd
import (
"fmt"
"os"
"path"
"path/filepath"
"runtime"
"strconv"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/arm/compute"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
"k8s.io/kubernetes/pkg/util/keymutex"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
type azureDiskDetacher struct {
plugin *azureDataDiskPlugin
cloud *azure.Cloud
}
type azureDiskAttacher struct {
plugin *azureDataDiskPlugin
cloud *azure.Cloud
}
var _ volume.Attacher = &azureDiskAttacher{}
var _ volume.Detacher = &azureDiskDetacher{}
// acquire lock to get a LUN number
var getLunMutex = keymutex.NewKeyMutex()
// Attach attaches a volume.Spec to an Azure VM referenced by NodeName, returning the disk's LUN
func (a *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
volumeSource, err := getVolumeSource(spec)
if err != nil {
glog.Warningf("failed to get azure disk spec")
return "", err
}
instanceid, err := a.cloud.InstanceID(nodeName)
if err != nil {
glog.Warningf("failed to get azure instance id")
return "", fmt.Errorf("failed to get azure instance id for node %q", nodeName)
}
if ind := strings.LastIndex(instanceid, "/"); ind >= 0 {
instanceid = instanceid[(ind + 1):]
}
diskController, err := getDiskController(a.plugin.host)
if err != nil {
return "", err
}
lun, err := diskController.GetDiskLun(volumeSource.DiskName, volumeSource.DataDiskURI, nodeName)
if err == cloudprovider.InstanceNotFound {
// Log error and continue with attach
glog.Warningf(
"Error checking if volume is already attached to current node (%q). Will continue and try attach anyway. err=%v",
instanceid, err)
}
if err == nil {
// Volume is already attached to node.
glog.V(4).Infof("Attach operation is successful. volume %q is already attached to node %q at lun %d.", volumeSource.DiskName, instanceid, lun)
} else {
glog.V(4).Infof("GetDiskLun returned: %v. Initiating attaching volume %q to node %q.", err, volumeSource.DataDiskURI, nodeName)
getLunMutex.LockKey(instanceid)
defer getLunMutex.UnlockKey(instanceid)
lun, err = diskController.GetNextDiskLun(nodeName)
if err != nil {
glog.Warningf("no LUN available for instance %q", nodeName)
return "", fmt.Errorf("all LUNs are used, cannot attach volume %q to instance %q", volumeSource.DiskName, instanceid)
}
glog.V(4).Infof("Trying to attach volume %q lun %d to node %q.", volumeSource.DataDiskURI, lun, nodeName)
isManagedDisk := (*volumeSource.Kind == v1.AzureManagedDisk)
err = diskController.AttachDisk(isManagedDisk, volumeSource.DiskName, volumeSource.DataDiskURI, nodeName, lun, compute.CachingTypes(*volumeSource.CachingMode))
if err == nil {
glog.V(4).Infof("Attach operation successful: volume %q attached to node %q.", volumeSource.DataDiskURI, nodeName)
} else {
glog.V(2).Infof("Attach volume %q to instance %q failed with %v", volumeSource.DataDiskURI, instanceid, err)
return "", fmt.Errorf("Attach volume %q to instance %q failed with %v", volumeSource.DiskName, instanceid, err)
}
}
return strconv.Itoa(int(lun)), err
}
func (a *azureDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
volumesAttachedCheck := make(map[*volume.Spec]bool)
volumeSpecMap := make(map[string]*volume.Spec)
volumeIDList := []string{}
for _, spec := range specs {
volumeSource, err := getVolumeSource(spec)
if err != nil {
glog.Errorf("azureDisk - Error getting volume (%q) source : %v", spec.Name(), err)
continue
}
volumeIDList = append(volumeIDList, volumeSource.DiskName)
volumesAttachedCheck[spec] = true
volumeSpecMap[volumeSource.DiskName] = spec
}
diskController, err := getDiskController(a.plugin.host)
if err != nil {
return nil, err
}
attachedResult, err := diskController.DisksAreAttached(volumeIDList, nodeName)
if err != nil {
// Log error and continue with attach
glog.Errorf(
"azureDisk - Error checking if volumes (%v) are attached to current node (%q). err=%v",
volumeIDList, nodeName, err)
return volumesAttachedCheck, err
}
for volumeID, attached := range attachedResult {
if !attached {
spec := volumeSpecMap[volumeID]
volumesAttachedCheck[spec] = false
glog.V(2).Infof("azureDisk - VolumesAreAttached: check volume %q (specName: %q) is no longer attached", volumeID, spec.Name())
}
}
return volumesAttachedCheck, nil
}
func (a *azureDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) {
var err error
lun, err := strconv.Atoi(devicePath)
if err != nil {
return "", fmt.Errorf("azureDisk - Wait for attach expect device path as a lun number, instead got: %s", devicePath)
}
volumeSource, err := getVolumeSource(spec)
if err != nil {
return "", err
}
exec := a.plugin.host.GetExec(a.plugin.GetPluginName())
io := &osIOHandler{}
scsiHostRescan(io, exec)
diskName := volumeSource.DiskName
nodeName := a.plugin.host.GetHostName()
newDevicePath := ""
err = wait.Poll(1*time.Second, timeout, func() (bool, error) {
if newDevicePath, err = findDiskByLun(lun, io, exec); err != nil {
return false, fmt.Errorf("azureDisk - WaitForAttach ticker failed node (%s) disk (%s) lun(%v) err(%s)", nodeName, diskName, lun, err)
}
// did we find it?
if newDevicePath != "" {
// the current sequence k8s uses for unformatted disks (check-disk, mount, fail, mkfs.extX) hangs on
// Azure Managed disk scsi interface. this is a hack and will be replaced once we identify and solve
// the root cause on Azure.
formatIfNotFormatted(newDevicePath, *volumeSource.FSType, exec)
return true, nil
}
return false, fmt.Errorf("azureDisk - WaitForAttach failed within timeout node (%s) diskId:(%s) lun:(%v)", nodeName, diskName, lun)
})
return newDevicePath, err
}
// To avoid name conflicts (similar *.vhd names) we hash the diskURI
// and use the hash as the device mount target.
// This is generalized for both managed and blob disks.
// We also prefix the hash with m/b based on the disk kind.
func (a *azureDiskAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) {
volumeSource, err := getVolumeSource(spec)
if err != nil {
return "", err
}
if volumeSource.Kind == nil { // this spec was constructed from info on the node
pdPath := path.Join(a.plugin.host.GetPluginDir(azureDataDiskPluginName), mount.MountsInGlobalPDPath, volumeSource.DataDiskURI)
return pdPath, nil
}
isManagedDisk := (*volumeSource.Kind == v1.AzureManagedDisk)
return makeGlobalPDPath(a.plugin.host, volumeSource.DataDiskURI, isManagedDisk)
}
func (attacher *azureDiskAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
mounter := attacher.plugin.host.GetMounter(azureDataDiskPluginName)
notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
if err != nil {
if os.IsNotExist(err) {
dir := deviceMountPath
if runtime.GOOS == "windows" {
// on Windows, as we use mklink, we only need to MkdirAll the parent directory
dir = filepath.Dir(deviceMountPath)
}
if err := os.MkdirAll(dir, 0750); err != nil {
return fmt.Errorf("azureDisk - mountDevice:CreateDirectory failed with %s", err)
}
notMnt = true
} else {
return fmt.Errorf("azureDisk - mountDevice:IsLikelyNotMountPoint failed with %s", err)
}
}
volumeSource, err := getVolumeSource(spec)
if err != nil {
return err
}
options := []string{}
if notMnt {
diskMounter := volumehelper.NewSafeFormatAndMountFromHost(azureDataDiskPluginName, attacher.plugin.host)
mountOptions := volume.MountOptionFromSpec(spec, options...)
err = diskMounter.FormatAndMount(devicePath, deviceMountPath, *volumeSource.FSType, mountOptions)
if err != nil {
if cleanErr := os.Remove(deviceMountPath); cleanErr != nil {
return fmt.Errorf("azureDisk - mountDevice:FormatAndMount failed with %s and clean up failed with :%v", err, cleanErr)
}
return fmt.Errorf("azureDisk - mountDevice:FormatAndMount failed with %s", err)
}
}
return nil
}
// Detach detaches disk from Azure VM.
func (d *azureDiskDetacher) Detach(diskURI string, nodeName types.NodeName) error {
if diskURI == "" {
return fmt.Errorf("invalid disk to detach: %q", diskURI)
}
instanceid, err := d.cloud.InstanceID(nodeName)
if err != nil {
glog.Warningf("no instance id for node %q, skip detaching", nodeName)
return nil
}
if ind := strings.LastIndex(instanceid, "/"); ind >= 0 {
instanceid = instanceid[(ind + 1):]
}
glog.V(4).Infof("detach %v from node %q", diskURI, nodeName)
diskController, err := getDiskController(d.plugin.host)
if err != nil {
return err
}
err = diskController.DetachDiskByName("", diskURI, nodeName)
if err != nil {
glog.Errorf("failed to detach azure disk %q, err %v", diskURI, err)
}
glog.V(2).Infof("azureDisk - disk:%s was detached from node:%v", diskURI, nodeName)
return err
}
// UnmountDevice unmounts the volume on the node
func (detacher *azureDiskDetacher) UnmountDevice(deviceMountPath string) error {
err := volumeutil.UnmountPath(deviceMountPath, detacher.plugin.host.GetMounter(detacher.plugin.GetPluginName()))
if err == nil {
glog.V(4).Infof("azureDisk - Device %s was unmounted", deviceMountPath)
} else {
glog.Infof("azureDisk - Device %s failed to unmount with error: %s", deviceMountPath, err.Error())
}
return err
}
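Attach trims the cloud-provider instance ID down to its final path segment and hands the LUN back to the kubelet as a plain decimal string, which WaitForAttach later parses with strconv.Atoi. A minimal stand-alone sketch of that round trip (the resource ID below is hypothetical, not taken from this commit):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Hypothetical ARM resource ID; Attach keeps only the last path segment.
	instanceID := "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/node-0"
	if ind := strings.LastIndex(instanceID, "/"); ind >= 0 {
		instanceID = instanceID[ind+1:]
	}
	fmt.Println(instanceID) // node-0

	// The LUN travels from Attach to WaitForAttach as a decimal string.
	lun := int32(3)
	devicePath := strconv.Itoa(int(lun))
	parsedLun, err := strconv.Atoi(devicePath)
	fmt.Println(devicePath, parsedLun, err) // 3 3 <nil>
}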

vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common.go generated vendored Normal file

@@ -0,0 +1,206 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure_dd
import (
"fmt"
"io/ioutil"
"os"
"path"
libstrings "strings"
storage "github.com/Azure/azure-sdk-for-go/arm/storage"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
)
const (
defaultFSType = "ext4"
defaultStorageAccountType = storage.StandardLRS
defaultAzureDiskKind = v1.AzureSharedBlobDisk
)
type dataDisk struct {
volume.MetricsProvider
volumeName string
diskName string
podUID types.UID
}
var (
supportedCachingModes = sets.NewString(
string(api.AzureDataDiskCachingNone),
string(api.AzureDataDiskCachingReadOnly),
string(api.AzureDataDiskCachingReadWrite))
supportedDiskKinds = sets.NewString(
string(api.AzureSharedBlobDisk),
string(api.AzureDedicatedBlobDisk),
string(api.AzureManagedDisk))
supportedStorageAccountTypes = sets.NewString("Premium_LRS", "Standard_LRS", "Standard_GRS", "Standard_RAGRS")
)
func getPath(uid types.UID, volName string, host volume.VolumeHost) string {
return host.GetPodVolumeDir(uid, strings.EscapeQualifiedNameForDisk(azureDataDiskPluginName), volName)
}
// creates a unique path for disks (even if they share the same *.vhd name)
func makeGlobalPDPath(host volume.VolumeHost, diskUri string, isManaged bool) (string, error) {
diskUri = libstrings.ToLower(diskUri) // always lowercase the URI because users may enter it in caps.
uniqueDiskNameTemplate := "%s%s"
hashedDiskUri := azure.MakeCRC32(diskUri)
prefix := "b"
if isManaged {
prefix = "m"
}
// "{m for managed b for blob}{hashed diskUri or DiskId depending on disk kind }"
diskName := fmt.Sprintf(uniqueDiskNameTemplate, prefix, hashedDiskUri)
pdPath := path.Join(host.GetPluginDir(azureDataDiskPluginName), mount.MountsInGlobalPDPath, diskName)
return pdPath, nil
}
func makeDataDisk(volumeName string, podUID types.UID, diskName string, host volume.VolumeHost) *dataDisk {
var metricProvider volume.MetricsProvider
if podUID != "" {
metricProvider = volume.NewMetricsStatFS(getPath(podUID, volumeName, host))
}
return &dataDisk{
MetricsProvider: metricProvider,
volumeName: volumeName,
diskName: diskName,
podUID: podUID,
}
}
func getVolumeSource(spec *volume.Spec) (*v1.AzureDiskVolumeSource, error) {
if spec.Volume != nil && spec.Volume.AzureDisk != nil {
return spec.Volume.AzureDisk, nil
}
if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AzureDisk != nil {
return spec.PersistentVolume.Spec.AzureDisk, nil
}
return nil, fmt.Errorf("azureDisk - Spec does not reference an Azure disk volume type")
}
func normalizeFsType(fsType string) string {
if fsType == "" {
return defaultFSType
}
return fsType
}
func normalizeKind(kind string) (v1.AzureDataDiskKind, error) {
if kind == "" {
return defaultAzureDiskKind, nil
}
if !supportedDiskKinds.Has(kind) {
return "", fmt.Errorf("azureDisk - %s is not supported disk kind. Supported values are %s", kind, supportedDiskKinds.List())
}
return v1.AzureDataDiskKind(kind), nil
}
func normalizeStorageAccountType(storageAccountType string) (storage.SkuName, error) {
if storageAccountType == "" {
return defaultStorageAccountType, nil
}
if !supportedStorageAccountTypes.Has(storageAccountType) {
return "", fmt.Errorf("azureDisk - %s is not supported sku/storageaccounttype. Supported values are %s", storageAccountType, supportedStorageAccountTypes.List())
}
return storage.SkuName(storageAccountType), nil
}
func normalizeCachingMode(cachingMode v1.AzureDataDiskCachingMode) (v1.AzureDataDiskCachingMode, error) {
if cachingMode == "" {
return v1.AzureDataDiskCachingReadWrite, nil
}
if !supportedCachingModes.Has(string(cachingMode)) {
return "", fmt.Errorf("azureDisk - %s is not supported cachingmode. Supported values are %s", cachingMode, supportedCachingModes.List())
}
return cachingMode, nil
}
type ioHandler interface {
ReadDir(dirname string) ([]os.FileInfo, error)
WriteFile(filename string, data []byte, perm os.FileMode) error
Readlink(name string) (string, error)
ReadFile(filename string) ([]byte, error)
}
// TODO: check if priming the iSCSI interface is actually needed
type osIOHandler struct{}
func (handler *osIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) {
return ioutil.ReadDir(dirname)
}
func (handler *osIOHandler) WriteFile(filename string, data []byte, perm os.FileMode) error {
return ioutil.WriteFile(filename, data, perm)
}
func (handler *osIOHandler) Readlink(name string) (string, error) {
return os.Readlink(name)
}
func (handler *osIOHandler) ReadFile(filename string) ([]byte, error) {
return ioutil.ReadFile(filename)
}
func getDiskController(host volume.VolumeHost) (DiskController, error) {
cloudProvider := host.GetCloudProvider()
az, ok := cloudProvider.(*azure.Cloud)
if !ok || az == nil {
return nil, fmt.Errorf("AzureDisk - failed to get Azure Cloud Provider. GetCloudProvider returned %v instead", cloudProvider)
}
return az, nil
}
func getCloud(host volume.VolumeHost) (*azure.Cloud, error) {
cloudProvider := host.GetCloudProvider()
az, ok := cloudProvider.(*azure.Cloud)
if !ok || az == nil {
return nil, fmt.Errorf("AzureDisk - failed to get Azure Cloud Provider. GetCloudProvider returned %v instead", cloudProvider)
}
return az, nil
}
func strFirstLetterToUpper(str string) string {
if len(str) < 2 {
return str
}
return libstrings.ToUpper(string(str[0])) + str[1:]
}
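makeGlobalPDPath derives the global mount directory from the lower-cased disk URI: azure.MakeCRC32 turns the URI into a short hash, which is prefixed with "b" for blob disks or "m" for managed disks. A stand-alone sketch of the same naming idea, assuming MakeCRC32 behaves like an IEEE CRC32 rendered as a decimal string (the real helper lives in the azure cloud-provider package, so treat this as illustrative only; the URI and paths are hypothetical):

package main

import (
	"fmt"
	"hash/crc32"
	"path"
	"strings"
)

// hashDiskURI mimics the idea behind azure.MakeCRC32: a short, stable token
// derived from the disk URI. The real helper may differ in detail.
func hashDiskURI(diskURI string) string {
	return fmt.Sprintf("%d", crc32.ChecksumIEEE([]byte(strings.ToLower(diskURI))))
}

func main() {
	diskURI := "https://exampleaccount.blob.core.windows.net/vhds/data.vhd"
	prefix := "b" // "m" for managed disks
	diskName := prefix + hashDiskURI(diskURI)
	// The literals stand in for host.GetPluginDir(...) and mount.MountsInGlobalPDPath.
	pdPath := path.Join("/var/lib/kubelet/plugins/kubernetes.io/azure-disk", "mounts", diskName)
	fmt.Println(pdPath)
}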

vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common_linux.go generated vendored Normal file

@@ -0,0 +1,189 @@
// +build linux
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure_dd
import (
"path"
"strconv"
libstrings "strings"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/util/mount"
)
// listAzureDiskPath lists the devices Azure uses for the resource and OS disks (linked under /dev/disk/azure), so they can be excluded from the data-disk search
func listAzureDiskPath(io ioHandler) []string {
azureDiskPath := "/dev/disk/azure/"
var azureDiskList []string
if dirs, err := io.ReadDir(azureDiskPath); err == nil {
for _, f := range dirs {
name := f.Name()
diskPath := azureDiskPath + name
if link, linkErr := io.Readlink(diskPath); linkErr == nil {
sd := link[(libstrings.LastIndex(link, "/") + 1):]
azureDiskList = append(azureDiskList, sd)
}
}
}
glog.V(12).Infof("Azure sys disks paths: %v", azureDiskList)
return azureDiskList
}
func scsiHostRescan(io ioHandler, exec mount.Exec) {
scsi_path := "/sys/class/scsi_host/"
if dirs, err := io.ReadDir(scsi_path); err == nil {
for _, f := range dirs {
name := scsi_path + f.Name() + "/scan"
data := []byte("- - -")
if err = io.WriteFile(name, data, 0666); err != nil {
glog.Warningf("failed to rescan scsi host %s", name)
}
}
} else {
glog.Warningf("failed to read %s, err %v", scsi_path, err)
}
}
func findDiskByLun(lun int, io ioHandler, exec mount.Exec) (string, error) {
azureDisks := listAzureDiskPath(io)
return findDiskByLunWithConstraint(lun, io, azureDisks)
}
// finds a device attached to the "current" node
func findDiskByLunWithConstraint(lun int, io ioHandler, azureDisks []string) (string, error) {
var err error
sys_path := "/sys/bus/scsi/devices"
if dirs, err := io.ReadDir(sys_path); err == nil {
for _, f := range dirs {
name := f.Name()
// look for path like /sys/bus/scsi/devices/3:0:0:1
arr := libstrings.Split(name, ":")
if len(arr) < 4 {
continue
}
if len(azureDisks) == 0 {
glog.V(4).Infof("/dev/disk/azure is not populated, now try to parse %v directly", name)
target, err := strconv.Atoi(arr[0])
if err != nil {
glog.Errorf("failed to parse target from %v (%v), err %v", arr[0], name, err)
continue
}
// as observed, targets 0-3 are used by OS disks. Skip them
if target <= 3 {
continue
}
}
// extract LUN from the path.
// LUN is the last index of the array, i.e. 1 in /sys/bus/scsi/devices/3:0:0:1
l, err := strconv.Atoi(arr[3])
if err != nil {
// unknown path format, continue to read the next one
glog.V(4).Infof("azure disk - failed to parse lun from %v (%v), err %v", arr[3], name, err)
continue
}
if lun == l {
// find the matching LUN
// read vendor and model to ensure it is a VHD disk
vendorPath := path.Join(sys_path, name, "vendor")
vendorBytes, err := io.ReadFile(vendorPath)
if err != nil {
glog.Errorf("failed to read device vendor, err: %v", err)
continue
}
vendor := libstrings.TrimSpace(string(vendorBytes))
if libstrings.ToUpper(vendor) != "MSFT" {
glog.V(4).Infof("vendor doesn't match VHD, got %s", vendor)
continue
}
modelPath := path.Join(sys_path, name, "model")
modelBytes, err := io.ReadFile(modelPath)
if err != nil {
glog.Errorf("failed to read device model, err: %v", err)
continue
}
model := libstrings.TrimSpace(string(modelBytes))
if libstrings.ToUpper(model) != "VIRTUAL DISK" {
glog.V(4).Infof("model doesn't match VHD, got %s", model)
continue
}
// found a block device; return it only if it is not one of the Azure resource/OS disks
dir := path.Join(sys_path, name, "block")
if dev, err := io.ReadDir(dir); err == nil {
found := false
for _, diskName := range azureDisks {
glog.V(12).Infof("azure disk - validating disk %q with sys disk %q", dev[0].Name(), diskName)
if string(dev[0].Name()) == diskName {
found = true
break
}
}
if !found {
return "/dev/" + dev[0].Name(), nil
}
}
}
}
}
return "", err
}
func formatIfNotFormatted(disk string, fstype string, exec mount.Exec) {
notFormatted, err := diskLooksUnformatted(disk, exec)
if err == nil && notFormatted {
args := []string{disk}
// Disk is unformatted so format it.
// Use 'ext4' as the default
if len(fstype) == 0 {
fstype = "ext4"
}
if fstype == "ext4" || fstype == "ext3" {
args = []string{"-E", "lazy_itable_init=0,lazy_journal_init=0", "-F", disk}
}
glog.Infof("azureDisk - Disk %q appears to be unformatted, attempting to format as type: %q with options: %v", disk, fstype, args)
_, err := exec.Run("mkfs."+fstype, args...)
if err == nil {
// the disk has been formatted successfully; try to mount it again.
glog.Infof("azureDisk - Disk successfully formatted with 'mkfs.%s %v'", fstype, args)
} else {
glog.Warningf("azureDisk - Error formatting volume with 'mkfs.%s %v': %v", fstype, args, err)
}
} else {
if err != nil {
glog.Warningf("azureDisk - Failed to check if the disk %s formatted with error %s, will attach anyway", disk, err)
} else {
glog.Infof("azureDisk - Disk %s already formatted, will not format", disk)
}
}
}
func diskLooksUnformatted(disk string, exec mount.Exec) (bool, error) {
args := []string{"-nd", "-o", "FSTYPE", disk}
glog.V(4).Infof("Attempting to determine if disk %q is formatted using lsblk with args: (%v)", disk, args)
dataOut, err := exec.Run("lsblk", args...)
if err != nil {
glog.Errorf("Could not determine if disk %q is formatted (%v)", disk, err)
return false, err
}
output := libstrings.TrimSpace(string(dataOut))
return output == "", nil
}
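findDiskByLunWithConstraint walks /sys/bus/scsi/devices and treats each directory name as a host:channel:target:lun address, skipping entries whose first field is 0-3 when /dev/disk/azure is empty (those belong to the OS disks) and matching on the last field. A stand-alone sketch of just that address-parsing step; the device names are made up:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// matchesLun reports whether a /sys/bus/scsi/devices entry such as "5:0:0:1"
// refers to the wanted LUN, skipping the 0-3 range used by the OS disks.
func matchesLun(name string, wantLun int) bool {
	arr := strings.Split(name, ":")
	if len(arr) < 4 {
		return false // not a host:channel:target:lun address
	}
	target, err := strconv.Atoi(arr[0])
	if err != nil || target <= 3 {
		return false
	}
	lun, err := strconv.Atoi(arr[3])
	return err == nil && lun == wantLun
}

func main() {
	for _, name := range []string{"3:0:0:1", "5:0:0:1", "host1", "target2:0:0"} {
		fmt.Println(name, matchesLun(name, 1))
	}
}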

vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common_test.go generated vendored Normal file

@@ -0,0 +1,136 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure_dd
import (
"fmt"
"os"
"runtime"
"strings"
"testing"
"time"
"k8s.io/kubernetes/pkg/util/mount"
)
type fakeFileInfo struct {
name string
}
func (fi *fakeFileInfo) Name() string {
return fi.name
}
func (fi *fakeFileInfo) Size() int64 {
return 0
}
func (fi *fakeFileInfo) Mode() os.FileMode {
return 777
}
func (fi *fakeFileInfo) ModTime() time.Time {
return time.Now()
}
func (fi *fakeFileInfo) IsDir() bool {
return false
}
func (fi *fakeFileInfo) Sys() interface{} {
return nil
}
var (
lun = 1
lunStr = "1"
diskPath = "4:0:0:" + lunStr
devName = "sdd"
lun1 = 2
lunStr1 = "2"
diskPath1 = "3:0:0:" + lunStr1
devName1 = "sde"
)
type fakeIOHandler struct{}
func (handler *fakeIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) {
switch dirname {
case "/sys/bus/scsi/devices":
f1 := &fakeFileInfo{
name: "3:0:0:1",
}
f2 := &fakeFileInfo{
name: "4:0:0:0",
}
f3 := &fakeFileInfo{
name: diskPath,
}
f4 := &fakeFileInfo{
name: "host1",
}
f5 := &fakeFileInfo{
name: "target2:0:0",
}
return []os.FileInfo{f1, f2, f3, f4, f5}, nil
case "/sys/bus/scsi/devices/" + diskPath + "/block":
n := &fakeFileInfo{
name: devName,
}
return []os.FileInfo{n}, nil
case "/sys/bus/scsi/devices/" + diskPath1 + "/block":
n := &fakeFileInfo{
name: devName1,
}
return []os.FileInfo{n}, nil
}
return nil, fmt.Errorf("bad dir")
}
func (handler *fakeIOHandler) WriteFile(filename string, data []byte, perm os.FileMode) error {
return nil
}
func (handler *fakeIOHandler) Readlink(name string) (string, error) {
return "/dev/azure/disk/sda", nil
}
func (handler *fakeIOHandler) ReadFile(filename string) ([]byte, error) {
if strings.HasSuffix(filename, "vendor") {
return []byte("Msft \n"), nil
}
if strings.HasSuffix(filename, "model") {
return []byte("Virtual Disk \n"), nil
}
return nil, fmt.Errorf("unknown file")
}
func TestIoHandler(t *testing.T) {
if runtime.GOOS != "windows" && runtime.GOOS != "linux" {
t.Skipf("TestIoHandler not supported on GOOS=%s", runtime.GOOS)
}
disk, err := findDiskByLun(lun, &fakeIOHandler{}, mount.NewOsExec())
if runtime.GOOS == "windows" {
if err != nil {
t.Errorf("no data disk found: disk %v err %v", disk, err)
}
} else {
// if no disk matches lun, exit
if disk != "/dev/"+devName || err != nil {
t.Errorf("no data disk found: disk %v err %v", disk, err)
}
}
}

vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common_unsupported.go generated vendored Normal file

@@ -0,0 +1,31 @@
// +build !linux,!windows
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure_dd
import "k8s.io/kubernetes/pkg/util/mount"
func scsiHostRescan(io ioHandler, exec mount.Exec) {
}
func findDiskByLun(lun int, io ioHandler, exec mount.Exec) (string, error) {
return "", nil
}
func formatIfNotFormatted(disk string, fstype string, exec mount.Exec) {
}

vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common_windows.go generated vendored Normal file

@@ -0,0 +1,114 @@
// +build windows
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure_dd
import (
"encoding/json"
"fmt"
"strconv"
"strings"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/util/mount"
)
func scsiHostRescan(io ioHandler, exec mount.Exec) {
cmd := "Update-HostStorageCache"
output, err := exec.Run("powershell", "/c", cmd)
if err != nil {
glog.Errorf("Update-HostStorageCache failed in scsiHostRescan, error: %v, output: %q", err, string(output))
}
}
// findDiskByLun searches for the Windows disk number by LUN
func findDiskByLun(lun int, iohandler ioHandler, exec mount.Exec) (string, error) {
cmd := `Get-Disk | select number, location | ConvertTo-Json`
output, err := exec.Run("powershell", "/c", cmd)
if err != nil {
glog.Errorf("Get-Disk failed in findDiskByLun, error: %v, output: %q", err, string(output))
return "", err
}
if len(output) < 10 {
return "", fmt.Errorf("Get-Disk output is too short, output: %q", string(output))
}
var data []map[string]interface{}
if err = json.Unmarshal(output, &data); err != nil {
glog.Errorf("Get-Disk output is not a json array, output: %q", string(output))
return "", err
}
for _, v := range data {
if jsonLocation, ok := v["location"]; ok {
if location, ok := jsonLocation.(string); ok {
if !strings.Contains(location, " LUN ") {
continue
}
arr := strings.Split(location, " ")
arrLen := len(arr)
if arrLen < 3 {
glog.Warningf("unexpected json structure from Get-Disk, location: %q", jsonLocation)
continue
}
glog.V(4).Infof("found a disk, locatin: %q, lun: %q", location, arr[arrLen-1])
//last element of location field is LUN number, e.g.
// "location": "Integrated : Adapter 3 : Port 0 : Target 0 : LUN 1"
l, err := strconv.Atoi(arr[arrLen-1])
if err != nil {
glog.Warningf("cannot parse element from data structure, location: %q, element: %q", location, arr[arrLen-1])
continue
}
if l == lun {
glog.V(4).Infof("found a disk and lun, locatin: %q, lun: %d", location, lun)
if d, ok := v["number"]; ok {
if diskNum, ok := d.(float64); ok {
glog.V(2).Infof("azureDisk Mount: got disk number(%d) by LUN(%d)", int(diskNum), lun)
return strconv.Itoa(int(diskNum)), nil
}
glog.Warningf("LUN(%d) found, but could not get disk number(%q), location: %q", lun, d, location)
}
return "", fmt.Errorf("LUN(%d) found, but could not get disk number, location: %q", lun, location)
}
}
}
}
return "", nil
}
func formatIfNotFormatted(disk string, fstype string, exec mount.Exec) {
if err := mount.ValidateDiskNumber(disk); err != nil {
glog.Errorf("azureDisk Mount: formatIfNotFormatted failed, err: %v\n", err)
return
}
cmd := fmt.Sprintf("Get-Disk -Number %s | Where partitionstyle -eq 'raw' | Initialize-Disk -PartitionStyle MBR -PassThru", disk)
cmd += " | New-Partition -AssignDriveLetter -UseMaximumSize | Format-Volume -FileSystem NTFS -Confirm:$false"
output, err := exec.Run("powershell", "/c", cmd)
if err != nil {
glog.Errorf("azureDisk Mount: Get-Disk failed, error: %v, output: %q", err, string(output))
} else {
glog.Infof("azureDisk Mount: Disk successfully formatted, disk: %q, fstype: %q\n", disk, fstype)
}
}
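On Windows, findDiskByLun shells out to PowerShell's Get-Disk, converts the result to JSON, and matches the trailing LUN number in each disk's location field. A stand-alone sketch of just the JSON handling, using a hypothetical payload shaped like the ConvertTo-Json output above:

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Hypothetical `Get-Disk | select number, location | ConvertTo-Json` output.
	output := []byte(`[
	 {"number": 0, "location": "Integrated : Adapter 0 : Port 0 : Target 0 : LUN 0"},
	 {"number": 2, "location": "Integrated : Adapter 3 : Port 0 : Target 0 : LUN 1"}
	]`)
	wantLun := 1

	var data []map[string]interface{}
	if err := json.Unmarshal(output, &data); err != nil {
		panic(err)
	}
	for _, v := range data {
		location, _ := v["location"].(string)
		if !strings.Contains(location, " LUN ") {
			continue
		}
		arr := strings.Split(location, " ")
		if lun, err := strconv.Atoi(arr[len(arr)-1]); err == nil && lun == wantLun {
			if num, ok := v["number"].(float64); ok {
				fmt.Printf("disk number %d is at LUN %d\n", int(num), wantLun)
			}
		}
	}
}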

vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd.go generated vendored Normal file

@@ -0,0 +1,215 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure_dd
import (
"github.com/Azure/azure-sdk-for-go/arm/compute"
storage "github.com/Azure/azure-sdk-for-go/arm/storage"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
)
// DiskController is the interface exposed by the cloud provider implementing disk functionality
type DiskController interface {
CreateBlobDisk(dataDiskName string, storageAccountType storage.SkuName, sizeGB int) (string, error)
DeleteBlobDisk(diskUri string) error
CreateManagedDisk(diskName string, storageAccountType storage.SkuName, sizeGB int, tags map[string]string) (string, error)
DeleteManagedDisk(diskURI string) error
// Attaches the disk to the host machine.
AttachDisk(isManagedDisk bool, diskName, diskUri string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error
// Detaches the disk, identified by disk name or uri, from the host machine.
DetachDiskByName(diskName, diskUri string, nodeName types.NodeName) error
// Check if a list of volumes are attached to the node with the specified NodeName
DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error)
// Get the LUN number of the disk that is attached to the host
GetDiskLun(diskName, diskUri string, nodeName types.NodeName) (int32, error)
// Get the next available LUN number to attach a new VHD
GetNextDiskLun(nodeName types.NodeName) (int32, error)
// Create a VHD blob
CreateVolume(name, storageAccount, storageAccountType, location string, requestGB int) (string, string, int, error)
// Delete a VHD blob
DeleteVolume(diskURI string) error
}
type azureDataDiskPlugin struct {
host volume.VolumeHost
}
var _ volume.VolumePlugin = &azureDataDiskPlugin{}
var _ volume.PersistentVolumePlugin = &azureDataDiskPlugin{}
var _ volume.DeletableVolumePlugin = &azureDataDiskPlugin{}
var _ volume.ProvisionableVolumePlugin = &azureDataDiskPlugin{}
var _ volume.AttachableVolumePlugin = &azureDataDiskPlugin{}
const (
azureDataDiskPluginName = "kubernetes.io/azure-disk"
)
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&azureDataDiskPlugin{}}
}
func (plugin *azureDataDiskPlugin) Init(host volume.VolumeHost) error {
plugin.host = host
return nil
}
func (plugin *azureDataDiskPlugin) GetPluginName() string {
return azureDataDiskPluginName
}
func (plugin *azureDataDiskPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
volumeSource, err := getVolumeSource(spec)
if err != nil {
return "", err
}
return volumeSource.DataDiskURI, nil
}
func (plugin *azureDataDiskPlugin) CanSupport(spec *volume.Spec) bool {
return (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AzureDisk != nil) ||
(spec.Volume != nil && spec.Volume.AzureDisk != nil)
}
func (plugin *azureDataDiskPlugin) RequiresRemount() bool {
return false
}
func (plugin *azureDataDiskPlugin) SupportsMountOption() bool {
return true
}
func (plugin *azureDataDiskPlugin) SupportsBulkVolumeVerification() bool {
return false
}
func (plugin *azureDataDiskPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
return []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
}
}
// NewAttacher initializes an Attacher
func (plugin *azureDataDiskPlugin) NewAttacher() (volume.Attacher, error) {
azure, err := getCloud(plugin.host)
if err != nil {
glog.Errorf("failed to get azure cloud in NewAttacher, plugin.host : %s, err:%v", plugin.host.GetHostName(), err)
return nil, err
}
return &azureDiskAttacher{
plugin: plugin,
cloud: azure,
}, nil
}
func (plugin *azureDataDiskPlugin) NewDetacher() (volume.Detacher, error) {
azure, err := getCloud(plugin.host)
if err != nil {
glog.V(4).Infof("failed to get azure cloud in NewDetacher, plugin.host : %s", plugin.host.GetHostName())
return nil, err
}
return &azureDiskDetacher{
plugin: plugin,
cloud: azure,
}, nil
}
func (plugin *azureDataDiskPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
volumeSource, err := getVolumeSource(spec)
if err != nil {
return nil, err
}
disk := makeDataDisk(spec.Name(), "", volumeSource.DiskName, plugin.host)
return &azureDiskDeleter{
spec: spec,
plugin: plugin,
dataDisk: disk,
}, nil
}
func (plugin *azureDataDiskPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
if len(options.PVC.Spec.AccessModes) == 0 {
options.PVC.Spec.AccessModes = plugin.GetAccessModes()
}
return &azureDiskProvisioner{
plugin: plugin,
options: options,
}, nil
}
func (plugin *azureDataDiskPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, options volume.VolumeOptions) (volume.Mounter, error) {
volumeSource, err := getVolumeSource(spec)
if err != nil {
return nil, err
}
disk := makeDataDisk(spec.Name(), pod.UID, volumeSource.DiskName, plugin.host)
return &azureDiskMounter{
plugin: plugin,
spec: spec,
options: options,
dataDisk: disk,
}, nil
}
func (plugin *azureDataDiskPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
disk := makeDataDisk(volName, podUID, "", plugin.host)
return &azureDiskUnmounter{
plugin: plugin,
dataDisk: disk,
}, nil
}
func (plugin *azureDataDiskPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
mounter := plugin.host.GetMounter(plugin.GetPluginName())
pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName())
sourceName, err := mounter.GetDeviceNameFromMount(mountPath, pluginDir)
if err != nil {
return nil, err
}
azureVolume := &v1.Volume{
Name: volumeName,
VolumeSource: v1.VolumeSource{
AzureDisk: &v1.AzureDiskVolumeSource{
DataDiskURI: sourceName,
},
},
}
return volume.NewSpecFromVolume(azureVolume), nil
}
func (plugin *azureDataDiskPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
m := plugin.host.GetMounter(plugin.GetPluginName())
return mount.GetMountRefs(m, deviceMountPath)
}

vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd_test.go generated vendored Normal file

@@ -0,0 +1,55 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure_dd
import (
"os"
"testing"
"k8s.io/api/core/v1"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
func TestCanSupport(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("azure_dd")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName(azureDataDiskPluginName)
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if plug.GetPluginName() != azureDataDiskPluginName {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{AzureDisk: &v1.AzureDiskVolumeSource{}}}}) {
t.Errorf("Expected true")
}
if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{AzureDisk: &v1.AzureDiskVolumeSource{}}}}}) {
t.Errorf("Expected true")
}
}
// The fakeAzureProvider type was removed because none of its functions were used.
// Testing mounting would require path calculation, which depends on the cloud provider; the provider is faked in the test above.

vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_mounter.go generated vendored Normal file

@@ -0,0 +1,195 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure_dd
import (
"fmt"
"os"
"runtime"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
)
type azureDiskMounter struct {
*dataDisk
spec *volume.Spec
plugin *azureDataDiskPlugin
options volume.VolumeOptions
}
type azureDiskUnmounter struct {
*dataDisk
plugin *azureDataDiskPlugin
}
var _ volume.Unmounter = &azureDiskUnmounter{}
var _ volume.Mounter = &azureDiskMounter{}
func (m *azureDiskMounter) GetAttributes() volume.Attributes {
readOnly := false
volumeSource, err := getVolumeSource(m.spec)
if err != nil {
glog.Infof("azureDisk - mounter failed to get volume source for spec %s %v", m.spec.Name(), err)
} else if volumeSource.ReadOnly != nil {
readOnly = *volumeSource.ReadOnly
}
return volume.Attributes{
ReadOnly: readOnly,
Managed: !readOnly,
SupportsSELinux: true,
}
}
func (m *azureDiskMounter) CanMount() error {
return nil
}
func (m *azureDiskMounter) SetUp(fsGroup *int64) error {
return m.SetUpAt(m.GetPath(), fsGroup)
}
func (m *azureDiskMounter) GetPath() string {
return getPath(m.dataDisk.podUID, m.dataDisk.volumeName, m.plugin.host)
}
func (m *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
mounter := m.plugin.host.GetMounter(m.plugin.GetPluginName())
volumeSource, err := getVolumeSource(m.spec)
if err != nil {
glog.Infof("azureDisk - mounter failed to get volume source for spec %s", m.spec.Name())
return err
}
diskName := volumeSource.DiskName
mountPoint, err := mounter.IsLikelyNotMountPoint(dir)
if err != nil && !os.IsNotExist(err) {
glog.Infof("azureDisk - cannot validate mount point for disk %s on %s %v", diskName, dir, err)
return err
}
if !mountPoint {
glog.V(4).Infof("azureDisk - already mounted to target %s", dir)
return nil
}
if runtime.GOOS != "windows" {
// on Windows we use mklink to mount, so MkdirAll is done in the Mount func
if err := os.MkdirAll(dir, 0750); err != nil {
glog.Errorf("azureDisk - mkdir failed on disk %s on dir: %s (%v)", diskName, dir, err)
return err
}
}
options := []string{"bind"}
if volumeSource.ReadOnly != nil && *volumeSource.ReadOnly {
options = append(options, "ro")
}
glog.V(4).Infof("azureDisk - Attempting to mount %s on %s", diskName, dir)
isManagedDisk := (*volumeSource.Kind == v1.AzureManagedDisk)
globalPDPath, err := makeGlobalPDPath(m.plugin.host, volumeSource.DataDiskURI, isManagedDisk)
if err != nil {
return err
}
mountErr := mounter.Mount(globalPDPath, dir, *volumeSource.FSType, options)
// Everything in the following control flow is meant as an
// attempt to clean up a failed SetUpAt (bind mount)
if mountErr != nil {
glog.Infof("azureDisk - SetupAt:Mount disk:%s at dir:%s failed during mounting with error:%v, will attempt to clean up", diskName, dir, mountErr)
mountPoint, err := mounter.IsLikelyNotMountPoint(dir)
if err != nil {
return fmt.Errorf("azureDisk - SetupAt:Mount:Failure:cleanup IsLikelyNotMountPoint check failed for disk:%s on dir:%s with error %v original-mountErr:%v", diskName, dir, err, mountErr)
}
if !mountPoint {
if err = mounter.Unmount(dir); err != nil {
return fmt.Errorf("azureDisk - SetupAt:Mount:Failure:cleanup failed to unmount disk:%s on dir:%s with error:%v original-mountErr:%v", diskName, dir, err, mountErr)
}
mountPoint, err := mounter.IsLikelyNotMountPoint(dir)
if err != nil {
return fmt.Errorf("azureDisk - SetupAt:Mount:Failure:cleanup IsLikelyNotMountPoint for disk:%s on dir:%s check failed with error:%v original-mountErr:%v", diskName, dir, err, mountErr)
}
if !mountPoint {
// not cool. leave for next sync loop.
return fmt.Errorf("azureDisk - SetupAt:Mount:Failure:cleanup disk %s is still mounted on %s during cleanup original-mountErr:%v, despite call to unmount(). Will try again next sync loop.", diskName, dir, mountErr)
}
}
if err = os.Remove(dir); err != nil {
return fmt.Errorf("azureDisk - SetupAt:Mount:Failure error cleaning up (removing dir:%s) with error:%v original-mountErr:%v", dir, err, mountErr)
}
glog.V(2).Infof("azureDisk - Mount of disk:%s on dir:%s failed with mount error:%v post failure clean up was completed", diskName, dir, err, mountErr)
return mountErr
}
if volumeSource.ReadOnly == nil || !*volumeSource.ReadOnly {
volume.SetVolumeOwnership(m, fsGroup)
}
glog.V(2).Infof("azureDisk - successfully mounted disk %s on %s", diskName, dir)
return nil
}
func (u *azureDiskUnmounter) TearDown() error {
return u.TearDownAt(u.GetPath())
}
func (u *azureDiskUnmounter) TearDownAt(dir string) error {
if pathExists, pathErr := util.PathExists(dir); pathErr != nil {
return fmt.Errorf("Error checking if path exists: %v", pathErr)
} else if !pathExists {
glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir)
return nil
}
glog.V(4).Infof("azureDisk - TearDownAt: %s", dir)
mounter := u.plugin.host.GetMounter(u.plugin.GetPluginName())
mountPoint, err := mounter.IsLikelyNotMountPoint(dir)
if err != nil {
return fmt.Errorf("azureDisk - TearDownAt: %s failed to do IsLikelyNotMountPoint %s", dir, err)
}
if mountPoint {
if err := os.Remove(dir); err != nil {
return fmt.Errorf("azureDisk - TearDownAt: %s failed to do os.Remove %s", dir, err)
}
}
if err := mounter.Unmount(dir); err != nil {
return fmt.Errorf("azureDisk - TearDownAt: %s failed to do mounter.Unmount %s", dir, err)
}
mountPoint, err = mounter.IsLikelyNotMountPoint(dir)
if err != nil {
return fmt.Errorf("azureDisk - TearTownAt:IsLikelyNotMountPoint check failed: %v", err)
}
if mountPoint {
return os.Remove(dir)
}
return fmt.Errorf("azureDisk - failed to un-bind-mount volume dir")
}
func (u *azureDiskUnmounter) GetPath() string {
return getPath(u.dataDisk.podUID, u.dataDisk.volumeName, u.plugin.host)
}
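The cleanup branch of SetUpAt is careful about ordering: it only removes the target directory after verifying nothing is mounted there, and otherwise leaves the directory for the next sync loop. A stand-alone sketch of that ordering with fake mount callbacks (every name here is hypothetical, not part of the plugin):

package main

import (
	"errors"
	"fmt"
	"os"
)

// cleanupFailedMount sketches the order SetUpAt uses after a failed bind mount:
// unmount if needed, re-check, and only then remove the directory.
func cleanupFailedMount(dir string, isNotMountPoint func(string) (bool, error), unmount func(string) error) error {
	notMnt, err := isNotMountPoint(dir)
	if err != nil {
		return err
	}
	if !notMnt {
		if err := unmount(dir); err != nil {
			return err
		}
		if notMnt, err = isNotMountPoint(dir); err != nil {
			return err
		}
		if !notMnt {
			return errors.New("still mounted; leave for the next sync loop")
		}
	}
	return os.Remove(dir)
}

func main() {
	dir, err := os.MkdirTemp("", "azure-dd-example")
	if err != nil {
		panic(err)
	}
	mounted := true // pretend the bind mount partially succeeded
	err = cleanupFailedMount(dir,
		func(string) (bool, error) { return !mounted, nil },
		func(string) error { mounted = false; return nil },
	)
	fmt.Println("cleanup error:", err) // cleanup error: <nil>
}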

vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_provision.go generated vendored Normal file

@@ -0,0 +1,191 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure_dd
import (
"fmt"
"strings"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/volume"
)
type azureDiskProvisioner struct {
plugin *azureDataDiskPlugin
options volume.VolumeOptions
}
type azureDiskDeleter struct {
*dataDisk
spec *volume.Spec
plugin *azureDataDiskPlugin
}
var _ volume.Provisioner = &azureDiskProvisioner{}
var _ volume.Deleter = &azureDiskDeleter{}
func (d *azureDiskDeleter) GetPath() string {
return getPath(d.podUID, d.dataDisk.diskName, d.plugin.host)
}
func (d *azureDiskDeleter) Delete() error {
volumeSource, err := getVolumeSource(d.spec)
if err != nil {
return err
}
diskController, err := getDiskController(d.plugin.host)
if err != nil {
return err
}
managed := (*volumeSource.Kind == v1.AzureManagedDisk)
if managed {
return diskController.DeleteManagedDisk(volumeSource.DataDiskURI)
}
return diskController.DeleteBlobDisk(volumeSource.DataDiskURI)
}
func (p *azureDiskProvisioner) Provision() (*v1.PersistentVolume, error) {
if !volume.AccessModesContainedInAll(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) {
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", p.options.PVC.Spec.AccessModes, p.plugin.GetAccessModes())
}
supportedModes := p.plugin.GetAccessModes()
// perform static validation first
if p.options.PVC.Spec.Selector != nil {
return nil, fmt.Errorf("azureDisk - claim.Spec.Selector is not supported for dynamic provisioning on Azure disk")
}
if len(p.options.PVC.Spec.AccessModes) > 1 {
return nil, fmt.Errorf("AzureDisk - multiple access modes are not supported on AzureDisk plugin")
}
if len(p.options.PVC.Spec.AccessModes) == 1 {
if p.options.PVC.Spec.AccessModes[0] != supportedModes[0] {
return nil, fmt.Errorf("AzureDisk - mode %s is not supporetd by AzureDisk plugin supported mode is %s", p.options.PVC.Spec.AccessModes[0], supportedModes)
}
}
var (
location, account string
storageAccountType, fsType string
cachingMode v1.AzureDataDiskCachingMode
strKind string
err error
)
// maxLength = 79 - (4 for ".vhd") = 75
name := volume.GenerateVolumeName(p.options.ClusterName, p.options.PVName, 75)
capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
requestBytes := capacity.Value()
requestGB := int(volume.RoundUpSize(requestBytes, 1024*1024*1024))
for k, v := range p.options.Parameters {
switch strings.ToLower(k) {
case "skuname":
storageAccountType = v
case "location":
location = v
case "storageaccount":
account = v
case "storageaccounttype":
storageAccountType = v
case "kind":
strKind = v
case "cachingmode":
cachingMode = v1.AzureDataDiskCachingMode(v)
case volume.VolumeParameterFSType:
fsType = strings.ToLower(v)
default:
return nil, fmt.Errorf("AzureDisk - invalid option %s in storage class", k)
}
}
// normalize values
fsType = normalizeFsType(fsType)
skuName, err := normalizeStorageAccountType(storageAccountType)
if err != nil {
return nil, err
}
kind, err := normalizeKind(strFirstLetterToUpper(strKind))
if err != nil {
return nil, err
}
if cachingMode, err = normalizeCachingMode(cachingMode); err != nil {
return nil, err
}
diskController, err := getDiskController(p.plugin.host)
if err != nil {
return nil, err
}
// create disk
diskURI := ""
if kind == v1.AzureManagedDisk {
diskURI, err = diskController.CreateManagedDisk(name, skuName, requestGB, *(p.options.CloudTags))
if err != nil {
return nil, err
}
} else {
if kind == v1.AzureDedicatedBlobDisk {
_, diskURI, _, err = diskController.CreateVolume(name, account, storageAccountType, location, requestGB)
if err != nil {
return nil, err
}
} else {
diskURI, err = diskController.CreateBlobDisk(name, skuName, requestGB)
if err != nil {
return nil, err
}
}
}
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: p.options.PVName,
Labels: map[string]string{},
Annotations: map[string]string{
"volumehelper.VolumeDynamicallyCreatedByKey": "azure-disk-dynamic-provisioner",
},
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: p.options.PersistentVolumeReclaimPolicy,
AccessModes: supportedModes,
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", requestGB)),
},
PersistentVolumeSource: v1.PersistentVolumeSource{
AzureDisk: &v1.AzureDiskVolumeSource{
CachingMode: &cachingMode,
DiskName: name,
DataDiskURI: diskURI,
Kind: &kind,
FSType: &fsType,
},
},
MountOptions: p.options.MountOptions,
},
}
return pv, nil
}
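Provision rounds the PVC's requested bytes up to whole GiB before asking the cloud provider for a disk (volume.RoundUpSize with a 1 GiB allocation unit). The same arithmetic as a stand-alone sketch; the helper name is made up and only mirrors what that call is expected to do:

package main

import "fmt"

// roundUpGiB mirrors RoundUpSize(requestBytes, 1024*1024*1024): any remainder
// bumps the request to the next whole GiB.
func roundUpGiB(requestBytes int64) int {
	const giB = int64(1024 * 1024 * 1024)
	return int((requestBytes + giB - 1) / giB)
}

func main() {
	fmt.Println(roundUpGiB(5 * 1024 * 1024 * 1024)) // 5
	fmt.Println(roundUpGiB(5*1024*1024*1024 + 1))   // 6
}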