Fresh dep ensure

Mike Cronce
2018-11-26 13:23:56 -05:00
parent 93cb8a04d7
commit 407478ab9a
9016 changed files with 551394 additions and 279685 deletions

View File

@ -6,6 +6,8 @@ go_library(
"atomic_writer.go",
"attach_limit.go",
"device_util.go",
"device_util_linux.go",
"device_util_unsupported.go",
"doc.go",
"error.go",
"finalizer.go",
@ -14,42 +16,7 @@ go_library(
"nested_volumes.go",
"resize_util.go",
"util.go",
] + select({
"@io_bazel_rules_go//go/platform:android": [
"device_util_unsupported.go",
],
"@io_bazel_rules_go//go/platform:darwin": [
"device_util_unsupported.go",
],
"@io_bazel_rules_go//go/platform:dragonfly": [
"device_util_unsupported.go",
],
"@io_bazel_rules_go//go/platform:freebsd": [
"device_util_unsupported.go",
],
"@io_bazel_rules_go//go/platform:linux": [
"device_util_linux.go",
],
"@io_bazel_rules_go//go/platform:nacl": [
"device_util_unsupported.go",
],
"@io_bazel_rules_go//go/platform:netbsd": [
"device_util_unsupported.go",
],
"@io_bazel_rules_go//go/platform:openbsd": [
"device_util_unsupported.go",
],
"@io_bazel_rules_go//go/platform:plan9": [
"device_util_unsupported.go",
],
"@io_bazel_rules_go//go/platform:solaris": [
"device_util_unsupported.go",
],
"@io_bazel_rules_go//go/platform:windows": [
"device_util_unsupported.go",
],
"//conditions:default": [],
}),
],
importpath = "k8s.io/kubernetes/pkg/volume/util",
visibility = ["//visibility:public"],
deps = [
@ -58,50 +25,55 @@ go_library(
"//pkg/features:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/resizefs:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util/types:go_default_library",
"//pkg/volume/util/volumepathhandler:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/storage/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"atomic_writer_test.go",
"attach_limit_test.go",
"device_util_linux_test.go",
"main_test.go",
"nested_volumes_test.go",
"resize_util_test.go",
"util_test.go",
] + select({
"@io_bazel_rules_go//go/platform:linux": [
"atomic_writer_test.go",
"device_util_linux_test.go",
],
"//conditions:default": [],
}),
],
embed = [":go_default_library"],
deps = [
"//pkg/apis/core/install:go_default_library",
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/slice:go_default_library",
"//pkg/volume:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
],
)

View File

@ -27,7 +27,7 @@ import (
"strings"
"time"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/apimachinery/pkg/util/sets"
)
@ -61,6 +61,7 @@ type AtomicWriter struct {
logContext string
}
// FileProjection contains file Data and access Mode
type FileProjection struct {
Data []byte
Mode int32
@ -120,7 +121,7 @@ func (w *AtomicWriter) Write(payload map[string]FileProjection) error {
// (1)
cleanPayload, err := validatePayload(payload)
if err != nil {
glog.Errorf("%s: invalid payload: %v", w.logContext, err)
klog.Errorf("%s: invalid payload: %v", w.logContext, err)
return err
}
@ -129,7 +130,7 @@ func (w *AtomicWriter) Write(payload map[string]FileProjection) error {
oldTsDir, err := os.Readlink(dataDirPath)
if err != nil {
if !os.IsNotExist(err) {
glog.Errorf("%s: error reading link for data directory: %v", w.logContext, err)
klog.Errorf("%s: error reading link for data directory: %v", w.logContext, err)
return err
}
// although Readlink() returns "" on err, don't be fragile by relying on it (since it's not specified in docs)
@ -144,41 +145,40 @@ func (w *AtomicWriter) Write(payload map[string]FileProjection) error {
// (3)
pathsToRemove, err = w.pathsToRemove(cleanPayload, oldTsPath)
if err != nil {
glog.Errorf("%s: error determining user-visible files to remove: %v", w.logContext, err)
klog.Errorf("%s: error determining user-visible files to remove: %v", w.logContext, err)
return err
}
// (4)
if should, err := shouldWritePayload(cleanPayload, oldTsPath); err != nil {
glog.Errorf("%s: error determining whether payload should be written to disk: %v", w.logContext, err)
klog.Errorf("%s: error determining whether payload should be written to disk: %v", w.logContext, err)
return err
} else if !should && len(pathsToRemove) == 0 {
glog.V(4).Infof("%s: no update required for target directory %v", w.logContext, w.targetDir)
klog.V(4).Infof("%s: no update required for target directory %v", w.logContext, w.targetDir)
return nil
} else {
glog.V(4).Infof("%s: write required for target directory %v", w.logContext, w.targetDir)
klog.V(4).Infof("%s: write required for target directory %v", w.logContext, w.targetDir)
}
}
// (5)
tsDir, err := w.newTimestampDir()
if err != nil {
glog.V(4).Infof("%s: error creating new ts data directory: %v", w.logContext, err)
klog.V(4).Infof("%s: error creating new ts data directory: %v", w.logContext, err)
return err
}
tsDirName := filepath.Base(tsDir)
// (6)
if err = w.writePayloadToDir(cleanPayload, tsDir); err != nil {
glog.Errorf("%s: error writing payload to ts data directory %s: %v", w.logContext, tsDir, err)
klog.Errorf("%s: error writing payload to ts data directory %s: %v", w.logContext, tsDir, err)
return err
} else {
glog.V(4).Infof("%s: performed write of new data to ts data directory: %s", w.logContext, tsDir)
}
klog.V(4).Infof("%s: performed write of new data to ts data directory: %s", w.logContext, tsDir)
// (7)
if err = w.createUserVisibleFiles(cleanPayload); err != nil {
glog.Errorf("%s: error creating visible symlinks in %s: %v", w.logContext, w.targetDir, err)
klog.Errorf("%s: error creating visible symlinks in %s: %v", w.logContext, w.targetDir, err)
return err
}
@ -186,7 +186,7 @@ func (w *AtomicWriter) Write(payload map[string]FileProjection) error {
newDataDirPath := path.Join(w.targetDir, newDataDirName)
if err = os.Symlink(tsDirName, newDataDirPath); err != nil {
os.RemoveAll(tsDir)
glog.Errorf("%s: error creating symbolic link for atomic update: %v", w.logContext, err)
klog.Errorf("%s: error creating symbolic link for atomic update: %v", w.logContext, err)
return err
}
@ -201,20 +201,20 @@ func (w *AtomicWriter) Write(payload map[string]FileProjection) error {
if err != nil {
os.Remove(newDataDirPath)
os.RemoveAll(tsDir)
glog.Errorf("%s: error renaming symbolic link for data directory %s: %v", w.logContext, newDataDirPath, err)
klog.Errorf("%s: error renaming symbolic link for data directory %s: %v", w.logContext, newDataDirPath, err)
return err
}
// (10)
if err = w.removeUserVisiblePaths(pathsToRemove); err != nil {
glog.Errorf("%s: error removing old visible symlinks: %v", w.logContext, err)
klog.Errorf("%s: error removing old visible symlinks: %v", w.logContext, err)
return err
}
// (11)
if len(oldTsDir) > 0 {
if err = os.RemoveAll(oldTsPath); err != nil {
glog.Errorf("%s: error removing old data directory %s: %v", w.logContext, oldTsDir, err)
klog.Errorf("%s: error removing old data directory %s: %v", w.logContext, oldTsDir, err)
return err
}
}
@ -222,7 +222,7 @@ func (w *AtomicWriter) Write(payload map[string]FileProjection) error {
return nil
}
// validatePayload validates each path in the payload and returns a copy of the payload with the paths cleaned; it returns an error if any path is invalid.
func validatePayload(payload map[string]FileProjection) (map[string]FileProjection, error) {
cleanPayload := make(map[string]FileProjection)
for k, content := range payload {
@ -329,7 +329,7 @@ func (w *AtomicWriter) pathsToRemove(payload map[string]FileProjection, oldTsDir
} else if err != nil {
return nil, err
}
glog.V(5).Infof("%s: current paths: %+v", w.targetDir, paths.List())
klog.V(5).Infof("%s: current paths: %+v", w.targetDir, paths.List())
newPaths := sets.NewString()
for file := range payload {
@ -341,10 +341,10 @@ func (w *AtomicWriter) pathsToRemove(payload map[string]FileProjection, oldTsDir
subPath = strings.TrimSuffix(subPath, string(os.PathSeparator))
}
}
glog.V(5).Infof("%s: new paths: %+v", w.targetDir, newPaths.List())
klog.V(5).Infof("%s: new paths: %+v", w.targetDir, newPaths.List())
result := paths.Difference(newPaths)
glog.V(5).Infof("%s: paths to remove: %+v", w.targetDir, result)
klog.V(5).Infof("%s: paths to remove: %+v", w.targetDir, result)
return result, nil
}
@ -353,7 +353,7 @@ func (w *AtomicWriter) pathsToRemove(payload map[string]FileProjection, oldTsDir
func (w *AtomicWriter) newTimestampDir() (string, error) {
tsDir, err := ioutil.TempDir(w.targetDir, time.Now().UTC().Format("..2006_01_02_15_04_05."))
if err != nil {
glog.Errorf("%s: unable to create new temp directory: %v", w.logContext, err)
klog.Errorf("%s: unable to create new temp directory: %v", w.logContext, err)
return "", err
}
@ -362,7 +362,7 @@ func (w *AtomicWriter) newTimestampDir() (string, error) {
// regardless of the process' umask.
err = os.Chmod(tsDir, 0755)
if err != nil {
glog.Errorf("%s: unable to set mode on new temp directory: %v", w.logContext, err)
klog.Errorf("%s: unable to set mode on new temp directory: %v", w.logContext, err)
return "", err
}
@ -380,13 +380,13 @@ func (w *AtomicWriter) writePayloadToDir(payload map[string]FileProjection, dir
err := os.MkdirAll(baseDir, os.ModePerm)
if err != nil {
glog.Errorf("%s: unable to create directory %s: %v", w.logContext, baseDir, err)
klog.Errorf("%s: unable to create directory %s: %v", w.logContext, baseDir, err)
return err
}
err = ioutil.WriteFile(fullPath, content, mode)
if err != nil {
glog.Errorf("%s: unable to write file %s with mode %v: %v", w.logContext, fullPath, mode, err)
klog.Errorf("%s: unable to write file %s with mode %v: %v", w.logContext, fullPath, mode, err)
return err
}
// Chmod is needed because ioutil.WriteFile() ends up calling
@ -395,7 +395,7 @@ func (w *AtomicWriter) writePayloadToDir(payload map[string]FileProjection, dir
// in the file no matter what the umask is.
err = os.Chmod(fullPath, mode)
if err != nil {
glog.Errorf("%s: unable to write file %s with mode %v: %v", w.logContext, fullPath, mode, err)
klog.Errorf("%s: unable to write file %s with mode %v: %v", w.logContext, fullPath, mode, err)
}
}
@ -445,7 +445,7 @@ func (w *AtomicWriter) removeUserVisiblePaths(paths sets.String) error {
continue
}
if err := os.Remove(path.Join(w.targetDir, p)); err != nil {
glog.Errorf("%s: error pruning old user-visible path %s: %v", w.logContext, p, err)
klog.Errorf("%s: error pruning old user-visible path %s: %v", w.logContext, p, err)
lasterr = err
}
}
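
The numbered steps above implement an atomic directory update: write the payload into a fresh timestamped directory, then retarget a ..data symlink with an atomic rename so readers see either the complete old payload or the complete new one, never a partial write. Below is a minimal, self-contained Go sketch of that symlink-swap pattern; atomicPublish and the ..data_tmp name are illustrative, not helpers from this file.

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"time"
)

// atomicPublish writes payload files into a fresh timestamped directory and
// swaps a "..data" symlink to point at it. Readers resolving the symlink see
// either the old directory or the new one, never a half-written mixture.
func atomicPublish(targetDir string, payload map[string][]byte) error {
	tsDir, err := ioutil.TempDir(targetDir, time.Now().UTC().Format("..2006_01_02_15_04_05."))
	if err != nil {
		return err
	}
	for name, data := range payload {
		if err := ioutil.WriteFile(filepath.Join(tsDir, name), data, 0644); err != nil {
			return err
		}
	}
	// Create the new symlink under a temporary name, then rename it over
	// "..data"; rename(2) replaces the old link atomically.
	tmpLink := filepath.Join(targetDir, "..data_tmp")
	if err := os.Symlink(filepath.Base(tsDir), tmpLink); err != nil {
		return err
	}
	return os.Rename(tmpLink, filepath.Join(targetDir, "..data"))
}

func main() {
	dir, _ := ioutil.TempDir("", "atomic-demo")
	defer os.RemoveAll(dir)
	if err := atomicPublish(dir, map[string][]byte{"token": []byte("abc")}); err != nil {
		fmt.Println("publish failed:", err)
		return
	}
	data, _ := ioutil.ReadFile(filepath.Join(dir, "..data", "token"))
	fmt.Printf("published: %s\n", data)
}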

View File

@ -16,14 +16,48 @@ limitations under the License.
package util
import (
"crypto/sha1"
"encoding/hex"
)
// This file is a common placeholder for volume limit utility constants
// shared between the volume package and the scheduler
const (
// EBSVolumeLimitKey resource name that will store volume limits for EBS
EBSVolumeLimitKey = "attachable-volumes-aws-ebs"
// EBSNitroLimitRegex finds nitro instance types with different limit than EBS defaults
EBSNitroLimitRegex = "^[cmr]5.*|t3|z1d"
// DefaultMaxEBSVolumes is the limit for volumes attached to an instance.
// Amazon recommends no more than 40; the system root volume uses at least one.
// See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/volume_limits.html#linux-specific-volume-limits
DefaultMaxEBSVolumes = 39
// DefaultMaxEBSNitroVolumeLimit is default EBS volume limit on m5 and c5 instances
DefaultMaxEBSNitroVolumeLimit = 25
// AzureVolumeLimitKey stores resource name that will store volume limits for Azure
AzureVolumeLimitKey = "attachable-volumes-azure-disk"
// GCEVolumeLimitKey stores resource name that will store volume limits for GCE node
GCEVolumeLimitKey = "attachable-volumes-gce-pd"
// CSIAttachLimitPrefix defines prefix used for CSI volumes
CSIAttachLimitPrefix = "attachable-volumes-csi-"
// ResourceNameLengthLimit stores maximum allowed Length for a ResourceName
ResourceNameLengthLimit = 63
)
// GetCSIAttachLimitKey returns limit key used for CSI volumes
func GetCSIAttachLimitKey(driverName string) string {
csiPrefixLength := len(CSIAttachLimitPrefix)
totalkeyLength := csiPrefixLength + len(driverName)
if totalkeyLength >= ResourceNameLengthLimit {
charsFromDriverName := driverName[:23]
hash := sha1.New()
hash.Write([]byte(driverName))
hashed := hex.EncodeToString(hash.Sum(nil))
hashed = hashed[:16]
return CSIAttachLimitPrefix + charsFromDriverName + hashed
}
return CSIAttachLimitPrefix + driverName
}
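
Worked arithmetic for the hashing branch above: the prefix attachable-volumes-csi- is itself 23 characters, so a long driver name is truncated to its first 23 characters and suffixed with the first 16 hex characters of its SHA-1, giving 23 + 23 + 16 = 62 characters, under the 63-character ResourceNameLengthLimit. A standalone recomputation of the scheme (driver name borrowed from the test below):

package main

import (
	"crypto/sha1"
	"encoding/hex"
	"fmt"
)

func main() {
	prefix := "attachable-volumes-csi-"                      // 23 characters
	driver := "com.amazon.kubernetes.eks.ec2.ebs/csi-driver" // long enough to trigger hashing

	sum := sha1.Sum([]byte(driver)) // same digest as sha1.New + Write + Sum(nil)
	key := prefix + driver[:23] + hex.EncodeToString(sum[:])[:16]
	fmt.Println(key, len(key)) // length 62, within the 63-character limit
}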

View File

@ -0,0 +1,55 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"crypto/sha1"
"encoding/hex"
"testing"
"k8s.io/api/core/v1"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
)
func TestGetCSIAttachLimitKey(t *testing.T) {
// When driverName is less than 39 characters
csiLimitKey := GetCSIAttachLimitKey("com.amazon.ebs")
if csiLimitKey != "attachable-volumes-csi-com.amazon.ebs" {
t.Errorf("Expected com.amazon.ebs got %s", csiLimitKey)
}
// When driver is longer than 39 chars
longDriverName := "com.amazon.kubernetes.eks.ec2.ebs/csi-driver"
csiLimitKeyLonger := GetCSIAttachLimitKey(longDriverName)
if !v1helper.IsAttachableVolumeResourceName(v1.ResourceName(csiLimitKeyLonger)) {
t.Errorf("Expected %s to have attachable prefix", csiLimitKeyLonger)
}
expectedCSIKey := getDriverHash(longDriverName)
if csiLimitKeyLonger != expectedCSIKey {
t.Errorf("Expected limit to be %s got %s", expectedCSIKey, csiLimitKeyLonger)
}
}
func getDriverHash(driverName string) string {
charsFromDriverName := driverName[:23]
hash := sha1.New()
hash.Write([]byte(driverName))
hashed := hex.EncodeToString(hash.Sum(nil))
hashed = hashed[:16]
return CSIAttachLimitPrefix + charsFromDriverName + hashed
}

View File

@ -20,13 +20,15 @@ package util
type DeviceUtil interface {
FindMultipathDeviceForDevice(disk string) string
FindSlaveDevicesOnMultipath(disk string) []string
GetISCSIPortalHostMapForTarget(targetIqn string) (map[string]int, error)
FindDevicesForISCSILun(targetIqn string, lun int) ([]string, error)
}
type deviceHandler struct {
get_io IoUtil
getIo IoUtil
}
// NewDeviceHandler creates a new DeviceUtil implementation
func NewDeviceHandler(io IoUtil) DeviceUtil {
return &deviceHandler{get_io: io}
return &deviceHandler{getIo: io}
}

View File

@ -20,13 +20,16 @@ package util
import (
"errors"
"fmt"
"k8s.io/klog"
"path"
"strconv"
"strings"
)
// FindMultipathDeviceForDevice given a device name like /dev/sdx, find the devicemapper parent
func (handler *deviceHandler) FindMultipathDeviceForDevice(device string) string {
io := handler.get_io
io := handler.getIo
disk, err := findDeviceForPath(device, io)
if err != nil {
return ""
@ -65,7 +68,7 @@ func findDeviceForPath(path string, io IoUtil) (string, error) {
// which are managed by the devicemapper dm-1.
func (handler *deviceHandler) FindSlaveDevicesOnMultipath(dm string) []string {
var devices []string
io := handler.get_io
io := handler.getIo
// Split path /dev/dm-1 into "", "dev", "dm-1"
parts := strings.Split(dm, "/")
if len(parts) != 3 || !strings.HasPrefix(parts[1], "dev") {
@ -80,3 +83,206 @@ func (handler *deviceHandler) FindSlaveDevicesOnMultipath(dm string) []string {
}
return devices
}
// GetISCSIPortalHostMapForTarget, given a target IQN, finds all the SCSI hosts logged into
// that target. It returns a map of iSCSI portals (string) to SCSI host numbers (int).
// For example: {
// "192.168.30.7:3260": 2,
// "192.168.30.8:3260": 3,
// }
func (handler *deviceHandler) GetISCSIPortalHostMapForTarget(targetIqn string) (map[string]int, error) {
portalHostMap := make(map[string]int)
io := handler.getIo
// Iterate over all the iSCSI hosts in sysfs
sysPath := "/sys/class/iscsi_host"
hostDirs, err := io.ReadDir(sysPath)
if err != nil {
return nil, err
}
for _, hostDir := range hostDirs {
// iSCSI hosts are always of the format "host%d"
// See drivers/scsi/hosts.c in Linux
hostName := hostDir.Name()
if !strings.HasPrefix(hostName, "host") {
continue
}
hostNumber, err := strconv.Atoi(strings.TrimPrefix(hostName, "host"))
if err != nil {
klog.Errorf("Could not get number from iSCSI host: %s", hostName)
continue
}
// Iterate over the children of the iscsi_host device
// We are looking for the associated session
devicePath := sysPath + "/" + hostName + "/device"
deviceDirs, err := io.ReadDir(devicePath)
if err != nil {
return nil, err
}
for _, deviceDir := range deviceDirs {
// Skip over files that aren't the session
// Sessions are of the format "session%u"
// See drivers/scsi/scsi_transport_iscsi.c in Linux
sessionName := deviceDir.Name()
if !strings.HasPrefix(sessionName, "session") {
continue
}
sessionPath := devicePath + "/" + sessionName
// Read the target name for the iSCSI session
targetNamePath := sessionPath + "/iscsi_session/" + sessionName + "/targetname"
targetName, err := io.ReadFile(targetNamePath)
if err != nil {
return nil, err
}
// Ignore hosts that don't match the target we were looking for.
if strings.TrimSpace(string(targetName)) != targetIqn {
continue
}
// Iterate over the children of the iSCSI session looking
// for the iSCSI connection.
dirs2, err := io.ReadDir(sessionPath)
if err != nil {
return nil, err
}
for _, dir2 := range dirs2 {
// Skip over files that aren't the connection
// Connections are of the format "connection%d:%u"
// See drivers/scsi/scsi_transport_iscsi.c in Linux
dirName := dir2.Name()
if !strings.HasPrefix(dirName, "connection") {
continue
}
connectionPath := sessionPath + "/" + dirName + "/iscsi_connection/" + dirName
// Read the current and persistent portal information for the connection.
addrPath := connectionPath + "/address"
addr, err := io.ReadFile(addrPath)
if err != nil {
return nil, err
}
portPath := connectionPath + "/port"
port, err := io.ReadFile(portPath)
if err != nil {
return nil, err
}
persistentAddrPath := connectionPath + "/persistent_address"
persistentAddr, err := io.ReadFile(persistentAddrPath)
if err != nil {
return nil, err
}
persistentPortPath := connectionPath + "/persistent_port"
persistentPort, err := io.ReadFile(persistentPortPath)
if err != nil {
return nil, err
}
// Add entries to the map for both the current and persistent portals
// pointing to the SCSI host for those connections
portal := strings.TrimSpace(string(addr)) + ":" +
strings.TrimSpace(string(port))
portalHostMap[portal] = hostNumber
persistentPortal := strings.TrimSpace(string(persistentAddr)) + ":" +
strings.TrimSpace(string(persistentPort))
portalHostMap[persistentPortal] = hostNumber
}
}
}
return portalHostMap, nil
}
// FindDevicesForISCSILun, given a target IQN and a LUN number, finds all the devices
// corresponding to that LUN.
func (handler *deviceHandler) FindDevicesForISCSILun(targetIqn string, lun int) ([]string, error) {
devices := make([]string, 0)
io := handler.getIo
// Iterate over all the iSCSI hosts in sysfs
sysPath := "/sys/class/iscsi_host"
hostDirs, err := io.ReadDir(sysPath)
if err != nil {
return nil, err
}
for _, hostDir := range hostDirs {
// iSCSI hosts are always of the format "host%d"
// See drivers/scsi/hosts.c in Linux
hostName := hostDir.Name()
if !strings.HasPrefix(hostName, "host") {
continue
}
hostNumber, err := strconv.Atoi(strings.TrimPrefix(hostName, "host"))
if err != nil {
klog.Errorf("Could not get number from iSCSI host: %s", hostName)
continue
}
// Iterate over the children of the iscsi_host device
// We are looking for the associated session
devicePath := sysPath + "/" + hostName + "/device"
deviceDirs, err := io.ReadDir(devicePath)
if err != nil {
return nil, err
}
for _, deviceDir := range deviceDirs {
// Skip over files that aren't the session
// Sessions are of the format "session%u"
// See drivers/scsi/scsi_transport_iscsi.c in Linux
sessionName := deviceDir.Name()
if !strings.HasPrefix(sessionName, "session") {
continue
}
// Read the target name for the iSCSI session
targetNamePath := devicePath + "/" + sessionName + "/iscsi_session/" + sessionName + "/targetname"
targetName, err := io.ReadFile(targetNamePath)
if err != nil {
return nil, err
}
// Only if the session matches the target we were looking for,
// add it to the map
if strings.TrimSpace(string(targetName)) != targetIqn {
continue
}
// The list of block devices on the scsi bus will be in a
// directory called "target%d:%d:%d".
// See drivers/scsi/scsi_scan.c in Linux
// We assume the channel/bus and device/controller are always zero for iSCSI
targetPath := devicePath + "/" + sessionName + fmt.Sprintf("/target%d:0:0", hostNumber)
// The block device for a given lun will be "%d:%d:%d:%d" --
// host:channel:bus:LUN
blockDevicePath := targetPath + fmt.Sprintf("/%d:0:0:%d", hostNumber, lun)
// If the LUN doesn't exist on this bus, continue on
_, err = io.Lstat(blockDevicePath)
if err != nil {
continue
}
// Read the block directory, there should only be one child --
// the block device "sd*"
path := blockDevicePath + "/block"
dirs, err := io.ReadDir(path)
if err != nil {
return nil, err
}
if 0 < len(dirs) {
devices = append(devices, dirs[0].Name())
}
}
}
return devices, nil
}
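
A hedged usage sketch of the two new methods, assuming it runs on a Linux node with active iSCSI sessions; the IQN here is a placeholder, not a value from this commit:

package main

import (
	"fmt"

	volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

func main() {
	// NewDeviceHandler takes an IoUtil; NewIOHandler supplies the real
	// os/ioutil-backed one, while tests substitute a mock (see below).
	handler := volumeutil.NewDeviceHandler(volumeutil.NewIOHandler())

	targetIqn := "iqn.2018-11.com.example:target1" // placeholder IQN

	portals, err := handler.GetISCSIPortalHostMapForTarget(targetIqn)
	if err != nil {
		fmt.Println("sysfs walk failed:", err)
		return
	}
	for portal, host := range portals {
		fmt.Printf("portal %s -> SCSI host %d\n", portal, host)
	}

	devices, err := handler.FindDevicesForISCSILun(targetIqn, 0)
	if err != nil {
		fmt.Println("LUN scan failed:", err)
		return
	}
	fmt.Println("block devices for LUN 0:", devices)
}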

View File

@ -22,12 +22,41 @@ import (
"errors"
"os"
"reflect"
"regexp"
"testing"
"time"
)
type mockOsIOHandler struct{}
func (handler *mockOsIOHandler) ReadFile(filename string) ([]byte, error) {
portPattern := regexp.MustCompile("^/sys/class/iscsi_host/(host\\d)/device/session\\d/connection\\d:0/iscsi_connection/connection\\d:0/(?:persistent_)?port$")
if portPattern.MatchString(filename) {
return []byte("3260"), nil
}
addressPattern := regexp.MustCompile("^/sys/class/iscsi_host/(host\\d)/device/session\\d/connection\\d:0/iscsi_connection/connection\\d:0/(?:persistent_)?address$")
matches := addressPattern.FindStringSubmatch(filename)
if nil != matches {
switch matches[1] {
case "host2":
return []byte("10.0.0.1"), nil
case "host3":
return []byte("10.0.0.2"), nil
}
}
targetNamePattern := regexp.MustCompile("^/sys/class/iscsi_host/(host\\d)/device/session\\d/iscsi_session/session\\d/targetname$")
matches = targetNamePattern.FindStringSubmatch(filename)
if nil != matches {
switch matches[1] {
case "host2":
return []byte("target1"), nil
case "host3":
return []byte("target2"), nil
}
}
return nil, errors.New("Not Implemented for Mock")
}
func (handler *mockOsIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) {
switch dirname {
case "/sys/block/dm-1/slaves":
@ -46,14 +75,81 @@ func (handler *mockOsIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) {
name: "dm-1",
}
return []os.FileInfo{f1, f2}, nil
case "/sys/class/iscsi_host":
f1 := &fakeFileInfo{
name: "host2",
}
f2 := &fakeFileInfo{
name: "host3",
}
f3 := &fakeFileInfo{
name: "ignore",
}
return []os.FileInfo{f1, f2, f3}, nil
case "/sys/class/iscsi_host/host2/device":
f1 := &fakeFileInfo{
name: "session1",
}
f2 := &fakeFileInfo{
name: "ignore",
}
return []os.FileInfo{f1, f2}, nil
case "/sys/class/iscsi_host/host3/device":
f1 := &fakeFileInfo{
name: "session2",
}
f2 := &fakeFileInfo{
name: "ignore",
}
return []os.FileInfo{f1, f2}, nil
case "/sys/class/iscsi_host/host2/device/session1":
f1 := &fakeFileInfo{
name: "connection1:0",
}
f2 := &fakeFileInfo{
name: "ignore",
}
return []os.FileInfo{f1, f2}, nil
case "/sys/class/iscsi_host/host3/device/session2":
f1 := &fakeFileInfo{
name: "connection2:0",
}
f2 := &fakeFileInfo{
name: "ignore",
}
return []os.FileInfo{f1, f2}, nil
case "/sys/class/iscsi_host/host2/device/session1/target2:0:0/2:0:0:1/block":
f1 := &fakeFileInfo{
name: "sda",
}
return []os.FileInfo{f1}, nil
case "/sys/class/iscsi_host/host2/device/session1/target2:0:0/2:0:0:2/block":
f1 := &fakeFileInfo{
name: "sdc",
}
return []os.FileInfo{f1}, nil
case "/sys/class/iscsi_host/host3/device/session2/target3:0:0/3:0:0:1/block":
f1 := &fakeFileInfo{
name: "sdb",
}
return []os.FileInfo{f1}, nil
case "/sys/class/iscsi_host/host3/device/session2/target3:0:0/3:0:0:2/block":
f1 := &fakeFileInfo{
name: "sdd",
}
return []os.FileInfo{f1}, nil
}
return nil, nil
return nil, errors.New("Not Implemented for Mock")
}
func (handler *mockOsIOHandler) Lstat(name string) (os.FileInfo, error) {
links := map[string]string{
"/sys/block/dm-1/slaves/sda": "sda",
"/dev/sda": "sda",
"/sys/class/iscsi_host/host2/device/session1/target2:0:0/2:0:0:1": "2:0:0:1",
"/sys/class/iscsi_host/host2/device/session1/target2:0:0/2:0:0:2": "2:0:0:2",
"/sys/class/iscsi_host/host3/device/session2/target3:0:0/3:0:0:1": "3:0:0:1",
"/sys/class/iscsi_host/host3/device/session2/target3:0:0/3:0:0:2": "3:0:0:2",
}
if dev, ok := links[name]; ok {
return &fakeFileInfo{name: dev}, nil
@ -63,8 +159,8 @@ func (handler *mockOsIOHandler) Lstat(name string) (os.FileInfo, error) {
func (handler *mockOsIOHandler) EvalSymlinks(path string) (string, error) {
links := map[string]string{
"/returns/a/dev": "/dev/sde",
"/returns/non/dev": "/sys/block",
"/returns/a/dev": "/dev/sde",
"/returns/non/dev": "/sys/block",
"/dev/disk/by-path/127.0.0.1:3260-eui.02004567A425678D-lun-0": "/dev/sda",
"/dev/disk/by-path/127.0.0.3:3260-eui.03004567A425678D-lun-0": "/dev/sdb",
"/dev/dm-2": "/dev/dm-2",
@ -158,3 +254,37 @@ func TestFindSlaveDevicesOnMultipath(t *testing.T) {
t.Fatalf("mpio device not found '' expected got [%s]", dev)
}
}
func TestGetISCSIPortalHostMapForTarget(t *testing.T) {
mockDeviceUtil := NewDeviceHandler(&mockOsIOHandler{})
portalHostMap, err := mockDeviceUtil.GetISCSIPortalHostMapForTarget("target1")
if nil != err {
t.Fatalf("error getting scsi hosts for target: %v", err)
}
if nil == portalHostMap {
t.Fatal("no portal host map returned")
}
if 1 != len(portalHostMap) {
t.Fatalf("wrong number of map entries in portal host map: %d", len(portalHostMap))
}
if 2 != portalHostMap["10.0.0.1:3260"] {
t.Fatalf("incorrect entry in portal host map: %v", portalHostMap)
}
}
func TestFindDevicesForISCSILun(t *testing.T) {
mockDeviceUtil := NewDeviceHandler(&mockOsIOHandler{})
devices, err := mockDeviceUtil.FindDevicesForISCSILun("target1", 1)
if nil != err {
t.Fatalf("error getting devices for lun: %v", err)
}
if nil == devices {
t.Fatal("no devices returned")
}
if 1 != len(devices) {
t.Fatalf("wrong number of devices: %d", len(devices))
}
if "sda" != devices[0] {
t.Fatalf("incorrect device %v", devices)
}
}

View File

@ -28,3 +28,15 @@ func (handler *deviceHandler) FindSlaveDevicesOnMultipath(disk string) []string
out := []string{}
return out
}
// GetISCSIPortalHostMapForTarget returns an empty map on unsupported platforms
func (handler *deviceHandler) GetISCSIPortalHostMapForTarget(targetIqn string) (map[string]int, error) {
portalHostMap := make(map[string]int)
return portalHostMap, nil
}
// FindDevicesForISCSILun returns an empty slice on unsupported platforms
func (handler *deviceHandler) FindDevicesForISCSILun(targetIqn string, lun int) ([]string, error) {
devices := []string{}
return devices, nil
}

View File

@ -14,5 +14,5 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Contains utility code for use by volume plugins.
// Package util contains utility code for use by volume plugins.
package util // import "k8s.io/kubernetes/pkg/volume/util"

View File

@ -20,7 +20,7 @@ import (
k8stypes "k8s.io/apimachinery/pkg/types"
)
// This error on attach indicates volume is attached to a different node
// DanglingAttachError indicates volume is attached to a different node
// than we expected.
type DanglingAttachError struct {
msg string
@ -32,6 +32,7 @@ func (err *DanglingAttachError) Error() string {
return err.msg
}
// NewDanglingError creates a new dangling error
func NewDanglingError(msg string, node k8stypes.NodeName, devicePath string) error {
return &DanglingAttachError{
msg: msg,

View File

@ -17,9 +17,9 @@ limitations under the License.
package util
const (
// Name of finalizer on PVCs that have a running pod.
// PVCProtectionFinalizer is the name of finalizer on PVCs that have a running pod.
PVCProtectionFinalizer = "kubernetes.io/pvc-protection"
// Name of finalizer on PVs that are bound by PVCs
// PVProtectionFinalizer is the name of finalizer on PVs that are bound by PVCs
PVProtectionFinalizer = "kubernetes.io/pv-protection"
)

View File

@ -2,80 +2,49 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = select({
"@io_bazel_rules_go//go/platform:android": [
"fs_unsupported.go",
],
"@io_bazel_rules_go//go/platform:darwin": [
"fs.go",
],
"@io_bazel_rules_go//go/platform:dragonfly": [
"fs_unsupported.go",
],
"@io_bazel_rules_go//go/platform:freebsd": [
"fs_unsupported.go",
],
"@io_bazel_rules_go//go/platform:linux": [
"fs.go",
],
"@io_bazel_rules_go//go/platform:nacl": [
"fs_unsupported.go",
],
"@io_bazel_rules_go//go/platform:netbsd": [
"fs_unsupported.go",
],
"@io_bazel_rules_go//go/platform:openbsd": [
"fs_unsupported.go",
],
"@io_bazel_rules_go//go/platform:plan9": [
"fs_unsupported.go",
],
"@io_bazel_rules_go//go/platform:solaris": [
"fs_unsupported.go",
],
"@io_bazel_rules_go//go/platform:windows": [
"fs_windows.go",
],
"//conditions:default": [],
}),
srcs = [
"fs.go",
"fs_unsupported.go",
"fs_windows.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/util/fs",
visibility = ["//visibility:public"],
deps = select({
"@io_bazel_rules_go//go/platform:android": [
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
],
"@io_bazel_rules_go//go/platform:darwin": [
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/golang.org/x/sys/unix:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
],
"@io_bazel_rules_go//go/platform:dragonfly": [
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
],
"@io_bazel_rules_go//go/platform:freebsd": [
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
],
"@io_bazel_rules_go//go/platform:linux": [
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/golang.org/x/sys/unix:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
],
"@io_bazel_rules_go//go/platform:nacl": [
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
],
"@io_bazel_rules_go//go/platform:netbsd": [
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
],
"@io_bazel_rules_go//go/platform:openbsd": [
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
],
"@io_bazel_rules_go//go/platform:plan9": [
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
],
"@io_bazel_rules_go//go/platform:solaris": [
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
],
"@io_bazel_rules_go//go/platform:windows": [
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/golang.org/x/sys/windows:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
],
"//conditions:default": [],
}),

View File

@ -24,6 +24,7 @@ import (
// IoUtil is a mockable util for common IO operations
type IoUtil interface {
ReadFile(filename string) ([]byte, error)
ReadDir(dirname string) ([]os.FileInfo, error)
Lstat(name string) (os.FileInfo, error)
EvalSymlinks(path string) (string, error)
@ -36,6 +37,9 @@ func NewIOHandler() IoUtil {
return &osIOHandler{}
}
func (handler *osIOHandler) ReadFile(filename string) ([]byte, error) {
return ioutil.ReadFile(filename)
}
func (handler *osIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) {
return ioutil.ReadDir(dirname)
}

vendor/k8s.io/kubernetes/pkg/volume/util/main_test.go (generated, vendored)
View File

@ -0,0 +1,29 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"testing"
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
_ "k8s.io/kubernetes/pkg/features"
)
func TestMain(m *testing.M) {
utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run)
}

View File

@ -17,9 +17,11 @@ limitations under the License.
package util
import (
"fmt"
"time"
"github.com/prometheus/client_golang/prometheus"
"k8s.io/kubernetes/pkg/volume"
)
var storageOperationMetric = prometheus.NewHistogramVec(
@ -62,3 +64,15 @@ func OperationCompleteHook(plugin, operationName string) func(*error) {
}
return opComplete
}
// GetFullQualifiedPluginNameForVolume returns the fully qualified plugin name for the
// given volume. For the CSI plugin, it appends the driver name to the plugin name,
// e.g. kubernetes.io/csi:csi-hostpath. This keeps metrics emitted for CSI volumes
// distinguishable even when they are handled by different CSI drivers.
func GetFullQualifiedPluginNameForVolume(pluginName string, spec *volume.Spec) string {
if spec != nil && spec.PersistentVolume != nil && spec.PersistentVolume.Spec.CSI != nil {
return fmt.Sprintf("%s:%s", pluginName, spec.PersistentVolume.Spec.CSI.Driver)
}
return pluginName
}
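
A short usage sketch of the new helper; the driver name is a placeholder:

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/volume"
	volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

func main() {
	// For a CSI-backed PersistentVolume the driver name is appended, keeping
	// metrics from different CSI drivers distinguishable ("csi-hostpath" is
	// a placeholder driver name).
	spec := &volume.Spec{
		PersistentVolume: &v1.PersistentVolume{
			Spec: v1.PersistentVolumeSpec{
				PersistentVolumeSource: v1.PersistentVolumeSource{
					CSI: &v1.CSIPersistentVolumeSource{Driver: "csi-hostpath"},
				},
			},
		},
	}
	fmt.Println(volumeutil.GetFullQualifiedPluginNameForVolume("kubernetes.io/csi", spec))
	// -> kubernetes.io/csi:csi-hostpath

	// Any other volume (or a nil spec) passes the plugin name through unchanged.
	fmt.Println(volumeutil.GetFullQualifiedPluginNameForVolume("kubernetes.io/gce-pd", nil))
}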

View File

@ -13,9 +13,9 @@ go_library(
deps = [
"//pkg/util/goroutinemap/exponentialbackoff:go_default_library",
"//pkg/volume/util/types:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
@ -25,8 +25,8 @@ go_test(
embed = [":go_default_library"],
deps = [
"//pkg/volume/util/types:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
],
)

View File

@ -28,9 +28,9 @@ import (
"fmt"
"sync"
"github.com/golang/glog"
"k8s.io/api/core/v1"
k8sRuntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff"
"k8s.io/kubernetes/pkg/volume/util/types"
)
@ -240,7 +240,7 @@ func (grm *nestedPendingOperations) operationComplete(
if *err != nil {
// Log error
logOperationName := getOperationName(volumeName, podName)
glog.Errorf("operation %s failed with: %v",
klog.Errorf("operation %s failed with: %v",
logOperationName,
*err)
}
@ -252,7 +252,7 @@ func (grm *nestedPendingOperations) operationComplete(
if getOpErr != nil {
// Failed to find existing operation
logOperationName := getOperationName(volumeName, podName)
glog.Errorf("Operation %s completed. error: %v. exponentialBackOffOnError is enabled, but failed to get operation to update.",
klog.Errorf("Operation %s completed. error: %v. exponentialBackOffOnError is enabled, but failed to get operation to update.",
logOperationName,
*err)
return
@ -264,7 +264,7 @@ func (grm *nestedPendingOperations) operationComplete(
// Log error
operationName :=
getOperationName(volumeName, podName)
glog.Errorf("%v", grm.operations[existingOpIndex].expBackoff.
klog.Errorf("%v", grm.operations[existingOpIndex].expBackoff.
GenerateNoRetriesPermittedMsg(operationName))
}

View File

@ -18,20 +18,19 @@ go_library(
"//pkg/features:go_default_library",
"//pkg/kubelet/events:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/resizefs:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/nestedpendingoperations:go_default_library",
"//pkg/volume/util/types:go_default_library",
"//pkg/volume/util/volumepathhandler:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
@ -44,10 +43,10 @@ go_test(
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util/types:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
],
)

View File

@ -24,7 +24,7 @@ import (
"fmt"
"time"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
@ -248,7 +248,7 @@ func generateVolumeMsg(prefixMsg, suffixMsg, volumeName, details string) (simple
// VolumeToAttach represents a volume that should be attached to a node.
type VolumeToAttach struct {
// MultiAttachErrorReported indicates whether the multi-attach error has been reported for the given volume.
// It is used to to prevent reporting the error from being reported more than once for a given volume.
// It is used to prevent the error from being reported more than once for a given volume.
MultiAttachErrorReported bool
// VolumeName is the unique identifier for the volume that should be
@ -329,6 +329,10 @@ type VolumeToMount struct {
// the volume.Attacher interface
PluginIsAttachable bool
// PluginIsDeviceMountable indicates that the plugin for this volume implements
// the volume.DeviceMounter interface
PluginIsDeviceMountable bool
// VolumeGidValue contains the value of the GID annotation, if present.
VolumeGidValue string
@ -623,14 +627,14 @@ func (oe *operationExecutor) VerifyVolumesAreAttached(
for node, nodeAttachedVolumes := range attachedVolumes {
for _, volumeAttached := range nodeAttachedVolumes {
if volumeAttached.VolumeSpec == nil {
glog.Errorf("VerifyVolumesAreAttached: nil spec for volume %s", volumeAttached.VolumeName)
klog.Errorf("VerifyVolumesAreAttached: nil spec for volume %s", volumeAttached.VolumeName)
continue
}
volumePlugin, err :=
oe.operationGenerator.GetVolumePluginMgr().FindPluginBySpec(volumeAttached.VolumeSpec)
if err != nil || volumePlugin == nil {
glog.Errorf(
klog.Errorf(
"VolumesAreAttached.FindPluginBySpec failed for volume %q (spec.Name: %q) on node %q with error: %v",
volumeAttached.VolumeName,
volumeAttached.VolumeSpec.Name(),
@ -669,7 +673,7 @@ func (oe *operationExecutor) VerifyVolumesAreAttached(
// If node doesn't support Bulk volume polling it is best to poll individually
nodeError := oe.VerifyVolumesAreAttachedPerNode(nodeAttachedVolumes, node, actualStateOfWorld)
if nodeError != nil {
glog.Errorf("BulkVerifyVolumes.VerifyVolumesAreAttached verifying volumes on node %q with %v", node, nodeError)
klog.Errorf("BulkVerifyVolumes.VerifyVolumesAreAttached verifying volumes on node %q with %v", node, nodeError)
}
break
}
@ -682,14 +686,14 @@ func (oe *operationExecutor) VerifyVolumesAreAttached(
volumeSpecMapByPlugin[pluginName],
actualStateOfWorld)
if err != nil {
glog.Errorf("BulkVerifyVolumes.GenerateBulkVolumeVerifyFunc error bulk verifying volumes for plugin %q with %v", pluginName, err)
klog.Errorf("BulkVerifyVolumes.GenerateBulkVolumeVerifyFunc error bulk verifying volumes for plugin %q with %v", pluginName, err)
}
// Ugly hack to ensure - we don't do parallel bulk polling of same volume plugin
uniquePluginName := v1.UniqueVolumeName(pluginName)
err = oe.pendingOperations.Run(uniquePluginName, "" /* Pod Name */, generatedOperations)
if err != nil {
glog.Errorf("BulkVerifyVolumes.Run Error bulk volume verification for plugin %q with %v", pluginName, err)
klog.Errorf("BulkVerifyVolumes.Run Error bulk volume verification for plugin %q with %v", pluginName, err)
}
}
}
@ -738,8 +742,8 @@ func (oe *operationExecutor) MountVolume(
podName := nestedpendingoperations.EmptyUniquePodName
// TODO: remove this -- not necessary
if !volumeToMount.PluginIsAttachable {
// Non-attachable volume plugins can execute mount for multiple pods
if !volumeToMount.PluginIsAttachable && !volumeToMount.PluginIsDeviceMountable {
// Volume plugins that are neither attachable nor device-mountable can execute mount for multiple pods
// referencing the same volume in parallel
podName = util.GetUniquePodName(volumeToMount.Pod)
}
@ -858,7 +862,7 @@ func (oe *operationExecutor) ReconstructVolumeOperation(
// Filesystem Volume case
if volumeMode == v1.PersistentVolumeFilesystem {
// Create volumeSpec from mount path
glog.V(5).Infof("Starting operationExecutor.ReconstructVolumepodName")
klog.V(5).Infof("Starting operationExecutor.ReconstructVolumepodName")
volumeSpec, err := plugin.ConstructVolumeSpec(volumeSpecName, mountPath)
if err != nil {
return nil, err
@ -868,7 +872,7 @@ func (oe *operationExecutor) ReconstructVolumeOperation(
// Block Volume case
// Create volumeSpec from mount path
glog.V(5).Infof("Starting operationExecutor.ReconstructVolume")
klog.V(5).Infof("Starting operationExecutor.ReconstructVolume")
if mapperPlugin == nil {
return nil, fmt.Errorf("Could not find block volume plugin %q (spec.Name: %q) pod %q (UID: %q)",
pluginName,
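
The MountVolume hunk above widens the parallel-mount rule: only plugins that are neither attachable nor device-mountable may mount the same volume for several pods at once; all others serialize on the volume name. A pared-down sketch of how the pending-operations key encodes that rule (VolumeToMount here is a stand-in for the real struct, and lockKey is illustrative, not the executor's actual helper):

package main

import "fmt"

// VolumeToMount is a pared-down stand-in for the real struct.
type VolumeToMount struct {
	VolumeName              string
	PodName                 string
	PluginIsAttachable      bool
	PluginIsDeviceMountable bool
}

// lockKey mirrors the rule in MountVolume above: attachable or
// device-mountable plugins key on the volume name alone (one mount at a
// time per volume), while other plugins include the pod name, so mounts
// of the same volume for different pods may run in parallel.
func lockKey(v VolumeToMount) string {
	if !v.PluginIsAttachable && !v.PluginIsDeviceMountable {
		return v.VolumeName + "/" + v.PodName
	}
	return v.VolumeName
}

func main() {
	secret := VolumeToMount{VolumeName: "secret-vol", PodName: "pod-1"}
	pd := VolumeToMount{VolumeName: "pd-vol", PodName: "pod-1", PluginIsDeviceMountable: true}
	fmt.Println(lockKey(secret)) // secret-vol/pod-1 — parallel across pods
	fmt.Println(lockKey(pd))     // pd-vol — serialized across pods
}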

View File

@ -48,7 +48,7 @@ const (
var _ OperationGenerator = &fakeOperationGenerator{}
func TestOperationExecutor_MountVolume_ConcurrentMountForNonAttachablePlugins(t *testing.T) {
func TestOperationExecutor_MountVolume_ConcurrentMountForNonAttachableAndNonDevicemountablePlugins(t *testing.T) {
// Arrange
ch, quit, oe := setup()
volumesToMount := make([]VolumeToMount, numVolumesToMount)
@ -60,10 +60,11 @@ func TestOperationExecutor_MountVolume_ConcurrentMountForNonAttachablePlugins(t
podName := "pod-" + strconv.Itoa((i + 1))
pod := getTestPodWithSecret(podName, secretName)
volumesToMount[i] = VolumeToMount{
Pod: pod,
VolumeName: volumeName,
PluginIsAttachable: false, // this field determines whether the plugin is attachable
ReportedInUse: true,
Pod: pod,
VolumeName: volumeName,
PluginIsAttachable: false, // this field determines whether the plugin is attachable
PluginIsDeviceMountable: false, // this field determines whether the plugin is devicemountable
ReportedInUse: true,
}
oe.MountVolume(0 /* waitForAttachTimeOut */, volumesToMount[i], nil /* actualStateOfWorldMounterUpdater */, false /* isRemount */)
}
@ -99,6 +100,31 @@ func TestOperationExecutor_MountVolume_ConcurrentMountForAttachablePlugins(t *te
}
}
func TestOperationExecutor_MountVolume_ConcurrentMountForDeviceMountablePlugins(t *testing.T) {
// Arrange
ch, quit, oe := setup()
volumesToMount := make([]VolumeToMount, numVolumesToAttach)
pdName := "pd-volume"
volumeName := v1.UniqueVolumeName(pdName)
// Act
for i := range volumesToMount {
podName := "pod-" + strconv.Itoa((i + 1))
pod := getTestPodWithGCEPD(podName, pdName)
volumesToMount[i] = VolumeToMount{
Pod: pod,
VolumeName: volumeName,
PluginIsDeviceMountable: true, // this field determines whether the plugin is devicemountable
ReportedInUse: true,
}
oe.MountVolume(0 /* waitForAttachTimeout */, volumesToMount[i], nil /* actualStateOfWorldMounterUpdater */, false /* isRemount */)
}
// Assert
if !isOperationRunSerially(ch, quit) {
t.Fatalf("Mount operations should not start concurrently for devicemountable volumes")
}
}
func TestOperationExecutor_UnmountVolume_ConcurrentUnmountForAllPlugins(t *testing.T) {
// Arrange
ch, quit, oe := setup()

View File

@ -22,7 +22,6 @@ import (
"strings"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -30,11 +29,11 @@ import (
utilfeature "k8s.io/apiserver/pkg/util/feature"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/record"
"k8s.io/klog"
expandcache "k8s.io/kubernetes/pkg/controller/volume/expand/cache"
"k8s.io/kubernetes/pkg/features"
kevents "k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/util/resizefs"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
@ -72,11 +71,11 @@ func NewOperationGenerator(kubeClient clientset.Interface,
blkUtil volumepathhandler.BlockVolumePathHandler) OperationGenerator {
return &operationGenerator{
kubeClient: kubeClient,
volumePluginMgr: volumePluginMgr,
recorder: recorder,
kubeClient: kubeClient,
volumePluginMgr: volumePluginMgr,
recorder: recorder,
checkNodeCapabilitiesBeforeMount: checkNodeCapabilitiesBeforeMount,
blkUtil: blkUtil,
blkUtil: blkUtil,
}
}
@ -140,13 +139,14 @@ func (og *operationGenerator) GenerateVolumesAreAttachedFunc(
// Iterate each volume spec and put them into a map index by the pluginName
for _, volumeAttached := range attachedVolumes {
if volumeAttached.VolumeSpec == nil {
glog.Errorf("VerifyVolumesAreAttached.GenerateVolumesAreAttachedFunc: nil spec for volume %s", volumeAttached.VolumeName)
klog.Errorf("VerifyVolumesAreAttached.GenerateVolumesAreAttachedFunc: nil spec for volume %s", volumeAttached.VolumeName)
continue
}
volumePlugin, err :=
og.volumePluginMgr.FindPluginBySpec(volumeAttached.VolumeSpec)
if err != nil || volumePlugin == nil {
glog.Errorf(volumeAttached.GenerateErrorDetailed("VolumesAreAttached.FindPluginBySpec failed", err).Error())
klog.Errorf(volumeAttached.GenerateErrorDetailed("VolumesAreAttached.FindPluginBySpec failed", err).Error())
continue
}
volumeSpecList, pluginExists := volumesPerPlugin[volumePlugin.GetPluginName()]
if !pluginExists {
@ -165,7 +165,7 @@ func (og *operationGenerator) GenerateVolumesAreAttachedFunc(
attachableVolumePlugin, err :=
og.volumePluginMgr.FindAttachablePluginByName(pluginName)
if err != nil || attachableVolumePlugin == nil {
glog.Errorf(
klog.Errorf(
"VolumeAreAttached.FindAttachablePluginBySpec failed for plugin %q with: %v",
pluginName,
err)
@ -174,7 +174,7 @@ func (og *operationGenerator) GenerateVolumesAreAttachedFunc(
volumeAttacher, newAttacherErr := attachableVolumePlugin.NewAttacher()
if newAttacherErr != nil {
glog.Errorf(
klog.Errorf(
"VolumesAreAttached.NewAttacher failed for getting plugin %q with: %v",
pluginName,
newAttacherErr)
@ -183,7 +183,7 @@ func (og *operationGenerator) GenerateVolumesAreAttachedFunc(
attached, areAttachedErr := volumeAttacher.VolumesAreAttached(volumesSpecs, nodeName)
if areAttachedErr != nil {
glog.Errorf(
klog.Errorf(
"VolumesAreAttached failed for checking on node %q with: %v",
nodeName,
areAttachedErr)
@ -193,7 +193,7 @@ func (og *operationGenerator) GenerateVolumesAreAttachedFunc(
for spec, check := range attached {
if !check {
actualStateOfWorld.MarkVolumeAsDetached(volumeSpecMap[spec], nodeName)
glog.V(1).Infof("VerifyVolumesAreAttached determined volume %q (spec.Name: %q) is no longer attached to node %q, therefore it was marked as detached.",
klog.V(1).Infof("VerifyVolumesAreAttached determined volume %q (spec.Name: %q) is no longer attached to node %q, therefore it was marked as detached.",
volumeSpecMap[spec], spec.Name(), nodeName)
}
}
@ -203,7 +203,7 @@ func (og *operationGenerator) GenerateVolumesAreAttachedFunc(
return volumetypes.GeneratedOperations{
OperationFunc: volumesAreAttachedFunc,
CompleteFunc: util.OperationCompleteHook("<n/a>", "verify_volumes_are_attached_per_node"),
CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume("<n/a>", nil), "verify_volumes_are_attached_per_node"),
EventRecorderFunc: nil, // nil because we do not want to generate event on error
}, nil
}
@ -218,7 +218,7 @@ func (og *operationGenerator) GenerateBulkVolumeVerifyFunc(
attachableVolumePlugin, err :=
og.volumePluginMgr.FindAttachablePluginByName(pluginName)
if err != nil || attachableVolumePlugin == nil {
glog.Errorf(
klog.Errorf(
"BulkVerifyVolume.FindAttachablePluginBySpec failed for plugin %q with: %v",
pluginName,
err)
@ -228,7 +228,7 @@ func (og *operationGenerator) GenerateBulkVolumeVerifyFunc(
volumeAttacher, newAttacherErr := attachableVolumePlugin.NewAttacher()
if newAttacherErr != nil {
glog.Errorf(
klog.Errorf(
"BulkVerifyVolume.NewAttacher failed for getting plugin %q with: %v",
attachableVolumePlugin,
newAttacherErr)
@ -237,13 +237,13 @@ func (og *operationGenerator) GenerateBulkVolumeVerifyFunc(
bulkVolumeVerifier, ok := volumeAttacher.(volume.BulkVolumeVerifier)
if !ok {
glog.Errorf("BulkVerifyVolume failed to type assert attacher %q", bulkVolumeVerifier)
klog.Errorf("BulkVerifyVolume failed to type assert attacher %q", bulkVolumeVerifier)
return nil, nil
}
attached, bulkAttachErr := bulkVolumeVerifier.BulkVerifyVolumes(pluginNodeVolumes)
if bulkAttachErr != nil {
glog.Errorf("BulkVerifyVolume.BulkVerifyVolumes Error checking volumes are attached with %v", bulkAttachErr)
klog.Errorf("BulkVerifyVolume.BulkVerifyVolumes Error checking volumes are attached with %v", bulkAttachErr)
return nil, nil
}
@ -252,7 +252,7 @@ func (og *operationGenerator) GenerateBulkVolumeVerifyFunc(
nodeVolumeSpecs, nodeChecked := attached[nodeName]
if !nodeChecked {
glog.V(2).Infof("VerifyVolumesAreAttached.BulkVerifyVolumes failed for node %q and leaving volume %q as attached",
klog.V(2).Infof("VerifyVolumesAreAttached.BulkVerifyVolumes failed for node %q and leaving volume %q as attached",
nodeName,
volumeSpec.Name())
continue
@ -261,7 +261,7 @@ func (og *operationGenerator) GenerateBulkVolumeVerifyFunc(
check := nodeVolumeSpecs[volumeSpec]
if !check {
glog.V(2).Infof("VerifyVolumesAreAttached.BulkVerifyVolumes failed for node %q and volume %q",
klog.V(2).Infof("VerifyVolumesAreAttached.BulkVerifyVolumes failed for node %q and volume %q",
nodeName,
volumeSpec.Name())
actualStateOfWorld.MarkVolumeAsDetached(volumeSpecMap[volumeSpec], nodeName)
@ -274,7 +274,7 @@ func (og *operationGenerator) GenerateBulkVolumeVerifyFunc(
return volumetypes.GeneratedOperations{
OperationFunc: bulkVolumeVerifyFunc,
CompleteFunc: util.OperationCompleteHook(pluginName, "verify_volumes_are_attached"),
CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(pluginName, nil), "verify_volumes_are_attached"),
EventRecorderFunc: nil, // nil because we do not want to generate event on error
}, nil
@ -319,7 +319,7 @@ func (og *operationGenerator) GenerateAttachVolumeFunc(
derr.DevicePath)
if addErr != nil {
glog.Errorf("AttachVolume.MarkVolumeAsAttached failed to fix dangling volume error for volume %q with %s", volumeToAttach.VolumeName, addErr)
klog.Errorf("AttachVolume.MarkVolumeAsAttached failed to fix dangling volume error for volume %q with %s", volumeToAttach.VolumeName, addErr)
}
}
@ -332,7 +332,7 @@ func (og *operationGenerator) GenerateAttachVolumeFunc(
for _, pod := range volumeToAttach.ScheduledPods {
og.recorder.Eventf(pod, v1.EventTypeNormal, kevents.SuccessfulAttachVolume, simpleMsg)
}
glog.Infof(volumeToAttach.GenerateMsgDetailed("AttachVolume.Attach succeeded", ""))
klog.Infof(volumeToAttach.GenerateMsgDetailed("AttachVolume.Attach succeeded", ""))
// Update actual state of world
addVolumeNodeErr := actualStateOfWorld.MarkVolumeAsAttached(
@ -348,7 +348,7 @@ func (og *operationGenerator) GenerateAttachVolumeFunc(
return volumetypes.GeneratedOperations{
OperationFunc: attachVolumeFunc,
EventRecorderFunc: eventRecorderFunc,
CompleteFunc: util.OperationCompleteHook(attachableVolumePlugin.GetPluginName(), "volume_attach"),
CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(attachableVolumePlugin.GetPluginName(), volumeToAttach.VolumeSpec), "volume_attach"),
}, nil
}
@ -416,7 +416,7 @@ func (og *operationGenerator) GenerateDetachVolumeFunc(
return volumeToDetach.GenerateError("DetachVolume.Detach failed", err)
}
glog.Infof(volumeToDetach.GenerateMsgDetailed("DetachVolume.Detach succeeded", ""))
klog.Infof(volumeToDetach.GenerateMsgDetailed("DetachVolume.Detach succeeded", ""))
// Update actual state of world
actualStateOfWorld.MarkVolumeAsDetached(
@ -427,7 +427,7 @@ func (og *operationGenerator) GenerateDetachVolumeFunc(
return volumetypes.GeneratedOperations{
OperationFunc: getVolumePluginMgrFunc,
CompleteFunc: util.OperationCompleteHook(pluginName, "volume_detach"),
CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(pluginName, volumeToDetach.VolumeSpec), "volume_detach"),
EventRecorderFunc: nil, // nil because we do not want to generate event on error
}, nil
}
@ -477,6 +477,13 @@ func (og *operationGenerator) GenerateMountVolumeFunc(
volumeAttacher, _ = attachableVolumePlugin.NewAttacher()
}
// get deviceMounter, if possible
deviceMountableVolumePlugin, _ := og.volumePluginMgr.FindDeviceMountablePluginBySpec(volumeToMount.VolumeSpec)
var volumeDeviceMounter volume.DeviceMounter
if deviceMountableVolumePlugin != nil {
volumeDeviceMounter, _ = deviceMountableVolumePlugin.NewDeviceMounter()
}
var fsGroup *int64
if volumeToMount.Pod.Spec.SecurityContext != nil &&
volumeToMount.Pod.Spec.SecurityContext.FSGroup != nil {
@ -484,28 +491,31 @@ func (og *operationGenerator) GenerateMountVolumeFunc(
}
mountVolumeFunc := func() (error, error) {
devicePath := volumeToMount.DevicePath
if volumeAttacher != nil {
// Wait for attachable volumes to finish attaching
glog.Infof(volumeToMount.GenerateMsgDetailed("MountVolume.WaitForAttach entering", fmt.Sprintf("DevicePath %q", volumeToMount.DevicePath)))
klog.Infof(volumeToMount.GenerateMsgDetailed("MountVolume.WaitForAttach entering", fmt.Sprintf("DevicePath %q", volumeToMount.DevicePath)))
devicePath, err := volumeAttacher.WaitForAttach(
volumeToMount.VolumeSpec, volumeToMount.DevicePath, volumeToMount.Pod, waitForAttachTimeout)
devicePath, err = volumeAttacher.WaitForAttach(
volumeToMount.VolumeSpec, devicePath, volumeToMount.Pod, waitForAttachTimeout)
if err != nil {
// On failure, return error. Caller will log and retry.
return volumeToMount.GenerateError("MountVolume.WaitForAttach failed", err)
}
glog.Infof(volumeToMount.GenerateMsgDetailed("MountVolume.WaitForAttach succeeded", fmt.Sprintf("DevicePath %q", devicePath)))
klog.Infof(volumeToMount.GenerateMsgDetailed("MountVolume.WaitForAttach succeeded", fmt.Sprintf("DevicePath %q", devicePath)))
}
if volumeDeviceMounter != nil {
deviceMountPath, err :=
volumeAttacher.GetDeviceMountPath(volumeToMount.VolumeSpec)
volumeDeviceMounter.GetDeviceMountPath(volumeToMount.VolumeSpec)
if err != nil {
// On failure, return error. Caller will log and retry.
return volumeToMount.GenerateError("MountVolume.GetDeviceMountPath failed", err)
}
// Mount device to global mount path
err = volumeAttacher.MountDevice(
err = volumeDeviceMounter.MountDevice(
volumeToMount.VolumeSpec,
devicePath,
deviceMountPath)
@ -514,7 +524,7 @@ func (og *operationGenerator) GenerateMountVolumeFunc(
return volumeToMount.GenerateError("MountVolume.MountDevice failed", err)
}
glog.Infof(volumeToMount.GenerateMsgDetailed("MountVolume.MountDevice succeeded", fmt.Sprintf("device mount path %q", deviceMountPath)))
klog.Infof(volumeToMount.GenerateMsgDetailed("MountVolume.MountDevice succeeded", fmt.Sprintf("device mount path %q", deviceMountPath)))
// Update actual state of world to reflect volume is globally mounted
markDeviceMountedErr := actualStateOfWorld.MarkDeviceAsMounted(
@ -531,7 +541,6 @@ func (og *operationGenerator) GenerateMountVolumeFunc(
if resizeSimpleError != nil || resizeDetailedError != nil {
return resizeSimpleError, resizeDetailedError
}
}
if og.checkNodeCapabilitiesBeforeMount {
@ -551,11 +560,11 @@ func (og *operationGenerator) GenerateMountVolumeFunc(
}
_, detailedMsg := volumeToMount.GenerateMsg("MountVolume.SetUp succeeded", "")
verbosity := glog.Level(1)
verbosity := klog.Level(1)
if isRemount {
verbosity = glog.Level(4)
verbosity = klog.Level(4)
}
glog.V(verbosity).Infof(detailedMsg)
klog.V(verbosity).Infof(detailedMsg)
// Update actual state of world
markVolMountedErr := actualStateOfWorld.MarkVolumeAsMounted(
@ -584,20 +593,19 @@ func (og *operationGenerator) GenerateMountVolumeFunc(
return volumetypes.GeneratedOperations{
OperationFunc: mountVolumeFunc,
EventRecorderFunc: eventRecorderFunc,
CompleteFunc: util.OperationCompleteHook(volumePlugin.GetPluginName(), "volume_mount"),
CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(volumePlugin.GetPluginName(), volumeToMount.VolumeSpec), "volume_mount"),
}, nil
}
func (og *operationGenerator) resizeFileSystem(volumeToMount VolumeToMount, devicePath, deviceMountPath, pluginName string) (simpleErr, detailedErr error) {
if !utilfeature.DefaultFeatureGate.Enabled(features.ExpandPersistentVolumes) {
glog.V(4).Infof("Resizing is not enabled for this volume %s", volumeToMount.VolumeName)
klog.V(4).Infof("Resizing is not enabled for this volume %s", volumeToMount.VolumeName)
return nil, nil
}
mounter := og.volumePluginMgr.Host.GetMounter(pluginName)
// Get expander, if possible
expandableVolumePlugin, _ :=
og.volumePluginMgr.FindExpandablePluginBySpec(volumeToMount.VolumeSpec)
og.volumePluginMgr.FindFSResizablePluginBySpec(volumeToMount.VolumeSpec)
if expandableVolumePlugin != nil &&
expandableVolumePlugin.RequiresFSResize() &&
@ -613,33 +621,20 @@ func (og *operationGenerator) resizeFileSystem(volumeToMount VolumeToMount, devi
pvSpecCap := pv.Spec.Capacity[v1.ResourceStorage]
if pvcStatusCap.Cmp(pvSpecCap) < 0 {
// File system resize was requested, proceed
glog.V(4).Infof(volumeToMount.GenerateMsgDetailed("MountVolume.resizeFileSystem entering", fmt.Sprintf("DevicePath %q", volumeToMount.DevicePath)))
klog.V(4).Infof(volumeToMount.GenerateMsgDetailed("MountVolume.resizeFileSystem entering", fmt.Sprintf("DevicePath %q", volumeToMount.DevicePath)))
if volumeToMount.VolumeSpec.ReadOnly {
simpleMsg, detailedMsg := volumeToMount.GenerateMsg("MountVolume.resizeFileSystem failed", "requested read-only file system")
glog.Warningf(detailedMsg)
klog.Warningf(detailedMsg)
og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FileSystemResizeFailed, simpleMsg)
return nil, nil
}
diskFormatter := &mount.SafeFormatAndMount{
Interface: mounter,
Exec: og.volumePluginMgr.Host.GetExec(expandableVolumePlugin.GetPluginName()),
}
resizer := resizefs.NewResizeFs(diskFormatter)
resizeStatus, resizeErr := resizer.Resize(devicePath, deviceMountPath)
if resizeErr != nil {
if resizeErr := expandableVolumePlugin.ExpandFS(volumeToMount.VolumeSpec, devicePath, deviceMountPath, pvSpecCap, pvcStatusCap); resizeErr != nil {
return volumeToMount.GenerateError("MountVolume.resizeFileSystem failed", resizeErr)
}
if resizeStatus {
simpleMsg, detailedMsg := volumeToMount.GenerateMsg("MountVolume.resizeFileSystem succeeded", "")
og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeNormal, kevents.FileSystemResizeSuccess, simpleMsg)
glog.Infof(detailedMsg)
}
simpleMsg, detailedMsg := volumeToMount.GenerateMsg("MountVolume.resizeFileSystem succeeded", "")
og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeNormal, kevents.FileSystemResizeSuccess, simpleMsg)
klog.Infof(detailedMsg)
// File system resize succeeded, now update the PVC's Capacity to match the PV's
err = util.MarkFSResizeFinished(pvc, pv.Spec.Capacity, og.kubeClient)
if err != nil {
@ -685,7 +680,7 @@ func (og *operationGenerator) GenerateUnmountVolumeFunc(
return volumeToUnmount.GenerateError("UnmountVolume.TearDown failed", unmountErr)
}
glog.Infof(
klog.Infof(
"UnmountVolume.TearDown succeeded for volume %q (OuterVolumeSpecName: %q) pod %q (UID: %q). InnerVolumeSpecName %q. PluginName %q, VolumeGidValue %q",
volumeToUnmount.VolumeName,
volumeToUnmount.OuterVolumeSpecName,
@ -700,7 +695,7 @@ func (og *operationGenerator) GenerateUnmountVolumeFunc(
volumeToUnmount.PodName, volumeToUnmount.VolumeName)
if markVolMountedErr != nil {
// On failure, just log and exit
glog.Errorf(volumeToUnmount.GenerateErrorDetailed("UnmountVolume.MarkVolumeAsUnmounted failed", markVolMountedErr).Error())
klog.Errorf(volumeToUnmount.GenerateErrorDetailed("UnmountVolume.MarkVolumeAsUnmounted failed", markVolMountedErr).Error())
}
return nil, nil
@ -708,7 +703,7 @@ func (og *operationGenerator) GenerateUnmountVolumeFunc(
return volumetypes.GeneratedOperations{
OperationFunc: unmountVolumeFunc,
CompleteFunc: util.OperationCompleteHook(volumePlugin.GetPluginName(), "volume_unmount"),
CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(volumePlugin.GetPluginName(), volumeToUnmount.VolumeSpec), "volume_unmount"),
EventRecorderFunc: nil, // nil because we do not want to generate event on error
}, nil
}
@ -717,20 +712,31 @@ func (og *operationGenerator) GenerateUnmountDeviceFunc(
deviceToDetach AttachedVolume,
actualStateOfWorld ActualStateOfWorldMounterUpdater,
mounter mount.Interface) (volumetypes.GeneratedOperations, error) {
// Get attacher plugin
attachableVolumePlugin, err :=
og.volumePluginMgr.FindAttachablePluginByName(deviceToDetach.PluginName)
if err != nil || attachableVolumePlugin == nil {
return volumetypes.GeneratedOperations{}, deviceToDetach.GenerateErrorDetailed("UnmountDevice.FindAttachablePluginBySpec failed", err)
// Get DeviceMounter plugin
deviceMountableVolumePlugin, err :=
og.volumePluginMgr.FindDeviceMountablePluginByName(deviceToDetach.PluginName)
if err != nil || deviceMountableVolumePlugin == nil {
return volumetypes.GeneratedOperations{}, deviceToDetach.GenerateErrorDetailed("UnmountDevice.FindDeviceMountablePluginByName failed", err)
}
volumeDeviceUnmounter, err := deviceMountableVolumePlugin.NewDeviceUnmounter()
if err != nil {
return volumetypes.GeneratedOperations{}, deviceToDetach.GenerateErrorDetailed("UnmountDevice.NewDeviceUnmounter failed", err)
}
volumeDetacher, err := attachableVolumePlugin.NewDetacher()
volumeDeviceMounter, err := deviceMountableVolumePlugin.NewDeviceMounter()
if err != nil {
return volumetypes.GeneratedOperations{}, deviceToDetach.GenerateErrorDetailed("UnmountDevice.NewDetacher failed", err)
return volumetypes.GeneratedOperations{}, deviceToDetach.GenerateErrorDetailed("UnmountDevice.NewDeviceMounter failed", err)
}
unmountDeviceFunc := func() (error, error) {
deviceMountPath := deviceToDetach.DeviceMountPath
refs, err := attachableVolumePlugin.GetDeviceMountRefs(deviceMountPath)
deviceMountPath, err :=
volumeDeviceMounter.GetDeviceMountPath(deviceToDetach.VolumeSpec)
if err != nil {
// On failure, return error. Caller will log and retry.
return deviceToDetach.GenerateError("GetDeviceMountPath failed", err)
}
refs, err := deviceMountableVolumePlugin.GetDeviceMountRefs(deviceMountPath)
if err != nil || mount.HasMountRefs(deviceMountPath, refs) {
if err == nil {
@ -739,7 +745,7 @@ func (og *operationGenerator) GenerateUnmountDeviceFunc(
return deviceToDetach.GenerateError("GetDeviceMountRefs check failed", err)
}
// Execute unmount
unmountDeviceErr := volumeDetacher.UnmountDevice(deviceMountPath)
unmountDeviceErr := volumeDeviceUnmounter.UnmountDevice(deviceMountPath)
if unmountDeviceErr != nil {
// On failure, return error. Caller will log and retry.
return deviceToDetach.GenerateError("UnmountDevice failed", unmountDeviceErr)
@ -759,7 +765,7 @@ func (og *operationGenerator) GenerateUnmountDeviceFunc(
fmt.Errorf("the device is in use when it was no longer expected to be in use"))
}
glog.Infof(deviceToDetach.GenerateMsg("UnmountDevice succeeded", ""))
klog.Infof(deviceToDetach.GenerateMsg("UnmountDevice succeeded", ""))
// Update actual state of world
markDeviceUnmountedErr := actualStateOfWorld.MarkDeviceAsUnmounted(
@ -774,7 +780,7 @@ func (og *operationGenerator) GenerateUnmountDeviceFunc(
return volumetypes.GeneratedOperations{
OperationFunc: unmountDeviceFunc,
CompleteFunc: util.OperationCompleteHook(attachableVolumePlugin.GetPluginName(), "unmount_device"),
CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(deviceMountableVolumePlugin.GetPluginName(), deviceToDetach.VolumeSpec), "unmount_device"),
EventRecorderFunc: nil, // nil because we do not want to generate event on error
}, nil
}
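
These hunks consistently narrow the dependency from AttachableVolumePlugin to DeviceMountableVolumePlugin wherever only the global device mount is at stake, so a plugin that mounts a device but has no attach/detach phase no longer needs attacher methods. Reconstructed from the call sites above, the assumed shape of the split is roughly (signatures inferred, not verified):

// Inferred from the call sites above, not the verified upstream definition.
type DeviceMounter interface {
	// GetDeviceMountPath returns the global path the device is mounted at.
	GetDeviceMountPath(spec *Spec) (string, error)
	// MountDevice mounts the device at devicePath onto deviceMountPath.
	MountDevice(spec *Spec, devicePath string, deviceMountPath string) error
}

type DeviceUnmounter interface {
	// UnmountDevice unmounts the disk's global mount.
	UnmountDevice(deviceMountPath string) error
}

type DeviceMountableVolumePlugin interface {
	NewDeviceMounter() (DeviceMounter, error)
	NewDeviceUnmounter() (DeviceUnmounter, error)
	GetDeviceMountRefs(deviceMountPath string) ([]string, error)
}
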
@ -838,7 +844,7 @@ func (og *operationGenerator) GenerateMapVolumeFunc(
}
if volumeAttacher != nil {
// Wait for attachable volumes to finish attaching
glog.Infof(volumeToMount.GenerateMsgDetailed("MapVolume.WaitForAttach entering", fmt.Sprintf("DevicePath %q", volumeToMount.DevicePath)))
klog.Infof(volumeToMount.GenerateMsgDetailed("MapVolume.WaitForAttach entering", fmt.Sprintf("DevicePath %q", volumeToMount.DevicePath)))
devicePath, err = volumeAttacher.WaitForAttach(
volumeToMount.VolumeSpec, volumeToMount.DevicePath, volumeToMount.Pod, waitForAttachTimeout)
@ -847,7 +853,7 @@ func (og *operationGenerator) GenerateMapVolumeFunc(
return volumeToMount.GenerateError("MapVolume.WaitForAttach failed", err)
}
glog.Infof(volumeToMount.GenerateMsgDetailed("MapVolume.WaitForAttach succeeded", fmt.Sprintf("DevicePath %q", devicePath)))
klog.Infof(volumeToMount.GenerateMsgDetailed("MapVolume.WaitForAttach succeeded", fmt.Sprintf("DevicePath %q", devicePath)))
}
// A plugin that doesn't have an attacher also needs to map the device to the global map path with SetUpDevice()
@ -866,6 +872,16 @@ func (og *operationGenerator) GenerateMapVolumeFunc(
return volumeToMount.GenerateError("MapVolume failed", fmt.Errorf("Device path of the volume is empty"))
}
// When kubelet is containerized, devicePath may be a symlink at a place unavailable to
// kubelet, so evaluate it on the host and expect that it links to a device in /dev,
// which will be available to containerized kubelet. If it still does not exist,
// AttachFileDevice will fail. If kubelet is not containerized, evaluate it anyway.
mounter := og.GetVolumePluginMgr().Host.GetMounter(blockVolumePlugin.GetPluginName())
devicePath, err = mounter.EvalHostSymlinks(devicePath)
if err != nil {
return volumeToMount.GenerateError("MapVolume.EvalHostSymlinks failed", err)
}
// Map device to global and pod device map path
volumeMapPath, volName := blockVolumeMapper.GetPodDeviceMapPath()
mapErr = blockVolumeMapper.MapDevice(devicePath, globalMapPath, volumeMapPath, volName, volumeToMount.Pod.UID)
@ -893,15 +909,15 @@ func (og *operationGenerator) GenerateMapVolumeFunc(
// Device mapping for global map path succeeded
simpleMsg, detailedMsg := volumeToMount.GenerateMsg("MapVolume.MapDevice succeeded", fmt.Sprintf("globalMapPath %q", globalMapPath))
verbosity := glog.Level(4)
verbosity := klog.Level(4)
og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeNormal, kevents.SuccessfulMountVolume, simpleMsg)
glog.V(verbosity).Infof(detailedMsg)
klog.V(verbosity).Infof(detailedMsg)
// Device mapping for pod device map path succeeded
simpleMsg, detailedMsg = volumeToMount.GenerateMsg("MapVolume.MapDevice succeeded", fmt.Sprintf("volumeMapPath %q", volumeMapPath))
verbosity = glog.Level(1)
verbosity = klog.Level(1)
og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeNormal, kevents.SuccessfulMountVolume, simpleMsg)
glog.V(verbosity).Infof(detailedMsg)
klog.V(verbosity).Infof(detailedMsg)
// Update actual state of world
markVolMountedErr := actualStateOfWorld.MarkVolumeAsMounted(
@ -930,7 +946,7 @@ func (og *operationGenerator) GenerateMapVolumeFunc(
return volumetypes.GeneratedOperations{
OperationFunc: mapVolumeFunc,
EventRecorderFunc: eventRecorderFunc,
CompleteFunc: util.OperationCompleteHook(blockVolumePlugin.GetPluginName(), "map_volume"),
CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(blockVolumePlugin.GetPluginName(), volumeToMount.VolumeSpec), "map_volume"),
}, nil
}
@ -976,7 +992,7 @@ func (og *operationGenerator) GenerateUnmapVolumeFunc(
return volumeToUnmount.GenerateError("UnmapVolume.UnmapDevice on global map path failed", unmapDeviceErr)
}
glog.Infof(
klog.Infof(
"UnmapVolume succeeded for volume %q (OuterVolumeSpecName: %q) pod %q (UID: %q). InnerVolumeSpecName %q. PluginName %q, VolumeGidValue %q",
volumeToUnmount.VolumeName,
volumeToUnmount.OuterVolumeSpecName,
@ -991,7 +1007,7 @@ func (og *operationGenerator) GenerateUnmapVolumeFunc(
volumeToUnmount.PodName, volumeToUnmount.VolumeName)
if markVolUnmountedErr != nil {
// On failure, just log and exit
glog.Errorf(volumeToUnmount.GenerateErrorDetailed("UnmapVolume.MarkVolumeAsUnmounted failed", markVolUnmountedErr).Error())
klog.Errorf(volumeToUnmount.GenerateErrorDetailed("UnmapVolume.MarkVolumeAsUnmounted failed", markVolUnmountedErr).Error())
}
return nil, nil
@ -999,7 +1015,7 @@ func (og *operationGenerator) GenerateUnmapVolumeFunc(
return volumetypes.GeneratedOperations{
OperationFunc: unmapVolumeFunc,
CompleteFunc: util.OperationCompleteHook(blockVolumePlugin.GetPluginName(), "unmap_volume"),
CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(blockVolumePlugin.GetPluginName(), volumeToUnmount.VolumeSpec), "unmap_volume"),
EventRecorderFunc: nil, // nil because we do not want to generate event on error
}, nil
}
@ -1029,7 +1045,7 @@ func (og *operationGenerator) GenerateUnmapDeviceFunc(
}
blockVolumeUnmapper, newUnmapperErr := blockVolumePlugin.NewBlockVolumeUnmapper(
string(deviceToDetach.VolumeName),
deviceToDetach.VolumeSpec.Name(),
"" /* podUID */)
if newUnmapperErr != nil {
return volumetypes.GeneratedOperations{}, deviceToDetach.GenerateErrorDetailed("UnmapDevice.NewUnmapper failed", newUnmapperErr)
@ -1048,6 +1064,27 @@ func (og *operationGenerator) GenerateUnmapDeviceFunc(
return deviceToDetach.GenerateError("UnmapDevice failed", err)
}
// The block volume is not referenced from Pods. Release file descriptor lock.
// This should be done before calling TearDownDevice, because some plugins that do local detach
// in TearDownDevice will fail in detaching device due to the refcnt on the loopback device.
klog.V(4).Infof("UnmapDevice: deviceToDetach.DevicePath: %v", deviceToDetach.DevicePath)
loopPath, err := og.blkUtil.GetLoopDevice(deviceToDetach.DevicePath)
if err != nil {
if err.Error() == volumepathhandler.ErrDeviceNotFound {
klog.Warningf(deviceToDetach.GenerateMsgDetailed("UnmapDevice: Couldn't find loopback device which takes file descriptor lock", fmt.Sprintf("device path: %q", deviceToDetach.DevicePath)))
} else {
errInfo := "UnmapDevice.GetLoopDevice failed to get loopback device, " + fmt.Sprintf("device path: %q", deviceToDetach.DevicePath)
return deviceToDetach.GenerateError(errInfo, err)
}
} else {
if len(loopPath) != 0 {
err = og.blkUtil.RemoveLoopDevice(loopPath)
if err != nil {
return deviceToDetach.GenerateError("UnmapDevice.RemoveLoopDevice failed", err)
}
}
}
// Execute tear down device
unmapErr := blockVolumeUnmapper.TearDownDevice(globalMapPath, deviceToDetach.DevicePath)
if unmapErr != nil {
@ -1060,26 +1097,7 @@ func (og *operationGenerator) GenerateUnmapDeviceFunc(
removeMapPathErr := og.blkUtil.RemoveMapPath(globalMapPath)
if removeMapPathErr != nil {
// On failure, return error. Caller will log and retry.
return deviceToDetach.GenerateError("UnmapDevice failed", removeMapPathErr)
}
// The block volume is not referenced from Pods. Release file descriptor lock.
glog.V(4).Infof("UnmapDevice: deviceToDetach.DevicePath: %v", deviceToDetach.DevicePath)
loopPath, err := og.blkUtil.GetLoopDevice(deviceToDetach.DevicePath)
if err != nil {
if err.Error() == volumepathhandler.ErrDeviceNotFound {
glog.Warningf(deviceToDetach.GenerateMsgDetailed("UnmapDevice: Couldn't find loopback device which takes file descriptor lock", fmt.Sprintf("device path: %q", deviceToDetach.DevicePath)))
} else {
errInfo := "UnmapDevice.GetLoopDevice failed to get loopback device, " + fmt.Sprintf("device path: %q", deviceToDetach.DevicePath)
return deviceToDetach.GenerateError(errInfo, err)
}
} else {
if len(loopPath) != 0 {
err = og.blkUtil.RemoveLoopDevice(loopPath)
if err != nil {
return deviceToDetach.GenerateError("UnmapDevice.RemoveLoopDevice failed", err)
}
}
return deviceToDetach.GenerateError("UnmapDevice.RemoveMapPath failed", removeMapPathErr)
}
// Before logging that UnmapDevice succeeded and moving on,
@ -1097,7 +1115,7 @@ func (og *operationGenerator) GenerateUnmapDeviceFunc(
fmt.Errorf("the device is in use when it was no longer expected to be in use"))
}
glog.Infof(deviceToDetach.GenerateMsgDetailed("UnmapDevice succeeded", ""))
klog.Infof(deviceToDetach.GenerateMsgDetailed("UnmapDevice succeeded", ""))
// Update actual state of world
markDeviceUnmountedErr := actualStateOfWorld.MarkDeviceAsUnmounted(
@ -1112,7 +1130,7 @@ func (og *operationGenerator) GenerateUnmapDeviceFunc(
return volumetypes.GeneratedOperations{
OperationFunc: unmapDeviceFunc,
CompleteFunc: util.OperationCompleteHook(blockVolumePlugin.GetPluginName(), "unmap_device"),
CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(blockVolumePlugin.GetPluginName(), deviceToDetach.VolumeSpec), "unmap_device"),
EventRecorderFunc: nil, // nil because we do not want to generate event on error
}, nil
}
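
Moving the loop-device cleanup ahead of TearDownDevice is the substance of the hunk above: the loopback device holds the file descriptor lock, and plugins that detach locally inside TearDownDevice fail on the leftover refcount if it is still present. Compressed into one helper, the enforced ordering looks like this (a sketch over the same calls; logging and the device-not-found tolerance elided):

// Sketch of the ordering the new UnmapDevice flow enforces.
func teardownBlockDevice(blkUtil volumepathhandler.BlockVolumePathHandler,
	unmapper volume.BlockVolumeUnmapper, devicePath, globalMapPath string) error {
	// 1. Release the loopback device that pins the file descriptor lock.
	if loopPath, err := blkUtil.GetLoopDevice(devicePath); err == nil && len(loopPath) != 0 {
		if err := blkUtil.RemoveLoopDevice(loopPath); err != nil {
			return err
		}
	}
	// 2. Only now tear down the block device, so a local detach can succeed.
	if err := unmapper.TearDownDevice(globalMapPath, devicePath); err != nil {
		return err
	}
	// 3. Remove the global map path last.
	return blkUtil.RemoveMapPath(globalMapPath)
}
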
@ -1171,7 +1189,7 @@ func (og *operationGenerator) GenerateVerifyControllerAttachedVolumeFunc(
if attachedVolume.Name == volumeToMount.VolumeName {
addVolumeNodeErr := actualStateOfWorld.MarkVolumeAsAttached(
v1.UniqueVolumeName(""), volumeToMount.VolumeSpec, nodeName, attachedVolume.DevicePath)
glog.Infof(volumeToMount.GenerateMsgDetailed("Controller attach succeeded", fmt.Sprintf("device path: %q", attachedVolume.DevicePath)))
klog.Infof(volumeToMount.GenerateMsgDetailed("Controller attach succeeded", fmt.Sprintf("device path: %q", attachedVolume.DevicePath)))
if addVolumeNodeErr != nil {
// On failure, return error. Caller will log and retry.
return volumeToMount.GenerateError("VerifyControllerAttachedVolume.MarkVolumeAsAttached failed", addVolumeNodeErr)
@ -1186,7 +1204,7 @@ func (og *operationGenerator) GenerateVerifyControllerAttachedVolumeFunc(
return volumetypes.GeneratedOperations{
OperationFunc: verifyControllerAttachedVolumeFunc,
CompleteFunc: util.OperationCompleteHook(volumePlugin.GetPluginName(), "verify_controller_attached_volume"),
CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(volumePlugin.GetPluginName(), volumeToMount.VolumeSpec), "verify_controller_attached_volume"),
EventRecorderFunc: nil, // nil because we do not want to generate event on error
}, nil
@ -1198,7 +1216,7 @@ func (og *operationGenerator) verifyVolumeIsSafeToDetach(
node, fetchErr := og.kubeClient.CoreV1().Nodes().Get(string(volumeToDetach.NodeName), metav1.GetOptions{})
if fetchErr != nil {
if errors.IsNotFound(fetchErr) {
glog.Warningf(volumeToDetach.GenerateMsgDetailed("Node not found on API server. DetachVolume will skip safe to detach check", ""))
klog.Warningf(volumeToDetach.GenerateMsgDetailed("Node not found on API server. DetachVolume will skip safe to detach check", ""))
return nil
}
@ -1222,7 +1240,7 @@ func (og *operationGenerator) verifyVolumeIsSafeToDetach(
}
// Volume is not marked as in use by node
glog.Infof(volumeToDetach.GenerateMsgDetailed("Verified volume is safe to detach", ""))
klog.Infof(volumeToDetach.GenerateMsgDetailed("Verified volume is safe to detach", ""))
return nil
}
@ -1237,6 +1255,7 @@ func (og *operationGenerator) GenerateExpandVolumeFunc(
if err != nil {
return volumetypes.GeneratedOperations{}, fmt.Errorf("Error finding plugin for expanding volume: %q with error %v", pvcWithResizeRequest.QualifiedName(), err)
}
if volumePlugin == nil {
return volumetypes.GeneratedOperations{}, fmt.Errorf("Can not find plugin for expanding volume: %q", pvcWithResizeRequest.QualifiedName())
}
@ -1251,10 +1270,11 @@ func (og *operationGenerator) GenerateExpandVolumeFunc(
pvcWithResizeRequest.CurrentSize)
if expandErr != nil {
detailedErr := fmt.Errorf("Error expanding volume %q of plugin %s : %v", pvcWithResizeRequest.QualifiedName(), volumePlugin.GetPluginName(), expandErr)
detailedErr := fmt.Errorf("error expanding volume %q of plugin %q: %v", pvcWithResizeRequest.QualifiedName(), volumePlugin.GetPluginName(), expandErr)
return detailedErr, detailedErr
}
glog.Infof("ExpandVolume succeeded for volume %s", pvcWithResizeRequest.QualifiedName())
klog.Infof("ExpandVolume succeeded for volume %s", pvcWithResizeRequest.QualifiedName())
newSize = updatedSize
// k8s doesn't have transactions; we can't guarantee that after updating the PV, updating the
// PVC will succeed. That is why all PVCs for which pvc.Spec.Size > pvc.Status.Size must be reprocessed
@ -1265,14 +1285,14 @@ func (og *operationGenerator) GenerateExpandVolumeFunc(
detailedErr := fmt.Errorf("Error updating PV spec capacity for volume %q with : %v", pvcWithResizeRequest.QualifiedName(), updateErr)
return detailedErr, detailedErr
}
glog.Infof("ExpandVolume.UpdatePV succeeded for volume %s", pvcWithResizeRequest.QualifiedName())
klog.Infof("ExpandVolume.UpdatePV succeeded for volume %s", pvcWithResizeRequest.QualifiedName())
}
// No cloud provider resize needed; let's mark resizing as done
// Rest of the volume expand controller code will assume PVC as *not* resized until pvc.Status.Size
// reflects user requested size.
if !volumePlugin.RequiresFSResize() {
glog.V(4).Infof("Controller resizing done for PVC %s", pvcWithResizeRequest.QualifiedName())
klog.V(4).Infof("Controller resizing done for PVC %s", pvcWithResizeRequest.QualifiedName())
err := resizeMap.MarkAsResized(pvcWithResizeRequest, newSize)
if err != nil {
@ -1285,7 +1305,7 @@ func (og *operationGenerator) GenerateExpandVolumeFunc(
err := resizeMap.MarkForFSResize(pvcWithResizeRequest)
if err != nil {
detailedErr := fmt.Errorf("Error updating pvc %s condition for fs resize : %v", pvcWithResizeRequest.QualifiedName(), err)
glog.Warning(detailedErr)
klog.Warning(detailedErr)
return nil, nil
}
}
@ -1301,7 +1321,7 @@ func (og *operationGenerator) GenerateExpandVolumeFunc(
return volumetypes.GeneratedOperations{
OperationFunc: expandVolumeFunc,
EventRecorderFunc: eventRecorderFunc,
CompleteFunc: util.OperationCompleteHook(volumePlugin.GetPluginName(), "expand_volume"),
CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(volumePlugin.GetPluginName(), volumeSpec), "expand_volume"),
}, nil
}
@ -1338,6 +1358,7 @@ func (og *operationGenerator) GenerateExpandVolumeFSWithoutUnmountingFunc(
fsResizeFunc := func() (error, error) {
resizeSimpleError, resizeDetailedError := og.resizeFileSystem(volumeToMount, volumeToMount.DevicePath, deviceMountPath, volumePlugin.GetPluginName())
if resizeSimpleError != nil || resizeDetailedError != nil {
return resizeSimpleError, resizeDetailedError
}
@ -1357,7 +1378,7 @@ func (og *operationGenerator) GenerateExpandVolumeFSWithoutUnmountingFunc(
return volumetypes.GeneratedOperations{
OperationFunc: fsResizeFunc,
EventRecorderFunc: eventRecorderFunc,
CompleteFunc: util.OperationCompleteHook(volumePlugin.GetPluginName(), "volume_fs_resize"),
CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(volumePlugin.GetPluginName(), volumeToMount.VolumeSpec), "volume_fs_resize"),
}, nil
}
@ -1400,8 +1421,10 @@ func isDeviceOpened(deviceToDetach AttachedVolume, mounter mount.Interface) (boo
(devicePathErr != nil && strings.Contains(devicePathErr.Error(), "does not exist")) {
// not a device path or path doesn't exist
//TODO: refer to #36092
glog.V(3).Infof("The path isn't device path or doesn't exist. Skip checking device path: %s", deviceToDetach.DevicePath)
klog.V(3).Infof("The path isn't device path or doesn't exist. Skip checking device path: %s", deviceToDetach.DevicePath)
deviceOpened = false
} else if devicePathErr != nil {
return false, deviceToDetach.GenerateErrorDetailed("PathIsDevice failed", devicePathErr)
} else {
deviceOpened, deviceOpenedErr = mounter.DeviceOpened(deviceToDetach.DevicePath)
if deviceOpenedErr != nil {


@ -6,13 +6,13 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/volume/util/recyclerclient",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
@ -22,10 +22,10 @@ go_test(
embed = [":go_default_library"],
deps = [
"//pkg/apis/core:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
],
)


@ -20,13 +20,13 @@ import (
"fmt"
"sync"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/watch"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog"
)
type RecycleEventRecorder func(eventtype, message string)
@ -51,7 +51,7 @@ func RecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Pod, kubeC
// same as above func comments, except 'recyclerClient' is a narrower pod API
// interface to ease testing
func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Pod, recyclerClient recyclerClient) error {
glog.V(5).Infof("creating recycler pod for volume %s\n", pod.Name)
klog.V(5).Infof("creating recycler pod for volume %s\n", pod.Name)
// Generate unique name for the recycler pod - we need to get "already
// exists" error when a previous controller has already started recycling
@ -63,7 +63,7 @@ func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Po
defer close(stopChannel)
podCh, err := recyclerClient.WatchPod(pod.Name, pod.Namespace, stopChannel)
if err != nil {
glog.V(4).Infof("cannot start watcher for pod %s/%s: %v", pod.Namespace, pod.Name, err)
klog.V(4).Infof("cannot start watcher for pod %s/%s: %v", pod.Namespace, pod.Name, err)
return err
}
@ -84,10 +84,10 @@ func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Po
err = waitForPod(pod, recyclerClient, podCh)
// In all cases delete the recycler pod and log its result.
glog.V(2).Infof("deleting recycler pod %s/%s", pod.Namespace, pod.Name)
klog.V(2).Infof("deleting recycler pod %s/%s", pod.Namespace, pod.Name)
deleteErr := recyclerClient.DeletePod(pod.Name, pod.Namespace)
if deleteErr != nil {
glog.Errorf("failed to delete recycler pod %s/%s: %v", pod.Namespace, pod.Name, err)
klog.Errorf("failed to delete recycler pod %s/%s: %v", pod.Namespace, pod.Name, err)
}
// Returning recycler error is preferred, the pod will be deleted again on
@ -117,7 +117,7 @@ func waitForPod(pod *v1.Pod, recyclerClient recyclerClient, podCh <-chan watch.E
case *v1.Pod:
// POD changed
pod := event.Object.(*v1.Pod)
glog.V(4).Infof("recycler pod update received: %s %s/%s %s", event.Type, pod.Namespace, pod.Name, pod.Status.Phase)
klog.V(4).Infof("recycler pod update received: %s %s/%s %s", event.Type, pod.Namespace, pod.Name, pod.Status.Phase)
switch event.Type {
case watch.Added, watch.Modified:
if pod.Status.Phase == v1.PodSucceeded {
@ -142,7 +142,7 @@ func waitForPod(pod *v1.Pod, recyclerClient recyclerClient, podCh <-chan watch.E
case *v1.Event:
// Event received
podEvent := event.Object.(*v1.Event)
glog.V(4).Infof("recycler event received: %s %s/%s %s/%s %s", event.Type, podEvent.Namespace, podEvent.Name, podEvent.InvolvedObject.Namespace, podEvent.InvolvedObject.Name, podEvent.Message)
klog.V(4).Infof("recycler event received: %s %s/%s %s/%s %s", event.Type, podEvent.Namespace, podEvent.Name, podEvent.InvolvedObject.Namespace, podEvent.InvolvedObject.Name, podEvent.Message)
if event.Type == watch.Added {
recyclerClient.Event(podEvent.Type, podEvent.Message)
}


@ -24,10 +24,13 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/strategicpatch"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/util/resizefs"
"k8s.io/kubernetes/pkg/volume"
)
var (
knownResizeConditions map[v1.PersistentVolumeClaimConditionType]bool = map[v1.PersistentVolumeClaimConditionType]bool{
knownResizeConditions = map[v1.PersistentVolumeClaimConditionType]bool{
v1.PersistentVolumeClaimFileSystemResizePending: true,
v1.PersistentVolumeClaimResizing: true,
}
@ -123,3 +126,14 @@ func MergeResizeConditionOnPVC(
pvc.Status.Conditions = newConditions
return pvc
}
// GenericResizeFS calls the generic filesystem resizer for plugins that don't have any special filesystem resize requirements
func GenericResizeFS(host volume.VolumeHost, pluginName, devicePath, deviceMountPath string) (bool, error) {
mounter := host.GetMounter(pluginName)
diskFormatter := &mount.SafeFormatAndMount{
Interface: mounter,
Exec: host.GetExec(pluginName),
}
resizer := resizefs.NewResizeFs(diskFormatter)
return resizer.Resize(devicePath, deviceMountPath)
}
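
GenericResizeFS pairs with the resizeFileSystem change earlier in this commit: the operation generator now calls the plugin's ExpandFS, and a plugin without special filesystem handling can satisfy that by delegating straight back to this helper. A sketch under that assumption — examplePlugin and its host field are illustrative, not a real plugin:

// Illustrative plugin: satisfy ExpandFS by delegating to the generic
// resizer, mirroring the call resizeFileSystem makes above.
func (p *examplePlugin) ExpandFS(spec *volume.Spec, devicePath, deviceMountPath string,
	newSize, oldSize resource.Quantity) error {
	_, err := util.GenericResizeFS(p.host, p.GetPluginName(), devicePath, deviceMountPath)
	return err
}
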


@ -9,7 +9,7 @@ go_library(
name = "go_default_library",
srcs = ["types.go"],
importpath = "k8s.io/kubernetes/pkg/volume/util/types",
deps = ["//vendor/k8s.io/apimachinery/pkg/types:go_default_library"],
deps = ["//staging/src/k8s.io/apimachinery/pkg/types:go_default_library"],
)
filegroup(


@ -25,7 +25,6 @@ import (
"strings"
"syscall"
"github.com/golang/glog"
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -34,11 +33,13 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/api/legacyscheme"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/features"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/pkg/util/mount"
utilstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
"reflect"
@ -79,6 +80,16 @@ const (
VolumeDynamicallyCreatedByKey = "kubernetes.io/createdby"
)
// VolumeZoneConfig contains config information about zonal volume.
type VolumeZoneConfig struct {
ZonePresent bool
ZonesPresent bool
ReplicaZoneFromNodePresent bool
Zone string
Zones string
ReplicaZoneFromNode string
}
// IsReady checks for the existence of a regular file
// called 'ready' in the given directory and returns
// true if that file exists.
@ -90,7 +101,7 @@ func IsReady(dir string) bool {
}
if !s.Mode().IsRegular() {
glog.Errorf("ready-file is not a file: %s", readyFile)
klog.Errorf("ready-file is not a file: %s", readyFile)
return false
}
@ -102,14 +113,14 @@ func IsReady(dir string) bool {
// created.
func SetReady(dir string) {
if err := os.MkdirAll(dir, 0750); err != nil && !os.IsExist(err) {
glog.Errorf("Can't mkdir %s: %v", dir, err)
klog.Errorf("Can't mkdir %s: %v", dir, err)
return
}
readyFile := path.Join(dir, readyFileName)
file, err := os.Create(readyFile)
if err != nil {
glog.Errorf("Can't touch %s: %v", readyFile, err)
klog.Errorf("Can't touch %s: %v", readyFile, err)
return
}
file.Close()
@ -129,10 +140,10 @@ func UnmountPath(mountPath string, mounter mount.Interface) error {
func UnmountMountPoint(mountPath string, mounter mount.Interface, extensiveMountPointCheck bool) error {
pathExists, pathErr := PathExists(mountPath)
if !pathExists {
glog.Warningf("Warning: Unmount skipped because path does not exist: %v", mountPath)
klog.Warningf("Warning: Unmount skipped because path does not exist: %v", mountPath)
return nil
}
corruptedMnt := isCorruptedMnt(pathErr)
corruptedMnt := IsCorruptedMnt(pathErr)
if pathErr != nil && !corruptedMnt {
return fmt.Errorf("Error checking path: %v", pathErr)
}
@ -160,13 +171,13 @@ func doUnmountMountPoint(mountPath string, mounter mount.Interface, extensiveMou
}
if notMnt {
glog.Warningf("Warning: %q is not a mountpoint, deleting", mountPath)
klog.Warningf("Warning: %q is not a mountpoint, deleting", mountPath)
return os.Remove(mountPath)
}
}
// Unmount the mount path
glog.V(4).Infof("%q is a mountpoint, unmounting", mountPath)
klog.V(4).Infof("%q is a mountpoint, unmounting", mountPath)
if err := mounter.Unmount(mountPath); err != nil {
return err
}
@ -175,7 +186,7 @@ func doUnmountMountPoint(mountPath string, mounter mount.Interface, extensiveMou
return mntErr
}
if notMnt {
glog.V(4).Infof("%q is unmounted, deleting the directory", mountPath)
klog.V(4).Infof("%q is unmounted, deleting the directory", mountPath)
return os.Remove(mountPath)
}
return fmt.Errorf("Failed to unmount path %v", mountPath)
@ -188,15 +199,15 @@ func PathExists(path string) (bool, error) {
return true, nil
} else if os.IsNotExist(err) {
return false, nil
} else if isCorruptedMnt(err) {
} else if IsCorruptedMnt(err) {
return true, err
} else {
return false, err
}
}
// isCorruptedMnt return true if err is about corrupted mount point
func isCorruptedMnt(err error) bool {
// IsCorruptedMnt returns true if err is about a corrupted mount point
func IsCorruptedMnt(err error) bool {
if err == nil {
return false
}
@ -211,7 +222,8 @@ func isCorruptedMnt(err error) bool {
case *os.SyscallError:
underlyingError = pe.Err
}
return underlyingError == syscall.ENOTCONN || underlyingError == syscall.ESTALE
return underlyingError == syscall.ENOTCONN || underlyingError == syscall.ESTALE || underlyingError == syscall.EIO
}
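
IsCorruptedMnt is exported here (with EIO joining ENOTCONN and ESTALE) so callers outside this package can make the same distinction UnmountMountPoint makes above: a corrupted mount point stats with an error yet still needs an unmount, unlike a genuinely absent path. A small sketch of that pattern (cleanupMount is illustrative):

// Illustrative: distinguish "gone" from "corrupted but still mounted".
func cleanupMount(mounter mount.Interface, mountPath string) error {
	exists, err := PathExists(mountPath)
	if err != nil {
		if !IsCorruptedMnt(err) {
			return err
		}
		// ENOTCONN/ESTALE/EIO: the entry is unusable but still mounted,
		// so unmount it instead of treating it as absent.
		return mounter.Unmount(mountPath)
	}
	if !exists {
		return nil // nothing mounted here
	}
	return mounter.Unmount(mountPath)
}
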
// GetSecretForPod locates secret by name in the pod's namespace and returns secret map
@ -249,6 +261,7 @@ func GetSecretForPV(secretNamespace, secretName, volumePluginName string, kubeCl
return secret, nil
}
// GetClassForVolume locates storage class by persistent volume
func GetClassForVolume(kubeClient clientset.Interface, pv *v1.PersistentVolume) (*storage.StorageClass, error) {
if kubeClient == nil {
return nil, fmt.Errorf("Cannot get kube client")
@ -278,7 +291,7 @@ func checkVolumeNodeAffinity(pv *v1.PersistentVolume, nodeLabels map[string]stri
if pv.Spec.NodeAffinity.Required != nil {
terms := pv.Spec.NodeAffinity.Required.NodeSelectorTerms
glog.V(10).Infof("Match for Required node selector terms %+v", terms)
klog.V(10).Infof("Match for Required node selector terms %+v", terms)
if !v1helper.MatchNodeSelectorTerms(terms, labels.Set(nodeLabels), nil) {
return fmt.Errorf("No matching NodeSelectorTerms")
}
@ -308,13 +321,129 @@ func LoadPodFromFile(filePath string) (*v1.Pod, error) {
return pod, nil
}
// SelectZoneForVolume is a wrapper around SelectZonesForVolume
// to select a single zone for a volume based on parameters
func SelectZoneForVolume(zoneParameterPresent, zonesParameterPresent bool, zoneParameter string, zonesParameter, zonesWithNodes sets.String, node *v1.Node, allowedTopologies []v1.TopologySelectorTerm, pvcName string) (string, error) {
zones, err := SelectZonesForVolume(zoneParameterPresent, zonesParameterPresent, zoneParameter, zonesParameter, zonesWithNodes, node, allowedTopologies, pvcName, 1)
if err != nil {
return "", err
}
zone, ok := zones.PopAny()
if !ok {
return "", fmt.Errorf("could not determine a zone to provision volume in")
}
return zone, nil
}
// SelectZonesForVolume selects zones for a volume based on several factors:
// node.zone, allowedTopologies, zone/zones parameters from storageclass,
// zones with active nodes from the cluster. The number of zones = replicas.
func SelectZonesForVolume(zoneParameterPresent, zonesParameterPresent bool, zoneParameter string, zonesParameter, zonesWithNodes sets.String, node *v1.Node, allowedTopologies []v1.TopologySelectorTerm, pvcName string, numReplicas uint32) (sets.String, error) {
if zoneParameterPresent && zonesParameterPresent {
return nil, fmt.Errorf("both zone and zones StorageClass parameters must not be used at the same time")
}
var zoneFromNode string
// pick one zone from node if present
if node != nil {
// VolumeScheduling implicit since node is not nil
if zoneParameterPresent || zonesParameterPresent {
return nil, fmt.Errorf("zone[s] cannot be specified in StorageClass if VolumeBindingMode is set to WaitForFirstConsumer. Please specify allowedTopologies in StorageClass for constraining zones")
}
// pick node's zone for one of the replicas
var ok bool
zoneFromNode, ok = node.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain]
if !ok {
return nil, fmt.Errorf("%s Label for node missing", kubeletapis.LabelZoneFailureDomain)
}
// if single replica volume and node with zone found, return immediately
if numReplicas == 1 {
return sets.NewString(zoneFromNode), nil
}
}
// pick zone from allowedZones if specified
allowedZones, err := ZonesFromAllowedTopologies(allowedTopologies)
if err != nil {
return nil, err
}
if (len(allowedTopologies) > 0) && (allowedZones.Len() == 0) {
return nil, fmt.Errorf("no matchLabelExpressions with %s key found in allowedTopologies. Please specify matchLabelExpressions with %s key", kubeletapis.LabelZoneFailureDomain, kubeletapis.LabelZoneFailureDomain)
}
if allowedZones.Len() > 0 {
// VolumeScheduling implicit since allowedZones present
if zoneParameterPresent || zonesParameterPresent {
return nil, fmt.Errorf("zone[s] cannot be specified in StorageClass if allowedTopologies specified")
}
// scheduler will guarantee that if node != nil above, zoneFromNode is a member of allowedZones.
// so if zoneFromNode != "", we can safely assume it is part of allowedZones.
zones, err := chooseZonesForVolumeIncludingZone(allowedZones, pvcName, zoneFromNode, numReplicas)
if err != nil {
return nil, fmt.Errorf("cannot process zones in allowedTopologies: %v", err)
}
return zones, nil
}
// pick zone from parameters if present
if zoneParameterPresent {
if numReplicas > 1 {
return nil, fmt.Errorf("zone cannot be specified if desired number of replicas for pv is greather than 1. Please specify zones or allowedTopologies to specify desired zones")
}
return sets.NewString(zoneParameter), nil
}
if zonesParameterPresent {
if uint32(zonesParameter.Len()) < numReplicas {
return nil, fmt.Errorf("not enough zones found in zones parameter to provision a volume with %d replicas. Found %d zones, need %d zones", numReplicas, zonesParameter.Len(), numReplicas)
}
// directly choose from zones parameter; no zone from node needs to be considered
return ChooseZonesForVolume(zonesParameter, pvcName, numReplicas), nil
}
// pick zone from zones with nodes
if zonesWithNodes.Len() > 0 {
// If node != nil (and thus zoneFromNode != ""), zoneFromNode will be a member of zonesWithNodes
zones, err := chooseZonesForVolumeIncludingZone(zonesWithNodes, pvcName, zoneFromNode, numReplicas)
if err != nil {
return nil, fmt.Errorf("cannot process zones where nodes exist in the cluster: %v", err)
}
return zones, nil
}
return nil, fmt.Errorf("cannot determine zones to provision volume in")
}
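
The branch order above encodes a precedence: a scheduler-picked node zone wins, then allowedTopologies, then the zone/zones StorageClass parameters, and finally the set of zones that currently have nodes. A sketch of the last, most common case (zone and claim names are made up):

// Dynamic provisioning with no constraints: spread two replica zones
// across the zones that currently have schedulable nodes.
func pickZones() (sets.String, error) {
	zonesWithNodes := sets.NewString("us-east1-b", "us-east1-c", "us-east1-d")
	return SelectZonesForVolume(
		false, false, "", nil, // no zone/zones StorageClass parameters
		zonesWithNodes,
		nil,            // node == nil: not a WaitForFirstConsumer binding
		nil,            // no allowedTopologies in the StorageClass
		"data-mysql-0", // PVC name, hashed to pick a starting zone
		2)              // number of replica zones requested
}
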
// ZonesFromAllowedTopologies returns a list of zones specified in allowedTopologies
func ZonesFromAllowedTopologies(allowedTopologies []v1.TopologySelectorTerm) (sets.String, error) {
zones := make(sets.String)
for _, term := range allowedTopologies {
for _, exp := range term.MatchLabelExpressions {
if exp.Key == kubeletapis.LabelZoneFailureDomain {
for _, value := range exp.Values {
zones.Insert(value)
}
} else {
return nil, fmt.Errorf("unsupported key found in matchLabelExpressions: %s", exp.Key)
}
}
}
return zones, nil
}
// ZonesSetToLabelValue converts zones set to label value
func ZonesSetToLabelValue(strSet sets.String) string {
return strings.Join(strSet.UnsortedList(), kubeletapis.LabelMultiZoneDelimiter)
}
// ZonesToSet converts a string containing a comma separated list of zones to set
func ZonesToSet(zonesString string) (sets.String, error) {
return stringToSet(zonesString, ",")
zones, err := stringToSet(zonesString, ",")
if err != nil {
return nil, fmt.Errorf("error parsing zones %s, must be strings separated by commas: %v", zonesString, err)
}
return zones, nil
}
// LabelZonesToSet converts a PV label value from string containing a delimited list of zones to set
@ -322,7 +451,7 @@ func LabelZonesToSet(labelZonesValue string) (sets.String, error) {
return stringToSet(labelZonesValue, kubeletapis.LabelMultiZoneDelimiter)
}
// StringToSet converts a string containing list separated by specified delimiter to to a set
// stringToSet converts a string containing a list separated by the specified delimiter to a set
func stringToSet(str, delimiter string) (sets.String, error) {
zonesSlice := strings.Split(str, delimiter)
zonesSet := make(sets.String)
@ -339,6 +468,27 @@ func stringToSet(str, delimiter string) (sets.String, error) {
return zonesSet, nil
}
// LabelZonesToList converts a PV label value from string containing a delimited list of zones to list
func LabelZonesToList(labelZonesValue string) ([]string, error) {
return stringToList(labelZonesValue, kubeletapis.LabelMultiZoneDelimiter)
}
// stringToList converts a string containing a list separated by the specified delimiter to a list
func stringToList(str, delimiter string) ([]string, error) {
zonesSlice := make([]string, 0)
for _, zone := range strings.Split(str, delimiter) {
trimmedZone := strings.TrimSpace(zone)
if trimmedZone == "" {
return nil, fmt.Errorf(
"%q separated list (%q) must not contain an empty string",
delimiter,
str)
}
zonesSlice = append(zonesSlice, trimmedZone)
}
return zonesSlice, nil
}
// CalculateTimeoutForVolume calculates time for a Recycler pod to complete a
// recycle operation. The calculation and return value is either the
// minimumTimeout or the timeoutIncrement per Gi of storage size, whichever is
@ -361,7 +511,11 @@ func CalculateTimeoutForVolume(minimumTimeout, timeoutIncrement int, pv *v1.Pers
// RoundUpSize(1500 * 1024*1024, 1024*1024*1024) returns '2'
// (2 GiB is the smallest allocatable volume that can hold 1500MiB)
func RoundUpSize(volumeSizeBytes int64, allocationUnitBytes int64) int64 {
return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes
roundedUp := volumeSizeBytes / allocationUnitBytes
if volumeSizeBytes%allocationUnitBytes > 0 {
roundedUp++
}
return roundedUp
}
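
The switch from the usual (size + unit - 1) / unit one-liner to an explicit quotient-and-remainder check reads as cosmetic, but the additive form can overflow int64 for sizes near MaxInt64 while the division form cannot; that appears to be the point of the rewrite. A self-contained check of both properties:

package main

import (
	"fmt"
	"math"
)

// roundUpSize mirrors the RoundUpSize logic above.
func roundUpSize(volumeSizeBytes, allocationUnitBytes int64) int64 {
	roundedUp := volumeSizeBytes / allocationUnitBytes
	if volumeSizeBytes%allocationUnitBytes > 0 {
		roundedUp++
	}
	return roundedUp
}

func main() {
	const GiB = 1024 * 1024 * 1024
	fmt.Println(roundUpSize(1500*1024*1024, GiB)) // 2: 1500MiB needs two 1GiB units
	// (size + GiB - 1) would wrap around here; the quotient form stays correct.
	fmt.Println(roundUpSize(math.MaxInt64, GiB)) // 8589934592
}
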
// RoundUpToGB rounds up given quantity to chunks of GB
@ -376,6 +530,32 @@ func RoundUpToGiB(size resource.Quantity) int64 {
return RoundUpSize(requestBytes, GIB)
}
// RoundUpSizeInt calculates how many allocation units are needed to accommodate
// a volume of given size. It returns an int instead of an int64 and an error if
// there's overflow
func RoundUpSizeInt(volumeSizeBytes int64, allocationUnitBytes int64) (int, error) {
roundedUp := RoundUpSize(volumeSizeBytes, allocationUnitBytes)
roundedUpInt := int(roundedUp)
if int64(roundedUpInt) != roundedUp {
return 0, fmt.Errorf("capacity %v is too great, casting results in integer overflow", roundedUp)
}
return roundedUpInt, nil
}
// RoundUpToGBInt rounds up given quantity to chunks of GB. It returns an
// int instead of an int64 and an error if there's overflow
func RoundUpToGBInt(size resource.Quantity) (int, error) {
requestBytes := size.Value()
return RoundUpSizeInt(requestBytes, GB)
}
// RoundUpToGiBInt rounds up given quantity up to chunks of GiB. It returns an
// int instead of an int64 and an error if there's overflow
func RoundUpToGiBInt(size resource.Quantity) (int, error) {
requestBytes := size.Value()
return RoundUpSizeInt(requestBytes, GIB)
}
// GenerateVolumeName returns a PV name with clusterName prefix. The function
// should be used to generate a name of GCE PD or Cinder volume. It basically
// adds "<clusterName>-dynamic-" before the PV name, making sure the resulting
@ -407,6 +587,11 @@ func GetPath(mounter volume.Mounter) (string, error) {
// This means that a StatefulSet's volumes (`claimname-statefulsetname-id`) will spread across available zones,
// assuming the id values are consecutive.
func ChooseZoneForVolume(zones sets.String, pvcName string) string {
// No zones available, return empty string.
if zones.Len() == 0 {
return ""
}
// We create the volume in a zone determined by the name
// Eventually the scheduler will coordinate placement into an available zone
hash, index := getPVCNameHashAndIndexOffset(pvcName)
@ -422,12 +607,45 @@ func ChooseZoneForVolume(zones sets.String, pvcName string) string {
zoneSlice := zones.List()
zone := zoneSlice[(hash+index)%uint32(len(zoneSlice))]
glog.V(2).Infof("Creating volume for PVC %q; chose zone=%q from zones=%q", pvcName, zone, zoneSlice)
klog.V(2).Infof("Creating volume for PVC %q; chose zone=%q from zones=%q", pvcName, zone, zoneSlice)
return zone
}
// chooseZonesForVolumeIncludingZone is a wrapper around ChooseZonesForVolume that ensures zoneToInclude is chosen
// zoneToInclude can either be empty in which case it is ignored. If non-empty, zoneToInclude is expected to be member of zones.
// numReplicas is expected to be > 0 and <= zones.Len()
func chooseZonesForVolumeIncludingZone(zones sets.String, pvcName, zoneToInclude string, numReplicas uint32) (sets.String, error) {
if numReplicas == 0 {
return nil, fmt.Errorf("invalid number of replicas passed")
}
if uint32(zones.Len()) < numReplicas {
return nil, fmt.Errorf("not enough zones found to provision a volume with %d replicas. Need at least %d distinct zones for a volume with %d replicas", numReplicas, numReplicas, numReplicas)
}
if zoneToInclude != "" && !zones.Has(zoneToInclude) {
return nil, fmt.Errorf("zone to be included: %s needs to be member of set: %v", zoneToInclude, zones)
}
if uint32(zones.Len()) == numReplicas {
return zones, nil
}
if zoneToInclude != "" {
zones.Delete(zoneToInclude)
numReplicas = numReplicas - 1
}
zonesChosen := ChooseZonesForVolume(zones, pvcName, numReplicas)
if zoneToInclude != "" {
zonesChosen.Insert(zoneToInclude)
}
return zonesChosen, nil
}
// ChooseZonesForVolume is identical to ChooseZoneForVolume, but selects multiple zones, for multi-zone disks.
func ChooseZonesForVolume(zones sets.String, pvcName string, numZones uint32) sets.String {
// No zones available, return empty set.
replicaZones := sets.NewString()
if zones.Len() == 0 {
return replicaZones
}
// We create the volume in a zone determined by the name
// Eventually the scheduler will coordinate placement into an available zone
hash, index := getPVCNameHashAndIndexOffset(pvcName)
@ -441,7 +659,6 @@ func ChooseZonesForVolume(zones sets.String, pvcName string, numZones uint32) se
// PVC placement (which could also e.g. avoid putting volumes in overloaded or
// unhealthy zones)
zoneSlice := zones.List()
replicaZones := sets.NewString()
startingIndex := index * numZones
for index = startingIndex; index < startingIndex+numZones; index++ {
@ -449,7 +666,7 @@ func ChooseZonesForVolume(zones sets.String, pvcName string, numZones uint32) se
replicaZones.Insert(zone)
}
glog.V(2).Infof("Creating volume for replicated PVC %q; chosen zones=%q from zones=%q",
klog.V(2).Infof("Creating volume for replicated PVC %q; chosen zones=%q from zones=%q",
pvcName, replicaZones.UnsortedList(), zoneSlice)
return replicaZones
}
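
Because the hash is taken over the claim's base name and the trailing ordinal becomes the index offset, consecutive StatefulSet claims start at consecutive slots in the sorted zone list. An illustration (zone and claim names invented; the actual zones chosen depend on the hash value):

// data-db-0, data-db-1, ... share one hash for "data-db" and differ only in
// the index offset, so replica i starts at slot (hash + i*numZones) mod len(zones).
func demoZoneSpread() {
	zones := sets.NewString("zone-a", "zone-b", "zone-c")
	for i := 0; i < 4; i++ {
		pvcName := fmt.Sprintf("data-db-%d", i)
		fmt.Printf("%s -> %v\n", pvcName, ChooseZonesForVolume(zones, pvcName, 1).List())
	}
}
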
@ -457,7 +674,7 @@ func ChooseZonesForVolume(zones sets.String, pvcName string, numZones uint32) se
func getPVCNameHashAndIndexOffset(pvcName string) (hash uint32, index uint32) {
if pvcName == "" {
// We should always be called with a name; this shouldn't happen
glog.Warningf("No name defined during volume create; choosing random zone")
klog.Warningf("No name defined during volume create; choosing random zone")
hash = rand.Uint32()
} else {
@ -493,7 +710,7 @@ func getPVCNameHashAndIndexOffset(pvcName string) (hash uint32, index uint32) {
hashString = hashString[lastDash+1:]
}
glog.V(2).Infof("Detected StatefulSet-style volume name %q; index=%d", pvcName, index)
klog.V(2).Infof("Detected StatefulSet-style volume name %q; index=%d", pvcName, index)
}
}
@ -509,7 +726,7 @@ func getPVCNameHashAndIndexOffset(pvcName string) (hash uint32, index uint32) {
// UnmountViaEmptyDir delegates the tear down operation for secret, configmap, git_repo and downwardapi
// to empty_dir
func UnmountViaEmptyDir(dir string, host volume.VolumeHost, volName string, volSpec volume.Spec, podUID utypes.UID) error {
glog.V(3).Infof("Tearing down volume %v for pod %v at %v", volName, podUID, dir)
klog.V(3).Infof("Tearing down volume %v for pod %v at %v", volName, podUID, dir)
// Wrap EmptyDir, let it do the teardown.
wrapped, err := host.NewWrapperUnmounter(volName, volSpec, podUID)
@ -551,7 +768,7 @@ func JoinMountOptions(userOptions []string, systemOptions []string) []string {
for _, mountOption := range systemOptions {
allMountOptions.Insert(mountOption)
}
return allMountOptions.UnsortedList()
return allMountOptions.List()
}
// ValidateZone returns:
@ -661,7 +878,7 @@ func notRunning(statuses []v1.ContainerStatus) bool {
}
// SplitUniqueName splits the unique name to plugin name and volume name strings. It expects the uniqueName to follow
// the fromat plugin_name/volume_name and the plugin name must be namespaced as described by the plugin interface,
// the format plugin_name/volume_name and the plugin name must be namespaced as described by the plugin interface,
// i.e. namespace/plugin containing exactly one '/'. This means the unique name will always be in the form of
// plugin_namespace/plugin/volume_name, see k8s.io/kubernetes/pkg/volume/plugins.go VolumePlugin interface
// description and pkg/volume/util/volumehelper/volumehelper.go GetUniqueVolumeNameFromSpec that constructs
@ -704,6 +921,11 @@ func GetPersistentVolumeClaimVolumeMode(claim *v1.PersistentVolumeClaim) (v1.Per
return "", fmt.Errorf("cannot get volumeMode from pvc: %v", claim.Name)
}
// GetPersistentVolumeClaimQualifiedName returns a qualified name for pvc.
func GetPersistentVolumeClaimQualifiedName(claim *v1.PersistentVolumeClaim) string {
return utilstrings.JoinQualifiedName(claim.GetNamespace(), claim.GetName())
}
// CheckVolumeModeFilesystem checks VolumeMode.
// If the mode is Filesystem, return true otherwise return false.
func CheckVolumeModeFilesystem(volumeSpec *volume.Spec) (bool, error) {
@ -725,6 +947,38 @@ func CheckPersistentVolumeClaimModeBlock(pvc *v1.PersistentVolumeClaim) bool {
return utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) && pvc.Spec.VolumeMode != nil && *pvc.Spec.VolumeMode == v1.PersistentVolumeBlock
}
// IsWindowsUNCPath checks if path is prefixed with \\
// This can be used to skip any processing of paths
// that point to SMB shares, local named pipes and local UNC path
func IsWindowsUNCPath(goos, path string) bool {
if goos != "windows" {
return false
}
// Check for UNC prefix \\
if strings.HasPrefix(path, `\\`) {
return true
}
return false
}
// IsWindowsLocalPath checks if path is a local path
// prefixed with "/" or "\" like "/foo/bar" or "\foo\bar"
func IsWindowsLocalPath(goos, path string) bool {
if goos != "windows" {
return false
}
if IsWindowsUNCPath(goos, path) {
return false
}
if strings.Contains(path, ":") {
return false
}
if !(strings.HasPrefix(path, `/`) || strings.HasPrefix(path, `\`)) {
return false
}
return true
}
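
A few concrete cases follow directly from the string checks above (the paths are invented for illustration):

func demoWindowsPaths() {
	fmt.Println(IsWindowsUNCPath("windows", `\\server\share\file`)) // true: SMB share
	fmt.Println(IsWindowsUNCPath("windows", `\\.\pipe\kubelet`))    // true: local named pipe
	fmt.Println(IsWindowsLocalPath("windows", `\foo\bar`))          // true: local, no drive letter
	fmt.Println(IsWindowsLocalPath("windows", `C:\foo\bar`))        // false: contains ':'
	fmt.Println(IsWindowsLocalPath("linux", `\foo\bar`))            // false: GOOS is not windows
}
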
// MakeAbsolutePath convert path to absolute path according to GOOS
func MakeAbsolutePath(goos, path string) string {
if goos != "windows" {

File diff suppressed because it is too large


@ -4,47 +4,14 @@ go_library(
name = "go_default_library",
srcs = [
"volume_path_handler.go",
] + select({
"@io_bazel_rules_go//go/platform:android": [
"volume_path_handler_unsupported.go",
],
"@io_bazel_rules_go//go/platform:darwin": [
"volume_path_handler_unsupported.go",
],
"@io_bazel_rules_go//go/platform:dragonfly": [
"volume_path_handler_unsupported.go",
],
"@io_bazel_rules_go//go/platform:freebsd": [
"volume_path_handler_unsupported.go",
],
"@io_bazel_rules_go//go/platform:linux": [
"volume_path_handler_linux.go",
],
"@io_bazel_rules_go//go/platform:nacl": [
"volume_path_handler_unsupported.go",
],
"@io_bazel_rules_go//go/platform:netbsd": [
"volume_path_handler_unsupported.go",
],
"@io_bazel_rules_go//go/platform:openbsd": [
"volume_path_handler_unsupported.go",
],
"@io_bazel_rules_go//go/platform:plan9": [
"volume_path_handler_unsupported.go",
],
"@io_bazel_rules_go//go/platform:solaris": [
"volume_path_handler_unsupported.go",
],
"@io_bazel_rules_go//go/platform:windows": [
"volume_path_handler_unsupported.go",
],
"//conditions:default": [],
}),
"volume_path_handler_linux.go",
"volume_path_handler_unsupported.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/util/volumepathhandler",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)


@ -23,7 +23,7 @@ import (
"path"
"path/filepath"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/apimachinery/pkg/types"
)
@ -86,14 +86,14 @@ func (v VolumePathHandler) MapDevice(devicePath string, mapPath string, linkName
if !filepath.IsAbs(mapPath) {
return fmt.Errorf("The map path should be absolute: map path: %s", mapPath)
}
glog.V(5).Infof("MapDevice: devicePath %s", devicePath)
glog.V(5).Infof("MapDevice: mapPath %s", mapPath)
glog.V(5).Infof("MapDevice: linkName %s", linkName)
klog.V(5).Infof("MapDevice: devicePath %s", devicePath)
klog.V(5).Infof("MapDevice: mapPath %s", mapPath)
klog.V(5).Infof("MapDevice: linkName %s", linkName)
// Check and create mapPath
_, err := os.Stat(mapPath)
if err != nil && !os.IsNotExist(err) {
glog.Errorf("cannot validate map path: %s", mapPath)
klog.Errorf("cannot validate map path: %s", mapPath)
return err
}
if err = os.MkdirAll(mapPath, 0750); err != nil {
@ -115,15 +115,15 @@ func (v VolumePathHandler) UnmapDevice(mapPath string, linkName string) error {
if len(mapPath) == 0 {
return fmt.Errorf("Failed to unmap device from map path. mapPath is empty")
}
glog.V(5).Infof("UnmapDevice: mapPath %s", mapPath)
glog.V(5).Infof("UnmapDevice: linkName %s", linkName)
klog.V(5).Infof("UnmapDevice: mapPath %s", mapPath)
klog.V(5).Infof("UnmapDevice: linkName %s", linkName)
// Check symbolic link exists
linkPath := path.Join(mapPath, string(linkName))
if islinkExist, checkErr := v.IsSymlinkExist(linkPath); checkErr != nil {
return checkErr
} else if !islinkExist {
glog.Warningf("Warning: Unmap skipped because symlink does not exist on the path: %v", linkPath)
klog.Warningf("Warning: Unmap skipped because symlink does not exist on the path: %v", linkPath)
return nil
}
err := os.Remove(linkPath)
@ -135,7 +135,7 @@ func (v VolumePathHandler) RemoveMapPath(mapPath string) error {
if len(mapPath) == 0 {
return fmt.Errorf("Failed to remove map path. mapPath is empty")
}
glog.V(5).Infof("RemoveMapPath: mapPath %s", mapPath)
klog.V(5).Infof("RemoveMapPath: mapPath %s", mapPath)
err := os.RemoveAll(mapPath)
if err != nil && !os.IsNotExist(err) {
return err
@ -180,12 +180,12 @@ func (v VolumePathHandler) GetDeviceSymlinkRefs(devPath string, mapPath string)
if err != nil {
return nil, fmt.Errorf("Symbolic link cannot be retrieved %v", err)
}
glog.V(5).Infof("GetDeviceSymlinkRefs: filepath: %v, devPath: %v", filepath, devPath)
klog.V(5).Infof("GetDeviceSymlinkRefs: filepath: %v, devPath: %v", filepath, devPath)
if filepath == devPath {
refs = append(refs, path.Join(mapPath, filename))
}
}
glog.V(5).Infof("GetDeviceSymlinkRefs: refs %v", refs)
klog.V(5).Infof("GetDeviceSymlinkRefs: refs %v", refs)
return refs, nil
}
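Worth noting in the hunk above: os.Readlink's result is assigned to a variable named filepath, which shadows the path/filepath package inside that loop. A hedged sketch of the same scan with a non-shadowing name; the helper and its names are illustrative, not the diff's:

```go
package main

import (
	"fmt"
	"os"
	"path"
)

// deviceSymlinkRefs mirrors the scan above: collect every symlink under
// mapPath whose target is devPath.
func deviceSymlinkRefs(devPath, mapPath string) ([]string, error) {
	entries, err := os.ReadDir(mapPath) // Go 1.16+; the original predates this
	if err != nil {
		return nil, err
	}
	var refs []string
	for _, e := range entries {
		if e.Type()&os.ModeSymlink == 0 {
			continue
		}
		target, err := os.Readlink(path.Join(mapPath, e.Name()))
		if err != nil {
			return nil, fmt.Errorf("symbolic link cannot be retrieved: %v", err)
		}
		if target == devPath {
			refs = append(refs, path.Join(mapPath, e.Name()))
		}
	}
	return refs, nil
}

func main() {
	refs, err := deviceSymlinkRefs("/dev/loop0", "/tmp/mapdir")
	fmt.Println(refs, err)
}
```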
@ -201,7 +201,7 @@ func (v VolumePathHandler) FindGlobalMapPathUUIDFromPod(pluginDir, mapPath strin
return err
}
if (fi.Mode()&os.ModeSymlink == os.ModeSymlink) && (fi.Name() == string(podUID)) {
glog.V(5).Infof("FindGlobalMapPathFromPod: path %s, mapPath %s", path, mapPath)
klog.V(5).Infof("FindGlobalMapPathFromPod: path %s, mapPath %s", path, mapPath)
if res, err := compareSymlinks(path, mapPath); err == nil && res {
globalMapPathUUID = path
}
@ -211,7 +211,7 @@ func (v VolumePathHandler) FindGlobalMapPathUUIDFromPod(pluginDir, mapPath strin
if err != nil {
return "", err
}
glog.V(5).Infof("FindGlobalMapPathFromPod: globalMapPathUUID %s", globalMapPathUUID)
klog.V(5).Infof("FindGlobalMapPathFromPod: globalMapPathUUID %s", globalMapPathUUID)
// Return path contains global map path + {pod uuid}
return globalMapPathUUID, nil
}
@ -225,7 +225,7 @@ func compareSymlinks(global, pod string) (bool, error) {
if err != nil {
return false, err
}
glog.V(5).Infof("CompareSymlinks: devGloBal %s, devPod %s", devGlobal, devPod)
klog.V(5).Infof("CompareSymlinks: devGloBal %s, devPod %s", devGlobal, devPod)
if devGlobal == devPod {
return true, nil
}

View File

@ -25,7 +25,7 @@ import (
"os/exec"
"strings"
"github.com/golang/glog"
"k8s.io/klog"
)
// AttachFileDevice takes a path to a regular file and makes it available as an
@ -38,7 +38,7 @@ func (v VolumePathHandler) AttachFileDevice(path string) (string, error) {
// If no existing loop device for the path, create one
if blockDevicePath == "" {
glog.V(4).Infof("Creating device for path: %s", path)
klog.V(4).Infof("Creating device for path: %s", path)
blockDevicePath, err = makeLoopDevice(path)
if err != nil {
return "", err
@ -61,7 +61,7 @@ func (v VolumePathHandler) GetLoopDevice(path string) (string, error) {
cmd := exec.Command(losetupPath, args...)
out, err := cmd.CombinedOutput()
if err != nil {
glog.V(2).Infof("Failed device discover command for path %s: %v %s", path, err, out)
klog.V(2).Infof("Failed device discover command for path %s: %v %s", path, err, out)
return "", err
}
return parseLosetupOutputForDevice(out)
@ -72,7 +72,7 @@ func makeLoopDevice(path string) (string, error) {
cmd := exec.Command(losetupPath, args...)
out, err := cmd.CombinedOutput()
if err != nil {
glog.V(2).Infof("Failed device create command for path: %s %v %s ", path, err, out)
klog.V(2).Infof("Failed device create command for path: %s %v %s ", path, err, out)
return "", err
}
return parseLosetupOutputForDevice(out)
@ -87,7 +87,7 @@ func (v VolumePathHandler) RemoveLoopDevice(device string) error {
if _, err := os.Stat(device); os.IsNotExist(err) {
return nil
}
glog.V(2).Infof("Failed to remove loopback device: %s: %v %s", device, err, out)
klog.V(2).Infof("Failed to remove loopback device: %s: %v %s", device, err, out)
return err
}
return nil
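GetLoopDevice and makeLoopDevice both funnel losetup output through parseLosetupOutputForDevice, whose body is not shown in this diff. As a hedged sketch only, assuming the classic losetup format "/dev/loop0: [0036]:1234 (/backing/file)", a parser of that shape could look like this; it is hypothetical and not the diff's implementation:

```go
package main

import (
	"fmt"
	"strings"
)

// parseLoopDevice is hypothetical; it is not the diff's
// parseLosetupOutputForDevice.
func parseLoopDevice(out []byte) (string, error) {
	s := strings.TrimSpace(string(out))
	if s == "" {
		return "", fmt.Errorf("no loop device found")
	}
	// The device path is everything before the first ':'.
	idx := strings.Index(s, ":")
	if idx <= 0 {
		return "", fmt.Errorf("unexpected losetup output: %q", s)
	}
	return s[:idx], nil
}

func main() {
	dev, err := parseLoopDevice([]byte("/dev/loop0: [0036]:1234 (/var/lib/img)"))
	fmt.Println(dev, err) // "/dev/loop0 <nil>"
}
```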