Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-14 18:53:35 +00:00)

Commit: Fresh dep ensure
32  vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/BUILD  (generated, vendored)
@@ -17,6 +17,7 @@ go_library(
         "//pkg/kubelet/status:go_default_library",
         "//pkg/kubelet/util/format:go_default_library",
         "//pkg/kubelet/volumemanager/cache:go_default_library",
+        "//pkg/kubelet/volumemanager/metrics:go_default_library",
         "//pkg/kubelet/volumemanager/populator:go_default_library",
         "//pkg/kubelet/volumemanager/reconciler:go_default_library",
         "//pkg/util/mount:go_default_library",
@@ -25,14 +26,14 @@ go_library(
         "//pkg/volume/util/operationexecutor:go_default_library",
         "//pkg/volume/util/types:go_default_library",
         "//pkg/volume/util/volumepathhandler:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
-        "//vendor/k8s.io/client-go/tools/record:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/record:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
 
@@ -54,13 +55,13 @@ go_test(
         "//pkg/volume/testing:go_default_library",
         "//pkg/volume/util:go_default_library",
         "//pkg/volume/util/types:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
-        "//vendor/k8s.io/client-go/tools/record:go_default_library",
-        "//vendor/k8s.io/client-go/util/testing:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/record:go_default_library",
+        "//staging/src/k8s.io/client-go/util/testing:go_default_library",
     ],
 )
 
@@ -76,6 +77,7 @@ filegroup(
     srcs = [
         ":package-srcs",
         "//pkg/kubelet/volumemanager/cache:all-srcs",
+        "//pkg/kubelet/volumemanager/metrics:all-srcs",
        "//pkg/kubelet/volumemanager/populator:all-srcs",
        "//pkg/kubelet/volumemanager/reconciler:all-srcs",
    ],
12  vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/BUILD  (generated, vendored)
@@ -19,10 +19,10 @@ go_library(
         "//pkg/volume/util:go_default_library",
         "//pkg/volume/util/operationexecutor:go_default_library",
         "//pkg/volume/util/types:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
-        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
 
@@ -38,8 +38,8 @@ go_test(
         "//pkg/volume/testing:go_default_library",
         "//pkg/volume/util:go_default_library",
         "//pkg/volume/util/types:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
     ],
 )
40  vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/actual_state_of_world.go  (generated, vendored)
@@ -24,11 +24,10 @@ import (
     "fmt"
     "sync"
 
-    "github.com/golang/glog"
-
     "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/types"
     utilfeature "k8s.io/apiserver/pkg/util/feature"
+    "k8s.io/klog"
     "k8s.io/kubernetes/pkg/features"
     "k8s.io/kubernetes/pkg/volume"
     "k8s.io/kubernetes/pkg/volume/util"
@@ -155,6 +154,11 @@ type ActualStateOfWorld interface {
     // mounted for the specified pod as requiring file system resize (if the plugin for the
     // volume indicates it requires file system resize).
     MarkFSResizeRequired(volumeName v1.UniqueVolumeName, podName volumetypes.UniquePodName)
+
+    // GetAttachedVolumes returns a list of volumes that is known to be attached
+    // to the node. This list can be used to determine volumes that are either in-use
+    // or have a mount/unmount operation pending.
+    GetAttachedVolumes() []AttachedVolume
 }
 
 // MountedVolume represents a volume that has successfully been mounted to a pod.
@@ -407,7 +411,7 @@ func (asw *actualStateOfWorld) addVolume(
     } else {
         // If volume object already exists, update the fields such as device path
         volumeObj.devicePath = devicePath
-        glog.V(2).Infof("Volume %q is already added to attachedVolume list, update device path %q",
+        klog.V(2).Infof("Volume %q is already added to attachedVolume list, update device path %q",
             volumeName,
             devicePath)
     }
@@ -476,7 +480,7 @@ func (asw *actualStateOfWorld) MarkVolumeAsResized(
             volumeName)
     }
 
-    glog.V(5).Infof("Volume %s(OuterVolumeSpecName %s) of pod %s has been resized",
+    klog.V(5).Infof("Volume %s(OuterVolumeSpecName %s) of pod %s has been resized",
         volumeName, podObj.outerVolumeSpecName, podName)
     podObj.fsResizeRequired = false
     asw.attachedVolumes[volumeName].mountedPods[podName] = podObj
@@ -497,7 +501,7 @@ func (asw *actualStateOfWorld) MarkRemountRequired(
             asw.volumePluginMgr.FindPluginBySpec(podObj.volumeSpec)
         if err != nil || volumePlugin == nil {
             // Log and continue processing
-            glog.Errorf(
+            klog.Errorf(
                 "MarkRemountRequired failed to FindPluginBySpec for pod %q (podUid %q) volume: %q (volSpecName: %q)",
                 podObj.podName,
                 podObj.podUID,
@@ -521,13 +525,13 @@ func (asw *actualStateOfWorld) MarkFSResizeRequired(
     defer asw.Unlock()
     volumeObj, exist := asw.attachedVolumes[volumeName]
     if !exist {
-        glog.Warningf("MarkFSResizeRequired for volume %s failed as volume not exist", volumeName)
+        klog.Warningf("MarkFSResizeRequired for volume %s failed as volume not exist", volumeName)
         return
     }
 
     podObj, exist := volumeObj.mountedPods[podName]
     if !exist {
-        glog.Warningf("MarkFSResizeRequired for volume %s failed "+
+        klog.Warningf("MarkFSResizeRequired for volume %s failed "+
             "as pod(%s) not exist", volumeName, podName)
         return
     }
@@ -536,7 +540,7 @@ func (asw *actualStateOfWorld) MarkFSResizeRequired(
         asw.volumePluginMgr.FindExpandablePluginBySpec(podObj.volumeSpec)
     if err != nil || volumePlugin == nil {
         // Log and continue processing
-        glog.Errorf(
+        klog.Errorf(
             "MarkFSResizeRequired failed to find expandable plugin for pod %q volume: %q (volSpecName: %q)",
             podObj.podName,
             volumeObj.volumeName,
@@ -546,7 +550,7 @@ func (asw *actualStateOfWorld) MarkFSResizeRequired(
 
     if volumePlugin.RequiresFSResize() {
         if !podObj.fsResizeRequired {
-            glog.V(3).Infof("PVC volume %s(OuterVolumeSpecName %s) of pod %s requires file system resize",
+            klog.V(3).Infof("PVC volume %s(OuterVolumeSpecName %s) of pod %s requires file system resize",
                 volumeName, podObj.outerVolumeSpecName, podName)
             podObj.fsResizeRequired = true
         }
@@ -568,7 +572,9 @@ func (asw *actualStateOfWorld) SetVolumeGloballyMounted(
 
     volumeObj.globallyMounted = globallyMounted
     volumeObj.deviceMountPath = deviceMountPath
-    volumeObj.devicePath = devicePath
+    if devicePath != "" {
+        volumeObj.devicePath = devicePath
+    }
     asw.attachedVolumes[volumeName] = volumeObj
     return nil
 }
@@ -709,6 +715,20 @@ func (asw *actualStateOfWorld) GetGloballyMountedVolumes() []AttachedVolume {
     return globallyMountedVolumes
 }
 
+func (asw *actualStateOfWorld) GetAttachedVolumes() []AttachedVolume {
+    asw.RLock()
+    defer asw.RUnlock()
+    allAttachedVolumes := make(
+        []AttachedVolume, 0 /* len */, len(asw.attachedVolumes) /* cap */)
+    for _, volumeObj := range asw.attachedVolumes {
+        allAttachedVolumes = append(
+            allAttachedVolumes,
+            asw.newAttachedVolume(&volumeObj))
+    }
+
+    return allAttachedVolumes
+}
+
 func (asw *actualStateOfWorld) GetUnmountedVolumes() []AttachedVolume {
     asw.RLock()
     defer asw.RUnlock()
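Aside: the new GetAttachedVolumes implementation above is an instance of the snapshot-under-read-lock pattern, where a mutex-guarded map is copied into a fresh slice so callers never iterate over state that concurrent writers may mutate. A minimal self-contained sketch of the same pattern (illustrative names, not the vendored types):

package main

import (
	"fmt"
	"sync"
)

// store mirrors actualStateOfWorld's shape: an RWMutex-guarded map.
type store struct {
	sync.RWMutex
	items map[string]int
}

// Snapshot takes the read lock and copies the values into a new
// slice, preallocated to the map's size, then returns the copy.
func (s *store) Snapshot() []int {
	s.RLock()
	defer s.RUnlock()
	out := make([]int, 0, len(s.items))
	for _, v := range s.items {
		out = append(out, v)
	}
	return out
}

func main() {
	s := &store{items: map[string]int{"a": 1, "b": 2}}
	fmt.Println(s.Snapshot()) // safe to use after the lock is released
}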
46  vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/desired_state_of_world.go  (generated, vendored)
@@ -150,6 +150,10 @@ type volumeToMount struct {
     // the volume.Attacher interface
     pluginIsAttachable bool
 
+    // pluginIsDeviceMountable indicates that the plugin for this volume implements
+    // the volume.DeviceMounter interface
+    pluginIsDeviceMountable bool
+
     // volumeGidValue contains the value of the GID annotation, if present.
     volumeGidValue string
 
@@ -220,13 +224,16 @@ func (dsw *desiredStateOfWorld) AddPodToVolume(
         volumeName = util.GetUniqueVolumeNameForNonAttachableVolume(podName, volumePlugin, volumeSpec)
     }
 
+    deviceMountable := dsw.isDeviceMountableVolume(volumeSpec)
+
     if _, volumeExists := dsw.volumesToMount[volumeName]; !volumeExists {
         dsw.volumesToMount[volumeName] = volumeToMount{
-            volumeName:         volumeName,
-            podsToMount:        make(map[types.UniquePodName]podToMount),
-            pluginIsAttachable: attachable,
-            volumeGidValue:     volumeGidValue,
-            reportedInUse:      false,
+            volumeName:              volumeName,
+            podsToMount:             make(map[types.UniquePodName]podToMount),
+            pluginIsAttachable:      attachable,
+            pluginIsDeviceMountable: deviceMountable,
+            volumeGidValue:          volumeGidValue,
+            reportedInUse:           false,
         }
     }
 
@@ -346,14 +353,15 @@ func (dsw *desiredStateOfWorld) GetVolumesToMount() []VolumeToMount {
             volumesToMount,
             VolumeToMount{
                 VolumeToMount: operationexecutor.VolumeToMount{
-                    VolumeName:          volumeName,
-                    PodName:             podName,
-                    Pod:                 podObj.pod,
-                    VolumeSpec:          podObj.volumeSpec,
-                    PluginIsAttachable:  volumeObj.pluginIsAttachable,
-                    OuterVolumeSpecName: podObj.outerVolumeSpecName,
-                    VolumeGidValue:      volumeObj.volumeGidValue,
-                    ReportedInUse:       volumeObj.reportedInUse}})
+                    VolumeName:              volumeName,
+                    PodName:                 podName,
+                    Pod:                     podObj.pod,
+                    VolumeSpec:              podObj.volumeSpec,
+                    PluginIsAttachable:      volumeObj.pluginIsAttachable,
+                    PluginIsDeviceMountable: volumeObj.pluginIsDeviceMountable,
+                    OuterVolumeSpecName:     podObj.outerVolumeSpecName,
+                    VolumeGidValue:          volumeObj.volumeGidValue,
+                    ReportedInUse:           volumeObj.reportedInUse}})
         }
     }
     return volumesToMount
@@ -371,3 +379,15 @@ func (dsw *desiredStateOfWorld) isAttachableVolume(volumeSpec *volume.Spec) bool
 
     return false
 }
+
+func (dsw *desiredStateOfWorld) isDeviceMountableVolume(volumeSpec *volume.Spec) bool {
+    deviceMountableVolumePlugin, _ := dsw.volumePluginMgr.FindDeviceMountablePluginBySpec(volumeSpec)
+    if deviceMountableVolumePlugin != nil {
+        volumeDeviceMounter, err := deviceMountableVolumePlugin.NewDeviceMounter()
+        if err == nil && volumeDeviceMounter != nil {
+            return true
+        }
+    }
+
+    return false
+}
44  vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/metrics/BUILD  (generated, vendored, new file)
@@ -0,0 +1,44 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+    name = "go_default_library",
+    srcs = ["metrics.go"],
+    importpath = "k8s.io/kubernetes/pkg/kubelet/volumemanager/metrics",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//pkg/kubelet/volumemanager/cache:go_default_library",
+        "//pkg/volume:go_default_library",
+        "//pkg/volume/util:go_default_library",
+        "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
+    ],
+)
+
+filegroup(
+    name = "package-srcs",
+    srcs = glob(["**"]),
+    tags = ["automanaged"],
+    visibility = ["//visibility:private"],
+)
+
+filegroup(
+    name = "all-srcs",
+    srcs = [":package-srcs"],
+    tags = ["automanaged"],
+    visibility = ["//visibility:public"],
+)
+
+go_test(
+    name = "go_default_test",
+    srcs = ["metrics_test.go"],
+    embed = [":go_default_library"],
+    deps = [
+        "//pkg/kubelet/volumemanager/cache:go_default_library",
+        "//pkg/volume:go_default_library",
+        "//pkg/volume/testing:go_default_library",
+        "//pkg/volume/util:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
+    ],
+)
114  vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/metrics/metrics.go  (generated, vendored, new file)
@ -0,0 +1,114 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"k8s.io/klog"
|
||||
"k8s.io/kubernetes/pkg/kubelet/volumemanager/cache"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
volumeutil "k8s.io/kubernetes/pkg/volume/util"
|
||||
)
|
||||
|
||||
const (
|
||||
pluginNameNotAvailable = "N/A"
|
||||
|
||||
// Metric keys for Volume Manager.
|
||||
volumeManagerTotalVolumes = "volume_manager_total_volumes"
|
||||
)
|
||||
|
||||
var (
|
||||
registerMetrics sync.Once
|
||||
|
||||
totalVolumesDesc = prometheus.NewDesc(
|
||||
volumeManagerTotalVolumes,
|
||||
"Number of volumes in Volume Manager",
|
||||
[]string{"plugin_name", "state"},
|
||||
nil,
|
||||
)
|
||||
)
|
||||
|
||||
// volumeCount is a map of maps used as a counter.
|
||||
type volumeCount map[string]map[string]int64
|
||||
|
||||
func (v volumeCount) add(state, plugin string) {
|
||||
count, ok := v[state]
|
||||
if !ok {
|
||||
count = map[string]int64{}
|
||||
}
|
||||
count[plugin]++
|
||||
v[state] = count
|
||||
}
|
||||
|
||||
// Register registers Volume Manager metrics.
|
||||
func Register(asw cache.ActualStateOfWorld, dsw cache.DesiredStateOfWorld, pluginMgr *volume.VolumePluginMgr) {
|
||||
registerMetrics.Do(func() {
|
||||
prometheus.MustRegister(&totalVolumesCollector{asw, dsw, pluginMgr})
|
||||
})
|
||||
}
|
||||
|
||||
type totalVolumesCollector struct {
|
||||
asw cache.ActualStateOfWorld
|
||||
dsw cache.DesiredStateOfWorld
|
||||
pluginMgr *volume.VolumePluginMgr
|
||||
}
|
||||
|
||||
var _ prometheus.Collector = &totalVolumesCollector{}
|
||||
|
||||
// Describe implements the prometheus.Collector interface.
|
||||
func (c *totalVolumesCollector) Describe(ch chan<- *prometheus.Desc) {
|
||||
ch <- totalVolumesDesc
|
||||
}
|
||||
|
||||
// Collect implements the prometheus.Collector interface.
|
||||
func (c *totalVolumesCollector) Collect(ch chan<- prometheus.Metric) {
|
||||
for stateName, pluginCount := range c.getVolumeCount() {
|
||||
for pluginName, count := range pluginCount {
|
||||
metric, err := prometheus.NewConstMetric(totalVolumesDesc,
|
||||
prometheus.GaugeValue,
|
||||
float64(count),
|
||||
pluginName,
|
||||
stateName)
|
||||
if err != nil {
|
||||
klog.Warningf("Failed to create metric : %v", err)
|
||||
}
|
||||
ch <- metric
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *totalVolumesCollector) getVolumeCount() volumeCount {
|
||||
counter := make(volumeCount)
|
||||
for _, mountedVolume := range c.asw.GetMountedVolumes() {
|
||||
pluginName := volumeutil.GetFullQualifiedPluginNameForVolume(mountedVolume.PluginName, mountedVolume.VolumeSpec)
|
||||
if pluginName == "" {
|
||||
pluginName = pluginNameNotAvailable
|
||||
}
|
||||
counter.add("actual_state_of_world", pluginName)
|
||||
}
|
||||
|
||||
for _, volumeToMount := range c.dsw.GetVolumesToMount() {
|
||||
pluginName := pluginNameNotAvailable
|
||||
if plugin, err := c.pluginMgr.FindPluginBySpec(volumeToMount.VolumeSpec); err == nil {
|
||||
pluginName = volumeutil.GetFullQualifiedPluginNameForVolume(plugin.GetPluginName(), volumeToMount.VolumeSpec)
|
||||
}
|
||||
counter.add("desired_state_of_world", pluginName)
|
||||
}
|
||||
return counter
|
||||
}
|
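For readers unfamiliar with the custom-collector pattern used by metrics.go above, here is a minimal standalone sketch of how a prometheus.Collector built from NewDesc and NewConstMetric is registered and scraped. The metric name, registry, and collector type are hypothetical illustrations, not part of the vendored code:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// demoCollector follows totalVolumesCollector's shape: a fixed Desc is
// advertised in Describe, and fresh const metrics are emitted on every
// Collect call, so no state is cached between scrapes.
type demoCollector struct{}

var demoDesc = prometheus.NewDesc(
	"demo_total_volumes", // hypothetical name mirroring volume_manager_total_volumes
	"Example gauge built the same way as the vendored metric",
	[]string{"plugin_name", "state"},
	nil,
)

func (c demoCollector) Describe(ch chan<- *prometheus.Desc) { ch <- demoDesc }

func (c demoCollector) Collect(ch chan<- prometheus.Metric) {
	m, err := prometheus.NewConstMetric(demoDesc, prometheus.GaugeValue,
		1, "fake-plugin", "desired_state_of_world")
	if err == nil {
		ch <- m
	}
}

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(demoCollector{})
	// Gather invokes Collect, the same path an HTTP scrape takes.
	families, _ := reg.Gather()
	for _, mf := range families {
		fmt.Println(mf.GetName(), len(mf.GetMetric()))
	}
}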
115  vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/metrics/metrics_test.go  (generated, vendored, new file)
@@ -0,0 +1,115 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+    "testing"
+
+    "k8s.io/api/core/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    k8stypes "k8s.io/apimachinery/pkg/types"
+    "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache"
+    "k8s.io/kubernetes/pkg/volume"
+
+    volumetesting "k8s.io/kubernetes/pkg/volume/testing"
+    "k8s.io/kubernetes/pkg/volume/util"
+)
+
+func TestMetricCollection(t *testing.T) {
+    volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
+    dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
+    asw := cache.NewActualStateOfWorld(k8stypes.NodeName("node-name"), volumePluginMgr)
+    pod := &v1.Pod{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: "pod1",
+            UID:  "pod1uid",
+        },
+        Spec: v1.PodSpec{
+            Volumes: []v1.Volume{
+                {
+                    Name: "volume-name",
+                    VolumeSource: v1.VolumeSource{
+                        GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
+                            PDName: "fake-device1",
+                        },
+                    },
+                },
+            },
+        },
+    }
+    volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
+    podName := util.GetUniquePodName(pod)
+
+    // Add one volume to DesiredStateOfWorld
+    generatedVolumeName, err := dsw.AddPodToVolume(podName, pod, volumeSpec, volumeSpec.Name(), "")
+    if err != nil {
+        t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
+    }
+
+    mounter, err := fakePlugin.NewMounter(volumeSpec, pod, volume.VolumeOptions{})
+    if err != nil {
+        t.Fatalf("NewMounter failed. Expected: <no error> Actual: <%v>", err)
+    }
+
+    mapper, err := fakePlugin.NewBlockVolumeMapper(volumeSpec, pod, volume.VolumeOptions{})
+    if err != nil {
+        t.Fatalf("NewBlockVolumeMapper failed. Expected: <no error> Actual: <%v>", err)
+    }
+
+    // Add one volume to ActualStateOfWorld
+    devicePath := "fake/device/path"
+    err = asw.MarkVolumeAsAttached("", volumeSpec, "", devicePath)
+    if err != nil {
+        t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err)
+    }
+
+    err = asw.AddPodToVolume(
+        podName, pod.UID, generatedVolumeName, mounter, mapper, volumeSpec.Name(), "", volumeSpec)
+    if err != nil {
+        t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
+    }
+
+    metricCollector := &totalVolumesCollector{asw, dsw, volumePluginMgr}
+
+    // Check if getVolumeCount returns correct data
+    count := metricCollector.getVolumeCount()
+    if len(count) != 2 {
+        t.Errorf("getVolumeCount failed. Expected <2> states, got <%d>", len(count))
+    }
+
+    dswCount, ok := count["desired_state_of_world"]
+    if !ok {
+        t.Errorf("getVolumeCount failed. Expected <desired_state_of_world>, got nothing")
+    }
+
+    fakePluginCount := dswCount["fake-plugin"]
+    if fakePluginCount != 1 {
+        t.Errorf("getVolumeCount failed. Expected <1> fake-plugin volume in DesiredStateOfWorld, got <%d>",
+            fakePluginCount)
+    }
+
+    aswCount, ok := count["actual_state_of_world"]
+    if !ok {
+        t.Errorf("getVolumeCount failed. Expected <actual_state_of_world>, got nothing")
+    }
+
+    fakePluginCount = aswCount["fake-plugin"]
+    if fakePluginCount != 1 {
+        t.Errorf("getVolumeCount failed. Expected <1> fake-plugin volume in ActualStateOfWorld, got <%d>",
+            fakePluginCount)
+    }
+}
36  vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/BUILD  (generated, vendored)
@@ -21,14 +21,14 @@ go_library(
         "//pkg/volume:go_default_library",
         "//pkg/volume/util:go_default_library",
         "//pkg/volume/util/types:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
-        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
 
@@ -47,7 +47,10 @@ filegroup(
 
 go_test(
     name = "go_default_test",
-    srcs = ["desired_state_of_world_populator_test.go"],
+    srcs = [
+        "desired_state_of_world_populator_test.go",
+        "main_test.go",
+    ],
     embed = [":go_default_library"],
     deps = [
         "//pkg/features:go_default_library",
@@ -62,12 +65,13 @@ go_test(
         "//pkg/volume/testing:go_default_library",
         "//pkg/volume/util:go_default_library",
         "//pkg/volume/util/types:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
-        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
-        "//vendor/k8s.io/client-go/testing:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
+        "//staging/src/k8s.io/client-go/testing:go_default_library",
     ],
 )
vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go  (generated, vendored)
@@ -25,7 +25,7 @@ import (
     "sync"
     "time"
 
-    "github.com/golang/glog"
+    "k8s.io/klog"
 
     "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -127,16 +127,16 @@ type processedPods struct {
 
 func (dswp *desiredStateOfWorldPopulator) Run(sourcesReady config.SourcesReady, stopCh <-chan struct{}) {
     // Wait for the completion of a loop that started after sources are all ready, then set hasAddedPods accordingly
-    glog.Infof("Desired state populator starts to run")
+    klog.Infof("Desired state populator starts to run")
     wait.PollUntil(dswp.loopSleepDuration, func() (bool, error) {
         done := sourcesReady.AllReady()
-        dswp.populatorLoopFunc()()
+        dswp.populatorLoop()
         return done, nil
     }, stopCh)
     dswp.hasAddedPodsLock.Lock()
     dswp.hasAddedPods = true
    dswp.hasAddedPodsLock.Unlock()
-    wait.Until(dswp.populatorLoopFunc(), dswp.loopSleepDuration, stopCh)
+    wait.Until(dswp.populatorLoop, dswp.loopSleepDuration, stopCh)
 }
 
 func (dswp *desiredStateOfWorldPopulator) ReprocessPod(
@@ -150,26 +150,24 @@ func (dswp *desiredStateOfWorldPopulator) HasAddedPods() bool {
     return dswp.hasAddedPods
 }
 
-func (dswp *desiredStateOfWorldPopulator) populatorLoopFunc() func() {
-    return func() {
-        dswp.findAndAddNewPods()
+func (dswp *desiredStateOfWorldPopulator) populatorLoop() {
+    dswp.findAndAddNewPods()
 
-        // findAndRemoveDeletedPods() calls out to the container runtime to
-        // determine if the containers for a given pod are terminated. This is
-        // an expensive operation, therefore we limit the rate that
-        // findAndRemoveDeletedPods() is called independently of the main
-        // populator loop.
-        if time.Since(dswp.timeOfLastGetPodStatus) < dswp.getPodStatusRetryDuration {
-            glog.V(5).Infof(
-                "Skipping findAndRemoveDeletedPods(). Not permitted until %v (getPodStatusRetryDuration %v).",
-                dswp.timeOfLastGetPodStatus.Add(dswp.getPodStatusRetryDuration),
-                dswp.getPodStatusRetryDuration)
+    // findAndRemoveDeletedPods() calls out to the container runtime to
+    // determine if the containers for a given pod are terminated. This is
+    // an expensive operation, therefore we limit the rate that
+    // findAndRemoveDeletedPods() is called independently of the main
+    // populator loop.
+    if time.Since(dswp.timeOfLastGetPodStatus) < dswp.getPodStatusRetryDuration {
+        klog.V(5).Infof(
+            "Skipping findAndRemoveDeletedPods(). Not permitted until %v (getPodStatusRetryDuration %v).",
+            dswp.timeOfLastGetPodStatus.Add(dswp.getPodStatusRetryDuration),
+            dswp.getPodStatusRetryDuration)
 
-            return
-        }
+        return
+    }
 
-        dswp.findAndRemoveDeletedPods()
-    }
+    dswp.findAndRemoveDeletedPods()
 }
 
 func (dswp *desiredStateOfWorldPopulator) isPodTerminated(pod *v1.Pod) bool {
@@ -232,7 +230,7 @@ func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() {
             var getPodsErr error
             runningPods, getPodsErr = dswp.kubeContainerRuntime.GetPods(false)
             if getPodsErr != nil {
-                glog.Errorf(
+                klog.Errorf(
                     "kubeContainerRuntime.findAndRemoveDeletedPods returned error %v.",
                     getPodsErr)
                 continue
@@ -254,17 +252,17 @@ func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() {
         }
 
         if runningContainers {
-            glog.V(4).Infof(
+            klog.V(4).Infof(
                 "Pod %q has been removed from pod manager. However, it still has one or more containers in the non-exited state. Therefore, it will not be removed from volume manager.",
                 format.Pod(volumeToMount.Pod))
             continue
         }
 
         if !dswp.actualStateOfWorld.VolumeExists(volumeToMount.VolumeName) && podExists {
-            glog.V(4).Infof(volumeToMount.GenerateMsgDetailed("Actual state has not yet has this information skip removing volume from desired state", ""))
+            klog.V(4).Infof(volumeToMount.GenerateMsgDetailed("Actual state has not yet has this information skip removing volume from desired state", ""))
             continue
         }
-        glog.V(4).Infof(volumeToMount.GenerateMsgDetailed("Removing volume from desired state", ""))
+        klog.V(4).Infof(volumeToMount.GenerateMsgDetailed("Removing volume from desired state", ""))
 
         dswp.desiredStateOfWorld.DeletePodFromVolume(
             volumeToMount.PodName, volumeToMount.VolumeName)
@@ -295,7 +293,7 @@ func (dswp *desiredStateOfWorldPopulator) processPodVolumes(
         pvc, volumeSpec, volumeGidValue, err :=
             dswp.createVolumeSpec(podVolume, pod.Name, pod.Namespace, mountsMap, devicesMap)
         if err != nil {
-            glog.Errorf(
+            klog.Errorf(
                 "Error processing volume %q for pod %q: %v",
                 podVolume.Name,
                 format.Pod(pod),
@@ -308,7 +306,7 @@ func (dswp *desiredStateOfWorldPopulator) processPodVolumes(
         _, err = dswp.desiredStateOfWorld.AddPodToVolume(
             uniquePodName, pod, volumeSpec, podVolume.Name, volumeGidValue)
         if err != nil {
-            glog.Errorf(
+            klog.Errorf(
                 "Failed to add volume %q (specName: %q) for pod %q to desiredStateOfWorld. err=%v",
                 podVolume.Name,
                 volumeSpec.Name(),
@@ -317,7 +315,7 @@ func (dswp *desiredStateOfWorldPopulator) processPodVolumes(
             allVolumesAdded = false
         }
 
-        glog.V(4).Infof(
+        klog.V(4).Infof(
             "Added volume %q (volSpec=%q) for pod %q to desired state.",
             podVolume.Name,
             volumeSpec.Name(),
@@ -367,12 +365,12 @@ func (dswp *desiredStateOfWorldPopulator) checkVolumeFSResize(
     }
     fsVolume, err := util.CheckVolumeModeFilesystem(volumeSpec)
     if err != nil {
-        glog.Errorf("Check volume mode failed for volume %s(OuterVolumeSpecName %s): %v",
+        klog.Errorf("Check volume mode failed for volume %s(OuterVolumeSpecName %s): %v",
            uniqueVolumeName, podVolume.Name, err)
        return
    }
    if !fsVolume {
-        glog.V(5).Infof("Block mode volume needn't to check file system resize request")
+        klog.V(5).Infof("Block mode volume needn't to check file system resize request")
        return
    }
    if processedVolumesForFSResize.Has(string(uniqueVolumeName)) {
@@ -382,7 +380,7 @@ func (dswp *desiredStateOfWorldPopulator) checkVolumeFSResize(
     }
     if mountedReadOnlyByPod(podVolume, pod) {
         // This volume is used as read only by this pod, we don't perform resize for read only volumes.
-        glog.V(5).Infof("Skip file system resize check for volume %s in pod %s/%s "+
+        klog.V(5).Infof("Skip file system resize check for volume %s in pod %s/%s "+
             "as the volume is mounted as readonly", podVolume.Name, pod.Namespace, pod.Name)
         return
     }
@@ -476,7 +474,7 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec(
     podVolume v1.Volume, podName string, podNamespace string, mountsMap map[string]bool, devicesMap map[string]bool) (*v1.PersistentVolumeClaim, *volume.Spec, string, error) {
     if pvcSource :=
         podVolume.VolumeSource.PersistentVolumeClaim; pvcSource != nil {
-        glog.V(5).Infof(
+        klog.V(5).Infof(
             "Found PVC, ClaimName: %q/%q",
             podNamespace,
             pvcSource.ClaimName)
@@ -493,7 +491,7 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec(
         }
         pvName, pvcUID := pvc.Spec.VolumeName, pvc.UID
 
-        glog.V(5).Infof(
+        klog.V(5).Infof(
             "Found bound PV for PVC (ClaimName %q/%q pvcUID %v): pvName=%q",
             podNamespace,
             pvcSource.ClaimName,
@@ -511,9 +509,9 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec(
             err)
         }
 
-        glog.V(5).Infof(
+        klog.V(5).Infof(
             "Extracted volumeSpec (%v) from bound PV (pvName %q) and PVC (ClaimName %q/%q pvcUID %v)",
-            volumeSpec.Name,
+            volumeSpec.Name(),
             pvName,
             podNamespace,
             pvcSource.ClaimName,
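The Run refactor above keeps the same two-phase loop: wait.PollUntil drives populatorLoop until all pod sources report ready, then wait.Until takes over until shutdown. A runnable sketch of that hand-off, assuming the k8s.io/apimachinery/pkg/util/wait API of this vintage (the loop body and timings are illustrative):

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	stopCh := make(chan struct{})
	ready := make(chan struct{})
	go func() { time.Sleep(30 * time.Millisecond); close(ready) }()  // sources become ready
	go func() { time.Sleep(200 * time.Millisecond); close(stopCh) }() // shutdown signal

	loop := func() { fmt.Println("populator loop tick") }

	// Phase 1: keep ticking until the readiness condition holds once.
	wait.PollUntil(10*time.Millisecond, func() (bool, error) {
		loop()
		select {
		case <-ready:
			return true, nil // hand off to the steady-state loop
		default:
			return false, nil
		}
	}, stopCh)

	// Phase 2: steady-state loop for the lifetime of the stop channel.
	wait.Until(loop, 10*time.Millisecond, stopCh)
}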
vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator_test.go  (generated, vendored)
@@ -27,6 +27,7 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime"
     utilfeature "k8s.io/apiserver/pkg/util/feature"
+    utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
     "k8s.io/client-go/kubernetes/fake"
     core "k8s.io/client-go/testing"
     "k8s.io/kubernetes/pkg/features"
@@ -45,12 +46,14 @@ import (
 
 func TestFindAndAddNewPods_FindAndRemoveDeletedPods(t *testing.T) {
     // create dswp
+    mode := v1.PersistentVolumeFilesystem
     pv := &v1.PersistentVolume{
         ObjectMeta: metav1.ObjectMeta{
             Name: "dswp-test-volume-name",
         },
         Spec: v1.PersistentVolumeSpec{
-            ClaimRef: &v1.ObjectReference{Namespace: "ns", Name: "file-bound"},
+            ClaimRef:   &v1.ObjectReference{Namespace: "ns", Name: "file-bound"},
+            VolumeMode: &mode,
         },
     }
     pvc := &v1.PersistentVolumeClaim{
@@ -151,7 +154,7 @@ func TestFindAndAddNewPods_FindAndRemoveDeletedPods(t *testing.T) {
 
 func TestFindAndAddNewPods_FindAndRemoveDeletedPods_Valid_Block_VolumeDevices(t *testing.T) {
     // Enable BlockVolume feature gate
-    utilfeature.DefaultFeatureGate.Set("BlockVolume=true")
+    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BlockVolume, true)()
 
     // create dswp
     mode := v1.PersistentVolumeBlock
@@ -256,9 +259,6 @@ func TestFindAndAddNewPods_FindAndRemoveDeletedPods_Valid_Block_VolumeDevices(t
             expectedVolumeName)
     }
     }
-
-    // Rollback feature gate to false.
-    utilfeature.DefaultFeatureGate.Set("BlockVolume=false")
 }
 
 func TestCreateVolumeSpec_Valid_File_VolumeMounts(t *testing.T) {
@@ -309,7 +309,7 @@ func TestCreateVolumeSpec_Valid_File_VolumeMounts(t *testing.T) {
 
 func TestCreateVolumeSpec_Valid_Block_VolumeDevices(t *testing.T) {
     // Enable BlockVolume feature gate
-    utilfeature.DefaultFeatureGate.Set("BlockVolume=true")
+    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BlockVolume, true)()
 
     // create dswp
     mode := v1.PersistentVolumeBlock
@@ -354,14 +354,11 @@ func TestCreateVolumeSpec_Valid_Block_VolumeDevices(t *testing.T) {
     if volumeSpec == nil || err != nil {
         t.Fatalf("Failed to create volumeSpec with combination of block mode and volumeDevices. err: %v", err)
     }
-
-    // Rollback feature gate to false.
-    utilfeature.DefaultFeatureGate.Set("BlockVolume=false")
 }
 
 func TestCreateVolumeSpec_Invalid_File_VolumeDevices(t *testing.T) {
     // Enable BlockVolume feature gate
-    utilfeature.DefaultFeatureGate.Set("BlockVolume=true")
+    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BlockVolume, true)()
 
     // create dswp
     mode := v1.PersistentVolumeFilesystem
@@ -406,14 +403,11 @@ func TestCreateVolumeSpec_Invalid_File_VolumeDevices(t *testing.T) {
     if volumeSpec != nil || err == nil {
         t.Fatalf("Unexpected volumeMode and volumeMounts/volumeDevices combination is accepted")
     }
-
-    // Rollback feature gate to false.
-    utilfeature.DefaultFeatureGate.Set("BlockVolume=false")
 }
 
 func TestCreateVolumeSpec_Invalid_Block_VolumeMounts(t *testing.T) {
     // Enable BlockVolume feature gate
-    utilfeature.DefaultFeatureGate.Set("BlockVolume=true")
+    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BlockVolume, true)()
 
     // create dswp
     mode := v1.PersistentVolumeBlock
@@ -458,12 +452,10 @@ func TestCreateVolumeSpec_Invalid_Block_VolumeMounts(t *testing.T) {
     if volumeSpec != nil || err == nil {
         t.Fatalf("Unexpected volumeMode and volumeMounts/volumeDevices combination is accepted")
     }
-
-    // Rollback feature gate to false.
-    utilfeature.DefaultFeatureGate.Set("BlockVolume=false")
 }
 
 func TestCheckVolumeFSResize(t *testing.T) {
+    mode := v1.PersistentVolumeFilesystem
     pv := &v1.PersistentVolume{
         ObjectMeta: metav1.ObjectMeta{
             Name: "dswp-test-volume-name",
@@ -472,6 +464,7 @@ func TestCheckVolumeFSResize(t *testing.T) {
             PersistentVolumeSource: v1.PersistentVolumeSource{RBD: &v1.RBDPersistentVolumeSource{}},
             Capacity:               volumeCapacity(1),
             ClaimRef:               &v1.ObjectReference{Namespace: "ns", Name: "file-bound"},
+            VolumeMode:             &mode,
         },
     }
     pvc := &v1.PersistentVolumeClaim{
@@ -510,7 +503,7 @@ func TestCheckVolumeFSResize(t *testing.T) {
     reconcileASW(fakeASW, fakeDSW, t)
 
     // No resize request for volume, volumes in ASW shouldn't be marked as fsResizeRequired.
-    setExpandOnlinePersistentVolumesFeatureGate("true", t)
+    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ExpandInUsePersistentVolumes, true)()
     resizeRequiredVolumes := reprocess(dswp, uniquePodName, fakeDSW, fakeASW)
     if len(resizeRequiredVolumes) > 0 {
         t.Fatalf("No resize request for any volumes, but found resize required volumes in ASW: %v", resizeRequiredVolumes)
@@ -521,14 +514,14 @@ func TestCheckVolumeFSResize(t *testing.T) {
     pvc.Spec.Resources.Requests = volumeCapacity(2)
 
     // Disable the feature gate, so volume shouldn't be marked as fsResizeRequired.
-    setExpandOnlinePersistentVolumesFeatureGate("false", t)
+    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ExpandInUsePersistentVolumes, false)()
     resizeRequiredVolumes = reprocess(dswp, uniquePodName, fakeDSW, fakeASW)
     if len(resizeRequiredVolumes) > 0 {
         t.Fatalf("Feature gate disabled, but found resize required volumes in ASW: %v", resizeRequiredVolumes)
     }
 
     // Make volume used as ReadOnly, so volume shouldn't be marked as fsResizeRequired.
-    setExpandOnlinePersistentVolumesFeatureGate("true", t)
+    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ExpandInUsePersistentVolumes, true)()
     pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true
     resizeRequiredVolumes = reprocess(dswp, uniquePodName, fakeDSW, fakeASW)
     if len(resizeRequiredVolumes) > 0 {
@@ -561,13 +554,6 @@ func volumeCapacity(size int) v1.ResourceList {
     return v1.ResourceList{v1.ResourceStorage: resource.MustParse(fmt.Sprintf("%dGi", size))}
 }
 
-func setExpandOnlinePersistentVolumesFeatureGate(value string, t *testing.T) {
-    err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%s", features.ExpandInUsePersistentVolumes, value))
-    if err != nil {
-        t.Fatalf("Set ExpandInUsePersistentVolumes feature gate to %s failed: %v", value, err)
-    }
-}
-
 func reconcileASW(asw cache.ActualStateOfWorld, dsw cache.DesiredStateOfWorld, t *testing.T) {
     for _, volumeToMount := range dsw.GetVolumesToMount() {
         err := asw.MarkVolumeAsAttached(volumeToMount.VolumeName, volumeToMount.VolumeSpec, "", "")
29  vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/main_test.go  (generated, vendored, new file)
@@ -0,0 +1,29 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package populator
+
+import (
+    "testing"
+
+    utilfeature "k8s.io/apiserver/pkg/util/feature"
+    utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
+    _ "k8s.io/kubernetes/pkg/features"
+)
+
+func TestMain(m *testing.M) {
+    utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run)
+}
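The main_test.go added above relies on Go's TestMain hook; VerifyFeatureGatesUnchanged wraps m.Run so it can assert that feature-gate state is restored once the suite finishes. A bare sketch of the underlying TestMain mechanics, using only the standard library (the verification step here is a placeholder, not the vendored helper):

package populator // illustrative; TestMain lives in the package under test

import (
	"os"
	"testing"
)

// TestMain replaces the default test runner for the package: it runs
// every test via m.Run, may inspect shared state afterwards, and must
// report the exit code itself.
func TestMain(m *testing.M) {
	code := m.Run()
	// A verifier like VerifyFeatureGatesUnchanged would check here that
	// globals mutated by individual tests were restored before exiting.
	os.Exit(code)
}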
41  vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/BUILD  (generated, vendored)
@@ -23,19 +23,22 @@ go_library(
         "//pkg/volume/util/nestedpendingoperations:go_default_library",
         "//pkg/volume/util/operationexecutor:go_default_library",
         "//pkg/volume/util/types:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
-        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
 
 go_test(
     name = "go_default_test",
-    srcs = ["reconciler_test.go"],
+    srcs = [
+        "main_test.go",
+        "reconciler_test.go",
+    ],
     embed = [":go_default_library"],
     deps = [
         "//pkg/features:go_default_library",
@@ -45,17 +48,19 @@ go_test(
         "//pkg/volume/testing:go_default_library",
         "//pkg/volume/util:go_default_library",
         "//pkg/volume/util/operationexecutor:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
+        "//staging/src/k8s.io/client-go/testing:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/record:go_default_library",
         "//vendor/github.com/stretchr/testify/assert:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
-        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
-        "//vendor/k8s.io/client-go/testing:go_default_library",
-        "//vendor/k8s.io/client-go/tools/record:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
29  vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/main_test.go  (generated, vendored, new file)
@@ -0,0 +1,29 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package reconciler
+
+import (
+    "testing"
+
+    utilfeature "k8s.io/apiserver/pkg/util/feature"
+    utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
+    _ "k8s.io/kubernetes/pkg/features"
+)
+
+func TestMain(m *testing.M) {
+    utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run)
+}
92  vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler.go  (generated, vendored)
@@ -26,13 +26,13 @@ import (
     "path"
     "time"
 
-    "github.com/golang/glog"
     "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/types"
     "k8s.io/apimachinery/pkg/util/wait"
     utilfeature "k8s.io/apiserver/pkg/util/feature"
     clientset "k8s.io/client-go/kubernetes"
+    "k8s.io/klog"
     "k8s.io/kubernetes/pkg/features"
     "k8s.io/kubernetes/pkg/kubelet/config"
     "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache"
@@ -151,7 +151,7 @@ func (rc *reconciler) reconciliationLoopFunc() func() {
         // Otherwise, the reconstruct process may clean up pods' volumes that are still in use because
         // desired state of world does not contain a complete list of pods.
         if rc.populatorHasAddedPods() && !rc.StatesHasBeenSynced() {
-            glog.Infof("Reconciler: start to sync state")
+            klog.Infof("Reconciler: start to sync state")
             rc.sync()
         }
     }
@@ -167,7 +167,7 @@ func (rc *reconciler) reconcile() {
     for _, mountedVolume := range rc.actualStateOfWorld.GetMountedVolumes() {
         if !rc.desiredStateOfWorld.PodExistsInVolume(mountedVolume.PodName, mountedVolume.VolumeName) {
             // Volume is mounted, unmount it
-            glog.V(5).Infof(mountedVolume.GenerateMsgDetailed("Starting operationExecutor.UnmountVolume", ""))
+            klog.V(5).Infof(mountedVolume.GenerateMsgDetailed("Starting operationExecutor.UnmountVolume", ""))
             err := rc.operationExecutor.UnmountVolume(
                 mountedVolume.MountedVolume, rc.actualStateOfWorld, rc.kubeletPodsDir)
             if err != nil &&
@@ -175,10 +175,10 @@ func (rc *reconciler) reconcile() {
                 !exponentialbackoff.IsExponentialBackoff(err) {
                 // Ignore nestedpendingoperations.IsAlreadyExists and exponentialbackoff.IsExponentialBackoff errors, they are expected.
                 // Log all other errors.
-                glog.Errorf(mountedVolume.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.UnmountVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error())
+                klog.Errorf(mountedVolume.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.UnmountVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error())
             }
             if err == nil {
-                glog.Infof(mountedVolume.GenerateMsgDetailed("operationExecutor.UnmountVolume started", ""))
+                klog.Infof(mountedVolume.GenerateMsgDetailed("operationExecutor.UnmountVolume started", ""))
             }
         }
     }
@@ -191,7 +191,7 @@ func (rc *reconciler) reconcile() {
             if rc.controllerAttachDetachEnabled || !volumeToMount.PluginIsAttachable {
                 // Volume is not attached (or doesn't implement attacher), kubelet attach is disabled, wait
                 // for controller to finish attaching volume.
-                glog.V(5).Infof(volumeToMount.GenerateMsgDetailed("Starting operationExecutor.VerifyControllerAttachedVolume", ""))
+                klog.V(5).Infof(volumeToMount.GenerateMsgDetailed("Starting operationExecutor.VerifyControllerAttachedVolume", ""))
                 err := rc.operationExecutor.VerifyControllerAttachedVolume(
                     volumeToMount.VolumeToMount,
                     rc.nodeName,
@@ -201,10 +201,10 @@ func (rc *reconciler) reconcile() {
                     !exponentialbackoff.IsExponentialBackoff(err) {
                     // Ignore nestedpendingoperations.IsAlreadyExists and exponentialbackoff.IsExponentialBackoff errors, they are expected.
                     // Log all other errors.
-                    glog.Errorf(volumeToMount.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.VerifyControllerAttachedVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error())
+                    klog.Errorf(volumeToMount.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.VerifyControllerAttachedVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error())
                 }
                 if err == nil {
-                    glog.Infof(volumeToMount.GenerateMsgDetailed("operationExecutor.VerifyControllerAttachedVolume started", ""))
+                    klog.Infof(volumeToMount.GenerateMsgDetailed("operationExecutor.VerifyControllerAttachedVolume started", ""))
                 }
             } else {
                 // Volume is not attached to node, kubelet attach is enabled, volume implements an attacher,
@@ -214,17 +214,17 @@ func (rc *reconciler) reconcile() {
                     VolumeSpec: volumeToMount.VolumeSpec,
                     NodeName:   rc.nodeName,
                 }
-                glog.V(5).Infof(volumeToAttach.GenerateMsgDetailed("Starting operationExecutor.AttachVolume", ""))
+                klog.V(5).Infof(volumeToAttach.GenerateMsgDetailed("Starting operationExecutor.AttachVolume", ""))
                 err := rc.operationExecutor.AttachVolume(volumeToAttach, rc.actualStateOfWorld)
                 if err != nil &&
                     !nestedpendingoperations.IsAlreadyExists(err) &&
                     !exponentialbackoff.IsExponentialBackoff(err) {
                     // Ignore nestedpendingoperations.IsAlreadyExists and exponentialbackoff.IsExponentialBackoff errors, they are expected.
                     // Log all other errors.
-                    glog.Errorf(volumeToMount.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.AttachVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error())
+                    klog.Errorf(volumeToMount.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.AttachVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error())
                 }
                 if err == nil {
-                    glog.Infof(volumeToMount.GenerateMsgDetailed("operationExecutor.AttachVolume started", ""))
+                    klog.Infof(volumeToMount.GenerateMsgDetailed("operationExecutor.AttachVolume started", ""))
                 }
             }
         } else if !volMounted || cache.IsRemountRequiredError(err) {
@@ -234,7 +234,7 @@ func (rc *reconciler) reconcile() {
             if isRemount {
                 remountingLogStr = "Volume is already mounted to pod, but remount was requested."
             }
-            glog.V(4).Infof(volumeToMount.GenerateMsgDetailed("Starting operationExecutor.MountVolume", remountingLogStr))
+            klog.V(4).Infof(volumeToMount.GenerateMsgDetailed("Starting operationExecutor.MountVolume", remountingLogStr))
             err := rc.operationExecutor.MountVolume(
                 rc.waitForAttachTimeout,
                 volumeToMount.VolumeToMount,
@@ -245,18 +245,18 @@ func (rc *reconciler) reconcile() {
                 !exponentialbackoff.IsExponentialBackoff(err) {
                 // Ignore nestedpendingoperations.IsAlreadyExists and exponentialbackoff.IsExponentialBackoff errors, they are expected.
                 // Log all other errors.
-                glog.Errorf(volumeToMount.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.MountVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error())
+                klog.Errorf(volumeToMount.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.MountVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error())
             }
             if err == nil {
                 if remountingLogStr == "" {
-                    glog.V(1).Infof(volumeToMount.GenerateMsgDetailed("operationExecutor.MountVolume started", remountingLogStr))
+                    klog.V(1).Infof(volumeToMount.GenerateMsgDetailed("operationExecutor.MountVolume started", remountingLogStr))
                 } else {
-                    glog.V(5).Infof(volumeToMount.GenerateMsgDetailed("operationExecutor.MountVolume started", remountingLogStr))
+                    klog.V(5).Infof(volumeToMount.GenerateMsgDetailed("operationExecutor.MountVolume started", remountingLogStr))
                 }
             }
         } else if cache.IsFSResizeRequiredError(err) &&
             utilfeature.DefaultFeatureGate.Enabled(features.ExpandInUsePersistentVolumes) {
-            glog.V(4).Infof(volumeToMount.GenerateMsgDetailed("Starting operationExecutor.ExpandVolumeFSWithoutUnmounting", ""))
+            klog.V(4).Infof(volumeToMount.GenerateMsgDetailed("Starting operationExecutor.ExpandVolumeFSWithoutUnmounting", ""))
             err := rc.operationExecutor.ExpandVolumeFSWithoutUnmounting(
                 volumeToMount.VolumeToMount,
                 rc.actualStateOfWorld)
@@ -265,10 +265,10 @@ func (rc *reconciler) reconcile() {
                 !exponentialbackoff.IsExponentialBackoff(err) {
                 // Ignore nestedpendingoperations.IsAlreadyExists and exponentialbackoff.IsExponentialBackoff errors, they are expected.
                 // Log all other errors.
-                glog.Errorf(volumeToMount.GenerateErrorDetailed("operationExecutor.ExpandVolumeFSWithoutUnmounting failed", err).Error())
+                klog.Errorf(volumeToMount.GenerateErrorDetailed("operationExecutor.ExpandVolumeFSWithoutUnmounting failed", err).Error())
             }
             if err == nil {
-                glog.V(4).Infof(volumeToMount.GenerateMsgDetailed("operationExecutor.ExpandVolumeFSWithoutUnmounting started", ""))
+                klog.V(4).Infof(volumeToMount.GenerateMsgDetailed("operationExecutor.ExpandVolumeFSWithoutUnmounting started", ""))
             }
         }
     }
@@ -280,7 +280,7 @@ func (rc *reconciler) reconcile() {
             !rc.operationExecutor.IsOperationPending(attachedVolume.VolumeName, nestedpendingoperations.EmptyUniquePodName) {
             if attachedVolume.GloballyMounted {
                 // Volume is globally mounted to device, unmount it
-                glog.V(5).Infof(attachedVolume.GenerateMsgDetailed("Starting operationExecutor.UnmountDevice", ""))
+                klog.V(5).Infof(attachedVolume.GenerateMsgDetailed("Starting operationExecutor.UnmountDevice", ""))
                 err := rc.operationExecutor.UnmountDevice(
                     attachedVolume.AttachedVolume, rc.actualStateOfWorld, rc.mounter)
                 if err != nil &&
@@ -288,20 +288,20 @@ func (rc *reconciler) reconcile() {
                     !exponentialbackoff.IsExponentialBackoff(err) {
                     // Ignore nestedpendingoperations.IsAlreadyExists and exponentialbackoff.IsExponentialBackoff errors, they are expected.
                     // Log all other errors.
-                    glog.Errorf(attachedVolume.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.UnmountDevice failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error())
+                    klog.Errorf(attachedVolume.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.UnmountDevice failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error())
                 }
                 if err == nil {
-                    glog.Infof(attachedVolume.GenerateMsgDetailed("operationExecutor.UnmountDevice started", ""))
+                    klog.Infof(attachedVolume.GenerateMsgDetailed("operationExecutor.UnmountDevice started", ""))
                 }
             } else {
                 // Volume is attached to node, detach it
                 // Kubelet not responsible for detaching or this volume has a non-attachable volume plugin.
                 if rc.controllerAttachDetachEnabled || !attachedVolume.PluginIsAttachable {
                     rc.actualStateOfWorld.MarkVolumeAsDetached(attachedVolume.VolumeName, attachedVolume.NodeName)
-                    glog.Infof(attachedVolume.GenerateMsgDetailed("Volume detached", fmt.Sprintf("DevicePath %q", attachedVolume.DevicePath)))
+                    klog.Infof(attachedVolume.GenerateMsgDetailed("Volume detached", fmt.Sprintf("DevicePath %q", attachedVolume.DevicePath)))
                 } else {
                     // Only detach if kubelet detach is enabled
-                    glog.V(5).Infof(attachedVolume.GenerateMsgDetailed("Starting operationExecutor.DetachVolume", ""))
+                    klog.V(5).Infof(attachedVolume.GenerateMsgDetailed("Starting operationExecutor.DetachVolume", ""))
                     err := rc.operationExecutor.DetachVolume(
                         attachedVolume.AttachedVolume, false /* verifySafeToDetach */, rc.actualStateOfWorld)
                     if err != nil &&
@@ -309,10 +309,10 @@ func (rc *reconciler) reconcile() {
                         !exponentialbackoff.IsExponentialBackoff(err) {
|
||||
// Log all other errors.
|
||||
glog.Errorf(attachedVolume.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.DetachVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error())
|
||||
klog.Errorf(attachedVolume.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.DetachVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error())
|
||||
}
|
||||
if err == nil {
|
||||
glog.Infof(attachedVolume.GenerateMsgDetailed("operationExecutor.DetachVolume started", ""))
|
||||
klog.Infof(attachedVolume.GenerateMsgDetailed("operationExecutor.DetachVolume started", ""))
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -369,14 +369,14 @@ func (rc *reconciler) syncStates() {
|
||||
// Get volumes information by reading the pod's directory
|
||||
podVolumes, err := getVolumesFromPodDir(rc.kubeletPodsDir)
|
||||
if err != nil {
|
||||
glog.Errorf("Cannot get volumes from disk %v", err)
|
||||
klog.Errorf("Cannot get volumes from disk %v", err)
|
||||
return
|
||||
}
|
||||
volumesNeedUpdate := make(map[v1.UniqueVolumeName]*reconstructedVolume)
|
||||
volumeNeedReport := []v1.UniqueVolumeName{}
|
||||
for _, volume := range podVolumes {
|
||||
if rc.actualStateOfWorld.VolumeExistsWithSpecName(volume.podName, volume.volumeSpecName) {
|
||||
glog.V(4).Infof("Volume exists in actual state (volume.SpecName %s, pod.UID %s), skip cleaning up mounts", volume.volumeSpecName, volume.podName)
|
||||
klog.V(4).Infof("Volume exists in actual state (volume.SpecName %s, pod.UID %s), skip cleaning up mounts", volume.volumeSpecName, volume.podName)
|
||||
// There is nothing to reconstruct
|
||||
continue
|
||||
}
|
||||
@ -387,11 +387,11 @@ func (rc *reconciler) syncStates() {
|
||||
if volumeInDSW {
|
||||
// Some pod needs the volume, don't clean it up and hope that
|
||||
// reconcile() calls SetUp and reconstructs the volume in ASW.
|
||||
glog.V(4).Infof("Volume exists in desired state (volume.SpecName %s, pod.UID %s), skip cleaning up mounts", volume.volumeSpecName, volume.podName)
|
||||
klog.V(4).Infof("Volume exists in desired state (volume.SpecName %s, pod.UID %s), skip cleaning up mounts", volume.volumeSpecName, volume.podName)
|
||||
continue
|
||||
}
|
||||
// No pod needs the volume.
|
||||
glog.Warningf("Could not construct volume information, cleanup the mounts. (pod.UID %s, volume.SpecName %s): %v", volume.podName, volume.volumeSpecName, err)
|
||||
klog.Warningf("Could not construct volume information, cleanup the mounts. (pod.UID %s, volume.SpecName %s): %v", volume.podName, volume.volumeSpecName, err)
|
||||
rc.cleanupMounts(volume)
|
||||
continue
|
||||
}
|
||||
@ -402,14 +402,14 @@ func (rc *reconciler) syncStates() {
|
||||
// this new kubelet so reconcile() calls SetUp and re-mounts the
|
||||
// volume if it's necessary.
|
||||
volumeNeedReport = append(volumeNeedReport, reconstructedVolume.volumeName)
|
||||
glog.V(4).Infof("Volume exists in desired state (volume.SpecName %s, pod.UID %s), marking as InUse", volume.volumeSpecName, volume.podName)
|
||||
klog.V(4).Infof("Volume exists in desired state (volume.SpecName %s, pod.UID %s), marking as InUse", volume.volumeSpecName, volume.podName)
|
||||
continue
|
||||
}
|
||||
// There is no pod that uses the volume.
|
||||
if rc.operationExecutor.IsOperationPending(reconstructedVolume.volumeName, nestedpendingoperations.EmptyUniquePodName) {
|
||||
glog.Warning("Volume is in pending operation, skip cleaning up mounts")
|
||||
klog.Warning("Volume is in pending operation, skip cleaning up mounts")
|
||||
}
|
||||
glog.V(2).Infof(
|
||||
klog.V(2).Infof(
|
||||
"Reconciler sync states: could not find pod information in desired state, update it in actual state: %+v",
|
||||
reconstructedVolume)
|
||||
volumesNeedUpdate[reconstructedVolume.volumeName] = reconstructedVolume
|
||||
@ -417,7 +417,7 @@ func (rc *reconciler) syncStates() {
|
||||
|
||||
if len(volumesNeedUpdate) > 0 {
|
||||
if err = rc.updateStates(volumesNeedUpdate); err != nil {
|
||||
glog.Errorf("Error occurred during reconstruct volume from disk: %v", err)
|
||||
klog.Errorf("Error occurred during reconstruct volume from disk: %v", err)
|
||||
}
|
||||
}
|
||||
if len(volumeNeedReport) > 0 {
|
||||
@ -426,7 +426,7 @@ func (rc *reconciler) syncStates() {
|
||||
}
|
||||
|
||||
func (rc *reconciler) cleanupMounts(volume podVolume) {
|
||||
glog.V(2).Infof("Reconciler sync states: could not find information (PID: %s) (Volume SpecName: %s) in desired state, clean up the mount points",
|
||||
klog.V(2).Infof("Reconciler sync states: could not find information (PID: %s) (Volume SpecName: %s) in desired state, clean up the mount points",
|
||||
volume.podName, volume.volumeSpecName)
|
||||
mountedVolume := operationexecutor.MountedVolume{
|
||||
PodName: volume.podName,
|
||||
@ -439,7 +439,7 @@ func (rc *reconciler) cleanupMounts(volume podVolume) {
|
||||
// to unmount both volume and device in the same routine.
|
||||
err := rc.operationExecutor.UnmountVolume(mountedVolume, rc.actualStateOfWorld, rc.kubeletPodsDir)
|
||||
if err != nil {
|
||||
glog.Errorf(mountedVolume.GenerateErrorDetailed(fmt.Sprintf("volumeHandler.UnmountVolumeHandler for UnmountVolume failed"), err).Error())
|
||||
klog.Errorf(mountedVolume.GenerateErrorDetailed(fmt.Sprintf("volumeHandler.UnmountVolumeHandler for UnmountVolume failed"), err).Error())
|
||||
return
|
||||
}
|
||||
}
|
||||
@ -557,13 +557,13 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume,
|
||||
func (rc *reconciler) updateDevicePath(volumesNeedUpdate map[v1.UniqueVolumeName]*reconstructedVolume) {
|
||||
node, fetchErr := rc.kubeClient.CoreV1().Nodes().Get(string(rc.nodeName), metav1.GetOptions{})
|
||||
if fetchErr != nil {
|
||||
glog.Errorf("updateStates in reconciler: could not get node status with error %v", fetchErr)
|
||||
klog.Errorf("updateStates in reconciler: could not get node status with error %v", fetchErr)
|
||||
} else {
|
||||
for _, attachedVolume := range node.Status.VolumesAttached {
|
||||
if volume, exists := volumesNeedUpdate[attachedVolume.Name]; exists {
|
||||
volume.devicePath = attachedVolume.DevicePath
|
||||
volumesNeedUpdate[attachedVolume.Name] = volume
|
||||
glog.V(4).Infof("Update devicePath from node status for volume (%q): %q", attachedVolume.Name, volume.devicePath)
|
||||
klog.V(4).Infof("Update devicePath from node status for volume (%q): %q", attachedVolume.Name, volume.devicePath)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -599,7 +599,7 @@ func (rc *reconciler) updateStates(volumesNeedUpdate map[v1.UniqueVolumeName]*re
|
||||
//TODO: the devicePath might not be correct for some volume plugins: see issue #54108
|
||||
volume.volumeName, volume.volumeSpec, "" /* nodeName */, volume.devicePath)
|
||||
if err != nil {
|
||||
glog.Errorf("Could not add volume information to actual state of world: %v", err)
|
||||
klog.Errorf("Could not add volume information to actual state of world: %v", err)
|
||||
continue
|
||||
}
|
||||
err = rc.actualStateOfWorld.MarkVolumeAsMounted(
|
||||
@ -612,22 +612,22 @@ func (rc *reconciler) updateStates(volumesNeedUpdate map[v1.UniqueVolumeName]*re
|
||||
volume.volumeGidValue,
|
||||
volume.volumeSpec)
|
||||
if err != nil {
|
||||
glog.Errorf("Could not add pod to volume information to actual state of world: %v", err)
|
||||
klog.Errorf("Could not add pod to volume information to actual state of world: %v", err)
|
||||
continue
|
||||
}
|
||||
glog.V(4).Infof("Volume: %s (pod UID %s) is marked as mounted and added into the actual state", volume.volumeName, volume.podName)
|
||||
klog.V(4).Infof("Volume: %s (pod UID %s) is marked as mounted and added into the actual state", volume.volumeName, volume.podName)
|
||||
if volume.attachablePlugin != nil {
|
||||
deviceMountPath, err := getDeviceMountPath(volume)
|
||||
if err != nil {
|
||||
glog.Errorf("Could not find device mount path for volume %s", volume.volumeName)
|
||||
klog.Errorf("Could not find device mount path for volume %s", volume.volumeName)
|
||||
continue
|
||||
}
|
||||
err = rc.actualStateOfWorld.MarkDeviceAsMounted(volume.volumeName, volume.devicePath, deviceMountPath)
|
||||
if err != nil {
|
||||
glog.Errorf("Could not mark device is mounted to actual state of world: %v", err)
|
||||
klog.Errorf("Could not mark device is mounted to actual state of world: %v", err)
|
||||
continue
|
||||
}
|
||||
glog.V(4).Infof("Volume: %s (pod UID %s) is marked device as mounted and added into the actual state", volume.volumeName, volume.podName)
|
||||
klog.V(4).Infof("Volume: %s (pod UID %s) is marked device as mounted and added into the actual state", volume.volumeName, volume.podName)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
@ -671,13 +671,13 @@ func getVolumesFromPodDir(podDir string) ([]podVolume, error) {
|
||||
volumePluginPath := path.Join(volumesDir, pluginName)
|
||||
volumePluginDirs, err := utilfile.ReadDirNoStat(volumePluginPath)
|
||||
if err != nil {
|
||||
glog.Errorf("Could not read volume plugin directory %q: %v", volumePluginPath, err)
|
||||
klog.Errorf("Could not read volume plugin directory %q: %v", volumePluginPath, err)
|
||||
continue
|
||||
}
|
||||
unescapePluginName := utilstrings.UnescapeQualifiedNameForDisk(pluginName)
|
||||
for _, volumeName := range volumePluginDirs {
|
||||
mountPath := path.Join(volumePluginPath, volumeName)
|
||||
glog.V(5).Infof("podName: %v, mount path from volume plugin directory: %v, ", podName, mountPath)
|
||||
klog.V(5).Infof("podName: %v, mount path from volume plugin directory: %v, ", podName, mountPath)
|
||||
volumes = append(volumes, podVolume{
|
||||
podName: volumetypes.UniquePodName(podName),
|
||||
volumeSpecName: volumeName,
|
||||
@ -689,6 +689,6 @@ func getVolumesFromPodDir(podDir string) ([]podVolume, error) {
|
||||
}
|
||||
}
|
||||
}
|
||||
glog.V(4).Infof("Get volumes from pod directory %q %+v", podDir, volumes)
|
||||
klog.V(4).Infof("Get volumes from pod directory %q %+v", podDir, volumes)
|
||||
return volumes, nil
|
||||
}
|
||||
|
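Reviewer note: the reconciler.go hunks above are a purely mechanical logging migration; glog is replaced by klog and every call site keeps the same shape. A standalone sketch of the same swap, for illustration only (not part of the vendored sources); the one behavioral difference worth noting is that klog registers its flags through an explicit InitFlags call rather than implicitly:

// Minimal sketch of the glog -> klog migration applied throughout this commit.
package main

import (
	"flag"

	"k8s.io/klog" // previously: "github.com/golang/glog"
)

func main() {
	klog.InitFlags(nil) // registers -v, -logtostderr, etc. on the default flag set
	flag.Parse()
	defer klog.Flush()

	klog.V(4).Infof("verbose message, shown with -v=4 or higher")
	klog.Infof("plain info message")
	klog.Warningf("warning: %s", "example")
	klog.Errorf("error: %v", "example")
}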
144
vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler_test.go
generated
vendored
@@ -29,9 +29,11 @@ import (
 	k8stypes "k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/wait"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
 	"k8s.io/client-go/kubernetes/fake"
 	core "k8s.io/client-go/testing"
 	"k8s.io/client-go/tools/record"
+	"k8s.io/klog"
 	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/kubelet/volumemanager/cache"
 	"k8s.io/kubernetes/pkg/util/mount"
@@ -439,7 +441,8 @@ func Test_Run_Positive_VolumeUnmountControllerAttachEnabled(t *testing.T) {
 // no detach/teardownDevice calls.
 func Test_Run_Positive_VolumeAttachAndMap(t *testing.T) {
 	// Enable BlockVolume feature gate
-	utilfeature.DefaultFeatureGate.Set("BlockVolume=true")
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BlockVolume, true)()

 	// Arrange
 	volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
 	dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
@@ -513,9 +516,6 @@ func Test_Run_Positive_VolumeAttachAndMap(t *testing.T) {
 		1 /* expectedGetMapDeviceCallCount */, fakePlugin))
 	assert.NoError(t, volumetesting.VerifyZeroTearDownDeviceCallCount(fakePlugin))
 	assert.NoError(t, volumetesting.VerifyZeroDetachCallCount(fakePlugin))
-
-	// Rollback feature gate to false.
-	utilfeature.DefaultFeatureGate.Set("BlockVolume=false")
 }

 // Populates desiredStateOfWorld cache with one volume/pod.
@@ -526,7 +526,8 @@ func Test_Run_Positive_VolumeAttachAndMap(t *testing.T) {
 // Verifies there are no attach/detach calls.
 func Test_Run_Positive_BlockVolumeMapControllerAttachEnabled(t *testing.T) {
 	// Enable BlockVolume feature gate
-	utilfeature.DefaultFeatureGate.Set("BlockVolume=true")
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BlockVolume, true)()

 	// Arrange
 	volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
 	dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
@@ -601,9 +602,6 @@ func Test_Run_Positive_BlockVolumeMapControllerAttachEnabled(t *testing.T) {
 		1 /* expectedGetMapDeviceCallCount */, fakePlugin))
 	assert.NoError(t, volumetesting.VerifyZeroTearDownDeviceCallCount(fakePlugin))
 	assert.NoError(t, volumetesting.VerifyZeroDetachCallCount(fakePlugin))
-
-	// Rollback feature gate to false.
-	utilfeature.DefaultFeatureGate.Set("BlockVolume=false")
 }

 // Populates desiredStateOfWorld cache with one volume/pod.
@@ -614,7 +612,8 @@ func Test_Run_Positive_BlockVolumeMapControllerAttachEnabled(t *testing.T) {
 // Verifies one detach/teardownDevice calls are issued.
 func Test_Run_Positive_BlockVolumeAttachMapUnmapDetach(t *testing.T) {
 	// Enable BlockVolume feature gate
-	utilfeature.DefaultFeatureGate.Set("BlockVolume=true")
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BlockVolume, true)()

 	// Arrange
 	volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
 	dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
@@ -698,9 +697,6 @@ func Test_Run_Positive_BlockVolumeAttachMapUnmapDetach(t *testing.T) {
 		1 /* expectedTearDownDeviceCallCount */, fakePlugin))
 	assert.NoError(t, volumetesting.VerifyDetachCallCount(
 		1 /* expectedDetachCallCount */, fakePlugin))
-
-	// Rollback feature gate to false.
-	utilfeature.DefaultFeatureGate.Set("BlockVolume=false")
 }

 // Populates desiredStateOfWorld cache with one volume/pod.
@@ -712,7 +708,8 @@ func Test_Run_Positive_BlockVolumeAttachMapUnmapDetach(t *testing.T) {
 // Verifies there are no attach/detach calls made.
 func Test_Run_Positive_VolumeUnmapControllerAttachEnabled(t *testing.T) {
 	// Enable BlockVolume feature gate
-	utilfeature.DefaultFeatureGate.Set("BlockVolume=true")
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BlockVolume, true)()

 	// Arrange
 	volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
 	dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
@@ -797,9 +794,6 @@ func Test_Run_Positive_VolumeUnmapControllerAttachEnabled(t *testing.T) {
 	assert.NoError(t, volumetesting.VerifyTearDownDeviceCallCount(
 		1 /* expectedTearDownDeviceCallCount */, fakePlugin))
 	assert.NoError(t, volumetesting.VerifyZeroDetachCallCount(fakePlugin))
-
-	// Rollback feature gate to false.
-	utilfeature.DefaultFeatureGate.Set("BlockVolume=false")
 }

 func Test_GenerateMapVolumeFunc_Plugin_Not_Found(t *testing.T) {
@@ -821,7 +815,8 @@ func Test_GenerateMapVolumeFunc_Plugin_Not_Found(t *testing.T) {
 	}

 	// Enable BlockVolume feature gate
-	utilfeature.DefaultFeatureGate.Set("BlockVolume=true")
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BlockVolume, true)()

 	for name, tc := range testCases {
 		t.Run(name, func(t *testing.T) {
 			volumePluginMgr := &volume.VolumePluginMgr{}
@@ -853,8 +848,6 @@ func Test_GenerateMapVolumeFunc_Plugin_Not_Found(t *testing.T) {
 			}
 		})
 	}
-	// Rollback feature gate to false.
-	utilfeature.DefaultFeatureGate.Set("BlockVolume=false")
 }

 func Test_GenerateUnmapVolumeFunc_Plugin_Not_Found(t *testing.T) {
@@ -876,7 +869,8 @@ func Test_GenerateUnmapVolumeFunc_Plugin_Not_Found(t *testing.T) {
 	}

 	// Enable BlockVolume feature gate
-	utilfeature.DefaultFeatureGate.Set("BlockVolume=true")
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BlockVolume, true)()

 	for name, tc := range testCases {
 		t.Run(name, func(t *testing.T) {
 			volumePluginMgr := &volume.VolumePluginMgr{}
@@ -900,8 +894,6 @@ func Test_GenerateUnmapVolumeFunc_Plugin_Not_Found(t *testing.T) {
 			}
 		})
 	}
-	// Rollback feature gate to false.
-	utilfeature.DefaultFeatureGate.Set("BlockVolume=false")
 }

 func Test_GenerateUnmapDeviceFunc_Plugin_Not_Found(t *testing.T) {
@@ -923,7 +915,8 @@ func Test_GenerateUnmapDeviceFunc_Plugin_Not_Found(t *testing.T) {
 	}

 	// Enable BlockVolume feature gate
-	utilfeature.DefaultFeatureGate.Set("BlockVolume=true")
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BlockVolume, true)()

 	for name, tc := range testCases {
 		t.Run(name, func(t *testing.T) {
 			volumePluginMgr := &volume.VolumePluginMgr{}
@@ -946,8 +939,6 @@ func Test_GenerateUnmapDeviceFunc_Plugin_Not_Found(t *testing.T) {
 			}
 		})
 	}
-	// Rollback feature gate to false.
-	utilfeature.DefaultFeatureGate.Set("BlockVolume=false")
 }
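Reviewer note: every test hunk above replaces the same two-step pattern (Set("BlockVolume=true") at the top, a manual Set("BlockVolume=false") at the bottom) with a single deferred helper. A minimal sketch of the adopted pattern, using the same imports the test file itself declares; the helper returns a restore function, so the previous gate value is rolled back even when the test fails or returns early, which the manual rollback did not guarantee:

package reconciler

import (
	"testing"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
	utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
	"k8s.io/kubernetes/pkg/features"
)

// Sketch of the feature-gate pattern adopted above: flip the gate for the
// duration of one test and restore it automatically via the returned func.
func TestWithBlockVolumeEnabled(t *testing.T) {
	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BlockVolume, true)()

	// ... test body runs with BlockVolume enabled; the deferred call
	// restores the previous value when the test finishes ...
}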

 // Populates desiredStateOfWorld cache with one volume/pod.
@@ -957,14 +948,17 @@ func Test_GenerateUnmapDeviceFunc_Plugin_Not_Found(t *testing.T) {
 // Mark volume as fsResizeRequired in ASW.
 // Verifies volume's fsResizeRequired flag is cleared later.
 func Test_Run_Positive_VolumeFSResizeControllerAttachEnabled(t *testing.T) {
-	utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.ExpandInUsePersistentVolumes))
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ExpandInUsePersistentVolumes, true)()

+	fs := v1.PersistentVolumeFilesystem
 	pv := &v1.PersistentVolume{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "pv",
 			UID:  "pvuid",
 		},
 		Spec: v1.PersistentVolumeSpec{
-			ClaimRef: &v1.ObjectReference{Name: "pvc"},
+			ClaimRef:   &v1.ObjectReference{Name: "pvc"},
+			VolumeMode: &fs,
 		},
 	}
 	pvc := &v1.PersistentVolumeClaim{
@@ -974,6 +968,7 @@ func Test_Run_Positive_VolumeFSResizeControllerAttachEnabled(t *testing.T) {
 		},
 		Spec: v1.PersistentVolumeClaimSpec{
 			VolumeName: "pv",
+			VolumeMode: &fs,
 		},
 	}
 	pod := &v1.Pod{
@@ -1129,7 +1124,7 @@ func createTestClient() *fake.Clientset {
 		VolumesAttached: []v1.AttachedVolume{
 			{
 				Name:       "fake-plugin/fake-device1",
-				DevicePath: "fake/path",
+				DevicePath: "/fake/path",
 			},
 		}},
 	}, nil
@@ -1170,3 +1165,96 @@ func createtestClientWithPVPVC(pv *v1.PersistentVolume, pvc *v1.PersistentVolume
 	})
 	return fakeClient
 }
+
+func Test_Run_Positive_VolumeMountControllerAttachEnabledRace(t *testing.T) {
+	// Arrange
+	volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
+
+	dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
+	asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
+	kubeClient := createTestClient()
+	fakeRecorder := &record.FakeRecorder{}
+	fakeHandler := volumetesting.NewBlockVolumePathHandler()
+	oex := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
+		kubeClient,
+		volumePluginMgr,
+		fakeRecorder,
+		false, /* checkNodeCapabilitiesBeforeMount */
+		fakeHandler))
+	reconciler := NewReconciler(
+		kubeClient,
+		true, /* controllerAttachDetachEnabled */
+		reconcilerLoopSleepDuration,
+		reconcilerSyncStatesSleepPeriod,
+		waitForAttachTimeout,
+		nodeName,
+		dsw,
+		asw,
+		hasAddedPods,
+		oex,
+		&mount.FakeMounter{},
+		volumePluginMgr,
+		kubeletPodsDir)
+	pod := &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "pod1",
+			UID:  "pod1uid",
+		},
+		Spec: v1.PodSpec{
+			Volumes: []v1.Volume{
+				{
+					Name: "volume-name",
+					VolumeSource: v1.VolumeSource{
+						GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
+							PDName: "fake-device1",
+						},
+					},
+				},
+			},
+		},
+	}
+
+	// Some steps are executed out of order in callbacks, follow the numbers.
+
+	// 1. Add a volume to DSW and wait until it's mounted
+	volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
+	podName := util.GetUniquePodName(pod)
+	generatedVolumeName, err := dsw.AddPodToVolume(
+		podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)
+	dsw.MarkVolumesReportedInUse([]v1.UniqueVolumeName{generatedVolumeName})
+
+	if err != nil {
+		t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
+	}
+	runReconciler(reconciler)
+	waitForMount(t, fakePlugin, generatedVolumeName, asw)
+
+	finished := make(chan interface{})
+	fakePlugin.UnmountDeviceHook = func(mountPath string) error {
+		// Act:
+		// 3. While a volume is being unmounted, add it back to the desired state of world
+		klog.Infof("UnmountDevice called")
+		generatedVolumeName, err = dsw.AddPodToVolume(
+			podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)
+		dsw.MarkVolumesReportedInUse([]v1.UniqueVolumeName{generatedVolumeName})
+		return nil
+	}
+
+	fakePlugin.WaitForAttachHook = func(spec *volume.Spec, devicePath string, pod *v1.Pod, spectimeout time.Duration) (string, error) {
+		// Assert
+		// 4. When the volume is mounted again, expect that UnmountDevice operation did not clear devicePath
+		if devicePath == "" {
+			t.Errorf("Expected WaitForAttach called with devicePath from Node.Status")
+			close(finished)
+			return "", fmt.Errorf("Expected devicePath from Node.Status")
+		}
+		close(finished)
+		return devicePath, nil
+	}
+
+	// 2. Delete the volume from DSW (and wait for callbacks)
+	dsw.DeletePodFromVolume(podName, generatedVolumeName)
+
+	<-finished
+	waitForMount(t, fakePlugin, generatedVolumeName, asw)
+}
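Reviewer note: the numbered comments in the new race test describe a forced interleaving; the test injects work into the plugin's UnmountDeviceHook (step 3) while the reconciler is tearing the volume down, then uses a channel closed from WaitForAttachHook to know when the re-mount reached the assertion (step 4). A generic, self-contained sketch of that hook-plus-channel technique, with all names invented for illustration (this is not the vendored test's API):

package main

import (
	"fmt"
	"time"
)

// fakeOp stands in for an operation under test; hook runs mid-operation,
// the way UnmountDeviceHook does in the vendored test above.
type fakeOp struct {
	hook func()
}

func (f *fakeOp) run(done chan<- string) {
	f.hook() // the racing step is injected here, while the op is in flight
	done <- "operation finished"
}

func main() {
	finished := make(chan string)
	op := &fakeOp{hook: func() { fmt.Println("racing step injected mid-operation") }}

	go op.run(finished) // kick off the operation (step 2 in the test above)

	select {
	case msg := <-finished: // assert only after the callback ran (step 4)
		fmt.Println(msg)
	case <-time.After(time.Second):
		fmt.Println("timed out waiting for callback")
	}
}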
39
vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/volume_manager.go
generated
vendored
@@ -22,7 +22,6 @@ import (
 	"strconv"
 	"time"

-	"github.com/golang/glog"
 	"k8s.io/api/core/v1"
 	k8stypes "k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/runtime"
@@ -30,12 +29,14 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/tools/record"
+	"k8s.io/klog"
 	"k8s.io/kubernetes/pkg/kubelet/config"
 	"k8s.io/kubernetes/pkg/kubelet/container"
 	"k8s.io/kubernetes/pkg/kubelet/pod"
 	"k8s.io/kubernetes/pkg/kubelet/status"
 	"k8s.io/kubernetes/pkg/kubelet/util/format"
 	"k8s.io/kubernetes/pkg/kubelet/volumemanager/cache"
+	"k8s.io/kubernetes/pkg/kubelet/volumemanager/metrics"
 	"k8s.io/kubernetes/pkg/kubelet/volumemanager/populator"
 	"k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler"
 	"k8s.io/kubernetes/pkg/util/mount"
@@ -49,21 +50,21 @@ import (
 const (
 	// reconcilerLoopSleepPeriod is the amount of time the reconciler loop waits
 	// between successive executions
-	reconcilerLoopSleepPeriod time.Duration = 100 * time.Millisecond
+	reconcilerLoopSleepPeriod = 100 * time.Millisecond

 	// reconcilerSyncStatesSleepPeriod is the amount of time the reconciler reconstruct process
 	// waits between successive executions
-	reconcilerSyncStatesSleepPeriod time.Duration = 3 * time.Minute
+	reconcilerSyncStatesSleepPeriod = 3 * time.Minute

 	// desiredStateOfWorldPopulatorLoopSleepPeriod is the amount of time the
 	// DesiredStateOfWorldPopulator loop waits between successive executions
-	desiredStateOfWorldPopulatorLoopSleepPeriod time.Duration = 100 * time.Millisecond
+	desiredStateOfWorldPopulatorLoopSleepPeriod = 100 * time.Millisecond

 	// desiredStateOfWorldPopulatorGetPodStatusRetryDuration is the amount of
 	// time the DesiredStateOfWorldPopulator loop waits between successive pod
 	// cleanup calls (to prevent calling containerruntime.GetPodStatus too
 	// frequently).
-	desiredStateOfWorldPopulatorGetPodStatusRetryDuration time.Duration = 2 * time.Second
+	desiredStateOfWorldPopulatorGetPodStatusRetryDuration = 2 * time.Second

 	// podAttachAndMountTimeout is the maximum amount of time the
 	// WaitForAttachAndMount call will wait for all volumes in the specified pod
@@ -74,11 +75,11 @@ const (
 	// request to the pod).
 	// Value is slightly offset from 2 minutes to make timeouts due to this
 	// constant recognizable.
-	podAttachAndMountTimeout time.Duration = 2*time.Minute + 3*time.Second
+	podAttachAndMountTimeout = 2*time.Minute + 3*time.Second

 	// podAttachAndMountRetryInterval is the amount of time the GetVolumesForPod
 	// call waits before retrying
-	podAttachAndMountRetryInterval time.Duration = 300 * time.Millisecond
+	podAttachAndMountRetryInterval = 300 * time.Millisecond

 	// waitForAttachTimeout is the maximum amount of time a
 	// operationexecutor.Mount call will wait for a volume to be attached.
@@ -86,7 +87,7 @@ const (
 	// minutes to complete for some volume plugins in some cases. While this
 	// operation is waiting it only blocks other operations on the same device,
 	// other devices are not affected.
-	waitForAttachTimeout time.Duration = 10 * time.Minute
+	waitForAttachTimeout = 10 * time.Minute
 )
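Reviewer note: the constant rewrites above only drop a redundant type annotation. time.Millisecond and friends are already typed time.Duration constants, so a product like 100 * time.Millisecond is typed and the declared constant keeps the same type either way. A small standalone demonstration (not vendored code):

package main

import (
	"fmt"
	"time"
)

const (
	explicit time.Duration = 100 * time.Millisecond // old style
	inferred               = 100 * time.Millisecond // new style, identical type
)

func main() {
	fmt.Printf("%T %v\n", explicit, explicit) // time.Duration 100ms
	fmt.Printf("%T %v\n", inferred, inferred) // time.Duration 100ms
}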

 // VolumeManager runs a set of asynchronous loops that figure out which volumes
@@ -242,13 +243,15 @@ func (vm *volumeManager) Run(sourcesReady config.SourcesReady, stopCh <-chan str
 	defer runtime.HandleCrash()

 	go vm.desiredStateOfWorldPopulator.Run(sourcesReady, stopCh)
-	glog.V(2).Infof("The desired_state_of_world populator starts")
+	klog.V(2).Infof("The desired_state_of_world populator starts")

-	glog.Infof("Starting Kubelet Volume Manager")
+	klog.Infof("Starting Kubelet Volume Manager")
 	go vm.reconciler.Run(stopCh)

+	metrics.Register(vm.actualStateOfWorld, vm.desiredStateOfWorld, vm.volumePluginMgr)
+
 	<-stopCh
-	glog.Infof("Shutting down Kubelet Volume Manager")
+	klog.Infof("Shutting down Kubelet Volume Manager")
 }
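Reviewer note: Run now wires up the new volumemanager/metrics package (also added to the BUILD files in this commit), passing it the actual state, desired state, and plugin manager. As a rough, hypothetical sketch of the shape such a Register helper usually has — a Prometheus custom collector registered behind sync.Once so repeated Run calls cannot double-register — with the package layout, metric name, and all identifiers invented for illustration (this is not the vendored implementation):

package metrics

import (
	"sync"

	"github.com/prometheus/client_golang/prometheus"
)

var registerOnce sync.Once

// volumeCountCollector reports a gauge computed on scrape; in the real
// package the value would be derived from the state-of-world caches.
type volumeCountCollector struct {
	desc  *prometheus.Desc
	count func() float64
}

func (c *volumeCountCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.desc }

func (c *volumeCountCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, c.count())
}

// Register wires the collector into the default registry exactly once.
func Register(count func() float64) {
	registerOnce.Do(func() {
		prometheus.MustRegister(&volumeCountCollector{
			desc: prometheus.NewDesc("volume_manager_total_volumes",
				"Number of volumes tracked by the volume manager", nil, nil),
			count: count,
		})
	})
}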

 func (vm *volumeManager) GetMountedVolumesForPod(podName types.UniquePodName) container.VolumeMap {
@@ -292,9 +295,9 @@ func (vm *volumeManager) GetVolumesInUse() []v1.UniqueVolumeName {
 	// that volumes are marked in use as soon as the decision is made that the
 	// volume *should* be attached to this node until it is safely unmounted.
 	desiredVolumes := vm.desiredStateOfWorld.GetVolumesToMount()
-	mountedVolumes := vm.actualStateOfWorld.GetGloballyMountedVolumes()
-	volumesToReportInUse := make([]v1.UniqueVolumeName, 0, len(desiredVolumes)+len(mountedVolumes))
-	desiredVolumesMap := make(map[v1.UniqueVolumeName]bool, len(desiredVolumes)+len(mountedVolumes))
+	allAttachedVolumes := vm.actualStateOfWorld.GetAttachedVolumes()
+	volumesToReportInUse := make([]v1.UniqueVolumeName, 0, len(desiredVolumes)+len(allAttachedVolumes))
+	desiredVolumesMap := make(map[v1.UniqueVolumeName]bool, len(desiredVolumes)+len(allAttachedVolumes))

 	for _, volume := range desiredVolumes {
 		if volume.PluginIsAttachable {
@@ -305,7 +308,7 @@ func (vm *volumeManager) GetVolumesInUse() []v1.UniqueVolumeName {
 		}
 	}

-	for _, volume := range mountedVolumes {
+	for _, volume := range allAttachedVolumes {
 		if volume.PluginIsAttachable {
 			if _, exists := desiredVolumesMap[volume.VolumeName]; !exists {
 				volumesToReportInUse = append(volumesToReportInUse, volume.VolumeName)
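Reviewer note: the GetVolumesInUse rewrite above switches the second input from globally mounted volumes to all attached volumes, but keeps the same union-without-duplicates shape: desired volumes are recorded in a map, and attached volumes are appended only if not already present. A standalone sketch of that pattern with simplified types:

package main

import "fmt"

// union appends from the second list only names the first list did not
// already contribute, mirroring the desiredVolumesMap check above.
func union(desired, attached []string) []string {
	out := make([]string, 0, len(desired)+len(attached))
	seen := make(map[string]bool, len(desired)+len(attached))
	for _, v := range desired {
		if !seen[v] {
			seen[v] = true
			out = append(out, v)
		}
	}
	for _, v := range attached {
		if !seen[v] {
			out = append(out, v)
		}
	}
	return out
}

func main() {
	fmt.Println(union([]string{"a", "b"}, []string{"b", "c"})) // [a b c]
}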
@@ -344,7 +347,7 @@ func (vm *volumeManager) WaitForAttachAndMount(pod *v1.Pod) error {
 		return nil
 	}

-	glog.V(3).Infof("Waiting for volumes to attach and mount for pod %q", format.Pod(pod))
+	klog.V(3).Infof("Waiting for volumes to attach and mount for pod %q", format.Pod(pod))
 	uniquePodName := util.GetUniquePodName(pod)

 	// Some pods expect to have Setup called over and over again to update.
@@ -352,7 +355,7 @@ func (vm *volumeManager) WaitForAttachAndMount(pod *v1.Pod) error {
 	// like Downward API, depend on this to update the contents of the volume).
 	vm.desiredStateOfWorldPopulator.ReprocessPod(uniquePodName)

-	err := wait.Poll(
+	err := wait.PollImmediate(
 		podAttachAndMountRetryInterval,
 		podAttachAndMountTimeout,
 		vm.verifyVolumesMountedFunc(uniquePodName, expectedVolumes))
@@ -377,7 +380,7 @@ func (vm *volumeManager) WaitForAttachAndMount(pod *v1.Pod) error {
 			unattachedVolumes)
 	}

-	glog.V(3).Infof("All volumes are attached and mounted for pod %q", format.Pod(pod))
+	klog.V(3).Infof("All volumes are attached and mounted for pod %q", format.Pod(pod))
 	return nil
 }

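Reviewer note: the wait.Poll to wait.PollImmediate change above is a latency fix. Poll sleeps one full interval before the first condition check, while PollImmediate checks first and only then enters the interval loop, so a pod whose volumes are already mounted no longer pays the 300ms podAttachAndMountRetryInterval for nothing. A standalone sketch of the difference, assuming the k8s.io/apimachinery wait package of this vintage:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	cond := func() (bool, error) { return true, nil } // already satisfied

	start := time.Now()
	_ = wait.PollImmediate(300*time.Millisecond, 5*time.Second, cond)
	fmt.Println("PollImmediate returned after", time.Since(start)) // ~0ms

	start = time.Now()
	_ = wait.Poll(300*time.Millisecond, 5*time.Second, cond)
	fmt.Println("Poll returned after", time.Since(start)) // ~300ms, one interval first
}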
4
vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/volume_manager_test.go
generated
vendored
@@ -167,6 +167,7 @@ func TestGetExtraSupplementalGroupsForPod(t *testing.T) {
 	}

 	for _, tc := range cases {
+		fs := v1.PersistentVolumeFilesystem
 		pv := &v1.PersistentVolume{
 			ObjectMeta: metav1.ObjectMeta{
 				Name: "pvA",
@@ -183,6 +184,7 @@ func TestGetExtraSupplementalGroupsForPod(t *testing.T) {
 				ClaimRef: &v1.ObjectReference{
 					Name: claim.ObjectMeta.Name,
 				},
+				VolumeMode: &fs,
 			},
 		}
 		kubeClient := fake.NewSimpleClientset(node, pod, pv, claim)
@@ -273,6 +275,7 @@ func createObjects() (*v1.Node, *v1.Pod, *v1.PersistentVolume, *v1.PersistentVol
 			},
 		},
 	}
+	fs := v1.PersistentVolumeFilesystem
 	pv := &v1.PersistentVolume{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "pvA",
@@ -286,6 +289,7 @@ func createObjects() (*v1.Node, *v1.Pod, *v1.PersistentVolume, *v1.PersistentVol
 			ClaimRef: &v1.ObjectReference{
 				Name: "claimA",
 			},
+			VolumeMode: &fs,
 		},
 	}
 	claim := &v1.PersistentVolumeClaim{
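Reviewer note: the volume_manager_test.go hunks above give every fixture PV an explicit Filesystem volume mode, which the block-volume-aware code paths now consult. The local fs variable exists because VolumeMode is a pointer field and the address of the constant cannot be taken directly. A standalone sketch of the fixture shape:

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	fs := v1.PersistentVolumeFilesystem // named variable so its address can be taken
	pv := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{Name: "pvA"},
		Spec: v1.PersistentVolumeSpec{
			ClaimRef:   &v1.ObjectReference{Name: "claimA"},
			VolumeMode: &fs,
		},
	}
	fmt.Println(*pv.Spec.VolumeMode) // Filesystem
}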