vendor updates

Author: Serguei Bezverkhi
Date: 2018-03-06 17:33:18 -05:00
Parent: 4b3ebc171b
Commit: e9033989a0
5854 changed files with 248382 additions and 119809 deletions

pkg/kubelet/volumemanager/BUILD

@ -24,7 +24,7 @@ go_library(
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/operationexecutor:go_default_library",
"//pkg/volume/util/types:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//pkg/volume/util/volumepathhandler:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
@ -39,8 +39,7 @@ go_library(
go_test(
name = "go_default_test",
srcs = ["volume_manager_test.go"],
importpath = "k8s.io/kubernetes/pkg/kubelet/volumemanager",
library = ":go_default_library",
embed = [":go_default_library"],
deps = [
"//pkg/kubelet/config:go_default_library",
"//pkg/kubelet/configmap:go_default_library",
@ -53,8 +52,8 @@ go_test(
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/types:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",

pkg/kubelet/volumemanager/cache/BUILD

@ -15,9 +15,9 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache",
deps = [
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/operationexecutor:go_default_library",
"//pkg/volume/util/types:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
@ -30,13 +30,12 @@ go_test(
"actual_state_of_world_test.go",
"desired_state_of_world_test.go",
],
importpath = "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache",
library = ":go_default_library",
embed = [":go_default_library"],
deps = [
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/types:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
],

pkg/kubelet/volumemanager/cache/actual_state_of_world.go

@ -29,9 +29,9 @@ import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
// ActualStateOfWorld defines a set of thread-safe operations for the kubelet
@ -73,7 +73,7 @@ type ActualStateOfWorld interface {
// must be unmounted prior to detach.
// If a volume with the name volumeName does not exist in the list of
// attached volumes, an error is returned.
SetVolumeGloballyMounted(volumeName v1.UniqueVolumeName, globallyMounted bool) error
SetVolumeGloballyMounted(volumeName v1.UniqueVolumeName, globallyMounted bool, devicePath, deviceMountPath string) error
// DeletePodFromVolume removes the given pod from the given volume in the
// cache indicating the volume has been successfully unmounted from the pod.
@ -109,6 +109,13 @@ type ActualStateOfWorld interface {
// volumes that do not need to update contents should not fail.
PodExistsInVolume(podName volumetypes.UniquePodName, volumeName v1.UniqueVolumeName) (bool, string, error)
// VolumeExistsWithSpecName returns true if the given volume specified with the
// volume spec name (a.k.a., InnerVolumeSpecName) exists in the list of
// volumes that are attached to this node.
// If a pod with the same name does not exist under the specified
// volume, false is returned.
VolumeExistsWithSpecName(podName volumetypes.UniquePodName, volumeSpecName string) bool
// VolumeExists returns true if the given volume exists in the list of
// attached volumes in the cache, indicating the volume is attached to this
// node.
@ -240,6 +247,10 @@ type attachedVolume struct {
// devicePath contains the path on the node where the volume is attached for
// attachable volumes
devicePath string
// deviceMountPath contains the path on the node where the device should
// be mounted after it is attached.
deviceMountPath string
}
// The mountedPod object represents a pod for which the kubelet volume manager
@ -318,13 +329,13 @@ func (asw *actualStateOfWorld) MarkVolumeAsUnmounted(
}
func (asw *actualStateOfWorld) MarkDeviceAsMounted(
volumeName v1.UniqueVolumeName) error {
return asw.SetVolumeGloballyMounted(volumeName, true /* globallyMounted */)
volumeName v1.UniqueVolumeName, devicePath, deviceMountPath string) error {
return asw.SetVolumeGloballyMounted(volumeName, true /* globallyMounted */, devicePath, deviceMountPath)
}
func (asw *actualStateOfWorld) MarkDeviceAsUnmounted(
volumeName v1.UniqueVolumeName) error {
return asw.SetVolumeGloballyMounted(volumeName, false /* globallyMounted */)
return asw.SetVolumeGloballyMounted(volumeName, false /* globallyMounted */, "", "")
}
// addVolume adds the given volume to the cache indicating the specified
@ -347,7 +358,7 @@ func (asw *actualStateOfWorld) addVolume(
}
if len(volumeName) == 0 {
volumeName, err = volumehelper.GetUniqueVolumeNameFromSpec(volumePlugin, volumeSpec)
volumeName, err = util.GetUniqueVolumeNameFromSpec(volumePlugin, volumeSpec)
if err != nil {
return fmt.Errorf(
"failed to GetUniqueVolumeNameFromSpec for volumeSpec %q using volume plugin %q err=%v",
@ -454,7 +465,7 @@ func (asw *actualStateOfWorld) MarkRemountRequired(
}
func (asw *actualStateOfWorld) SetVolumeGloballyMounted(
volumeName v1.UniqueVolumeName, globallyMounted bool) error {
volumeName v1.UniqueVolumeName, globallyMounted bool, devicePath, deviceMountPath string) error {
asw.Lock()
defer asw.Unlock()
@ -466,6 +477,8 @@ func (asw *actualStateOfWorld) SetVolumeGloballyMounted(
}
volumeObj.globallyMounted = globallyMounted
volumeObj.deviceMountPath = deviceMountPath
volumeObj.devicePath = devicePath
asw.attachedVolumes[volumeName] = volumeObj
return nil
}
@ -529,6 +542,19 @@ func (asw *actualStateOfWorld) PodExistsInVolume(
return podExists, volumeObj.devicePath, nil
}
func (asw *actualStateOfWorld) VolumeExistsWithSpecName(podName volumetypes.UniquePodName, volumeSpecName string) bool {
asw.RLock()
defer asw.RUnlock()
for _, volumeObj := range asw.attachedVolumes {
for name := range volumeObj.mountedPods {
if podName == name && volumeObj.spec.Name() == volumeSpecName {
return true
}
}
}
return false
}
func (asw *actualStateOfWorld) VolumeExists(
volumeName v1.UniqueVolumeName) bool {
asw.RLock()
@ -625,8 +651,11 @@ func (asw *actualStateOfWorld) newAttachedVolume(
VolumeSpec: attachedVolume.spec,
NodeName: asw.nodeName,
PluginIsAttachable: attachedVolume.pluginIsAttachable,
DevicePath: attachedVolume.devicePath},
GloballyMounted: attachedVolume.globallyMounted}
DevicePath: attachedVolume.devicePath,
DeviceMountPath: attachedVolume.deviceMountPath,
PluginName: attachedVolume.pluginName},
GloballyMounted: attachedVolume.globallyMounted,
}
}
// Compile-time check to ensure volumeNotAttachedError implements the error interface
@ -691,5 +720,6 @@ func getMountedVolume(
Mounter: mountedPod.mounter,
BlockVolumeMapper: mountedPod.blockVolumeMapper,
VolumeGidValue: mountedPod.volumeGidValue,
VolumeSpec: attachedVolume.spec}}
VolumeSpec: attachedVolume.spec,
DeviceMountPath: attachedVolume.deviceMountPath}}
}
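
To illustrate the widened contract above, a minimal, self-contained sketch follows (toy types, not the kubelet's; only the names mirror the diff). It shows how MarkDeviceAsMounted and MarkDeviceAsUnmounted now funnel devicePath and deviceMountPath through SetVolumeGloballyMounted into the cached attachedVolume entry, with unmounting clearing both paths:

package main

import "fmt"

type UniqueVolumeName string

// attachedVolume mirrors only the fields this commit touches; the real
// struct carries many more.
type attachedVolume struct {
	globallyMounted bool
	devicePath      string
	deviceMountPath string
}

type exampleCache struct {
	attachedVolumes map[UniqueVolumeName]attachedVolume
}

// SetVolumeGloballyMounted now records both paths alongside the flag,
// as in the diff above.
func (c *exampleCache) SetVolumeGloballyMounted(name UniqueVolumeName, globallyMounted bool, devicePath, deviceMountPath string) error {
	obj, ok := c.attachedVolumes[name]
	if !ok {
		return fmt.Errorf("no volume with the name %q exists in the list of attached volumes", name)
	}
	obj.globallyMounted = globallyMounted
	obj.devicePath = devicePath
	obj.deviceMountPath = deviceMountPath
	c.attachedVolumes[name] = obj
	return nil
}

// Mounting supplies the paths; unmounting clears them with empty strings,
// matching the two delegating methods in the diff.
func (c *exampleCache) MarkDeviceAsMounted(name UniqueVolumeName, devicePath, deviceMountPath string) error {
	return c.SetVolumeGloballyMounted(name, true, devicePath, deviceMountPath)
}

func (c *exampleCache) MarkDeviceAsUnmounted(name UniqueVolumeName) error {
	return c.SetVolumeGloballyMounted(name, false, "", "")
}

func main() {
	c := &exampleCache{attachedVolumes: map[UniqueVolumeName]attachedVolume{"vol-1": {}}}
	_ = c.MarkDeviceAsMounted("vol-1", "fake/device/path", "fake/device/mount/path")
	fmt.Printf("%+v\n", c.attachedVolumes["vol-1"])
}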

pkg/kubelet/volumemanager/cache/actual_state_of_world_test.go

@ -23,8 +23,8 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/volume"
volumetesting "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/kubernetes/pkg/volume/util"
volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
var emptyVolumeName = v1.UniqueVolumeName("")
@ -56,7 +56,7 @@ func Test_MarkVolumeAsAttached_Positive_NewVolume(t *testing.T) {
}
volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
devicePath := "fake/device/path"
generatedVolumeName, _ := volumehelper.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)
generatedVolumeName, _ := util.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)
// Act
err := asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
@ -143,7 +143,7 @@ func Test_MarkVolumeAsAttached_Positive_ExistingVolume(t *testing.T) {
},
}
volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
generatedVolumeName, _ := volumehelper.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)
generatedVolumeName, _ := util.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)
err := asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
if err != nil {
@ -191,13 +191,13 @@ func Test_AddPodToVolume_Positive_ExistingVolumeNewNode(t *testing.T) {
},
}
volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
generatedVolumeName, err := volumehelper.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)
generatedVolumeName, err := util.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)
err = asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
if err != nil {
t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err)
}
podName := volumehelper.GetUniquePodName(pod)
podName := util.GetUniquePodName(pod)
mounter, err := plugin.NewMounter(volumeSpec, pod, volume.VolumeOptions{})
if err != nil {
@ -222,6 +222,7 @@ func Test_AddPodToVolume_Positive_ExistingVolumeNewNode(t *testing.T) {
verifyVolumeDoesntExistInUnmountedVolumes(t, generatedVolumeName, asw)
verifyVolumeDoesntExistInGloballyMountedVolumes(t, generatedVolumeName, asw)
verifyPodExistsInVolumeAsw(t, podName, generatedVolumeName, "fake/device/path" /* expectedDevicePath */, asw)
verifyVolumeExistsWithSpecNameInVolumeAsw(t, podName, volumeSpec.Name(), asw)
}
// Populates data struct with a volume
@ -254,14 +255,14 @@ func Test_AddPodToVolume_Positive_ExistingVolumeExistingNode(t *testing.T) {
}
volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
generatedVolumeName, err := volumehelper.GetUniqueVolumeNameFromSpec(
generatedVolumeName, err := util.GetUniqueVolumeNameFromSpec(
plugin, volumeSpec)
err = asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
if err != nil {
t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err)
}
podName := volumehelper.GetUniquePodName(pod)
podName := util.GetUniquePodName(pod)
mounter, err := plugin.NewMounter(volumeSpec, pod, volume.VolumeOptions{})
if err != nil {
@ -292,9 +293,10 @@ func Test_AddPodToVolume_Positive_ExistingVolumeExistingNode(t *testing.T) {
verifyVolumeDoesntExistInUnmountedVolumes(t, generatedVolumeName, asw)
verifyVolumeDoesntExistInGloballyMountedVolumes(t, generatedVolumeName, asw)
verifyPodExistsInVolumeAsw(t, podName, generatedVolumeName, "fake/device/path" /* expectedDevicePath */, asw)
verifyVolumeExistsWithSpecNameInVolumeAsw(t, podName, volumeSpec.Name(), asw)
}
// Calls AddPodToVolume() to add pod to empty data stuct
// Calls AddPodToVolume() to add pod to empty data struct
// Verifies call fails with "volume does not exist" error.
func Test_AddPodToVolume_Negative_VolumeDoesntExist(t *testing.T) {
// Arrange
@ -337,10 +339,10 @@ func Test_AddPodToVolume_Negative_VolumeDoesntExist(t *testing.T) {
err)
}
volumeName, err := volumehelper.GetUniqueVolumeNameFromSpec(
volumeName, err := util.GetUniqueVolumeNameFromSpec(
plugin, volumeSpec)
podName := volumehelper.GetUniquePodName(pod)
podName := util.GetUniquePodName(pod)
mounter, err := plugin.NewMounter(volumeSpec, pod, volume.VolumeOptions{})
if err != nil {
@ -370,6 +372,7 @@ func Test_AddPodToVolume_Negative_VolumeDoesntExist(t *testing.T) {
volumeName,
false, /* expectVolumeToExist */
asw)
verifyVolumeDoesntExistWithSpecNameInVolumeAsw(t, podName, volumeSpec.Name(), asw)
}
// Calls MarkVolumeAsAttached() once to add volume
@ -400,7 +403,8 @@ func Test_MarkDeviceAsMounted_Positive_NewVolume(t *testing.T) {
}
volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
devicePath := "fake/device/path"
generatedVolumeName, err := volumehelper.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)
deviceMountPath := "fake/device/mount/path"
generatedVolumeName, err := util.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)
err = asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
if err != nil {
@ -408,7 +412,7 @@ func Test_MarkDeviceAsMounted_Positive_NewVolume(t *testing.T) {
}
// Act
err = asw.MarkDeviceAsMounted(generatedVolumeName)
err = asw.MarkDeviceAsMounted(generatedVolumeName, devicePath, deviceMountPath)
// Assert
if err != nil {
@ -546,3 +550,33 @@ func verifyPodDoesntExistInVolumeAsw(
devicePath)
}
}
func verifyVolumeExistsWithSpecNameInVolumeAsw(
t *testing.T,
expectedPodName volumetypes.UniquePodName,
expectedVolumeName string,
asw ActualStateOfWorld) {
podExistsInVolume :=
asw.VolumeExistsWithSpecName(expectedPodName, expectedVolumeName)
if !podExistsInVolume {
t.Fatalf(
"ASW VolumeExistsWithSpecName result invalid. Expected: <true> Actual: <%v>",
podExistsInVolume)
}
}
func verifyVolumeDoesntExistWithSpecNameInVolumeAsw(
t *testing.T,
podToCheck volumetypes.UniquePodName,
volumeToCheck string,
asw ActualStateOfWorld) {
podExistsInVolume :=
asw.VolumeExistsWithSpecName(podToCheck, volumeToCheck)
if podExistsInVolume {
t.Fatalf(
"ASW VolumeExistsWithSpecName result invalid. Expected: <false> Actual: <%v>",
podExistsInVolume)
}
}

pkg/kubelet/volumemanager/cache/desired_state_of_world.go

@ -26,9 +26,9 @@ import (
"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
"k8s.io/kubernetes/pkg/volume/util/types"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
// DesiredStateOfWorld defines a set of thread-safe operations for the kubelet
@ -98,6 +98,13 @@ type DesiredStateOfWorld interface {
// with pod's unique name. This map can be used to determine which pod is currently
// in desired state of world.
GetPods() map[types.UniquePodName]bool
// VolumeExistsWithSpecName returns true if the given volume specified with the
// volume spec name (a.k.a., InnerVolumeSpecName) exists in the list of
// volumes that should be attached to this node.
// If a pod with the same name does not exist under the specified
// volume, false is returned.
VolumeExistsWithSpecName(podName types.UniquePodName, volumeSpecName string) bool
}
// VolumeToMount represents a volume that is attached to this node and needs to
@ -199,7 +206,7 @@ func (dsw *desiredStateOfWorld) AddPodToVolume(
// For attachable volumes, use the unique volume name as reported by
// the plugin.
volumeName, err =
volumehelper.GetUniqueVolumeNameFromSpec(volumePlugin, volumeSpec)
util.GetUniqueVolumeNameFromSpec(volumePlugin, volumeSpec)
if err != nil {
return "", fmt.Errorf(
"failed to GetUniqueVolumeNameFromSpec for volumeSpec %q using volume plugin %q err=%v",
@ -210,7 +217,7 @@ func (dsw *desiredStateOfWorld) AddPodToVolume(
} else {
// For non-attachable volumes, generate a unique name based on the pod
// namespace and name and the name of the volume within the pod.
volumeName = volumehelper.GetUniqueVolumeNameForNonAttachableVolume(podName, volumePlugin, volumeSpec)
volumeName = util.GetUniqueVolumeNameForNonAttachableVolume(podName, volumePlugin, volumeSpec)
}
volumeObj, volumeExists := dsw.volumesToMount[volumeName]
@ -234,7 +241,6 @@ func (dsw *desiredStateOfWorld) AddPodToVolume(
spec: volumeSpec,
outerVolumeSpecName: outerVolumeSpecName,
}
return volumeName, nil
}
@ -303,6 +309,19 @@ func (dsw *desiredStateOfWorld) PodExistsInVolume(
return podExists
}
func (dsw *desiredStateOfWorld) VolumeExistsWithSpecName(podName types.UniquePodName, volumeSpecName string) bool {
dsw.RLock()
defer dsw.RUnlock()
for _, volumeObj := range dsw.volumesToMount {
for name, podObj := range volumeObj.podsToMount {
if podName == name && podObj.spec.Name() == volumeSpecName {
return true
}
}
}
return false
}
func (dsw *desiredStateOfWorld) GetPods() map[types.UniquePodName]bool {
dsw.RLock()
defer dsw.RUnlock()
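
Both new VolumeExistsWithSpecName methods perform the same kind of lookup; the hedged reduction below (toy types, shaped after the implementations in the diff, not the real cache) shows the matching rule: a hit requires both the pod's unique name and the volume's spec name (the InnerVolumeSpecName) to match.

package main

import "fmt"

type UniquePodName string

type volumeEntry struct {
	specName string                 // volume.Spec.Name(), i.e. InnerVolumeSpecName
	pods     map[UniquePodName]bool // pods mounted to / to be mounted to this volume
}

type world struct {
	volumes map[string]volumeEntry // keyed by unique volume name
}

// VolumeExistsWithSpecName scans every cached volume and its pods, returning
// true only when both the pod name and the spec name match, as in the diff.
func (w *world) VolumeExistsWithSpecName(podName UniquePodName, volumeSpecName string) bool {
	for _, v := range w.volumes {
		for name := range v.pods {
			if podName == name && v.specName == volumeSpecName {
				return true
			}
		}
	}
	return false
}

func main() {
	w := &world{volumes: map[string]volumeEntry{
		"fake-plugin/vol-1": {specName: "vol-1", pods: map[UniquePodName]bool{"ns/pod-1": true}},
	}}
	fmt.Println(w.VolumeExistsWithSpecName("ns/pod-1", "vol-1")) // true
	fmt.Println(w.VolumeExistsWithSpecName("ns/pod-2", "vol-1")) // false
}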

pkg/kubelet/volumemanager/cache/desired_state_of_world_test.go

@ -23,8 +23,8 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/volume"
volumetesting "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/kubernetes/pkg/volume/util"
volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
// Calls AddPodToVolume() to add new pod to new volume
@ -54,7 +54,7 @@ func Test_AddPodToVolume_Positive_NewPodNewVolume(t *testing.T) {
}
volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
podName := volumehelper.GetUniquePodName(pod)
podName := util.GetUniquePodName(pod)
// Act
generatedVolumeName, err := dsw.AddPodToVolume(
@ -69,6 +69,7 @@ func Test_AddPodToVolume_Positive_NewPodNewVolume(t *testing.T) {
verifyVolumeExistsInVolumesToMount(
t, generatedVolumeName, false /* expectReportedInUse */, dsw)
verifyPodExistsInVolumeDsw(t, podName, generatedVolumeName, dsw)
verifyVolumeExistsWithSpecNameInVolumeDsw(t, podName, volumeSpec.Name(), dsw)
}
// Calls AddPodToVolume() twice to add the same pod to the same volume
@ -98,7 +99,7 @@ func Test_AddPodToVolume_Positive_ExistingPodExistingVolume(t *testing.T) {
}
volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
podName := volumehelper.GetUniquePodName(pod)
podName := util.GetUniquePodName(pod)
// Act
generatedVolumeName, err := dsw.AddPodToVolume(
@ -113,6 +114,7 @@ func Test_AddPodToVolume_Positive_ExistingPodExistingVolume(t *testing.T) {
verifyVolumeExistsInVolumesToMount(
t, generatedVolumeName, false /* expectReportedInUse */, dsw)
verifyPodExistsInVolumeDsw(t, podName, generatedVolumeName, dsw)
verifyVolumeExistsWithSpecNameInVolumeDsw(t, podName, volumeSpec.Name(), dsw)
}
// Populates data struct with a new volume/pod
@ -142,7 +144,7 @@ func Test_DeletePodFromVolume_Positive_PodExistsVolumeExists(t *testing.T) {
}
volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
podName := volumehelper.GetUniquePodName(pod)
podName := util.GetUniquePodName(pod)
generatedVolumeName, err := dsw.AddPodToVolume(
podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)
if err != nil {
@ -160,6 +162,7 @@ func Test_DeletePodFromVolume_Positive_PodExistsVolumeExists(t *testing.T) {
verifyVolumeDoesntExist(t, generatedVolumeName, dsw)
verifyVolumeDoesntExistInVolumesToMount(t, generatedVolumeName, dsw)
verifyPodDoesntExistInVolumeDsw(t, podName, generatedVolumeName, dsw)
verifyVolumeDoesntExistWithSpecNameInVolumeDsw(t, podName, volumeSpec.Name(), dsw)
}
// Calls AddPodToVolume() to add three new volumes to data struct
@ -194,7 +197,7 @@ func Test_MarkVolumesReportedInUse_Positive_NewPodNewVolume(t *testing.T) {
}
volume1Spec := &volume.Spec{Volume: &pod1.Spec.Volumes[0]}
pod1Name := volumehelper.GetUniquePodName(pod1)
pod1Name := util.GetUniquePodName(pod1)
pod2 := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@ -216,7 +219,7 @@ func Test_MarkVolumesReportedInUse_Positive_NewPodNewVolume(t *testing.T) {
}
volume2Spec := &volume.Spec{Volume: &pod2.Spec.Volumes[0]}
pod2Name := volumehelper.GetUniquePodName(pod2)
pod2Name := util.GetUniquePodName(pod2)
pod3 := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@ -238,7 +241,7 @@ func Test_MarkVolumesReportedInUse_Positive_NewPodNewVolume(t *testing.T) {
}
volume3Spec := &volume.Spec{Volume: &pod3.Spec.Volumes[0]}
pod3Name := volumehelper.GetUniquePodName(pod3)
pod3Name := util.GetUniquePodName(pod3)
generatedVolume1Name, err := dsw.AddPodToVolume(
pod1Name, pod1, volume1Spec, volume1Spec.Name(), "" /* volumeGidValue */)
@ -380,3 +383,29 @@ func verifyPodDoesntExistInVolumeDsw(
podExistsInVolume)
}
}
func verifyVolumeExistsWithSpecNameInVolumeDsw(
t *testing.T,
expectedPodName volumetypes.UniquePodName,
expectedVolumeSpecName string,
dsw DesiredStateOfWorld) {
if podExistsInVolume := dsw.VolumeExistsWithSpecName(
expectedPodName, expectedVolumeSpecName); !podExistsInVolume {
t.Fatalf(
"DSW VolumeExistsWithSpecNam returned incorrect value. Expected: <true> Actual: <%v>",
podExistsInVolume)
}
}
func verifyVolumeDoesntExistWithSpecNameInVolumeDsw(
t *testing.T,
expectedPodName volumetypes.UniquePodName,
expectedVolumeSpecName string,
dsw DesiredStateOfWorld) {
if podExistsInVolume := dsw.VolumeExistsWithSpecName(
expectedPodName, expectedVolumeSpecName); podExistsInVolume {
t.Fatalf(
"DSW VolumeExistsWithSpecNam returned incorrect value. Expected: <true> Actual: <%v>",
podExistsInVolume)
}
}

pkg/kubelet/volumemanager/populator/BUILD

@ -21,7 +21,6 @@ go_library(
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/types:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@ -48,8 +47,7 @@ filegroup(
go_test(
name = "go_default_test",
srcs = ["desired_state_of_world_populator_test.go"],
importpath = "k8s.io/kubernetes/pkg/kubelet/volumemanager/populator",
library = ":go_default_library",
embed = [":go_default_library"],
deps = [
"//pkg/kubelet/configmap:go_default_library",
"//pkg/kubelet/container/testing:go_default_library",
@ -60,8 +58,8 @@ go_test(
"//pkg/kubelet/status/testing:go_default_library",
"//pkg/kubelet/volumemanager/cache:go_default_library",
"//pkg/volume/testing:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/types:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",

pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go

@ -41,9 +41,8 @@ import (
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/kubelet/volumemanager/cache"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util"
volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
// DesiredStateOfWorldPopulator periodically loops through the list of active
@ -84,6 +83,7 @@ func NewDesiredStateOfWorldPopulator(
podManager pod.Manager,
podStatusProvider status.PodStatusProvider,
desiredStateOfWorld cache.DesiredStateOfWorld,
actualStateOfWorld cache.ActualStateOfWorld,
kubeContainerRuntime kubecontainer.Runtime,
keepTerminatedPodVolumes bool) DesiredStateOfWorldPopulator {
return &desiredStateOfWorldPopulator{
@ -93,6 +93,7 @@ func NewDesiredStateOfWorldPopulator(
podManager: podManager,
podStatusProvider: podStatusProvider,
desiredStateOfWorld: desiredStateOfWorld,
actualStateOfWorld: actualStateOfWorld,
pods: processedPods{
processedPods: make(map[volumetypes.UniquePodName]bool)},
kubeContainerRuntime: kubeContainerRuntime,
@ -109,6 +110,7 @@ type desiredStateOfWorldPopulator struct {
podManager pod.Manager
podStatusProvider status.PodStatusProvider
desiredStateOfWorld cache.DesiredStateOfWorld
actualStateOfWorld cache.ActualStateOfWorld
pods processedPods
kubeContainerRuntime kubecontainer.Runtime
timeOfLastGetPodStatus time.Time
@ -124,6 +126,7 @@ type processedPods struct {
func (dswp *desiredStateOfWorldPopulator) Run(sourcesReady config.SourcesReady, stopCh <-chan struct{}) {
// Wait for the completion of a loop that started after sources are all ready, then set hasAddedPods accordingly
glog.Infof("Desired state populator starts to run")
wait.PollUntil(dswp.loopSleepDuration, func() (bool, error) {
done := sourcesReady.AllReady()
dswp.populatorLoopFunc()()
@ -173,7 +176,7 @@ func (dswp *desiredStateOfWorldPopulator) isPodTerminated(pod *v1.Pod) bool {
if !found {
podStatus = pod.Status
}
return volumehelper.IsPodTerminated(pod, podStatus)
return util.IsPodTerminated(pod, podStatus)
}
// Iterate through all pods and add to desired state of world if they don't
@ -236,13 +239,13 @@ func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() {
}
if runningContainers {
glog.V(5).Infof(
glog.V(4).Infof(
"Pod %q has been removed from pod manager. However, it still has one or more containers in the non-exited state. Therefore, it will not be removed from volume manager.",
format.Pod(volumeToMount.Pod))
continue
}
glog.V(5).Infof(volumeToMount.GenerateMsgDetailed("Removing volume from desired state", ""))
glog.V(4).Infof(volumeToMount.GenerateMsgDetailed("Removing volume from desired state", ""))
dswp.desiredStateOfWorld.DeletePodFromVolume(
volumeToMount.PodName, volumeToMount.VolumeName)
@ -257,7 +260,7 @@ func (dswp *desiredStateOfWorldPopulator) processPodVolumes(pod *v1.Pod) {
return
}
uniquePodName := volumehelper.GetUniquePodName(pod)
uniquePodName := util.GetUniquePodName(pod)
if dswp.podPreviouslyProcessed(uniquePodName) {
return
}
@ -292,7 +295,7 @@ func (dswp *desiredStateOfWorldPopulator) processPodVolumes(pod *v1.Pod) {
allVolumesAdded = false
}
glog.V(10).Infof(
glog.V(4).Infof(
"Added volume %q (volSpec=%q) for pod %q to desired state.",
podVolume.Name,
volumeSpec.Name(),
@ -302,6 +305,9 @@ func (dswp *desiredStateOfWorldPopulator) processPodVolumes(pod *v1.Pod) {
// some of the volume additions may have failed, should not mark this pod as fully processed
if allVolumesAdded {
dswp.markPodProcessed(uniquePodName)
// New pod has been synced. Re-mount all volumes that need it
// (e.g. DownwardAPI)
dswp.actualStateOfWorld.MarkRemountRequired(uniquePodName)
}
}
@ -343,7 +349,7 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec(
podVolume v1.Volume, podName string, podNamespace string, mountsMap map[string]bool, devicesMap map[string]bool) (*volume.Spec, string, error) {
if pvcSource :=
podVolume.VolumeSource.PersistentVolumeClaim; pvcSource != nil {
glog.V(10).Infof(
glog.V(5).Infof(
"Found PVC, ClaimName: %q/%q",
podNamespace,
pvcSource.ClaimName)
@ -359,7 +365,7 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec(
err)
}
glog.V(10).Infof(
glog.V(5).Infof(
"Found bound PV for PVC (ClaimName %q/%q pvcUID %v): pvName=%q",
podNamespace,
pvcSource.ClaimName,
@ -377,7 +383,7 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec(
err)
}
glog.V(10).Infof(
glog.V(5).Infof(
"Extracted volumeSpec (%v) from bound PV (pvName %q) and PVC (ClaimName %q/%q pvcUID %v)",
volumeSpec.Name,
pvName,
@ -387,7 +393,7 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec(
// TODO: remove feature gate check after no longer needed
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
volumeMode, err := volumehelper.GetVolumeMode(volumeSpec)
volumeMode, err := util.GetVolumeMode(volumeSpec)
if err != nil {
return nil, "", err
}
@ -435,7 +441,7 @@ func (dswp *desiredStateOfWorldPopulator) getPVCExtractPV(
err)
}
if utilfeature.DefaultFeatureGate.Enabled(features.PVCProtection) {
if utilfeature.DefaultFeatureGate.Enabled(features.StorageObjectInUseProtection) {
// Pods that use a PVC that is being deleted must not be started.
//
// In case an old kubelet is running without this check or some kubelets
@ -444,7 +450,7 @@ func (dswp *desiredStateOfWorldPopulator) getPVCExtractPV(
// and users should not be that surprised.
// It should happen only in very rare case when scheduler schedules
// a pod and user deletes a PVC that's used by it at the same time.
if volumeutil.IsPVCBeingDeleted(pvc) {
if pvc.ObjectMeta.DeletionTimestamp != nil {
return "", "", fmt.Errorf(
"can't start pod because PVC %s/%s is being deleted",
namespace,
@ -519,7 +525,7 @@ func (dswp *desiredStateOfWorldPopulator) makeVolumeMap(containers []v1.Containe
}
func getPVVolumeGidAnnotationValue(pv *v1.PersistentVolume) string {
if volumeGid, ok := pv.Annotations[volumehelper.VolumeGidAnnotationKey]; ok {
if volumeGid, ok := pv.Annotations[util.VolumeGidAnnotationKey]; ok {
return volumeGid
}
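
The populator now also holds a reference to the actual state of world. A simplified sketch of the intent (assumed, toy interfaces; only the method names come from the diff): once all of a pod's volumes are added to the desired state, the pod is marked processed and the ASW is asked to re-mount volumes whose contents depend on pod state (e.g. DownwardAPI).

package main

import "fmt"

type UniquePodName string

type actualStateOfWorld interface {
	MarkRemountRequired(podName UniquePodName)
}

type fakeASW struct{}

func (fakeASW) MarkRemountRequired(podName UniquePodName) {
	fmt.Printf("remount requested for %s\n", podName)
}

type populator struct {
	asw           actualStateOfWorld
	processedPods map[UniquePodName]bool
}

// processPodVolumes mirrors the tail of the diffed function: a pod is marked
// processed only when every volume was added, and a newly synced pod triggers
// the remount check.
func (p *populator) processPodVolumes(podName UniquePodName, allVolumesAdded bool) {
	if allVolumesAdded {
		p.processedPods[podName] = true
		p.asw.MarkRemountRequired(podName)
	}
}

func main() {
	p := &populator{asw: fakeASW{}, processedPods: map[UniquePodName]bool{}}
	p.processPodVolumes("ns/pod-1", true)
}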

pkg/kubelet/volumemanager/populator/desired_state_of_world_populator_test.go

@ -35,8 +35,8 @@ import (
statustest "k8s.io/kubernetes/pkg/kubelet/status/testing"
"k8s.io/kubernetes/pkg/kubelet/volumemanager/cache"
volumetesting "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/types"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
func TestFindAndAddNewPods_FindAndRemoveDeletedPods(t *testing.T) {
@ -74,7 +74,7 @@ func TestFindAndAddNewPods_FindAndRemoveDeletedPods(t *testing.T) {
fakePodManager.AddPod(pod)
podName := volumehelper.GetUniquePodName(pod)
podName := util.GetUniquePodName(pod)
generatedVolumeName := "fake-plugin/" + pod.Spec.Volumes[0].Name
@ -184,7 +184,7 @@ func TestFindAndAddNewPods_FindAndRemoveDeletedPods_Valid_Block_VolumeDevices(t
fakePodManager.AddPod(pod)
podName := volumehelper.GetUniquePodName(pod)
podName := util.GetUniquePodName(pod)
generatedVolumeName := "fake-plugin/" + pod.Spec.Volumes[0].Name
@ -525,6 +525,7 @@ func createDswpWithVolume(t *testing.T, pv *v1.PersistentVolume, pvc *v1.Persist
podtest.NewFakeMirrorClient(), fakeSecretManager, fakeConfigMapManager)
fakesDSW := cache.NewDesiredStateOfWorld(fakeVolumePluginMgr)
fakeASW := cache.NewActualStateOfWorld("fake", fakeVolumePluginMgr)
fakeRuntime := &containertest.FakeRuntime{}
fakeStatusManager := status.NewManager(fakeClient, fakePodManager, &statustest.FakePodDeletionSafetyProvider{})
@ -536,6 +537,7 @@ func createDswpWithVolume(t *testing.T, pv *v1.PersistentVolume, pvc *v1.Persist
podManager: fakePodManager,
podStatusProvider: fakeStatusManager,
desiredStateOfWorld: fakesDSW,
actualStateOfWorld: fakeASW,
pods: processedPods{
processedPods: make(map[types.UniquePodName]bool)},
kubeContainerRuntime: fakeRuntime,

pkg/kubelet/volumemanager/reconciler/BUILD

@ -19,10 +19,10 @@ go_library(
"//pkg/util/mount:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/nestedpendingoperations:go_default_library",
"//pkg/volume/util/operationexecutor:go_default_library",
"//pkg/volume/util/types:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@ -36,15 +36,14 @@ go_library(
go_test(
name = "go_default_test",
srcs = ["reconciler_test.go"],
importpath = "k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler",
library = ":go_default_library",
embed = [":go_default_library"],
deps = [
"//pkg/kubelet/volumemanager/cache:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/operationexecutor:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",

pkg/kubelet/volumemanager/reconciler/reconciler.go

@ -15,7 +15,7 @@ limitations under the License.
*/
// Package reconciler implements interfaces that attempt to reconcile the
// desired state of the with the actual state of the world by triggering
// desired state of the world with the actual state of the world by triggering
// relevant actions (attach, detach, mount, unmount).
package reconciler
@ -41,10 +41,10 @@ import (
"k8s.io/kubernetes/pkg/util/mount"
utilstrings "k8s.io/kubernetes/pkg/util/strings"
volumepkg "k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations"
"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
// Reconciler runs a periodic loop to reconcile the desired state of the world
@ -140,26 +140,18 @@ type reconciler struct {
}
func (rc *reconciler) Run(stopCh <-chan struct{}) {
// Wait for the populator to indicate that it has actually populated the desired state of world, meaning it has
// completed a populate loop that started after sources are all ready. After, there's no need to keep checking.
wait.PollUntil(rc.loopSleepDuration, func() (bool, error) {
rc.reconciliationLoopFunc(rc.populatorHasAddedPods())()
return rc.populatorHasAddedPods(), nil
}, stopCh)
wait.Until(rc.reconciliationLoopFunc(true), rc.loopSleepDuration, stopCh)
wait.Until(rc.reconciliationLoopFunc(), rc.loopSleepDuration, stopCh)
}
func (rc *reconciler) reconciliationLoopFunc(populatorHasAddedPods bool) func() {
func (rc *reconciler) reconciliationLoopFunc() func() {
return func() {
rc.reconcile()
// Add a check that the populator has added pods so that reconciler's reconstruct process will start
// after desired state of world is populated with pod volume information. Otherwise, reconciler's
// reconstruct process may add incomplete volume information and cause confusion. In addition, if the
// desired state of world has not been populated yet, the reconstruct process may clean up pods' volumes
// that are still in use because desired state of world does not contain a complete list of pods.
if populatorHasAddedPods && time.Since(rc.timeOfLastSync) > rc.syncDuration {
glog.V(5).Infof("Desired state of world has been populated with pods, starting reconstruct state function")
// Sync the state with the reality once after all existing pods are added to the desired state from all sources.
// Otherwise, the reconstruct process may clean up pods' volumes that are still in use because
// desired state of world does not contain a complete list of pods.
if rc.populatorHasAddedPods() && !rc.StatesHasBeenSynced() {
glog.Infof("Reconciler: start to sync state")
rc.sync()
}
}
@ -174,12 +166,10 @@ func (rc *reconciler) reconcile() {
// Ensure volumes that should be unmounted are unmounted.
for _, mountedVolume := range rc.actualStateOfWorld.GetMountedVolumes() {
if !rc.desiredStateOfWorld.PodExistsInVolume(mountedVolume.PodName, mountedVolume.VolumeName) {
volumeHandler, err := operationexecutor.NewVolumeHandler(mountedVolume.VolumeSpec, rc.operationExecutor)
if err != nil {
glog.Errorf(mountedVolume.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.NewVolumeHandler for UnmountVolume failed"), err).Error())
continue
}
err = volumeHandler.UnmountVolumeHandler(mountedVolume.MountedVolume, rc.actualStateOfWorld)
// Volume is mounted, unmount it
glog.V(5).Infof(mountedVolume.GenerateMsgDetailed("Starting operationExecutor.UnmountVolume", ""))
err := rc.operationExecutor.UnmountVolume(
mountedVolume.MountedVolume, rc.actualStateOfWorld)
if err != nil &&
!nestedpendingoperations.IsAlreadyExists(err) &&
!exponentialbackoff.IsExponentialBackoff(err) {
@ -201,7 +191,7 @@ func (rc *reconciler) reconcile() {
if rc.controllerAttachDetachEnabled || !volumeToMount.PluginIsAttachable {
// Volume is not attached (or doesn't implement attacher), kubelet attach is disabled, wait
// for controller to finish attaching volume.
glog.V(12).Infof(volumeToMount.GenerateMsgDetailed("Starting operationExecutor.VerifyControllerAttachedVolume", ""))
glog.V(5).Infof(volumeToMount.GenerateMsgDetailed("Starting operationExecutor.VerifyControllerAttachedVolume", ""))
err := rc.operationExecutor.VerifyControllerAttachedVolume(
volumeToMount.VolumeToMount,
rc.nodeName,
@ -224,7 +214,7 @@ func (rc *reconciler) reconcile() {
VolumeSpec: volumeToMount.VolumeSpec,
NodeName: rc.nodeName,
}
glog.V(12).Infof(volumeToAttach.GenerateMsgDetailed("Starting operationExecutor.AttachVolume", ""))
glog.V(5).Infof(volumeToAttach.GenerateMsgDetailed("Starting operationExecutor.AttachVolume", ""))
err := rc.operationExecutor.AttachVolume(volumeToAttach, rc.actualStateOfWorld)
if err != nil &&
!nestedpendingoperations.IsAlreadyExists(err) &&
@ -244,12 +234,12 @@ func (rc *reconciler) reconcile() {
if isRemount {
remountingLogStr = "Volume is already mounted to pod, but remount was requested."
}
volumeHandler, err := operationexecutor.NewVolumeHandler(volumeToMount.VolumeSpec, rc.operationExecutor)
if err != nil {
glog.Errorf(volumeToMount.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.NewVolumeHandler for MountVolume failed"), err).Error())
continue
}
err = volumeHandler.MountVolumeHandler(rc.waitForAttachTimeout, volumeToMount.VolumeToMount, rc.actualStateOfWorld, isRemount, remountingLogStr)
glog.V(4).Infof(volumeToMount.GenerateMsgDetailed("Starting operationExecutor.MountVolume", remountingLogStr))
err := rc.operationExecutor.MountVolume(
rc.waitForAttachTimeout,
volumeToMount.VolumeToMount,
rc.actualStateOfWorld,
isRemount)
if err != nil &&
!nestedpendingoperations.IsAlreadyExists(err) &&
!exponentialbackoff.IsExponentialBackoff(err) {
@ -273,12 +263,10 @@ func (rc *reconciler) reconcile() {
if !rc.desiredStateOfWorld.VolumeExists(attachedVolume.VolumeName) &&
!rc.operationExecutor.IsOperationPending(attachedVolume.VolumeName, nestedpendingoperations.EmptyUniquePodName) {
if attachedVolume.GloballyMounted {
volumeHandler, err := operationexecutor.NewVolumeHandler(attachedVolume.VolumeSpec, rc.operationExecutor)
if err != nil {
glog.Errorf(attachedVolume.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.NewVolumeHandler for UnmountDevice failed"), err).Error())
continue
}
err = volumeHandler.UnmountDeviceHandler(attachedVolume.AttachedVolume, rc.actualStateOfWorld, rc.mounter)
// Volume is globally mounted to device, unmount it
glog.V(5).Infof(attachedVolume.GenerateMsgDetailed("Starting operationExecutor.UnmountDevice", ""))
err := rc.operationExecutor.UnmountDevice(
attachedVolume.AttachedVolume, rc.actualStateOfWorld, rc.mounter)
if err != nil &&
!nestedpendingoperations.IsAlreadyExists(err) &&
!exponentialbackoff.IsExponentialBackoff(err) {
@ -297,7 +285,7 @@ func (rc *reconciler) reconcile() {
glog.Infof(attachedVolume.GenerateMsgDetailed("Volume detached", fmt.Sprintf("DevicePath %q", attachedVolume.DevicePath)))
} else {
// Only detach if kubelet detach is enabled
glog.V(12).Infof(attachedVolume.GenerateMsgDetailed("Starting operationExecutor.DetachVolume", ""))
glog.V(5).Infof(attachedVolume.GenerateMsgDetailed("Starting operationExecutor.DetachVolume", ""))
err := rc.operationExecutor.DetachVolume(
attachedVolume.AttachedVolume, false /* verifySafeToDetach */, rc.actualStateOfWorld)
if err != nil &&
@ -319,11 +307,11 @@ func (rc *reconciler) reconcile() {
// sync process tries to observe the real world by scanning all pods' volume directories from the disk.
// If the actual and desired state of worlds are not consistent with the observed world, it means that some
// mounted volumes are left out probably during kubelet restart. This process will reconstruct
// the volumes and udpate the actual and desired states. In the following reconciler loop, those volumes will
// be cleaned up.
// the volumes and update the actual and desired states. For the volumes that cannot support reconstruction,
// it will try to clean up the mount paths with operation executor.
func (rc *reconciler) sync() {
defer rc.updateLastSyncTime()
rc.syncStates(rc.kubeletPodsDir)
rc.syncStates()
}
func (rc *reconciler) updateLastSyncTime() {
@ -348,7 +336,7 @@ type reconstructedVolume struct {
volumeSpec *volumepkg.Spec
outerVolumeSpecName string
pod *v1.Pod
pluginIsAttachable bool
attachablePlugin volumepkg.AttachableVolumePlugin
volumeGidValue string
devicePath string
reportedInUse bool
@ -356,66 +344,41 @@ type reconstructedVolume struct {
blockVolumeMapper volumepkg.BlockVolumeMapper
}
// reconstructFromDisk scans the volume directories under the given pod directory. If the volume is not
// in either actual or desired state of world, or pending operation, this function will reconstruct
// the volume spec and put it in both the actual and desired state of worlds. If no running
// container is mounting the volume, the volume will be removed by desired state of world's populator and
// cleaned up by the reconciler.
func (rc *reconciler) syncStates(podsDir string) {
// syncStates scans the volume directories under the kubelet pod directory.
// If the volume is not in desired state of world, this function will reconstruct
// the volume related information and put it in both the actual and desired state of worlds.
// For some volume plugins that cannot support reconstruction, it will clean up the existing
// mount points since the volume is no longer needed (removed from desired state).
func (rc *reconciler) syncStates() {
// Get volumes information by reading the pod's directory
podVolumes, err := getVolumesFromPodDir(podsDir)
podVolumes, err := getVolumesFromPodDir(rc.kubeletPodsDir)
if err != nil {
glog.Errorf("Cannot get volumes from disk %v", err)
return
}
volumesNeedUpdate := make(map[v1.UniqueVolumeName]*reconstructedVolume)
for _, volume := range podVolumes {
reconstructedVolume, err := rc.reconstructVolume(volume)
if err != nil {
glog.Errorf("Could not construct volume information: %v", err)
if rc.desiredStateOfWorld.VolumeExistsWithSpecName(volume.podName, volume.volumeSpecName) {
glog.V(4).Infof("Volume exists in desired state (volume.SpecName %s, pod.UID %s), skip cleaning up mounts", volume.volumeSpecName, volume.podName)
continue
}
// Check if there is a pending operation for the given pod and volume.
// Need to check pending operation before checking the actual and desired
// states to avoid race condition during checking. For example, the following
// might happen if pending operation is checked after checking actual and desired states.
// 1. Checking the pod and it does not exist in either actual or desired state.
// 2. An operation for the given pod finishes and the actual state is updated.
// 3. Checking and there is no pending operation for the given pod.
// During state reconstruction period, no new volume operations could be issued. If the
// mounted path is not in either pending operation, or actual or desired states, this
// volume needs to be reconstructed back to the states.
pending := rc.operationExecutor.IsOperationPending(reconstructedVolume.volumeName, reconstructedVolume.podName)
dswExist := rc.desiredStateOfWorld.PodExistsInVolume(reconstructedVolume.podName, reconstructedVolume.volumeName)
aswExist, _, _ := rc.actualStateOfWorld.PodExistsInVolume(reconstructedVolume.podName, reconstructedVolume.volumeName)
if !rc.StatesHasBeenSynced() {
// In case this is the first time reconstructing state after kubelet starts, for a persistent volume, it must have
// been mounted before kubelet restarts because no mount operations could be started at this time (node
// status has not yet been updated before this very first syncStates finishes, so that VerifyControllerAttachedVolume will fail),
// In this case, the volume state should be put back to actual state now no matter desired state has it or not.
// This is to prevent node status from being updated to empty for attachable volumes. This might happen because
// in the case that a volume is discovered on disk, and it is part of desired state, but is then quickly deleted
// from the desired state. If in such situation, the volume is not added to the actual state, the node status updater will
// not get this volume from either actual or desired state. In turn, this might cause master controller
// detaching while the volume is still mounted.
if aswExist || !reconstructedVolume.pluginIsAttachable {
continue
}
} else {
// Check pending first since no new operations could be started at this point.
// Otherwise there might a race condition in checking actual states and pending operations
if pending || dswExist || aswExist {
continue
}
if rc.actualStateOfWorld.VolumeExistsWithSpecName(volume.podName, volume.volumeSpecName) {
glog.V(4).Infof("Volume exists in actual state (volume.SpecName %s, pod.UID %s), skip cleaning up mounts", volume.volumeSpecName, volume.podName)
continue
}
reconstructedVolume, err := rc.reconstructVolume(volume)
if err != nil {
glog.Warningf("Could not construct volume information, cleanup the mounts. (pod.UID %s, volume.SpecName %s): %v", volume.podName, volume.volumeSpecName, err)
rc.cleanupMounts(volume)
continue
}
if rc.operationExecutor.IsOperationPending(reconstructedVolume.volumeName, nestedpendingoperations.EmptyUniquePodName) {
glog.Warning("Volume is in pending operation, skip cleaning up mounts")
}
glog.V(2).Infof(
"Reconciler sync states: could not find pod information in desired or actual states or pending operation, update it in both states: %+v",
"Reconciler sync states: could not find pod information in desired state, update it in actual state: %+v",
reconstructedVolume)
volumesNeedUpdate[reconstructedVolume.volumeName] = reconstructedVolume
}
if len(volumesNeedUpdate) > 0 {
@ -426,7 +389,26 @@ func (rc *reconciler) syncStates(podsDir string) {
}
// Reconstruct Volume object and reconstructedVolume data structure by reading the pod's volume directories
func (rc *reconciler) cleanupMounts(volume podVolume) {
glog.V(2).Infof("Reconciler sync states: could not find information (PID: %s) (Volume SpecName: %s) in desired state, clean up the mount points",
volume.podName, volume.volumeSpecName)
mountedVolume := operationexecutor.MountedVolume{
PodName: volume.podName,
VolumeName: v1.UniqueVolumeName(volume.volumeSpecName),
InnerVolumeSpecName: volume.volumeSpecName,
PluginName: volume.pluginName,
PodUID: types.UID(volume.podName),
}
// TODO: Currently cleanupMounts only performs the UnmountVolume operation. In the next PR, we will add
// logic to unmount both the volume and the device in the same routine.
err := rc.operationExecutor.UnmountVolume(mountedVolume, rc.actualStateOfWorld)
if err != nil {
glog.Errorf(mountedVolume.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.UnmountVolume failed"), err).Error())
return
}
}
// Reconstruct volume data structure by reading the pod's volume directories
func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume, error) {
// plugin initializations
plugin, err := rc.volumePluginMgr.FindPluginByName(volume.pluginName)
@ -438,27 +420,18 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume,
return nil, err
}
// Create volumeSpec
// Create pod object
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: types.UID(volume.podName),
},
}
// TODO: remove feature gate check after no longer needed
var mapperPlugin volumepkg.BlockVolumePlugin
tmpSpec := &volumepkg.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{}}}
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
mapperPlugin, err = rc.volumePluginMgr.FindMapperPluginByName(volume.pluginName)
if err != nil {
return nil, err
}
tmpSpec = &volumepkg.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{VolumeMode: &volume.volumeMode}}}
}
volumeHandler, err := operationexecutor.NewVolumeHandler(tmpSpec, rc.operationExecutor)
mapperPlugin, err := rc.volumePluginMgr.FindMapperPluginByName(volume.pluginName)
if err != nil {
return nil, err
}
volumeSpec, err := volumeHandler.ReconstructVolumeHandler(
volumeSpec, err := rc.operationExecutor.ReconstructVolumeOperation(
volume.volumeMode,
plugin,
mapperPlugin,
pod.UID,
@ -470,19 +443,17 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume,
return nil, err
}
volumeName, err := plugin.GetVolumeName(volumeSpec)
if err != nil {
return nil, err
}
var uniqueVolumeName v1.UniqueVolumeName
if attachablePlugin != nil {
uniqueVolumeName = volumehelper.GetUniqueVolumeName(volume.pluginName, volumeName)
uniqueVolumeName, err = util.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)
if err != nil {
return nil, err
}
} else {
uniqueVolumeName = volumehelper.GetUniqueVolumeNameForNonAttachableVolume(volume.podName, plugin, volumeSpec)
uniqueVolumeName = util.GetUniqueVolumeNameForNonAttachableVolume(volume.podName, plugin, volumeSpec)
}
// Check existence of mount point for filesystem volume or symbolic link for block volume
isExist, checkErr := volumeHandler.CheckVolumeExistence(volume.mountPath, volumeSpec.Name(), rc.mounter, uniqueVolumeName, volume.podName, pod.UID, attachablePlugin)
isExist, checkErr := rc.operationExecutor.CheckVolumeExistenceOperation(volumeSpec, volume.mountPath, volumeSpec.Name(), rc.mounter, uniqueVolumeName, volume.podName, pod.UID, attachablePlugin)
if checkErr != nil {
return nil, checkErr
}
@ -507,7 +478,7 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume,
// TODO: remove feature gate check after no longer needed
var volumeMapper volumepkg.BlockVolumeMapper
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) && volume.volumeMode == v1.PersistentVolumeBlock {
var newMapperErr error
if mapperPlugin != nil {
volumeMapper, newMapperErr = mapperPlugin.NewBlockVolumeMapper(
@ -530,23 +501,24 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume,
volumeName: uniqueVolumeName,
podName: volume.podName,
volumeSpec: volumeSpec,
// volume.volumeSpecName is actually InnerVolumeSpecName. But this information will likely be updated in updateStates()
// by checking the desired state volumeToMount list and getting the real OuterVolumeSpecName.
// In case the pod is deleted during this period and desired state does not have this information, it will not be used
// volume.volumeSpecName is actually InnerVolumeSpecName. It will not be used
// for volume cleanup.
// TODO: in case pod is added back before reconciler starts to unmount, we can update this field from desired state information
outerVolumeSpecName: volume.volumeSpecName,
pod: pod,
pluginIsAttachable: attachablePlugin != nil,
attachablePlugin: attachablePlugin,
volumeGidValue: "",
devicePath: "",
mounter: volumeMounter,
blockVolumeMapper: volumeMapper,
// devicePath is updated during updateStates() by checking node status's VolumesAttached data.
// TODO: get device path directly from the volume mount path.
devicePath: "",
mounter: volumeMounter,
blockVolumeMapper: volumeMapper,
}
return reconstructedVolume, nil
}
func (rc *reconciler) updateStates(volumesNeedUpdate map[v1.UniqueVolumeName]*reconstructedVolume) error {
// Get the node status to retrieve volume device path information.
// updateDevicePath gets the node status to retrieve volume device path information.
func (rc *reconciler) updateDevicePath(volumesNeedUpdate map[v1.UniqueVolumeName]*reconstructedVolume) {
node, fetchErr := rc.kubeClient.CoreV1().Nodes().Get(string(rc.nodeName), metav1.GetOptions{})
if fetchErr != nil {
glog.Errorf("updateStates in reconciler: could not get node status with error %v", fetchErr)
@ -559,26 +531,42 @@ func (rc *reconciler) updateStates(volumesNeedUpdate map[v1.UniqueVolumeName]*re
}
}
}
}
// Get the list of volumes from desired state and update OuterVolumeSpecName if the information is available
volumesToMount := rc.desiredStateOfWorld.GetVolumesToMount()
for _, volumeToMount := range volumesToMount {
if volume, exists := volumesNeedUpdate[volumeToMount.VolumeName]; exists {
volume.outerVolumeSpecName = volumeToMount.OuterVolumeSpecName
volumesNeedUpdate[volumeToMount.VolumeName] = volume
glog.V(4).Infof("Update OuterVolumeSpecName from desired state for volume (%q): %q",
volumeToMount.VolumeName, volume.outerVolumeSpecName)
func getDeviceMountPath(volume *reconstructedVolume) (string, error) {
volumeAttacher, err := volume.attachablePlugin.NewAttacher()
if volumeAttacher == nil || err != nil {
return "", err
}
deviceMountPath, err :=
volumeAttacher.GetDeviceMountPath(volume.volumeSpec)
if err != nil {
return "", err
}
if volume.blockVolumeMapper != nil {
deviceMountPath, err =
volume.blockVolumeMapper.GetGlobalMapPath(volume.volumeSpec)
if err != nil {
return "", err
}
}
return deviceMountPath, nil
}
func (rc *reconciler) updateStates(volumesNeedUpdate map[v1.UniqueVolumeName]*reconstructedVolume) error {
// Get the node status to retrieve volume device path information.
rc.updateDevicePath(volumesNeedUpdate)
for _, volume := range volumesNeedUpdate {
err := rc.actualStateOfWorld.MarkVolumeAsAttached(
//TODO: the devicePath might not be correct for some volume plugins: see issue #54108
volume.volumeName, volume.volumeSpec, "" /* nodeName */, volume.devicePath)
if err != nil {
glog.Errorf("Could not add volume information to actual state of world: %v", err)
continue
}
err = rc.actualStateOfWorld.AddPodToVolume(
err = rc.actualStateOfWorld.MarkVolumeAsMounted(
volume.podName,
types.UID(volume.podName),
volume.volumeName,
@ -590,22 +578,19 @@ func (rc *reconciler) updateStates(volumesNeedUpdate map[v1.UniqueVolumeName]*re
glog.Errorf("Could not add pod to volume information to actual state of world: %v", err)
continue
}
if volume.pluginIsAttachable {
err = rc.actualStateOfWorld.MarkDeviceAsMounted(volume.volumeName)
glog.V(4).Infof("Volume: %s (pod UID %s) is marked as mounted and added into the actual state", volume.volumeName, volume.podName)
if volume.attachablePlugin != nil {
deviceMountPath, err := getDeviceMountPath(volume)
if err != nil {
glog.Errorf("Could not find device mount path for volume %s", volume.volumeName)
continue
}
err = rc.actualStateOfWorld.MarkDeviceAsMounted(volume.volumeName, volume.devicePath, deviceMountPath)
if err != nil {
glog.Errorf("Could not mark device is mounted to actual state of world: %v", err)
continue
}
glog.Infof("Volume: %v is mounted", volume.volumeName)
}
_, err = rc.desiredStateOfWorld.AddPodToVolume(volume.podName,
volume.pod,
volume.volumeSpec,
volume.outerVolumeSpecName,
volume.volumeGidValue)
if err != nil {
glog.Errorf("Could not add pod to volume information to desired state of world: %v", err)
glog.V(4).Infof("Volume: %s (pod UID %s) is marked device as mounted and added into the actual state", volume.volumeName, volume.podName)
}
}
return nil
@ -641,7 +626,7 @@ func getVolumesFromPodDir(podDir string) ([]podVolume, error) {
for volumeMode, volumesDir := range volumesDirs {
var volumesDirInfo []os.FileInfo
if volumesDirInfo, err = ioutil.ReadDir(volumesDir); err != nil {
// Just skip the loop becuase given volumesDir doesn't exist depending on volumeMode
// Just skip the loop because given volumesDir doesn't exist depending on volumeMode
continue
}
for _, volumeDir := range volumesDirInfo {
@ -667,6 +652,6 @@ func getVolumesFromPodDir(podDir string) ([]podVolume, error) {
}
}
}
glog.V(10).Infof("Get volumes from pod directory %q %+v", podDir, volumes)
glog.V(4).Infof("Get volumes from pod directory %q %+v", podDir, volumes)
return volumes, nil
}
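
The loop rework above replaces the old gated PollUntil/Until pair with a single ticker. A toy model of the resulting control flow (assumed names, no kubelet dependencies, not the real implementation): reconcile() runs on every tick from startup, while sync(), the reconstruct pass, runs exactly once, on the first tick after the populator reports that pods from all sources are in the desired state.

package main

import (
	"fmt"
	"time"
)

type reconciler struct {
	populatorHasAddedPods func() bool
	timeOfLastSync        time.Time
}

// StatesHasBeenSynced reports whether the one-time sync already ran, as in
// the diff above.
func (rc *reconciler) StatesHasBeenSynced() bool {
	return !rc.timeOfLastSync.IsZero()
}

func (rc *reconciler) reconcile() { fmt.Println("reconcile") }

func (rc *reconciler) sync() {
	fmt.Println("sync: reconstruct state from the pod directories")
	rc.timeOfLastSync = time.Now()
}

// reconciliationLoopFunc mirrors the new shape: no populator flag parameter;
// the sync gate is evaluated inside the closure on every tick.
func (rc *reconciler) reconciliationLoopFunc() func() {
	return func() {
		rc.reconcile()
		if rc.populatorHasAddedPods() && !rc.StatesHasBeenSynced() {
			rc.sync()
		}
	}
}

func main() {
	ready := false
	rc := &reconciler{populatorHasAddedPods: func() bool { return ready }}
	loop := rc.reconciliationLoopFunc()
	loop() // reconcile only; populator not ready yet
	ready = true
	loop() // reconcile plus the one-time sync
	loop() // reconcile only again; StatesHasBeenSynced is now true
}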

pkg/kubelet/volumemanager/reconciler/reconciler_test.go

@ -36,8 +36,8 @@ import (
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumetesting "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
const (
@ -149,7 +149,7 @@ func Test_Run_Positive_VolumeAttachAndMount(t *testing.T) {
}
volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
podName := volumehelper.GetUniquePodName(pod)
podName := util.GetUniquePodName(pod)
generatedVolumeName, err := dsw.AddPodToVolume(
podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)
@ -227,7 +227,7 @@ func Test_Run_Positive_VolumeMountControllerAttachEnabled(t *testing.T) {
}
volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
podName := volumehelper.GetUniquePodName(pod)
podName := util.GetUniquePodName(pod)
generatedVolumeName, err := dsw.AddPodToVolume(
podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)
dsw.MarkVolumesReportedInUse([]v1.UniqueVolumeName{generatedVolumeName})
@ -306,7 +306,7 @@ func Test_Run_Positive_VolumeAttachMountUnmountDetach(t *testing.T) {
}
volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
podName := volumehelper.GetUniquePodName(pod)
podName := util.GetUniquePodName(pod)
generatedVolumeName, err := dsw.AddPodToVolume(
podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)
@ -396,7 +396,7 @@ func Test_Run_Positive_VolumeUnmountControllerAttachEnabled(t *testing.T) {
}
volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
podName := volumehelper.GetUniquePodName(pod)
podName := util.GetUniquePodName(pod)
generatedVolumeName, err := dsw.AddPodToVolume(
podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)
@ -491,7 +491,7 @@ func Test_Run_Positive_VolumeAttachAndMap(t *testing.T) {
volumeSpec := &volume.Spec{
PersistentVolume: gcepv,
}
podName := volumehelper.GetUniquePodName(pod)
podName := util.GetUniquePodName(pod)
generatedVolumeName, err := dsw.AddPodToVolume(
podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)
@ -582,7 +582,7 @@ func Test_Run_Positive_BlockVolumeMapControllerAttachEnabled(t *testing.T) {
volumeSpec := &volume.Spec{
PersistentVolume: gcepv,
}
podName := volumehelper.GetUniquePodName(pod)
podName := util.GetUniquePodName(pod)
generatedVolumeName, err := dsw.AddPodToVolume(
podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)
dsw.MarkVolumesReportedInUse([]v1.UniqueVolumeName{generatedVolumeName})
@ -674,7 +674,7 @@ func Test_Run_Positive_BlockVolumeAttachMapUnmapDetach(t *testing.T) {
volumeSpec := &volume.Spec{
PersistentVolume: gcepv,
}
podName := volumehelper.GetUniquePodName(pod)
podName := util.GetUniquePodName(pod)
generatedVolumeName, err := dsw.AddPodToVolume(
podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)
@ -776,7 +776,7 @@ func Test_Run_Positive_VolumeUnmapControllerAttachEnabled(t *testing.T) {
volumeSpec := &volume.Spec{
PersistentVolume: gcepv,
}
podName := volumehelper.GetUniquePodName(pod)
podName := util.GetUniquePodName(pod)
generatedVolumeName, err := dsw.AddPodToVolume(
podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)
@ -835,6 +835,8 @@ func Test_GenerateMapVolumeFunc_Plugin_Not_Found(t *testing.T) {
},
}
// Enable BlockVolume feature gate
utilfeature.DefaultFeatureGate.Set("BlockVolume=true")
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
volumePluginMgr := &volume.VolumePluginMgr{}
@ -854,14 +856,20 @@ func Test_GenerateMapVolumeFunc_Plugin_Not_Found(t *testing.T) {
},
Spec: v1.PodSpec{},
}
volumeToMount := operationexecutor.VolumeToMount{Pod: pod, VolumeSpec: &volume.Spec{}}
err := oex.MapVolume(waitForAttachTimeout, volumeToMount, asw)
volumeMode := v1.PersistentVolumeBlock
tmpSpec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{VolumeMode: &volumeMode}}}
volumeToMount := operationexecutor.VolumeToMount{
Pod: pod,
VolumeSpec: tmpSpec}
err := oex.MountVolume(waitForAttachTimeout, volumeToMount, asw, false)
// Assert
if assert.Error(t, err) {
assert.Contains(t, err.Error(), tc.expectedErrMsg)
}
})
}
// Roll back the feature gate to false.
utilfeature.DefaultFeatureGate.Set("BlockVolume=false")
}
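
// Editor's note: this set/rollback pattern around the process-global
// BlockVolume gate recurs in the next two tests as well. Below is a hedged
// sketch of a helper that pairs the two calls so the rollback cannot be
// forgotten on an early return; withBlockVolume is hypothetical and mirrors
// the true-then-false pattern used in these tests rather than saving and
// restoring the gate's previous value.
package reconciler_test

import (
	"fmt"
	"testing"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
)

func withBlockVolume(t *testing.T, enabled bool, testFn func()) {
	if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("BlockVolume=%t", enabled)); err != nil {
		t.Fatalf("failed to set BlockVolume=%t: %v", enabled, err)
	}
	// Restore the opposite setting when testFn returns, mirroring the manual
	// rollback calls in the tests above and below.
	defer func() {
		if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("BlockVolume=%t", !enabled)); err != nil {
			t.Errorf("failed to roll back BlockVolume gate: %v", err)
		}
	}()
	testFn()
}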
func Test_GenerateUnmapVolumeFunc_Plugin_Not_Found(t *testing.T) {
@ -882,6 +890,8 @@ func Test_GenerateUnmapVolumeFunc_Plugin_Not_Found(t *testing.T) {
},
}
// Enable BlockVolume feature gate
utilfeature.DefaultFeatureGate.Set("BlockVolume=true")
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
volumePluginMgr := &volume.VolumePluginMgr{}
@ -893,14 +903,20 @@ func Test_GenerateUnmapVolumeFunc_Plugin_Not_Found(t *testing.T) {
nil, /* fakeRecorder */
false, /* checkNodeCapabilitiesBeforeMount */
nil))
volumeToUnmount := operationexecutor.MountedVolume{PluginName: "fake-file-plugin"}
err := oex.UnmapVolume(volumeToUnmount, asw)
volumeMode := v1.PersistentVolumeBlock
tmpSpec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{VolumeMode: &volumeMode}}}
volumeToUnmount := operationexecutor.MountedVolume{
PluginName: "fake-file-plugin",
VolumeSpec: tmpSpec}
err := oex.UnmountVolume(volumeToUnmount, asw)
// Assert
if assert.Error(t, err) {
assert.Contains(t, err.Error(), tc.expectedErrMsg)
}
})
}
// Roll back the feature gate to false.
utilfeature.DefaultFeatureGate.Set("BlockVolume=false")
}
func Test_GenerateUnmapDeviceFunc_Plugin_Not_Found(t *testing.T) {
@ -912,15 +928,17 @@ func Test_GenerateUnmapDeviceFunc_Plugin_Not_Found(t *testing.T) {
"volumePlugin is nil": {
volumePlugins: []volume.VolumePlugin{},
expectErr: true,
expectedErrMsg: "UnmapDevice.FindMapperPluginBySpec failed",
expectedErrMsg: "UnmapDevice.FindMapperPluginByName failed",
},
"blockVolumePlugin is nil": {
volumePlugins: volumetesting.NewFakeFileVolumePlugin(),
expectErr: true,
expectedErrMsg: "UnmapDevice.FindMapperPluginBySpec failed to find BlockVolumeMapper plugin. Volume plugin is nil.",
expectedErrMsg: "UnmapDevice.FindMapperPluginByName failed to find BlockVolumeMapper plugin. Volume plugin is nil.",
},
}
// Enable BlockVolume feature gate
utilfeature.DefaultFeatureGate.Set("BlockVolume=true")
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
volumePluginMgr := &volume.VolumePluginMgr{}
@ -933,14 +951,18 @@ func Test_GenerateUnmapDeviceFunc_Plugin_Not_Found(t *testing.T) {
false, /* checkNodeCapabilitiesBeforeMount */
nil))
var mounter mount.Interface
deviceToDetach := operationexecutor.AttachedVolume{VolumeSpec: &volume.Spec{}}
err := oex.UnmapDevice(deviceToDetach, asw, mounter)
volumeMode := v1.PersistentVolumeBlock
tmpSpec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{VolumeMode: &volumeMode}}}
deviceToDetach := operationexecutor.AttachedVolume{VolumeSpec: tmpSpec, PluginName: "fake-file-plugin"}
err := oex.UnmountDevice(deviceToDetach, asw, mounter)
// Assert
if assert.Error(t, err) {
assert.Contains(t, err.Error(), tc.expectedErrMsg)
}
})
}
// Roll back the feature gate to false.
utilfeature.DefaultFeatureGate.Set("BlockVolume=false")
}
func waitForMount(
@ -949,7 +971,7 @@ func waitForMount(
volumeName v1.UniqueVolumeName,
asw cache.ActualStateOfWorld) {
err := retryWithExponentialBackOff(
time.Duration(5*time.Millisecond),
time.Duration(500*time.Millisecond),
func() (bool, error) {
mountedVolumes := asw.GetMountedVolumes()
for _, mountedVolume := range mountedVolumes {
@ -973,7 +995,7 @@ func waitForDetach(
volumeName v1.UniqueVolumeName,
asw cache.ActualStateOfWorld) {
err := retryWithExponentialBackOff(
time.Duration(5*time.Millisecond),
time.Duration(500*time.Millisecond),
func() (bool, error) {
if asw.VolumeExists(volumeName) {
return false, nil
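
// Editor's note: the 5ms -> 500ms change in these two hunks only alters the
// first interval handed to retryWithExponentialBackOff, whose definition sits
// outside the hunks shown here. A plausible minimal shape for such a helper,
// assuming it wraps wait.ExponentialBackoff from k8s.io/apimachinery; the
// Factor and Steps values below are illustrative guesses, not taken from this
// commit.
import (
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func retryWithExponentialBackOff(initialDuration time.Duration, fn wait.ConditionFunc) error {
	backoff := wait.Backoff{
		Duration: initialDuration, // first wait; 500ms after this change
		Factor:   3,               // each subsequent wait triples (illustrative)
		Jitter:   0,
		Steps:    6, // give up after six attempts (illustrative)
	}
	return wait.ExponentialBackoff(backoff, fn)
}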

View File

@ -32,7 +32,6 @@ import (
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/kubelet/config"
"k8s.io/kubernetes/pkg/kubelet/container"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/pod"
"k8s.io/kubernetes/pkg/kubelet/status"
"k8s.io/kubernetes/pkg/kubelet/util/format"
@ -44,7 +43,7 @@ import (
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
"k8s.io/kubernetes/pkg/volume/util/types"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
)
const (
@ -153,7 +152,7 @@ func NewVolumeManager(
podStatusProvider status.PodStatusProvider,
kubeClient clientset.Interface,
volumePluginMgr *volume.VolumePluginMgr,
kubeContainerRuntime kubecontainer.Runtime,
kubeContainerRuntime container.Runtime,
mounter mount.Interface,
kubeletPodsDir string,
recorder record.EventRecorder,
@ -170,7 +169,7 @@ func NewVolumeManager(
volumePluginMgr,
recorder,
checkNodeCapabilitiesBeforeMount,
util.NewBlockVolumePathHandler())),
volumepathhandler.NewBlockVolumePathHandler())),
}
vm.desiredStateOfWorldPopulator = populator.NewDesiredStateOfWorldPopulator(
@ -180,6 +179,7 @@ func NewVolumeManager(
podManager,
podStatusProvider,
vm.desiredStateOfWorld,
vm.actualStateOfWorld,
kubeContainerRuntime,
keepTerminatedPodVolumes)
vm.reconciler = reconciler.NewReconciler(
@ -264,7 +264,7 @@ func (vm *volumeManager) GetMountedVolumesForPod(podName types.UniquePodName) co
}
func (vm *volumeManager) GetExtraSupplementalGroupsForPod(pod *v1.Pod) []int64 {
podName := volumehelper.GetUniquePodName(pod)
podName := util.GetUniquePodName(pod)
supplementalGroups := sets.NewString()
for _, mountedVolume := range vm.actualStateOfWorld.GetMountedVolumesForPod(podName) {
@ -333,6 +333,10 @@ func (vm *volumeManager) MarkVolumesAsReportedInUse(
}
func (vm *volumeManager) WaitForAttachAndMount(pod *v1.Pod) error {
if pod == nil {
return nil
}
expectedVolumes := getExpectedVolumes(pod)
if len(expectedVolumes) == 0 {
// No volumes to verify
@ -340,13 +344,12 @@ func (vm *volumeManager) WaitForAttachAndMount(pod *v1.Pod) error {
}
glog.V(3).Infof("Waiting for volumes to attach and mount for pod %q", format.Pod(pod))
uniquePodName := volumehelper.GetUniquePodName(pod)
uniquePodName := util.GetUniquePodName(pod)
// Some pods expect to have Setup called over and over again to update.
// Remount plugins for which this is true. (Atomically updating volumes,
// like Downward API, depend on this to update the contents of the volume).
vm.desiredStateOfWorldPopulator.ReprocessPod(uniquePodName)
vm.actualStateOfWorld.MarkRemountRequired(uniquePodName)
err := wait.Poll(
podAttachAndMountRetryInterval,
@ -357,21 +360,38 @@ func (vm *volumeManager) WaitForAttachAndMount(pod *v1.Pod) error {
// Timeout expired
unmountedVolumes :=
vm.getUnmountedVolumes(uniquePodName, expectedVolumes)
// Also get unattached volumes for the error message
unattachedVolumes :=
vm.getUnattachedVolumes(expectedVolumes)
if len(unmountedVolumes) == 0 {
return nil
}
return fmt.Errorf(
"timeout expired waiting for volumes to attach/mount for pod %q/%q. list of unattached/unmounted volumes=%v",
"timeout expired waiting for volumes to attach or mount for pod %q/%q. list of unmounted volumes=%v. list of unattached volumes=%v",
pod.Namespace,
pod.Name,
unmountedVolumes)
unmountedVolumes,
unattachedVolumes)
}
glog.V(3).Infof("All volumes are attached and mounted for pod %q", format.Pod(pod))
return nil
}
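
// Editor's note: WaitForAttachAndMount drives verifyVolumesMountedFunc
// through wait.Poll until the condition reports done or the timeout elapses.
// A tiny runnable illustration of wait.Poll's semantics; the intervals and
// the fake condition are invented for the example and are not the kubelet's
// actual constants.
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	start := time.Now()
	// Poll every 300ms, give up after 2s.
	err := wait.Poll(300*time.Millisecond, 2*time.Second, func() (bool, error) {
		// Pretend the "mount" completes after one second.
		return time.Since(start) > time.Second, nil
	})
	fmt.Println("poll result:", err) // nil once the condition returns true in time
}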
// getUnattachedVolumes returns a list of the volumes that are expected to be attached but
// are not currently attached to the node
func (vm *volumeManager) getUnattachedVolumes(expectedVolumes []string) []string {
unattachedVolumes := []string{}
for _, volume := range expectedVolumes {
if !vm.actualStateOfWorld.VolumeExists(v1.UniqueVolumeName(volume)) {
unattachedVolumes = append(unattachedVolumes, volume)
}
}
return unattachedVolumes
}
// verifyVolumesMountedFunc returns a method that returns true when all expected
// volumes are mounted.
func (vm *volumeManager) verifyVolumesMountedFunc(podName types.UniquePodName, expectedVolumes []string) wait.ConditionFunc {
@ -407,9 +427,6 @@ func filterUnmountedVolumes(mountedVolumes sets.String, expectedVolumes []string
// consider the volume setup step for this pod satisfied.
func getExpectedVolumes(pod *v1.Pod) []string {
expectedVolumes := []string{}
if pod == nil {
return expectedVolumes
}
for _, podVolume := range pod.Spec.Volumes {
expectedVolumes = append(expectedVolumes, podVolume.Name)
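
// Editor's note: pulling this hunk together for readability, with the nil
// check hoisted into WaitForAttachAndMount the helper reduces to a plain
// loop. This is the post-change function assembled from the hunk above
// (import assumed to be k8s.io/api/core/v1).
import "k8s.io/api/core/v1"

// getExpectedVolumes returns the names of the volumes that must be mounted
// before the volume setup step for this pod is considered satisfied.
func getExpectedVolumes(pod *v1.Pod) []string {
	expectedVolumes := []string{}
	for _, podVolume := range pod.Spec.Volumes {
		expectedVolumes = append(expectedVolumes, podVolume.Name)
	}
	return expectedVolumes
}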

View File

@ -33,7 +33,6 @@ import (
"k8s.io/kubernetes/pkg/kubelet/config"
"k8s.io/kubernetes/pkg/kubelet/configmap"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
"k8s.io/kubernetes/pkg/kubelet/pod"
kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
podtest "k8s.io/kubernetes/pkg/kubelet/pod/testing"
"k8s.io/kubernetes/pkg/kubelet/secret"
@ -42,8 +41,8 @@ import (
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/types"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
const (
@ -169,7 +168,7 @@ func TestGetExtraSupplementalGroupsForPod(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: "pvA",
Annotations: map[string]string{
volumehelper.VolumeGidAnnotationKey: tc.gidAnnotation,
util.VolumeGidAnnotationKey: tc.gidAnnotation,
},
},
Spec: v1.PersistentVolumeSpec{
@ -188,9 +187,7 @@ func TestGetExtraSupplementalGroupsForPod(t *testing.T) {
manager := newTestVolumeManager(tmpDir, podManager, kubeClient)
stopCh := runVolumeManager(manager)
defer func() {
close(stopCh)
}()
defer close(stopCh)
podManager.SetPods([]*v1.Pod{pod})
@ -213,7 +210,7 @@ func TestGetExtraSupplementalGroupsForPod(t *testing.T) {
}
}
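
// Editor's note: the rename to util.VolumeGidAnnotationKey above is
// mechanical; behaviorally, the volume manager still reads a gid off the PV
// annotation and folds it into the pod's supplemental groups. A hedged sketch
// of that parse step, under the assumption (visible in this test) that the
// annotation value is a base-10 int64; parseGidAnnotation is an illustrative
// helper, not code from this commit.
import (
	"strconv"

	"k8s.io/api/core/v1"
)

// parseGidAnnotation extracts the supplemental gid from a PV, if present and
// well formed.
func parseGidAnnotation(pv *v1.PersistentVolume, key string) (int64, bool) {
	raw, ok := pv.Annotations[key]
	if !ok {
		return 0, false
	}
	gid, err := strconv.ParseInt(raw, 10, 64)
	if err != nil {
		return 0, false
	}
	return gid, true
}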
func newTestVolumeManager(tmpDir string, podManager pod.Manager, kubeClient clientset.Interface) VolumeManager {
func newTestVolumeManager(tmpDir string, podManager kubepod.Manager, kubeClient clientset.Interface) VolumeManager {
plug := &volumetest.FakeVolumePlugin{PluginName: "fake", Host: nil}
fakeRecorder := &record.FakeRecorder{}
plugMgr := &volume.VolumePluginMgr{}