mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-14 02:43:36 +00:00)

vendor update for CSI 0.3.0
2  vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/BUILD  (generated, vendored)
@@ -14,6 +14,7 @@ go_library(
],
importpath = "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache",
deps = [
"//pkg/features:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/operationexecutor:go_default_library",
@@ -21,6 +22,7 @@ go_library(
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
],
)
149  vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/actual_state_of_world.go  (generated, vendored)
@@ -28,6 +28,8 @@ import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
@@ -58,7 +60,7 @@ type ActualStateOfWorld interface {
// volume, reset the pod's remountRequired value.
// If a volume with the name volumeName does not exist in the list of
// attached volumes, an error is returned.
AddPodToVolume(podName volumetypes.UniquePodName, podUID types.UID, volumeName v1.UniqueVolumeName, mounter volume.Mounter, blockVolumeMapper volume.BlockVolumeMapper, outerVolumeSpecName string, volumeGidValue string) error
AddPodToVolume(podName volumetypes.UniquePodName, podUID types.UID, volumeName v1.UniqueVolumeName, mounter volume.Mounter, blockVolumeMapper volume.BlockVolumeMapper, outerVolumeSpecName string, volumeGidValue string, volumeSpec *volume.Spec) error
// MarkRemountRequired marks each volume that is successfully attached and
// mounted for the specified pod as requiring remount (if the plugin for the
@@ -148,6 +150,11 @@ type ActualStateOfWorld interface {
// with pod's unique name. This map can be used to determine which pod is currently
// in actual state of world.
GetPods() map[volumetypes.UniquePodName]bool
// MarkFSResizeRequired marks each volume that is successfully attached and
// mounted for the specified pod as requiring file system resize (if the plugin for the
// volume indicates it requires file system resize).
MarkFSResizeRequired(volumeName v1.UniqueVolumeName, podName volumetypes.UniquePodName)
}
// MountedVolume represents a volume that has successfully been mounted to a pod.
@@ -268,6 +275,13 @@ type mountedPod struct {
// mapper used to block volumes support
blockVolumeMapper volume.BlockVolumeMapper
// spec is the volume spec containing the specification for this volume.
// Used to generate the volume plugin object, and passed to plugin methods.
// In particular, the Unmount method uses spec.Name() as the volumeSpecName
// in the mount path:
// /var/lib/kubelet/pods/{podUID}/volumes/{escapeQualifiedPluginName}/{volumeSpecName}/
volumeSpec *volume.Spec
// outerVolumeSpecName is the volume.Spec.Name() of the volume as referenced
// directly in the pod. If the volume was referenced through a persistent
// volume claim, this contains the volume.Spec.Name() of the persistent
@@ -284,6 +298,10 @@ type mountedPod struct {
// volumeGidValue contains the value of the GID annotation, if present.
volumeGidValue string
// fsResizeRequired indicates the underlying volume has been successfully
// mounted to this pod but its size has been expanded after that.
fsResizeRequired bool
}
func (asw *actualStateOfWorld) MarkVolumeAsAttached(
@@ -303,7 +321,8 @@ func (asw *actualStateOfWorld) MarkVolumeAsMounted(
mounter volume.Mounter,
blockVolumeMapper volume.BlockVolumeMapper,
outerVolumeSpecName string,
volumeGidValue string) error {
volumeGidValue string,
volumeSpec *volume.Spec) error {
return asw.AddPodToVolume(
podName,
podUID,
@@ -311,7 +330,8 @@ func (asw *actualStateOfWorld) MarkVolumeAsMounted(
mounter,
blockVolumeMapper,
outerVolumeSpecName,
volumeGidValue)
volumeGidValue,
volumeSpec)
}
func (asw *actualStateOfWorld) AddVolumeToReportAsAttached(volumeName v1.UniqueVolumeName, nodeName types.NodeName) {
@@ -403,7 +423,8 @@ func (asw *actualStateOfWorld) AddPodToVolume(
mounter volume.Mounter,
blockVolumeMapper volume.BlockVolumeMapper,
outerVolumeSpecName string,
volumeGidValue string) error {
volumeGidValue string,
volumeSpec *volume.Spec) error {
asw.Lock()
defer asw.Unlock()
@@ -423,6 +444,7 @@ func (asw *actualStateOfWorld) AddPodToVolume(
blockVolumeMapper: blockVolumeMapper,
outerVolumeSpecName: outerVolumeSpecName,
volumeGidValue: volumeGidValue,
volumeSpec: volumeSpec,
}
}
@@ -433,6 +455,34 @@ func (asw *actualStateOfWorld) AddPodToVolume(
return nil
}
func (asw *actualStateOfWorld) MarkVolumeAsResized(
podName volumetypes.UniquePodName,
volumeName v1.UniqueVolumeName) error {
asw.Lock()
defer asw.Unlock()
volumeObj, volumeExists := asw.attachedVolumes[volumeName]
if !volumeExists {
return fmt.Errorf(
"no volume with the name %q exists in the list of attached volumes",
volumeName)
}
podObj, podExists := volumeObj.mountedPods[podName]
if !podExists {
return fmt.Errorf(
"no pod with the name %q exists in the mounted pods list of volume %s",
podName,
volumeName)
}
glog.V(5).Infof("Volume %s(OuterVolumeSpecName %s) of pod %s has been resized",
volumeName, podObj.outerVolumeSpecName, podName)
podObj.fsResizeRequired = false
asw.attachedVolumes[volumeName].mountedPods[podName] = podObj
return nil
}
func (asw *actualStateOfWorld) MarkRemountRequired(
podName volumetypes.UniquePodName) {
asw.Lock()
@@ -444,7 +494,7 @@ func (asw *actualStateOfWorld) MarkRemountRequired(
}
volumePlugin, err :=
asw.volumePluginMgr.FindPluginBySpec(volumeObj.spec)
asw.volumePluginMgr.FindPluginBySpec(podObj.volumeSpec)
if err != nil || volumePlugin == nil {
// Log and continue processing
glog.Errorf(
@@ -452,7 +502,7 @@ func (asw *actualStateOfWorld) MarkRemountRequired(
podObj.podName,
podObj.podUID,
volumeObj.volumeName,
volumeObj.spec.Name())
podObj.volumeSpec.Name())
continue
}
@@ -464,6 +514,46 @@ func (asw *actualStateOfWorld) MarkRemountRequired(
}
}
func (asw *actualStateOfWorld) MarkFSResizeRequired(
volumeName v1.UniqueVolumeName,
podName volumetypes.UniquePodName) {
asw.Lock()
defer asw.Unlock()
volumeObj, exist := asw.attachedVolumes[volumeName]
if !exist {
glog.Warningf("MarkFSResizeRequired for volume %s failed as volume not exist", volumeName)
return
}
podObj, exist := volumeObj.mountedPods[podName]
if !exist {
glog.Warningf("MarkFSResizeRequired for volume %s failed "+
"as pod(%s) not exist", volumeName, podName)
return
}
volumePlugin, err :=
asw.volumePluginMgr.FindExpandablePluginBySpec(podObj.volumeSpec)
if err != nil || volumePlugin == nil {
// Log and continue processing
glog.Errorf(
"MarkFSResizeRequired failed to find expandable plugin for pod %q volume: %q (volSpecName: %q)",
podObj.podName,
volumeObj.volumeName,
podObj.volumeSpec.Name())
return
}
if volumePlugin.RequiresFSResize() {
if !podObj.fsResizeRequired {
glog.V(3).Infof("PVC volume %s(OuterVolumeSpecName %s) of pod %s requires file system resize",
volumeName, podObj.outerVolumeSpecName, podName)
podObj.fsResizeRequired = true
}
asw.attachedVolumes[volumeName].mountedPods[podName] = podObj
}
}
func (asw *actualStateOfWorld) SetVolumeGloballyMounted(
volumeName v1.UniqueVolumeName, globallyMounted bool, devicePath, deviceMountPath string) error {
asw.Lock()
@@ -535,8 +625,14 @@ func (asw *actualStateOfWorld) PodExistsInVolume(
}
podObj, podExists := volumeObj.mountedPods[podName]
if podExists && podObj.remountRequired {
return true, volumeObj.devicePath, newRemountRequiredError(volumeObj.volumeName, podObj.podName)
if podExists {
if podObj.remountRequired {
return true, volumeObj.devicePath, newRemountRequiredError(volumeObj.volumeName, podObj.podName)
}
if podObj.fsResizeRequired &&
utilfeature.DefaultFeatureGate.Enabled(features.ExpandInUsePersistentVolumes) {
return true, volumeObj.devicePath, newFsResizeRequiredError(volumeObj.volumeName, podObj.podName)
}
}
return podExists, volumeObj.devicePath, nil
@@ -546,8 +642,8 @@ func (asw *actualStateOfWorld) VolumeExistsWithSpecName(podName volumetypes.Uniq
asw.RLock()
defer asw.RUnlock()
for _, volumeObj := range asw.attachedVolumes {
for name := range volumeObj.mountedPods {
if podName == name && volumeObj.spec.Name() == volumeSpecName {
for name, podObj := range volumeObj.mountedPods {
if podName == name && podObj.volumeSpec.Name() == volumeSpecName {
return true
}
}
@@ -705,6 +801,35 @@ func newRemountRequiredError(
}
}
// fsResizeRequiredError is an error returned when PodExistsInVolume() found
// volume/pod attached/mounted but fsResizeRequired was true, indicating the
// given volume receives an resize request after attached/mounted.
type fsResizeRequiredError struct {
volumeName v1.UniqueVolumeName
podName volumetypes.UniquePodName
}
func (err fsResizeRequiredError) Error() string {
return fmt.Sprintf(
"volumeName %q mounted to %q needs to resize file system",
err.volumeName, err.podName)
}
func newFsResizeRequiredError(
volumeName v1.UniqueVolumeName, podName volumetypes.UniquePodName) error {
return fsResizeRequiredError{
volumeName: volumeName,
podName: podName,
}
}
// IsFSResizeRequiredError returns true if the specified error is a
// fsResizeRequiredError.
func IsFSResizeRequiredError(err error) bool {
_, ok := err.(fsResizeRequiredError)
return ok
}
// getMountedVolume constructs and returns a MountedVolume object from the given
// mountedPod and attachedVolume objects.
func getMountedVolume(
@@ -713,13 +838,13 @@ func getMountedVolume(
MountedVolume: operationexecutor.MountedVolume{
PodName: mountedPod.podName,
VolumeName: attachedVolume.volumeName,
InnerVolumeSpecName: attachedVolume.spec.Name(),
InnerVolumeSpecName: mountedPod.volumeSpec.Name(),
OuterVolumeSpecName: mountedPod.outerVolumeSpecName,
PluginName: attachedVolume.pluginName,
PodUID: mountedPod.podUID,
Mounter: mountedPod.mounter,
BlockVolumeMapper: mountedPod.blockVolumeMapper,
VolumeGidValue: mountedPod.volumeGidValue,
VolumeSpec: attachedVolume.spec,
VolumeSpec: mountedPod.volumeSpec,
DeviceMountPath: attachedVolume.deviceMountPath}}
}
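The hunks above introduce a complete online-resize handshake in the actual state of world: MarkFSResizeRequired sets a per-pod flag, PodExistsInVolume surfaces that flag as a typed fsResizeRequiredError, and MarkVolumeAsResized clears it once the file system has been grown. The sketch below is illustrative only and is not part of the vendored diff; it uses simplified stand-in types for the kubelet cache package to show the intended call order.

// Sketch only: stand-in types mirroring, in simplified form, the methods the
// diff adds to cache.ActualStateOfWorld. Real kubelet code uses the vendored
// package; these placeholders just make the flow self-contained.
package sketch

import "fmt"

type UniqueVolumeName string
type UniquePodName string

type resizeAwareASW interface {
	MarkFSResizeRequired(volumeName UniqueVolumeName, podName UniquePodName)
	PodExistsInVolume(podName UniquePodName, volumeName UniqueVolumeName) (bool, string, error)
	MarkVolumeAsResized(podName UniquePodName, volumeName UniqueVolumeName) error
}

// handleOnlineResize sketches one reconcile pass for a single pod/volume pair.
// isResizeErr stands in for cache.IsFSResizeRequiredError, and expandFS for the
// operation executor's in-place file system expansion.
func handleOnlineResize(asw resizeAwareASW, isResizeErr func(error) bool,
	pod UniquePodName, vol UniqueVolumeName, expandFS func() error) error {
	_, _, err := asw.PodExistsInVolume(pod, vol)
	if !isResizeErr(err) {
		// Either nothing to do, or a different condition (e.g. remount required).
		return err
	}
	if expandErr := expandFS(); expandErr != nil {
		return fmt.Errorf("online expand of %s failed: %v", vol, expandErr)
	}
	// Clearing the flag stops the next PodExistsInVolume call from re-reporting it.
	return asw.MarkVolumeAsResized(pod, vol)
}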
159  vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/actual_state_of_world_test.go  (generated, vendored)
@@ -56,10 +56,13 @@ func Test_MarkVolumeAsAttached_Positive_NewVolume(t *testing.T) {
}
volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
devicePath := "fake/device/path"
generatedVolumeName, _ := util.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)
generatedVolumeName, err := util.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)
if err != nil {
t.Fatalf("GetUniqueVolumeNameFromSpec failed. Expected: <no error> Actual: <%v>", err)
}
// Act
err := asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
err = asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
// Assert
if err != nil {
@@ -143,9 +146,12 @@ func Test_MarkVolumeAsAttached_Positive_ExistingVolume(t *testing.T) {
},
}
volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
generatedVolumeName, _ := util.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)
generatedVolumeName, err := util.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)
if err != nil {
t.Fatalf("GetUniqueVolumeNameFromSpec failed. Expected: <no error> Actual: <%v>", err)
}
err := asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
err = asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
if err != nil {
t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err)
}
@@ -192,6 +198,9 @@ func Test_AddPodToVolume_Positive_ExistingVolumeNewNode(t *testing.T) {
}
volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
generatedVolumeName, err := util.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)
if err != nil {
t.Fatalf("GetUniqueVolumeNameFromSpec failed. Expected: <no error> Actual: <%v>", err)
}
err = asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
if err != nil {
@@ -211,7 +220,7 @@ func Test_AddPodToVolume_Positive_ExistingVolumeNewNode(t *testing.T) {
// Act
err = asw.AddPodToVolume(
podName, pod.UID, generatedVolumeName, mounter, mapper, volumeSpec.Name(), "" /* volumeGidValue */)
podName, pod.UID, generatedVolumeName, mounter, mapper, volumeSpec.Name(), "" /* volumeGidValue */, volumeSpec)
// Assert
if err != nil {
@@ -257,6 +266,9 @@ func Test_AddPodToVolume_Positive_ExistingVolumeExistingNode(t *testing.T) {
volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
generatedVolumeName, err := util.GetUniqueVolumeNameFromSpec(
plugin, volumeSpec)
if err != nil {
t.Fatalf("GetUniqueVolumeNameFromSpec failed. Expected: <no error> Actual: <%v>", err)
}
err = asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
if err != nil {
@@ -275,14 +287,14 @@ func Test_AddPodToVolume_Positive_ExistingVolumeExistingNode(t *testing.T) {
}
err = asw.AddPodToVolume(
podName, pod.UID, generatedVolumeName, mounter, mapper, volumeSpec.Name(), "" /* volumeGidValue */)
podName, pod.UID, generatedVolumeName, mounter, mapper, volumeSpec.Name(), "" /* volumeGidValue */, volumeSpec)
if err != nil {
t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
}
// Act
err = asw.AddPodToVolume(
podName, pod.UID, generatedVolumeName, mounter, mapper, volumeSpec.Name(), "" /* volumeGidValue */)
podName, pod.UID, generatedVolumeName, mounter, mapper, volumeSpec.Name(), "" /* volumeGidValue */, volumeSpec)
// Assert
if err != nil {
@@ -296,6 +308,119 @@ func Test_AddPodToVolume_Positive_ExistingVolumeExistingNode(t *testing.T) {
verifyVolumeExistsWithSpecNameInVolumeAsw(t, podName, volumeSpec.Name(), asw)
}
// Populates data struct with a volume
// Calls AddPodToVolume() twice to add the same pod to the volume
// Verifies volume/pod combo exist using PodExistsInVolume() and the second call
// did not fail.
func Test_AddTwoPodsToVolume_Positive(t *testing.T) {
// Arrange
volumePluginMgr, plugin := volumetesting.GetTestVolumePluginMgr(t)
asw := NewActualStateOfWorld("mynode" /* nodeName */, volumePluginMgr)
devicePath := "fake/device/path"
pod1 := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod1",
UID: "pod1uid",
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "volume-name-1",
VolumeSource: v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: "fake-device1",
},
},
},
},
},
}
pod2 := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod2",
UID: "pod2uid",
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "volume-name-2",
VolumeSource: v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: "fake-device1",
},
},
},
},
},
}
volumeSpec1 := &volume.Spec{Volume: &pod1.Spec.Volumes[0]}
volumeSpec2 := &volume.Spec{Volume: &pod2.Spec.Volumes[0]}
generatedVolumeName1, err := util.GetUniqueVolumeNameFromSpec(
plugin, volumeSpec1)
generatedVolumeName2, err := util.GetUniqueVolumeNameFromSpec(
plugin, volumeSpec2)
if generatedVolumeName1 != generatedVolumeName2 {
t.Fatalf(
"Unique volume names should be the same. unique volume name 1: <%q> unique volume name 2: <%q>, spec1 %v, spec2 %v",
generatedVolumeName1,
generatedVolumeName2, volumeSpec1, volumeSpec2)
}
err = asw.MarkVolumeAsAttached(generatedVolumeName1, volumeSpec1, "" /* nodeName */, devicePath)
if err != nil {
t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err)
}
podName1 := util.GetUniquePodName(pod1)
mounter1, err := plugin.NewMounter(volumeSpec1, pod1, volume.VolumeOptions{})
if err != nil {
t.Fatalf("NewMounter failed. Expected: <no error> Actual: <%v>", err)
}
mapper1, err := plugin.NewBlockVolumeMapper(volumeSpec1, pod1, volume.VolumeOptions{})
if err != nil {
t.Fatalf("NewBlockVolumeMapper failed. Expected: <no error> Actual: <%v>", err)
}
err = asw.AddPodToVolume(
podName1, pod1.UID, generatedVolumeName1, mounter1, mapper1, volumeSpec1.Name(), "" /* volumeGidValue */, volumeSpec1)
if err != nil {
t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
}
podName2 := util.GetUniquePodName(pod2)
mounter2, err := plugin.NewMounter(volumeSpec2, pod2, volume.VolumeOptions{})
if err != nil {
t.Fatalf("NewMounter failed. Expected: <no error> Actual: <%v>", err)
}
mapper2, err := plugin.NewBlockVolumeMapper(volumeSpec2, pod2, volume.VolumeOptions{})
if err != nil {
t.Fatalf("NewBlockVolumeMapper failed. Expected: <no error> Actual: <%v>", err)
}
err = asw.AddPodToVolume(
podName2, pod2.UID, generatedVolumeName1, mounter2, mapper2, volumeSpec2.Name(), "" /* volumeGidValue */, volumeSpec2)
if err != nil {
t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
}
verifyVolumeExistsAsw(t, generatedVolumeName1, true /* shouldExist */, asw)
verifyVolumeDoesntExistInUnmountedVolumes(t, generatedVolumeName1, asw)
verifyVolumeDoesntExistInGloballyMountedVolumes(t, generatedVolumeName1, asw)
verifyPodExistsInVolumeAsw(t, podName1, generatedVolumeName1, "fake/device/path" /* expectedDevicePath */, asw)
verifyVolumeExistsWithSpecNameInVolumeAsw(t, podName1, volumeSpec1.Name(), asw)
verifyPodExistsInVolumeAsw(t, podName2, generatedVolumeName2, "fake/device/path" /* expectedDevicePath */, asw)
verifyVolumeExistsWithSpecNameInVolumeAsw(t, podName2, volumeSpec2.Name(), asw)
verifyVolumeSpecNameInVolumeAsw(t, podName1, []*volume.Spec{volumeSpec1}, asw)
verifyVolumeSpecNameInVolumeAsw(t, podName2, []*volume.Spec{volumeSpec2}, asw)
}
// Calls AddPodToVolume() to add pod to empty data struct
// Verifies call fails with "volume does not exist" error.
func Test_AddPodToVolume_Negative_VolumeDoesntExist(t *testing.T) {
@@ -356,7 +481,7 @@ func Test_AddPodToVolume_Negative_VolumeDoesntExist(t *testing.T) {
// Act
err = asw.AddPodToVolume(
podName, pod.UID, volumeName, mounter, mapper, volumeSpec.Name(), "" /* volumeGidValue */)
podName, pod.UID, volumeName, mounter, mapper, volumeSpec.Name(), "" /* volumeGidValue */, volumeSpec)
// Assert
if err == nil {
@@ -405,6 +530,9 @@ func Test_MarkDeviceAsMounted_Positive_NewVolume(t *testing.T) {
devicePath := "fake/device/path"
deviceMountPath := "fake/device/mount/path"
generatedVolumeName, err := util.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)
if err != nil {
t.Fatalf("GetUniqueVolumeNameFromSpec failed. Expected: <no error> Actual: <%v>", err)
}
err = asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
if err != nil {
@@ -580,3 +708,18 @@ func verifyVolumeDoesntExistWithSpecNameInVolumeAsw(
podExistsInVolume)
}
}
func verifyVolumeSpecNameInVolumeAsw(
t *testing.T,
podToCheck volumetypes.UniquePodName,
volumeSpecs []*volume.Spec,
asw ActualStateOfWorld) {
mountedVolumes :=
asw.GetMountedVolumesForPod(podToCheck)
for i, volume := range mountedVolumes {
if volume.InnerVolumeSpecName != volumeSpecs[i].Name() {
t.Fatalf("Volume spec name does not match Expected: <%q> Actual: <%q>", volumeSpecs[i].Name(), volume.InnerVolumeSpecName)
}
}
}
14  vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/desired_state_of_world.go  (generated, vendored)
@@ -171,7 +171,7 @@ type podToMount struct {
// generate the volume plugin object, and passed to plugin methods.
// For non-PVC volumes this is the same as defined in the pod object. For
// PVC volumes it is from the dereferenced PV object.
spec *volume.Spec
volumeSpec *volume.Spec
// outerVolumeSpecName is the volume.Spec.Name() of the volume as referenced
// directly in the pod. If the volume was referenced through a persistent
@@ -220,16 +220,14 @@ func (dsw *desiredStateOfWorld) AddPodToVolume(
volumeName = util.GetUniqueVolumeNameForNonAttachableVolume(podName, volumePlugin, volumeSpec)
}
volumeObj, volumeExists := dsw.volumesToMount[volumeName]
if !volumeExists {
volumeObj = volumeToMount{
if _, volumeExists := dsw.volumesToMount[volumeName]; !volumeExists {
dsw.volumesToMount[volumeName] = volumeToMount{
volumeName: volumeName,
podsToMount: make(map[types.UniquePodName]podToMount),
pluginIsAttachable: attachable,
volumeGidValue: volumeGidValue,
reportedInUse: false,
}
dsw.volumesToMount[volumeName] = volumeObj
}
// Create new podToMount object. If it already exists, it is refreshed with
@@ -238,7 +236,7 @@ func (dsw *desiredStateOfWorld) AddPodToVolume(
dsw.volumesToMount[volumeName].podsToMount[podName] = podToMount{
podName: podName,
pod: pod,
spec: volumeSpec,
volumeSpec: volumeSpec,
outerVolumeSpecName: outerVolumeSpecName,
}
return volumeName, nil
@@ -314,7 +312,7 @@ func (dsw *desiredStateOfWorld) VolumeExistsWithSpecName(podName types.UniquePod
defer dsw.RUnlock()
for _, volumeObj := range dsw.volumesToMount {
for name, podObj := range volumeObj.podsToMount {
if podName == name && podObj.spec.Name() == volumeSpecName {
if podName == name && podObj.volumeSpec.Name() == volumeSpecName {
return true
}
}
@@ -351,7 +349,7 @@ func (dsw *desiredStateOfWorld) GetVolumesToMount() []VolumeToMount {
VolumeName: volumeName,
PodName: podName,
Pod: podObj.pod,
VolumeSpec: podObj.spec,
VolumeSpec: podObj.volumeSpec,
PluginIsAttachable: volumeObj.pluginIsAttachable,
OuterVolumeSpecName: podObj.outerVolumeSpecName,
VolumeGidValue: volumeObj.volumeGidValue,
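Besides the spec → volumeSpec rename, the AddPodToVolume hunk above swaps a read-modify-write of the volumesToMount map for a direct insert-if-absent. A tiny standalone comparison of the two idioms follows; it is illustrative only, and the map and value types here are made up.

// Illustrative only; not part of the vendored code.
package sketch

type entry struct{ name string }

// Before: read the value out, conditionally build it, then write it back.
func addIfAbsentOld(m map[string]entry, key string) {
	e, exists := m[key]
	if !exists {
		e = entry{name: key}
		m[key] = e
	}
}

// After: test for presence only, and insert in a single statement.
func addIfAbsentNew(m map[string]entry, key string) {
	if _, exists := m[key]; !exists {
		m[key] = entry{name: key}
	}
}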
3  vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/BUILD  (generated, vendored)
@@ -25,6 +25,7 @@ go_library(
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
@@ -49,6 +50,7 @@ go_test(
srcs = ["desired_state_of_world_populator_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/features:go_default_library",
"//pkg/kubelet/configmap:go_default_library",
"//pkg/kubelet/container/testing:go_default_library",
"//pkg/kubelet/pod:go_default_library",
@@ -61,6 +63,7 @@ go_test(
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/types:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go  (generated, vendored)
@@ -30,6 +30,7 @@ import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
clientset "k8s.io/client-go/kubernetes"
@@ -182,12 +183,26 @@ func (dswp *desiredStateOfWorldPopulator) isPodTerminated(pod *v1.Pod) bool {
// Iterate through all pods and add to desired state of world if they don't
// exist but should
func (dswp *desiredStateOfWorldPopulator) findAndAddNewPods() {
// Map unique pod name to outer volume name to MountedVolume.
mountedVolumesForPod := make(map[volumetypes.UniquePodName]map[string]cache.MountedVolume)
if utilfeature.DefaultFeatureGate.Enabled(features.ExpandInUsePersistentVolumes) {
for _, mountedVolume := range dswp.actualStateOfWorld.GetMountedVolumes() {
mountedVolumes, exist := mountedVolumesForPod[mountedVolume.PodName]
if !exist {
mountedVolumes = make(map[string]cache.MountedVolume)
mountedVolumesForPod[mountedVolume.PodName] = mountedVolumes
}
mountedVolumes[mountedVolume.OuterVolumeSpecName] = mountedVolume
}
}
processedVolumesForFSResize := sets.NewString()
for _, pod := range dswp.podManager.GetPods() {
if dswp.isPodTerminated(pod) {
// Do not (re)add volumes for terminated pods
continue
}
dswp.processPodVolumes(pod)
dswp.processPodVolumes(pod, mountedVolumesForPod, processedVolumesForFSResize)
}
}
@@ -245,6 +260,10 @@ func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() {
continue
}
if !dswp.actualStateOfWorld.VolumeExists(volumeToMount.VolumeName) && podExists {
glog.V(4).Infof(volumeToMount.GenerateMsgDetailed("Actual state has not yet has this information skip removing volume from desired state", ""))
continue
}
glog.V(4).Infof(volumeToMount.GenerateMsgDetailed("Removing volume from desired state", ""))
dswp.desiredStateOfWorld.DeletePodFromVolume(
@@ -255,7 +274,10 @@ func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() {
// processPodVolumes processes the volumes in the given pod and adds them to the
// desired state of the world.
func (dswp *desiredStateOfWorldPopulator) processPodVolumes(pod *v1.Pod) {
func (dswp *desiredStateOfWorldPopulator) processPodVolumes(
pod *v1.Pod,
mountedVolumesForPod map[volumetypes.UniquePodName]map[string]cache.MountedVolume,
processedVolumesForFSResize sets.String) {
if pod == nil {
return
}
@@ -270,7 +292,7 @@ func (dswp *desiredStateOfWorldPopulator) processPodVolumes(pod *v1.Pod) {
// Process volume spec for each volume defined in pod
for _, podVolume := range pod.Spec.Volumes {
volumeSpec, volumeGidValue, err :=
pvc, volumeSpec, volumeGidValue, err :=
dswp.createVolumeSpec(podVolume, pod.Name, pod.Namespace, mountsMap, devicesMap)
if err != nil {
glog.Errorf(
@@ -300,6 +322,11 @@ func (dswp *desiredStateOfWorldPopulator) processPodVolumes(pod *v1.Pod) {
podVolume.Name,
volumeSpec.Name(),
uniquePodName)
if utilfeature.DefaultFeatureGate.Enabled(features.ExpandInUsePersistentVolumes) {
dswp.checkVolumeFSResize(pod, podVolume, pvc, volumeSpec,
uniquePodName, mountedVolumesForPod, processedVolumesForFSResize)
}
}
// some of the volume additions may have failed, should not mark this pod as fully processed
@@ -312,6 +339,106 @@ func (dswp *desiredStateOfWorldPopulator) processPodVolumes(pod *v1.Pod) {
}
// checkVolumeFSResize checks whether a PVC mounted by the pod requires file
// system resize or not. If so, marks this volume as fsResizeRequired in ASW.
// - mountedVolumesForPod stores all mounted volumes in ASW, because online
// volume resize only considers mounted volumes.
// - processedVolumesForFSResize stores all volumes we have checked in current loop,
// because file system resize operation is a global operation for volume, so
// we only need to check it once if more than one pod use it.
func (dswp *desiredStateOfWorldPopulator) checkVolumeFSResize(
pod *v1.Pod,
podVolume v1.Volume,
pvc *v1.PersistentVolumeClaim,
volumeSpec *volume.Spec,
uniquePodName volumetypes.UniquePodName,
mountedVolumesForPod map[volumetypes.UniquePodName]map[string]cache.MountedVolume,
processedVolumesForFSResize sets.String) {
if podVolume.PersistentVolumeClaim == nil {
// Only PVC supports resize operation.
return
}
uniqueVolumeName, exist := getUniqueVolumeName(uniquePodName, podVolume.Name, mountedVolumesForPod)
if !exist {
// Volume not exist in ASW, we assume it hasn't been mounted yet. If it needs resize,
// it will be handled as offline resize(if it indeed hasn't been mounted yet),
// or online resize in subsequent loop(after we confirm it has been mounted).
return
}
fsVolume, err := util.CheckVolumeModeFilesystem(volumeSpec)
if err != nil {
glog.Errorf("Check volume mode failed for volume %s(OuterVolumeSpecName %s): %v",
uniqueVolumeName, podVolume.Name, err)
return
}
if !fsVolume {
glog.V(5).Infof("Block mode volume needn't to check file system resize request")
return
}
if processedVolumesForFSResize.Has(string(uniqueVolumeName)) {
// File system resize operation is a global operation for volume,
// so we only need to check it once if more than one pod use it.
return
}
if mountedReadOnlyByPod(podVolume, pod) {
// This volume is used as read only by this pod, we don't perform resize for read only volumes.
glog.V(5).Infof("Skip file system resize check for volume %s in pod %s/%s "+
"as the volume is mounted as readonly", podVolume.Name, pod.Namespace, pod.Name)
return
}
if volumeRequiresFSResize(pvc, volumeSpec.PersistentVolume) {
dswp.actualStateOfWorld.MarkFSResizeRequired(uniqueVolumeName, uniquePodName)
}
processedVolumesForFSResize.Insert(string(uniqueVolumeName))
}
func mountedReadOnlyByPod(podVolume v1.Volume, pod *v1.Pod) bool {
if podVolume.PersistentVolumeClaim.ReadOnly {
return true
}
for _, container := range pod.Spec.InitContainers {
if !mountedReadOnlyByContainer(podVolume.Name, &container) {
return false
}
}
for _, container := range pod.Spec.Containers {
if !mountedReadOnlyByContainer(podVolume.Name, &container) {
return false
}
}
return true
}
func mountedReadOnlyByContainer(volumeName string, container *v1.Container) bool {
for _, volumeMount := range container.VolumeMounts {
if volumeMount.Name == volumeName && !volumeMount.ReadOnly {
return false
}
}
return true
}
func getUniqueVolumeName(
podName volumetypes.UniquePodName,
outerVolumeSpecName string,
mountedVolumesForPod map[volumetypes.UniquePodName]map[string]cache.MountedVolume) (v1.UniqueVolumeName, bool) {
mountedVolumes, exist := mountedVolumesForPod[podName]
if !exist {
return "", false
}
mountedVolume, exist := mountedVolumes[outerVolumeSpecName]
if !exist {
return "", false
}
return mountedVolume.VolumeName, true
}
func volumeRequiresFSResize(pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) bool {
capacity := pvc.Status.Capacity[v1.ResourceStorage]
requested := pv.Spec.Capacity[v1.ResourceStorage]
return requested.Cmp(capacity) > 0
}
// podPreviouslyProcessed returns true if the volumes for this pod have already
// been processed by the populator
func (dswp *desiredStateOfWorldPopulator) podPreviouslyProcessed(
@@ -346,7 +473,7 @@ func (dswp *desiredStateOfWorldPopulator) deleteProcessedPod(
// specified volume. It dereference any PVC to get PV objects, if needed.
// Returns an error if unable to obtain the volume at this time.
func (dswp *desiredStateOfWorldPopulator) createVolumeSpec(
podVolume v1.Volume, podName string, podNamespace string, mountsMap map[string]bool, devicesMap map[string]bool) (*volume.Spec, string, error) {
podVolume v1.Volume, podName string, podNamespace string, mountsMap map[string]bool, devicesMap map[string]bool) (*v1.PersistentVolumeClaim, *volume.Spec, string, error) {
if pvcSource :=
podVolume.VolumeSource.PersistentVolumeClaim; pvcSource != nil {
glog.V(5).Infof(
@@ -355,15 +482,16 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec(
pvcSource.ClaimName)
// If podVolume is a PVC, fetch the real PV behind the claim
pvName, pvcUID, err := dswp.getPVCExtractPV(
pvc, err := dswp.getPVCExtractPV(
podNamespace, pvcSource.ClaimName)
if err != nil {
return nil, "", fmt.Errorf(
return nil, nil, "", fmt.Errorf(
"error processing PVC %q/%q: %v",
podNamespace,
pvcSource.ClaimName,
err)
}
pvName, pvcUID := pvc.Spec.VolumeName, pvc.UID
glog.V(5).Infof(
"Found bound PV for PVC (ClaimName %q/%q pvcUID %v): pvName=%q",
@@ -376,7 +504,7 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec(
volumeSpec, volumeGidValue, err :=
dswp.getPVSpec(pvName, pvcSource.ReadOnly, pvcUID)
if err != nil {
return nil, "", fmt.Errorf(
return nil, nil, "", fmt.Errorf(
"error processing PVC %q/%q: %v",
podNamespace,
pvcSource.ClaimName,
@@ -395,11 +523,11 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec(
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
volumeMode, err := util.GetVolumeMode(volumeSpec)
if err != nil {
return nil, "", err
return nil, nil, "", err
}
// Error if a container has volumeMounts but the volumeMode of PVC isn't Filesystem
if mountsMap[podVolume.Name] && volumeMode != v1.PersistentVolumeFilesystem {
return nil, "", fmt.Errorf(
return nil, nil, "", fmt.Errorf(
"Volume %q has volumeMode %q, but is specified in volumeMounts for pod %q/%q",
podVolume.Name,
volumeMode,
@@ -408,7 +536,7 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec(
}
// Error if a container has volumeDevices but the volumeMode of PVC isn't Block
if devicesMap[podVolume.Name] && volumeMode != v1.PersistentVolumeBlock {
return nil, "", fmt.Errorf(
return nil, nil, "", fmt.Errorf(
"Volume %q has volumeMode %q, but is specified in volumeDevices for pod %q/%q",
podVolume.Name,
volumeMode,
@@ -416,13 +544,13 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec(
podName)
}
}
return volumeSpec, volumeGidValue, nil
return pvc, volumeSpec, volumeGidValue, nil
}
// Do not return the original volume object, since the source could mutate it
clonedPodVolume := podVolume.DeepCopy()
return volume.NewSpecFromVolume(clonedPodVolume), "", nil
return nil, volume.NewSpecFromVolume(clonedPodVolume), "", nil
}
// getPVCExtractPV fetches the PVC object with the given namespace and name from
@@ -430,11 +558,11 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec(
// it is pointing to and returns it.
// An error is returned if the PVC object's phase is not "Bound".
func (dswp *desiredStateOfWorldPopulator) getPVCExtractPV(
namespace string, claimName string) (string, types.UID, error) {
namespace string, claimName string) (*v1.PersistentVolumeClaim, error) {
pvc, err :=
dswp.kubeClient.CoreV1().PersistentVolumeClaims(namespace).Get(claimName, metav1.GetOptions{})
if err != nil || pvc == nil {
return "", "", fmt.Errorf(
return nil, fmt.Errorf(
"failed to fetch PVC %s/%s from API server. err=%v",
namespace,
claimName,
@@ -451,7 +579,7 @@ func (dswp *desiredStateOfWorldPopulator) getPVCExtractPV(
// It should happen only in very rare case when scheduler schedules
// a pod and user deletes a PVC that's used by it at the same time.
if pvc.ObjectMeta.DeletionTimestamp != nil {
return "", "", fmt.Errorf(
return nil, fmt.Errorf(
"can't start pod because PVC %s/%s is being deleted",
namespace,
claimName)
@@ -460,7 +588,7 @@ func (dswp *desiredStateOfWorldPopulator) getPVCExtractPV(
if pvc.Status.Phase != v1.ClaimBound || pvc.Spec.VolumeName == "" {
return "", "", fmt.Errorf(
return nil, fmt.Errorf(
"PVC %s/%s has non-bound phase (%q) or empty pvc.Spec.VolumeName (%q)",
namespace,
claimName,
@@ -468,7 +596,7 @@ func (dswp *desiredStateOfWorldPopulator) getPVCExtractPV(
pvc.Spec.VolumeName)
}
return pvc.Spec.VolumeName, pvc.UID, nil
return pvc, nil
}
// getPVSpec fetches the PV object with the given name from the API server
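volumeRequiresFSResize above reduces the online-resize decision to a resource.Quantity comparison: the PV spec already records the expanded size, while pvc.Status.Capacity still reflects what is actually provisioned and mounted. A small self-contained check in the same spirit follows; it is illustrative only and the sizes are made-up values.

// Illustrative only: the Quantity comparison that drives the resize-required check.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	pvCapacity := resource.MustParse("2Gi")  // size recorded on the PV after an expand request
	pvcReported := resource.MustParse("1Gi") // size the PVC status still reports for the mounted volume

	// Cmp returns a positive value when the PV is larger than what the PVC
	// status shows, which is the condition used to flag a file system resize.
	if pvCapacity.Cmp(pvcReported) > 0 {
		fmt.Println("file system resize required")
	}
}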
vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator_test.go  (generated, vendored)
@@ -20,12 +20,16 @@ import (
"testing"
"time"
"fmt"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/configmap"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
@@ -111,6 +115,7 @@ func TestFindAndAddNewPods_FindAndRemoveDeletedPods(t *testing.T) {
}
podGet.Status.Phase = v1.PodFailed
fakePodManager.DeletePod(pod)
//pod is added to fakePodManager but fakeRuntime can not get the pod,so here findAndRemoveDeletedPods() will remove the pod and volumes it is mounted
dswp.findAndRemoveDeletedPods()
@@ -220,7 +225,7 @@ func TestFindAndAddNewPods_FindAndRemoveDeletedPods_Valid_Block_VolumeDevices(t
t.Fatalf("Failed to get pod by pod name: %s and namespace: %s", pod.Name, pod.Namespace)
}
podGet.Status.Phase = v1.PodFailed
fakePodManager.DeletePod(pod)
//pod is added to fakePodManager but fakeRuntime can not get the pod,so here findAndRemoveDeletedPods() will remove the pod and volumes it is mounted
dswp.findAndRemoveDeletedPods()
@@ -293,7 +298,7 @@ func TestCreateVolumeSpec_Valid_File_VolumeMounts(t *testing.T) {
fakePodManager.AddPod(pod)
mountsMap, devicesMap := dswp.makeVolumeMap(pod.Spec.Containers)
volumeSpec, _, err :=
_, volumeSpec, _, err :=
dswp.createVolumeSpec(pod.Spec.Volumes[0], pod.Name, pod.Namespace, mountsMap, devicesMap)
// Assert
@@ -342,7 +347,7 @@ func TestCreateVolumeSpec_Valid_Block_VolumeDevices(t *testing.T) {
fakePodManager.AddPod(pod)
mountsMap, devicesMap := dswp.makeVolumeMap(pod.Spec.Containers)
volumeSpec, _, err :=
_, volumeSpec, _, err :=
dswp.createVolumeSpec(pod.Spec.Volumes[0], pod.Name, pod.Namespace, mountsMap, devicesMap)
// Assert
@@ -394,7 +399,7 @@ func TestCreateVolumeSpec_Invalid_File_VolumeDevices(t *testing.T) {
fakePodManager.AddPod(pod)
mountsMap, devicesMap := dswp.makeVolumeMap(pod.Spec.Containers)
volumeSpec, _, err :=
_, volumeSpec, _, err :=
dswp.createVolumeSpec(pod.Spec.Volumes[0], pod.Name, pod.Namespace, mountsMap, devicesMap)
// Assert
@@ -446,7 +451,7 @@ func TestCreateVolumeSpec_Invalid_Block_VolumeMounts(t *testing.T) {
fakePodManager.AddPod(pod)
mountsMap, devicesMap := dswp.makeVolumeMap(pod.Spec.Containers)
volumeSpec, _, err :=
_, volumeSpec, _, err :=
dswp.createVolumeSpec(pod.Spec.Volumes[0], pod.Name, pod.Namespace, mountsMap, devicesMap)
// Assert
@@ -458,6 +463,155 @@ func TestCreateVolumeSpec_Invalid_Block_VolumeMounts(t *testing.T) {
utilfeature.DefaultFeatureGate.Set("BlockVolume=false")
}
func TestCheckVolumeFSResize(t *testing.T) {
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "dswp-test-volume-name",
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{RBD: &v1.RBDPersistentVolumeSource{}},
Capacity: volumeCapacity(1),
ClaimRef: &v1.ObjectReference{Namespace: "ns", Name: "file-bound"},
},
}
pvc := &v1.PersistentVolumeClaim{
Spec: v1.PersistentVolumeClaimSpec{
VolumeName: "dswp-test-volume-name",
Resources: v1.ResourceRequirements{
Requests: volumeCapacity(1),
},
},
Status: v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimBound,
Capacity: volumeCapacity(1),
},
}
dswp, fakePodManager, fakeDSW := createDswpWithVolume(t, pv, pvc)
fakeASW := dswp.actualStateOfWorld
// create pod
containers := []v1.Container{
{
VolumeMounts: []v1.VolumeMount{
{
Name: "dswp-test-volume-name",
MountPath: "/mnt",
},
},
},
}
pod := createPodWithVolume("dswp-test-pod", "dswp-test-volume-name", "file-bound", containers)
uniquePodName := types.UniquePodName(pod.UID)
uniqueVolumeName := v1.UniqueVolumeName("fake-plugin/" + pod.Spec.Volumes[0].Name)
fakePodManager.AddPod(pod)
// Fill the dsw to contains volumes and pods.
dswp.findAndAddNewPods()
reconcileASW(fakeASW, fakeDSW, t)
// No resize request for volume, volumes in ASW shouldn't be marked as fsResizeRequired.
setExpandOnlinePersistentVolumesFeatureGate("true", t)
resizeRequiredVolumes := reprocess(dswp, uniquePodName, fakeDSW, fakeASW)
if len(resizeRequiredVolumes) > 0 {
t.Fatalf("No resize request for any volumes, but found resize required volumes in ASW: %v", resizeRequiredVolumes)
}
// Add a resize request to volume.
pv.Spec.Capacity = volumeCapacity(2)
pvc.Spec.Resources.Requests = volumeCapacity(2)
// Disable the feature gate, so volume shouldn't be marked as fsResizeRequired.
setExpandOnlinePersistentVolumesFeatureGate("false", t)
resizeRequiredVolumes = reprocess(dswp, uniquePodName, fakeDSW, fakeASW)
if len(resizeRequiredVolumes) > 0 {
t.Fatalf("Feature gate disabled, but found resize required volumes in ASW: %v", resizeRequiredVolumes)
}
// Make volume used as ReadOnly, so volume shouldn't be marked as fsResizeRequired.
setExpandOnlinePersistentVolumesFeatureGate("true", t)
pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true
resizeRequiredVolumes = reprocess(dswp, uniquePodName, fakeDSW, fakeASW)
if len(resizeRequiredVolumes) > 0 {
t.Fatalf("volume mounted as ReadOnly, but found resize required volumes in ASW: %v", resizeRequiredVolumes)
}
// Clear ASW, so volume shouldn't be marked as fsResizeRequired because they are not mounted.
pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = false
clearASW(fakeASW, fakeDSW, t)
resizeRequiredVolumes = reprocess(dswp, uniquePodName, fakeDSW, fakeASW)
if len(resizeRequiredVolumes) > 0 {
t.Fatalf("volume hasn't been mounted, but found resize required volumes in ASW: %v", resizeRequiredVolumes)
}
// volume in ASW should be marked as fsResizeRequired.
reconcileASW(fakeASW, fakeDSW, t)
resizeRequiredVolumes = reprocess(dswp, uniquePodName, fakeDSW, fakeASW)
if len(resizeRequiredVolumes) == 0 {
t.Fatalf("Request resize for volume, but volume in ASW hasn't been marked as fsResizeRequired")
}
if len(resizeRequiredVolumes) != 1 {
t.Fatalf("Some unexpected volumes are marked as fsResizeRequired: %v", resizeRequiredVolumes)
}
if resizeRequiredVolumes[0] != uniqueVolumeName {
t.Fatalf("Mark wrong volume as fsResizeRequired: %s", resizeRequiredVolumes[0])
}
}
func volumeCapacity(size int) v1.ResourceList {
return v1.ResourceList{v1.ResourceStorage: resource.MustParse(fmt.Sprintf("%dGi", size))}
}
func setExpandOnlinePersistentVolumesFeatureGate(value string, t *testing.T) {
err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%s", features.ExpandInUsePersistentVolumes, value))
if err != nil {
t.Fatalf("Set ExpandInUsePersistentVolumes feature gate to %s failed: %v", value, err)
}
}
func reconcileASW(asw cache.ActualStateOfWorld, dsw cache.DesiredStateOfWorld, t *testing.T) {
for _, volumeToMount := range dsw.GetVolumesToMount() {
err := asw.MarkVolumeAsAttached(volumeToMount.VolumeName, volumeToMount.VolumeSpec, "", "")
if err != nil {
t.Fatalf("Unexpected error when MarkVolumeAsAttached: %v", err)
}
err = asw.MarkVolumeAsMounted(volumeToMount.PodName, volumeToMount.Pod.UID,
volumeToMount.VolumeName, nil, nil, volumeToMount.OuterVolumeSpecName, volumeToMount.VolumeGidValue, volumeToMount.VolumeSpec)
if err != nil {
t.Fatalf("Unexpected error when MarkVolumeAsMounted: %v", err)
}
}
}
func clearASW(asw cache.ActualStateOfWorld, dsw cache.DesiredStateOfWorld, t *testing.T) {
for _, volumeToMount := range dsw.GetVolumesToMount() {
err := asw.MarkVolumeAsUnmounted(volumeToMount.PodName, volumeToMount.VolumeName)
if err != nil {
t.Fatalf("Unexpected error when MarkVolumeAsUnmounted: %v", err)
}
}
for _, volumeToMount := range dsw.GetVolumesToMount() {
asw.MarkVolumeAsDetached(volumeToMount.VolumeName, "")
}
}
func reprocess(dswp *desiredStateOfWorldPopulator, uniquePodName types.UniquePodName,
dsw cache.DesiredStateOfWorld, asw cache.ActualStateOfWorld) []v1.UniqueVolumeName {
dswp.ReprocessPod(uniquePodName)
dswp.findAndAddNewPods()
return getResizeRequiredVolumes(dsw, asw)
}
func getResizeRequiredVolumes(dsw cache.DesiredStateOfWorld, asw cache.ActualStateOfWorld) []v1.UniqueVolumeName {
resizeRequiredVolumes := []v1.UniqueVolumeName{}
for _, volumeToMount := range dsw.GetVolumesToMount() {
_, _, err := asw.PodExistsInVolume(volumeToMount.PodName, volumeToMount.VolumeName)
if cache.IsFSResizeRequiredError(err) {
resizeRequiredVolumes = append(resizeRequiredVolumes, volumeToMount.VolumeName)
}
}
return resizeRequiredVolumes
}
func verifyVolumeExistsInVolumesToMount(t *testing.T, expectedVolumeName v1.UniqueVolumeName, expectReportedInUse bool, dsw cache.DesiredStateOfWorld) {
volumesToMount := dsw.GetVolumesToMount()
for _, volume := range volumesToMount {
@@ -522,7 +676,7 @@ func createDswpWithVolume(t *testing.T, pv *v1.PersistentVolume, pvc *v1.Persist
fakeSecretManager := secret.NewFakeManager()
fakeConfigMapManager := configmap.NewFakeManager()
fakePodManager := kubepod.NewBasicPodManager(
podtest.NewFakeMirrorClient(), fakeSecretManager, fakeConfigMapManager)
podtest.NewFakeMirrorClient(), fakeSecretManager, fakeConfigMapManager, podtest.NewMockCheckpointManager())
fakesDSW := cache.NewDesiredStateOfWorld(fakeVolumePluginMgr)
fakeASW := cache.NewActualStateOfWorld("fake", fakeVolumePluginMgr)
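setExpandOnlinePersistentVolumesFeatureGate in the test above flips the process-wide DefaultFeatureGate and leaves it set. When tests share that global gate, it is common to capture the previous value and restore it when the test finishes. A hedged sketch of that pattern follows; the helper name is made up, and it reuses only the Enabled and Set calls already present in this diff.

// Illustrative only: toggle the ExpandInUsePersistentVolumes gate for one test
// and restore the previous setting afterwards.
package sketch

import (
	"fmt"
	"testing"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/kubernetes/pkg/features"
)

// setGateForTest flips the gate and returns a restore function; call it as:
//   defer setGateForTest(t, true)()
func setGateForTest(t *testing.T, enabled bool) func() {
	prev := utilfeature.DefaultFeatureGate.Enabled(features.ExpandInUsePersistentVolumes)
	if err := utilfeature.DefaultFeatureGate.Set(
		fmt.Sprintf("%s=%t", features.ExpandInUsePersistentVolumes, enabled)); err != nil {
		t.Fatalf("setting ExpandInUsePersistentVolumes=%t failed: %v", enabled, err)
	}
	return func() {
		if err := utilfeature.DefaultFeatureGate.Set(
			fmt.Sprintf("%s=%t", features.ExpandInUsePersistentVolumes, prev)); err != nil {
			t.Fatalf("restoring ExpandInUsePersistentVolumes=%t failed: %v", prev, err)
		}
	}
}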
1  vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/BUILD  (generated, vendored)
@@ -38,6 +38,7 @@ go_test(
srcs = ["reconciler_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/features:go_default_library",
"//pkg/kubelet/volumemanager/cache:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
55
vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler.go
generated
vendored
55
vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler.go
generated
vendored
@ -169,7 +169,7 @@ func (rc *reconciler) reconcile() {
|
||||
// Volume is mounted, unmount it
|
||||
glog.V(5).Infof(mountedVolume.GenerateMsgDetailed("Starting operationExecutor.UnmountVolume", ""))
|
||||
err := rc.operationExecutor.UnmountVolume(
|
||||
mountedVolume.MountedVolume, rc.actualStateOfWorld)
|
||||
mountedVolume.MountedVolume, rc.actualStateOfWorld, rc.kubeletPodsDir)
|
||||
if err != nil &&
|
||||
!nestedpendingoperations.IsAlreadyExists(err) &&
|
||||
!exponentialbackoff.IsExponentialBackoff(err) {
|
||||
@ -254,6 +254,22 @@ func (rc *reconciler) reconcile() {
|
||||
glog.V(5).Infof(volumeToMount.GenerateMsgDetailed("operationExecutor.MountVolume started", remountingLogStr))
|
||||
}
|
||||
}
|
||||
} else if cache.IsFSResizeRequiredError(err) &&
|
||||
utilfeature.DefaultFeatureGate.Enabled(features.ExpandInUsePersistentVolumes) {
|
||||
glog.V(4).Infof(volumeToMount.GenerateMsgDetailed("Starting operationExecutor.ExpandVolumeFSWithoutUnmounting", ""))
|
||||
err := rc.operationExecutor.ExpandVolumeFSWithoutUnmounting(
|
||||
volumeToMount.VolumeToMount,
|
||||
rc.actualStateOfWorld)
|
||||
if err != nil &&
|
||||
!nestedpendingoperations.IsAlreadyExists(err) &&
|
||||
!exponentialbackoff.IsExponentialBackoff(err) {
|
||||
// Ignore nestedpendingoperations.IsAlreadyExists and exponentialbackoff.IsExponentialBackoff errors, they are expected.
|
||||
// Log all other errors.
|
||||
glog.Errorf(volumeToMount.GenerateErrorDetailed("operationExecutor.ExpandVolumeFSWithoutUnmounting failed", err).Error())
|
||||
}
|
||||
if err == nil {
|
||||
glog.V(4).Infof(volumeToMount.GenerateMsgDetailed("operationExecutor.ExpandVolumeFSWithoutUnmounting started", ""))
|
||||
}
|
||||
}
|
||||
}

@ -357,21 +373,39 @@ func (rc *reconciler) syncStates() {
return
}
volumesNeedUpdate := make(map[v1.UniqueVolumeName]*reconstructedVolume)
volumeNeedReport := []v1.UniqueVolumeName{}
for _, volume := range podVolumes {
if rc.desiredStateOfWorld.VolumeExistsWithSpecName(volume.podName, volume.volumeSpecName) {
glog.V(4).Infof("Volume exists in desired state (volume.SpecName %s, pod.UID %s), skip cleaning up mounts", volume.volumeSpecName, volume.podName)
continue
}
if rc.actualStateOfWorld.VolumeExistsWithSpecName(volume.podName, volume.volumeSpecName) {
glog.V(4).Infof("Volume exists in actual state (volume.SpecName %s, pod.UID %s), skip cleaning up mounts", volume.volumeSpecName, volume.podName)
// There is nothing to reconstruct
continue
}
volumeInDSW := rc.desiredStateOfWorld.VolumeExistsWithSpecName(volume.podName, volume.volumeSpecName)

reconstructedVolume, err := rc.reconstructVolume(volume)
if err != nil {
if volumeInDSW {
// Some pod needs the volume, don't clean it up and hope that
// reconcile() calls SetUp and reconstructs the volume in ASW.
glog.V(4).Infof("Volume exists in desired state (volume.SpecName %s, pod.UID %s), skip cleaning up mounts", volume.volumeSpecName, volume.podName)
continue
}
// No pod needs the volume.
glog.Warningf("Could not construct volume information, cleanup the mounts. (pod.UID %s, volume.SpecName %s): %v", volume.podName, volume.volumeSpecName, err)
rc.cleanupMounts(volume)
continue
}
if volumeInDSW {
// Some pod needs the volume. And it exists on disk. Some previous
// kubelet must have created the directory, therefore it must have
// reported the volume as in use. Mark the volume as in use also in
// this new kubelet so reconcile() calls SetUp and re-mounts the
// volume if it's necessary.
volumeNeedReport = append(volumeNeedReport, reconstructedVolume.volumeName)
glog.V(4).Infof("Volume exists in desired state (volume.SpecName %s, pod.UID %s), marking as InUse", volume.volumeSpecName, volume.podName)
continue
}
// There is no pod that uses the volume.
if rc.operationExecutor.IsOperationPending(reconstructedVolume.volumeName, nestedpendingoperations.EmptyUniquePodName) {
glog.Warning("Volume is in pending operation, skip cleaning up mounts")
}
@ -386,7 +420,9 @@ func (rc *reconciler) syncStates() {
glog.Errorf("Error occurred during reconstruct volume from disk: %v", err)
}
}

if len(volumeNeedReport) > 0 {
rc.desiredStateOfWorld.MarkVolumesReportedInUse(volumeNeedReport)
}
}
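
Because the diff interleaves removed and added lines, the reconstruction outcomes in syncStates() can be hard to read. The switch below is a hypothetical restatement (not upstream code) of the four cases, using the volumeInDSW flag and the reconstructVolume error from the loop above:

// Hypothetical restatement of the per-volume outcome during state reconstruction.
switch {
case err != nil && volumeInDSW:
	// Reconstruction failed but some pod still wants the volume: keep the mount and
	// let reconcile() SetUp the volume and repopulate the actual state of world.
case err != nil:
	// Reconstruction failed and no pod wants the volume: clean up the mounts.
case volumeInDSW:
	// Reconstructed and still desired: only report it as in use (volumeNeedReport).
default:
	// Reconstructed but no longer desired: hand it to updateStates() via
	// volumesNeedUpdate so it can be cleaned up through the normal unmount path.
}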

func (rc *reconciler) cleanupMounts(volume podVolume) {
@ -401,7 +437,7 @@ func (rc *reconciler) cleanupMounts(volume podVolume) {
}
// TODO: Currently cleanupMounts only includes the UnmountVolume operation. In the next PR, we will add
// logic to unmount both the volume and the device in the same routine.
err := rc.operationExecutor.UnmountVolume(mountedVolume, rc.actualStateOfWorld)
err := rc.operationExecutor.UnmountVolume(mountedVolume, rc.actualStateOfWorld, rc.kubeletPodsDir)
if err != nil {
glog.Errorf(mountedVolume.GenerateErrorDetailed(fmt.Sprintf("volumeHandler.UnmountVolumeHandler for UnmountVolume failed"), err).Error())
return
@ -455,7 +491,7 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume,
// Check existence of mount point for filesystem volume or symbolic link for block volume
isExist, checkErr := rc.operationExecutor.CheckVolumeExistenceOperation(volumeSpec, volume.mountPath, volumeSpec.Name(), rc.mounter, uniqueVolumeName, volume.podName, pod.UID, attachablePlugin)
if checkErr != nil {
return nil, err
return nil, checkErr
}
// If the mount or symlink doesn't exist, volume reconstruction should fail
if !isExist {
@ -573,7 +609,8 @@ func (rc *reconciler) updateStates(volumesNeedUpdate map[v1.UniqueVolumeName]*re
volume.mounter,
volume.blockVolumeMapper,
volume.outerVolumeSpecName,
volume.volumeGidValue)
volume.volumeGidValue,
volume.volumeSpec)
if err != nil {
glog.Errorf("Could not add pod to volume information to actual state of world: %v", err)
continue

187
vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler_test.go
generated
vendored
@ -32,6 +32,7 @@ import (
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/volumemanager/cache"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
@ -479,7 +480,7 @@ func Test_Run_Positive_VolumeAttachAndMap(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{UID: "001", Name: "volume-name"},
Spec: v1.PersistentVolumeSpec{
Capacity: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("10G")},
PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{}},
PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{PDName: "fake-device1"}},
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
v1.ReadOnlyMany,
@ -508,12 +509,8 @@ func Test_Run_Positive_VolumeAttachAndMap(t *testing.T) {
1 /* expectedAttachCallCount */, fakePlugin))
assert.NoError(t, volumetesting.VerifyWaitForAttachCallCount(
1 /* expectedWaitForAttachCallCount */, fakePlugin))
assert.NoError(t, volumetesting.VerifyGetGlobalMapPathCallCount(
1 /* expectedGetGlobalMapPathCallCount */, fakePlugin))
assert.NoError(t, volumetesting.VerifyGetPodDeviceMapPathCallCount(
1 /* expectedPodDeviceMapPathCallCount */, fakePlugin))
assert.NoError(t, volumetesting.VerifySetUpDeviceCallCount(
1 /* expectedSetUpDeviceCallCount */, fakePlugin))
assert.NoError(t, volumetesting.VerifyGetMapDeviceCallCount(
1 /* expectedGetMapDeviceCallCount */, fakePlugin))
assert.NoError(t, volumetesting.VerifyZeroTearDownDeviceCallCount(fakePlugin))
assert.NoError(t, volumetesting.VerifyZeroDetachCallCount(fakePlugin))

@ -570,7 +567,7 @@ func Test_Run_Positive_BlockVolumeMapControllerAttachEnabled(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{UID: "001", Name: "volume-name"},
Spec: v1.PersistentVolumeSpec{
Capacity: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("10G")},
PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{}},
PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{PDName: "fake-device1"}},
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
v1.ReadOnlyMany,
@ -600,12 +597,8 @@ func Test_Run_Positive_BlockVolumeMapControllerAttachEnabled(t *testing.T) {
assert.NoError(t, volumetesting.VerifyZeroAttachCalls(fakePlugin))
assert.NoError(t, volumetesting.VerifyWaitForAttachCallCount(
1 /* expectedWaitForAttachCallCount */, fakePlugin))
assert.NoError(t, volumetesting.VerifyGetGlobalMapPathCallCount(
1 /* expectedGetGlobalMapPathCallCount */, fakePlugin))
assert.NoError(t, volumetesting.VerifyGetPodDeviceMapPathCallCount(
1 /* expectedPodDeviceMapPathCallCount */, fakePlugin))
assert.NoError(t, volumetesting.VerifySetUpDeviceCallCount(
1 /* expectedSetUpCallCount */, fakePlugin))
assert.NoError(t, volumetesting.VerifyGetMapDeviceCallCount(
1 /* expectedGetMapDeviceCallCount */, fakePlugin))
assert.NoError(t, volumetesting.VerifyZeroTearDownDeviceCallCount(fakePlugin))
assert.NoError(t, volumetesting.VerifyZeroDetachCallCount(fakePlugin))

@ -662,7 +655,7 @@ func Test_Run_Positive_BlockVolumeAttachMapUnmapDetach(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{UID: "001", Name: "volume-name"},
Spec: v1.PersistentVolumeSpec{
Capacity: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("10G")},
PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{}},
PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{PDName: "fake-device1"}},
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
v1.ReadOnlyMany,
@ -691,12 +684,8 @@ func Test_Run_Positive_BlockVolumeAttachMapUnmapDetach(t *testing.T) {
1 /* expectedAttachCallCount */, fakePlugin))
assert.NoError(t, volumetesting.VerifyWaitForAttachCallCount(
1 /* expectedWaitForAttachCallCount */, fakePlugin))
assert.NoError(t, volumetesting.VerifyGetGlobalMapPathCallCount(
1 /* expectedGetGlobalMapPathCallCount */, fakePlugin))
assert.NoError(t, volumetesting.VerifyGetPodDeviceMapPathCallCount(
1 /* expectedPodDeviceMapPathCallCount */, fakePlugin))
assert.NoError(t, volumetesting.VerifySetUpDeviceCallCount(
1 /* expectedSetUpCallCount */, fakePlugin))
assert.NoError(t, volumetesting.VerifyGetMapDeviceCallCount(
1 /* expectedGetMapDeviceCallCount */, fakePlugin))
assert.NoError(t, volumetesting.VerifyZeroTearDownDeviceCallCount(fakePlugin))
assert.NoError(t, volumetesting.VerifyZeroDetachCallCount(fakePlugin))

@ -764,7 +753,7 @@ func Test_Run_Positive_VolumeUnmapControllerAttachEnabled(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{UID: "001", Name: "volume-name"},
Spec: v1.PersistentVolumeSpec{
Capacity: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("10G")},
PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{}},
PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{PDName: "fake-device1"}},
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
v1.ReadOnlyMany,
@ -795,12 +784,8 @@ func Test_Run_Positive_VolumeUnmapControllerAttachEnabled(t *testing.T) {
assert.NoError(t, volumetesting.VerifyZeroAttachCalls(fakePlugin))
assert.NoError(t, volumetesting.VerifyWaitForAttachCallCount(
1 /* expectedWaitForAttachCallCount */, fakePlugin))
assert.NoError(t, volumetesting.VerifyGetGlobalMapPathCallCount(
1 /* expectedGetGlobalMapPathCallCount */, fakePlugin))
assert.NoError(t, volumetesting.VerifyGetPodDeviceMapPathCallCount(
1 /* expectedPodDeviceMapPathCallCount */, fakePlugin))
assert.NoError(t, volumetesting.VerifySetUpDeviceCallCount(
1 /* expectedSetUpCallCount */, fakePlugin))
assert.NoError(t, volumetesting.VerifyGetMapDeviceCallCount(
1 /* expectedGetMapDeviceCallCount */, fakePlugin))
assert.NoError(t, volumetesting.VerifyZeroTearDownDeviceCallCount(fakePlugin))
assert.NoError(t, volumetesting.VerifyZeroDetachCallCount(fakePlugin))

@ -908,7 +893,7 @@ func Test_GenerateUnmapVolumeFunc_Plugin_Not_Found(t *testing.T) {
volumeToUnmount := operationexecutor.MountedVolume{
PluginName: "fake-file-plugin",
VolumeSpec: tmpSpec}
err := oex.UnmountVolume(volumeToUnmount, asw)
err := oex.UnmountVolume(volumeToUnmount, asw, "" /* podsDir */)
// Assert
if assert.Error(t, err) {
assert.Contains(t, err.Error(), tc.expectedErrMsg)
@ -965,6 +950,120 @@ func Test_GenerateUnmapDeviceFunc_Plugin_Not_Found(t *testing.T) {
utilfeature.DefaultFeatureGate.Set("BlockVolume=false")
}

// Populates desiredStateOfWorld cache with one volume/pod.
// Enables controllerAttachDetachEnabled.
// Calls Run().
// Waits for the volume to be mounted.
// Marks the volume as fsResizeRequired in ASW.
// Verifies that the volume's fsResizeRequired flag is cleared later.
func Test_Run_Positive_VolumeFSResizeControllerAttachEnabled(t *testing.T) {
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.ExpandInUsePersistentVolumes))
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "pv",
UID: "pvuid",
},
Spec: v1.PersistentVolumeSpec{
ClaimRef: &v1.ObjectReference{Name: "pvc"},
},
}
pvc := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "pvc",
UID: "pvcuid",
},
Spec: v1.PersistentVolumeClaimSpec{
VolumeName: "pv",
},
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod1",
UID: "pod1uid",
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "volume-name",
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: pvc.Name,
},
},
},
},
},
}

volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
kubeClient := createtestClientWithPVPVC(pv, pvc)
fakeRecorder := &record.FakeRecorder{}
fakeHandler := volumetesting.NewBlockVolumePathHandler()
oex := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
kubeClient,
volumePluginMgr,
fakeRecorder,
false, /* checkNodeCapabilitiesBeforeMount */
fakeHandler))

reconciler := NewReconciler(
kubeClient,
true, /* controllerAttachDetachEnabled */
reconcilerLoopSleepDuration,
reconcilerSyncStatesSleepPeriod,
waitForAttachTimeout,
nodeName,
dsw,
asw,
hasAddedPods,
oex,
&mount.FakeMounter{},
volumePluginMgr,
kubeletPodsDir)

volumeSpec := &volume.Spec{PersistentVolume: pv}
podName := util.GetUniquePodName(pod)
volumeName, err := dsw.AddPodToVolume(
podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)
// Assert
if err != nil {
t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
}
dsw.MarkVolumesReportedInUse([]v1.UniqueVolumeName{volumeName})

// Start the reconciler to fill ASW.
stopChan, stoppedChan := make(chan struct{}), make(chan struct{})
go func() {
reconciler.Run(stopChan)
close(stoppedChan)
}()
waitForMount(t, fakePlugin, volumeName, asw)
// Stop the reconciler.
close(stopChan)
<-stoppedChan

// Mark volume as fsResizeRequired.
asw.MarkFSResizeRequired(volumeName, podName)
_, _, podExistErr := asw.PodExistsInVolume(podName, volumeName)
if !cache.IsFSResizeRequiredError(podExistErr) {
t.Fatalf("Volume should be marked as fsResizeRequired, but receive unexpected error: %v", podExistErr)
|
||||
}

// Start the reconciler again; we expect it to perform the resize
// operation and clear the volume's fsResizeRequired flag.
go reconciler.Run(wait.NeverStop)

waitErr := retryWithExponentialBackOff(500*time.Millisecond, func() (done bool, err error) {
mounted, _, err := asw.PodExistsInVolume(podName, volumeName)
return mounted && err == nil, nil
})
if waitErr != nil {
t.Fatal("Volume resize should succeeded")
|
||||
}
}
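
One caveat with the test above: it flips ExpandInUsePersistentVolumes on the global DefaultFeatureGate and never restores it, so the setting can leak into tests that run afterwards. A minimal save-and-restore sketch (our suggestion, not part of this change), using the utilfeature, features and fmt imports already present in this file:

// Hypothetical gate save/restore around the test body.
prev := utilfeature.DefaultFeatureGate.Enabled(features.ExpandInUsePersistentVolumes)
if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.ExpandInUsePersistentVolumes)); err != nil {
	t.Fatalf("failed to enable %s: %v", features.ExpandInUsePersistentVolumes, err)
}
defer utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t", features.ExpandInUsePersistentVolumes, prev))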

func waitForMount(
t *testing.T,
fakePlugin *volumetesting.FakeVolumePlugin,
@ -1029,11 +1128,10 @@ func createTestClient() *fake.Clientset {
Status: v1.NodeStatus{
VolumesAttached: []v1.AttachedVolume{
{
Name: "fake-plugin/volume-name",
Name: "fake-plugin/fake-device1",
DevicePath: "fake/path",
},
}},
Spec: v1.NodeSpec{ExternalID: string(nodeName)},
}, nil
})
fakeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
@ -1045,3 +1143,30 @@ func createTestClient() *fake.Clientset {
func runReconciler(reconciler Reconciler) {
go reconciler.Run(wait.NeverStop)
}

func createtestClientWithPVPVC(pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) *fake.Clientset {
fakeClient := &fake.Clientset{}
fakeClient.AddReactor("get", "nodes",
func(action core.Action) (bool, runtime.Object, error) {
return true, &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: string(nodeName)},
Status: v1.NodeStatus{
VolumesAttached: []v1.AttachedVolume{
{
Name: "fake-plugin/pv",
DevicePath: "fake/path",
},
}},
}, nil
})
fakeClient.AddReactor("get", "persistentvolumeclaims", func(action core.Action) (bool, runtime.Object, error) {
return true, pvc, nil
})
fakeClient.AddReactor("get", "persistentvolumes", func(action core.Action) (bool, runtime.Object, error) {
return true, pv, nil
})
fakeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
return true, nil, fmt.Errorf("no reaction implemented for %s", action)
})
return fakeClient
}

7
vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/volume_manager.go
generated
vendored
@ -255,9 +255,10 @@ func (vm *volumeManager) GetMountedVolumesForPod(podName types.UniquePodName) co
podVolumes := make(container.VolumeMap)
for _, mountedVolume := range vm.actualStateOfWorld.GetMountedVolumesForPod(podName) {
podVolumes[mountedVolume.OuterVolumeSpecName] = container.VolumeInfo{
Mounter: mountedVolume.Mounter,
BlockVolumeMapper: mountedVolume.BlockVolumeMapper,
ReadOnly: mountedVolume.VolumeSpec.ReadOnly,
Mounter: mountedVolume.Mounter,
BlockVolumeMapper: mountedVolume.BlockVolumeMapper,
ReadOnly: mountedVolume.VolumeSpec.ReadOnly,
InnerVolumeSpecName: mountedVolume.InnerVolumeSpecName,
}
}
return podVolumes
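
With InnerVolumeSpecName carried in container.VolumeInfo, callers of GetMountedVolumesForPod can now see both the pod-facing (outer) volume name and the in-pod spec (inner) name. A hedged usage sketch follows; the caller and log call are hypothetical, while the field names are those added in the hunk above:

// Hypothetical consumer of the VolumeMap returned by GetMountedVolumesForPod;
// the map key is the outer volume spec name.
for outerName, info := range vm.GetMountedVolumesForPod(podName) {
	glog.V(5).Infof("pod volume %q maps to inner spec name %q (readOnly=%t)",
		outerName, info.InnerVolumeSpecName, info.ReadOnly)
}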

12
vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/volume_manager_test.go
generated
vendored
@ -55,7 +55,8 @@ func TestGetMountedVolumesForPodAndGetVolumesInUse(t *testing.T) {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
podManager := kubepod.NewBasicPodManager(podtest.NewFakeMirrorClient(), secret.NewFakeManager(), configmap.NewFakeManager())
cpm := podtest.NewMockCheckpointManager()
podManager := kubepod.NewBasicPodManager(podtest.NewFakeMirrorClient(), secret.NewFakeManager(), configmap.NewFakeManager(), cpm)

node, pod, pv, claim := createObjects()
kubeClient := fake.NewSimpleClientset(node, pod, pv, claim)
@ -97,7 +98,8 @@ func TestInitialPendingVolumesForPodAndGetVolumesInUse(t *testing.T) {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
podManager := kubepod.NewBasicPodManager(podtest.NewFakeMirrorClient(), secret.NewFakeManager(), configmap.NewFakeManager())
cpm := podtest.NewMockCheckpointManager()
podManager := kubepod.NewBasicPodManager(podtest.NewFakeMirrorClient(), secret.NewFakeManager(), configmap.NewFakeManager(), cpm)

node, pod, pv, claim := createObjects()
claim.Status = v1.PersistentVolumeClaimStatus{
@ -135,7 +137,8 @@ func TestGetExtraSupplementalGroupsForPod(t *testing.T) {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
podManager := kubepod.NewBasicPodManager(podtest.NewFakeMirrorClient(), secret.NewFakeManager(), configmap.NewFakeManager())
cpm := podtest.NewMockCheckpointManager()
podManager := kubepod.NewBasicPodManager(podtest.NewFakeMirrorClient(), secret.NewFakeManager(), configmap.NewFakeManager(), cpm)

node, pod, _, claim := createObjects()

@ -243,11 +246,10 @@ func createObjects() (*v1.Node, *v1.Pod, *v1.PersistentVolume, *v1.PersistentVol
Status: v1.NodeStatus{
VolumesAttached: []v1.AttachedVolume{
{
Name: "fake/pvA",
Name: "fake/fake-device",
DevicePath: "fake/path",
},
}},
Spec: v1.NodeSpec{ExternalID: testHostname},
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{