Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-14 10:53:34 +00:00

Commit: vendor updates
7	vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/BUILD (generated, vendored)
@@ -19,10 +19,10 @@ go_library(
        "//pkg/util/mount:go_default_library",
        "//pkg/util/strings:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//pkg/volume/util/nestedpendingoperations:go_default_library",
        "//pkg/volume/util/operationexecutor:go_default_library",
        "//pkg/volume/util/types:go_default_library",
        "//pkg/volume/util/volumehelper:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@@ -36,15 +36,14 @@ go_library(
go_test(
    name = "go_default_test",
    srcs = ["reconciler_test.go"],
    importpath = "k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler",
    library = ":go_default_library",
    embed = [":go_default_library"],
    deps = [
        "//pkg/kubelet/volumemanager/cache:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/testing:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//pkg/volume/util/operationexecutor:go_default_library",
        "//pkg/volume/util/volumehelper:go_default_library",
        "//vendor/github.com/stretchr/testify/assert:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
275	vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler.go (generated, vendored)
@@ -15,7 +15,7 @@ limitations under the License.
*/

// Package reconciler implements interfaces that attempt to reconcile the
// desired state of the with the actual state of the world by triggering
// desired state of the world with the actual state of the world by triggering
// relevant actions (attach, detach, mount, unmount).
package reconciler

@@ -41,10 +41,10 @@ import (
    "k8s.io/kubernetes/pkg/util/mount"
    utilstrings "k8s.io/kubernetes/pkg/util/strings"
    volumepkg "k8s.io/kubernetes/pkg/volume"
    "k8s.io/kubernetes/pkg/volume/util"
    "k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations"
    "k8s.io/kubernetes/pkg/volume/util/operationexecutor"
    volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
    "k8s.io/kubernetes/pkg/volume/util/volumehelper"
)

// Reconciler runs a periodic loop to reconcile the desired state of the world
@@ -140,26 +140,18 @@ type reconciler struct {
}

func (rc *reconciler) Run(stopCh <-chan struct{}) {
    // Wait for the populator to indicate that it has actually populated the desired state of world, meaning it has
    // completed a populate loop that started after sources are all ready. After, there's no need to keep checking.
    wait.PollUntil(rc.loopSleepDuration, func() (bool, error) {
        rc.reconciliationLoopFunc(rc.populatorHasAddedPods())()
        return rc.populatorHasAddedPods(), nil
    }, stopCh)
    wait.Until(rc.reconciliationLoopFunc(true), rc.loopSleepDuration, stopCh)
    wait.Until(rc.reconciliationLoopFunc(), rc.loopSleepDuration, stopCh)
}

func (rc *reconciler) reconciliationLoopFunc(populatorHasAddedPods bool) func() {
func (rc *reconciler) reconciliationLoopFunc() func() {
    return func() {
        rc.reconcile()

        // Add a check that the populator has added pods so that reconciler's reconstruct process will start
        // after desired state of world is populated with pod volume information. Otherwise, reconciler's
        // reconstruct process may add incomplete volume information and cause confusion. In addition, if the
        // desired state of world has not been populated yet, the reconstruct process may clean up pods' volumes
        // that are still in use because desired state of world does not contain a complete list of pods.
        if populatorHasAddedPods && time.Since(rc.timeOfLastSync) > rc.syncDuration {
            glog.V(5).Infof("Desired state of world has been populated with pods, starting reconstruct state function")
        // Sync the state with the reality once after all existing pods are added to the desired state from all sources.
        // Otherwise, the reconstruct process may clean up pods' volumes that are still in use because
        // desired state of world does not contain a complete list of pods.
        if rc.populatorHasAddedPods() && !rc.StatesHasBeenSynced() {
            glog.Infof("Reconciler: start to sync state")
            rc.sync()
        }
    }
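The hunk above collapses the old two-phase startup (a `wait.PollUntil` gate that reconciled while waiting for the populator, followed by `wait.Until`) into a single `wait.Until` loop that checks populator readiness inside each iteration. A minimal, self-contained Go sketch of the two-phase pattern being removed, assuming the `k8s.io/apimachinery/pkg/util/wait` package; `populatorReady` and `reconcileOnce` are illustrative stand-ins for `populatorHasAddedPods()` and `reconcile()`:

```go
// Sketch of the loop-gating pattern: poll until a readiness predicate turns
// true, then fall into a steady-state periodic loop.
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	stopCh := make(chan struct{})
	start := time.Now()
	populatorReady := func() bool { return time.Since(start) > 300*time.Millisecond }
	reconcileOnce := func() { fmt.Println("reconcile tick") }

	// Phase 1: keep reconciling while waiting for the populator to be ready.
	wait.PollUntil(100*time.Millisecond, func() (bool, error) {
		reconcileOnce()
		return populatorReady(), nil
	}, stopCh)

	// Phase 2: steady-state periodic reconciliation until stopped.
	go wait.Until(reconcileOnce, 100*time.Millisecond, stopCh)
	time.Sleep(500 * time.Millisecond)
	close(stopCh)
}
```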
@@ -174,12 +166,10 @@ func (rc *reconciler) reconcile() {
    // Ensure volumes that should be unmounted are unmounted.
    for _, mountedVolume := range rc.actualStateOfWorld.GetMountedVolumes() {
        if !rc.desiredStateOfWorld.PodExistsInVolume(mountedVolume.PodName, mountedVolume.VolumeName) {
            volumeHandler, err := operationexecutor.NewVolumeHandler(mountedVolume.VolumeSpec, rc.operationExecutor)
            if err != nil {
                glog.Errorf(mountedVolume.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.NewVolumeHandler for UnmountVolume failed"), err).Error())
                continue
            }
            err = volumeHandler.UnmountVolumeHandler(mountedVolume.MountedVolume, rc.actualStateOfWorld)
            // Volume is mounted, unmount it
            glog.V(5).Infof(mountedVolume.GenerateMsgDetailed("Starting operationExecutor.UnmountVolume", ""))
            err := rc.operationExecutor.UnmountVolume(
                mountedVolume.MountedVolume, rc.actualStateOfWorld)
            if err != nil &&
                !nestedpendingoperations.IsAlreadyExists(err) &&
                !exponentialbackoff.IsExponentialBackoff(err) {
@@ -201,7 +191,7 @@ func (rc *reconciler) reconcile() {
        if rc.controllerAttachDetachEnabled || !volumeToMount.PluginIsAttachable {
            // Volume is not attached (or doesn't implement attacher), kubelet attach is disabled, wait
            // for controller to finish attaching volume.
            glog.V(12).Infof(volumeToMount.GenerateMsgDetailed("Starting operationExecutor.VerifyControllerAttachedVolume", ""))
            glog.V(5).Infof(volumeToMount.GenerateMsgDetailed("Starting operationExecutor.VerifyControllerAttachedVolume", ""))
            err := rc.operationExecutor.VerifyControllerAttachedVolume(
                volumeToMount.VolumeToMount,
                rc.nodeName,
@@ -224,7 +214,7 @@ func (rc *reconciler) reconcile() {
                VolumeSpec: volumeToMount.VolumeSpec,
                NodeName: rc.nodeName,
            }
            glog.V(12).Infof(volumeToAttach.GenerateMsgDetailed("Starting operationExecutor.AttachVolume", ""))
            glog.V(5).Infof(volumeToAttach.GenerateMsgDetailed("Starting operationExecutor.AttachVolume", ""))
            err := rc.operationExecutor.AttachVolume(volumeToAttach, rc.actualStateOfWorld)
            if err != nil &&
                !nestedpendingoperations.IsAlreadyExists(err) &&
@@ -244,12 +234,12 @@ func (rc *reconciler) reconcile() {
            if isRemount {
                remountingLogStr = "Volume is already mounted to pod, but remount was requested."
            }
            volumeHandler, err := operationexecutor.NewVolumeHandler(volumeToMount.VolumeSpec, rc.operationExecutor)
            if err != nil {
                glog.Errorf(volumeToMount.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.NewVolumeHandler for MountVolume failed"), err).Error())
                continue
            }
            err = volumeHandler.MountVolumeHandler(rc.waitForAttachTimeout, volumeToMount.VolumeToMount, rc.actualStateOfWorld, isRemount, remountingLogStr)
            glog.V(4).Infof(volumeToMount.GenerateMsgDetailed("Starting operationExecutor.MountVolume", remountingLogStr))
            err := rc.operationExecutor.MountVolume(
                rc.waitForAttachTimeout,
                volumeToMount.VolumeToMount,
                rc.actualStateOfWorld,
                isRemount)
            if err != nil &&
                !nestedpendingoperations.IsAlreadyExists(err) &&
                !exponentialbackoff.IsExponentialBackoff(err) {
@@ -273,12 +263,10 @@ func (rc *reconciler) reconcile() {
        if !rc.desiredStateOfWorld.VolumeExists(attachedVolume.VolumeName) &&
            !rc.operationExecutor.IsOperationPending(attachedVolume.VolumeName, nestedpendingoperations.EmptyUniquePodName) {
            if attachedVolume.GloballyMounted {
                volumeHandler, err := operationexecutor.NewVolumeHandler(attachedVolume.VolumeSpec, rc.operationExecutor)
                if err != nil {
                    glog.Errorf(attachedVolume.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.NewVolumeHandler for UnmountDevice failed"), err).Error())
                    continue
                }
                err = volumeHandler.UnmountDeviceHandler(attachedVolume.AttachedVolume, rc.actualStateOfWorld, rc.mounter)
                // Volume is globally mounted to device, unmount it
                glog.V(5).Infof(attachedVolume.GenerateMsgDetailed("Starting operationExecutor.UnmountDevice", ""))
                err := rc.operationExecutor.UnmountDevice(
                    attachedVolume.AttachedVolume, rc.actualStateOfWorld, rc.mounter)
                if err != nil &&
                    !nestedpendingoperations.IsAlreadyExists(err) &&
                    !exponentialbackoff.IsExponentialBackoff(err) {
@@ -297,7 +285,7 @@ func (rc *reconciler) reconcile() {
                glog.Infof(attachedVolume.GenerateMsgDetailed("Volume detached", fmt.Sprintf("DevicePath %q", attachedVolume.DevicePath)))
            } else {
                // Only detach if kubelet detach is enabled
                glog.V(12).Infof(attachedVolume.GenerateMsgDetailed("Starting operationExecutor.DetachVolume", ""))
                glog.V(5).Infof(attachedVolume.GenerateMsgDetailed("Starting operationExecutor.DetachVolume", ""))
                err := rc.operationExecutor.DetachVolume(
                    attachedVolume.AttachedVolume, false /* verifySafeToDetach */, rc.actualStateOfWorld)
                if err != nil &&
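Every operation call in `reconcile()` above uses the same error filter: "already pending" and "still in exponential backoff" errors are expected and suppressed, since the operation will simply be retried on a later pass; only unexpected errors are logged. A stdlib-only sketch of that idiom; the two predicates are hypothetical stand-ins for `nestedpendingoperations.IsAlreadyExists` and `exponentialbackoff.IsExponentialBackoff`:

```go
package main

import (
	"errors"
	"log"
)

var (
	errAlreadyExists = errors.New("operation already pending")
	errBackoff       = errors.New("exponential backoff in effect")
)

func isAlreadyExists(err error) bool { return errors.Is(err, errAlreadyExists) }
func isBackoff(err error) bool       { return errors.Is(err, errBackoff) }

func runOperation(op func() error) {
	if err := op(); err != nil &&
		!isAlreadyExists(err) &&
		!isBackoff(err) {
		// Only unexpected errors are surfaced; expected ones are retried
		// on the next reconciliation pass.
		log.Printf("operation failed: %v", err)
	}
}

func main() {
	runOperation(func() error { return errBackoff })          // suppressed
	runOperation(func() error { return errors.New("boom") })  // logged
}
```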
@@ -319,11 +307,11 @@ func (rc *reconciler) reconcile() {
// sync process tries to observe the real world by scanning all pods' volume directories from the disk.
// If the actual and desired state of worlds are not consistent with the observed world, it means that some
// mounted volumes are left out probably during kubelet restart. This process will reconstruct
// the volumes and udpate the actual and desired states. In the following reconciler loop, those volumes will
// be cleaned up.
// the volumes and update the actual and desired states. For the volumes that cannot support reconstruction,
// it will try to clean up the mount paths with operation executor.
func (rc *reconciler) sync() {
    defer rc.updateLastSyncTime()
    rc.syncStates(rc.kubeletPodsDir)
    rc.syncStates()
}

func (rc *reconciler) updateLastSyncTime() {
@@ -348,7 +336,7 @@ type reconstructedVolume struct {
    volumeSpec *volumepkg.Spec
    outerVolumeSpecName string
    pod *v1.Pod
    pluginIsAttachable bool
    attachablePlugin volumepkg.AttachableVolumePlugin
    volumeGidValue string
    devicePath string
    reportedInUse bool
@@ -356,66 +344,41 @@ type reconstructedVolume struct {
    blockVolumeMapper volumepkg.BlockVolumeMapper
}

// reconstructFromDisk scans the volume directories under the given pod directory. If the volume is not
// in either actual or desired state of world, or pending operation, this function will reconstruct
// the volume spec and put it in both the actual and desired state of worlds. If no running
// container is mounting the volume, the volume will be removed by desired state of world's populator and
// cleaned up by the reconciler.
func (rc *reconciler) syncStates(podsDir string) {
// syncStates scans the volume directories under the given pod directory.
// If the volume is not in desired state of world, this function will reconstruct
// the volume related information and put it in both the actual and desired state of worlds.
// For some volume plugins that cannot support reconstruction, it will clean up the existing
// mount points since the volume is no long needed (removed from desired state)
func (rc *reconciler) syncStates() {
    // Get volumes information by reading the pod's directory
    podVolumes, err := getVolumesFromPodDir(podsDir)
    podVolumes, err := getVolumesFromPodDir(rc.kubeletPodsDir)
    if err != nil {
        glog.Errorf("Cannot get volumes from disk %v", err)
        return
    }

    volumesNeedUpdate := make(map[v1.UniqueVolumeName]*reconstructedVolume)
    for _, volume := range podVolumes {
        reconstructedVolume, err := rc.reconstructVolume(volume)
        if err != nil {
            glog.Errorf("Could not construct volume information: %v", err)
        if rc.desiredStateOfWorld.VolumeExistsWithSpecName(volume.podName, volume.volumeSpecName) {
            glog.V(4).Infof("Volume exists in desired state (volume.SpecName %s, pod.UID %s), skip cleaning up mounts", volume.volumeSpecName, volume.podName)
            continue
        }
        // Check if there is an pending operation for the given pod and volume.
        // Need to check pending operation before checking the actual and desired
        // states to avoid race condition during checking. For example, the following
        // might happen if pending operation is checked after checking actual and desired states.
        // 1. Checking the pod and it does not exist in either actual or desired state.
        // 2. An operation for the given pod finishes and the actual state is updated.
        // 3. Checking and there is no pending operation for the given pod.
        // During state reconstruction period, no new volume operations could be issued. If the
        // mounted path is not in either pending operation, or actual or desired states, this
        // volume needs to be reconstructed back to the states.
        pending := rc.operationExecutor.IsOperationPending(reconstructedVolume.volumeName, reconstructedVolume.podName)
        dswExist := rc.desiredStateOfWorld.PodExistsInVolume(reconstructedVolume.podName, reconstructedVolume.volumeName)
        aswExist, _, _ := rc.actualStateOfWorld.PodExistsInVolume(reconstructedVolume.podName, reconstructedVolume.volumeName)

        if !rc.StatesHasBeenSynced() {
            // In case this is the first time to reconstruct state after kubelet starts, for a persistant volume, it must have
            // been mounted before kubelet restarts because no mount operations could be started at this time (node
            // status has not yet been updated before this very first syncStates finishes, so that VerifyControllerAttachedVolume will fail),
            // In this case, the volume state should be put back to actual state now no matter desired state has it or not.
            // This is to prevent node status from being updated to empty for attachable volumes. This might happen because
            // in the case that a volume is discovered on disk, and it is part of desired state, but is then quickly deleted
            // from the desired state. If in such situation, the volume is not added to the actual state, the node status updater will
            // not get this volume from either actual or desired state. In turn, this might cause master controller
            // detaching while the volume is still mounted.
            if aswExist || !reconstructedVolume.pluginIsAttachable {
                continue
            }
        } else {
            // Check pending first since no new operations could be started at this point.
            // Otherwise there might a race condition in checking actual states and pending operations
            if pending || dswExist || aswExist {
                continue
            }
        if rc.actualStateOfWorld.VolumeExistsWithSpecName(volume.podName, volume.volumeSpecName) {
            glog.V(4).Infof("Volume exists in actual state (volume.SpecName %s, pod.UID %s), skip cleaning up mounts", volume.volumeSpecName, volume.podName)
            continue
        }
        reconstructedVolume, err := rc.reconstructVolume(volume)
        if err != nil {
            glog.Warningf("Could not construct volume information, cleanup the mounts. (pod.UID %s, volume.SpecName %s): %v", volume.podName, volume.volumeSpecName, err)
            rc.cleanupMounts(volume)
            continue
        }
        if rc.operationExecutor.IsOperationPending(reconstructedVolume.volumeName, nestedpendingoperations.EmptyUniquePodName) {
            glog.Warning("Volume is in pending operation, skip cleaning up mounts")
        }

        glog.V(2).Infof(
            "Reconciler sync states: could not find pod information in desired or actual states or pending operation, update it in both states: %+v",
            "Reconciler sync states: could not find pod information in desired state, update it in actual state: %+v",
            reconstructedVolume)
        volumesNeedUpdate[reconstructedVolume.volumeName] = reconstructedVolume

    }

    if len(volumesNeedUpdate) > 0 {
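The comment block removed above explains why the old code sampled pending operations before the actual/desired states: checked in the other order, an in-flight operation could complete between the two reads and the volume would look absent everywhere. A schematic Go sketch of that ordering argument; all names are hypothetical and this is not the kubelet implementation:

```go
package main

import "fmt"

type states struct {
	pending map[string]bool // operations in flight
	actual  map[string]bool // actual state of world
	desired map[string]bool // desired state of world
}

// needsReconstruction reports whether a volume found on disk is unknown to
// every bookkeeping structure and therefore must be reconstructed.
func (s *states) needsReconstruction(vol string) bool {
	// Read pending first: during reconstruction no new operations start, so
	// a volume absent from pending now cannot gain a pending entry later.
	pending := s.pending[vol]
	dswExist := s.desired[vol]
	aswExist := s.actual[vol]
	return !pending && !dswExist && !aswExist
}

func main() {
	s := &states{
		pending: map[string]bool{},
		actual:  map[string]bool{"vol-a": true},
		desired: map[string]bool{},
	}
	fmt.Println(s.needsReconstruction("vol-a")) // false: already tracked
	fmt.Println(s.needsReconstruction("vol-b")) // true: reconstruct it
}
```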
@@ -426,7 +389,26 @@ func (rc *reconciler) syncStates(podsDir string) {

}

// Reconstruct Volume object and reconstructedVolume data structure by reading the pod's volume directories
func (rc *reconciler) cleanupMounts(volume podVolume) {
    glog.V(2).Infof("Reconciler sync states: could not find information (PID: %s) (Volume SpecName: %s) in desired state, clean up the mount points",
        volume.podName, volume.volumeSpecName)
    mountedVolume := operationexecutor.MountedVolume{
        PodName: volume.podName,
        VolumeName: v1.UniqueVolumeName(volume.volumeSpecName),
        InnerVolumeSpecName: volume.volumeSpecName,
        PluginName: volume.pluginName,
        PodUID: types.UID(volume.podName),
    }
    // TODO: Currently cleanupMounts only includes UnmountVolume operation. In the next PR, we will add
    // to unmount both volume and device in the same routine.
    err := rc.operationExecutor.UnmountVolume(mountedVolume, rc.actualStateOfWorld)
    if err != nil {
        glog.Errorf(mountedVolume.GenerateErrorDetailed(fmt.Sprintf("volumeHandler.UnmountVolumeHandler for UnmountVolume failed"), err).Error())
        return
    }
}

// Reconstruct volume data structure by reading the pod's volume directories
func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume, error) {
    // plugin initializations
    plugin, err := rc.volumePluginMgr.FindPluginByName(volume.pluginName)
@@ -438,27 +420,18 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume,
        return nil, err
    }

    // Create volumeSpec
    // Create pod object
    pod := &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            UID: types.UID(volume.podName),
        },
    }
    // TODO: remove feature gate check after no longer needed
    var mapperPlugin volumepkg.BlockVolumePlugin
    tmpSpec := &volumepkg.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{}}}
    if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
        mapperPlugin, err = rc.volumePluginMgr.FindMapperPluginByName(volume.pluginName)
        if err != nil {
            return nil, err
        }
        tmpSpec = &volumepkg.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{VolumeMode: &volume.volumeMode}}}
    }
    volumeHandler, err := operationexecutor.NewVolumeHandler(tmpSpec, rc.operationExecutor)
    mapperPlugin, err := rc.volumePluginMgr.FindMapperPluginByName(volume.pluginName)
    if err != nil {
        return nil, err
    }
    volumeSpec, err := volumeHandler.ReconstructVolumeHandler(
    volumeSpec, err := rc.operationExecutor.ReconstructVolumeOperation(
        volume.volumeMode,
        plugin,
        mapperPlugin,
        pod.UID,
@@ -470,19 +443,17 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume,
        return nil, err
    }

    volumeName, err := plugin.GetVolumeName(volumeSpec)
    if err != nil {
        return nil, err
    }
    var uniqueVolumeName v1.UniqueVolumeName
    if attachablePlugin != nil {
        uniqueVolumeName = volumehelper.GetUniqueVolumeName(volume.pluginName, volumeName)
        uniqueVolumeName, err = util.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)
        if err != nil {
            return nil, err
        }
    } else {
        uniqueVolumeName = volumehelper.GetUniqueVolumeNameForNonAttachableVolume(volume.podName, plugin, volumeSpec)
        uniqueVolumeName = util.GetUniqueVolumeNameForNonAttachableVolume(volume.podName, plugin, volumeSpec)
    }

    // Check existence of mount point for filesystem volume or symbolic link for block volume
    isExist, checkErr := volumeHandler.CheckVolumeExistence(volume.mountPath, volumeSpec.Name(), rc.mounter, uniqueVolumeName, volume.podName, pod.UID, attachablePlugin)
    isExist, checkErr := rc.operationExecutor.CheckVolumeExistenceOperation(volumeSpec, volume.mountPath, volumeSpec.Name(), rc.mounter, uniqueVolumeName, volume.podName, pod.UID, attachablePlugin)
    if checkErr != nil {
        return nil, err
    }
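Both the old and the new code above build a stand-in `volume.Spec` whose `PersistentVolumeSpec` carries the volume mode through a pointer, because `v1.PersistentVolumeSpec.VolumeMode` is an optional field of type `*PersistentVolumeMode`. A minimal sketch of that pattern, assuming only the `k8s.io/api/core/v1` types:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	// A local variable is declared just so its address can be taken.
	volumeMode := v1.PersistentVolumeBlock
	pv := &v1.PersistentVolume{
		Spec: v1.PersistentVolumeSpec{VolumeMode: &volumeMode},
	}
	if pv.Spec.VolumeMode != nil {
		fmt.Println("volume mode:", *pv.Spec.VolumeMode) // Block
	}
}
```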
@@ -507,7 +478,7 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume,

    // TODO: remove feature gate check after no longer needed
    var volumeMapper volumepkg.BlockVolumeMapper
    if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
    if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) && volume.volumeMode == v1.PersistentVolumeBlock {
        var newMapperErr error
        if mapperPlugin != nil {
            volumeMapper, newMapperErr = mapperPlugin.NewBlockVolumeMapper(
@@ -530,23 +501,24 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume,
        volumeName: uniqueVolumeName,
        podName: volume.podName,
        volumeSpec: volumeSpec,
        // volume.volumeSpecName is actually InnerVolumeSpecName. But this information will likely to be updated in updateStates()
        // by checking the desired state volumeToMount list and getting the real OuterVolumeSpecName.
        // In case the pod is deleted during this period and desired state does not have this information, it will not be used
        // volume.volumeSpecName is actually InnerVolumeSpecName. It will not be used
        // for volume cleanup.
        // TODO: in case pod is added back before reconciler starts to unmount, we can update this field from desired state information
        outerVolumeSpecName: volume.volumeSpecName,
        pod: pod,
        pluginIsAttachable: attachablePlugin != nil,
        attachablePlugin: attachablePlugin,
        volumeGidValue: "",
        devicePath: "",
        mounter: volumeMounter,
        blockVolumeMapper: volumeMapper,
        // devicePath is updated during updateStates() by checking node status's VolumesAttached data.
        // TODO: get device path directly from the volume mount path.
        devicePath: "",
        mounter: volumeMounter,
        blockVolumeMapper: volumeMapper,
    }
    return reconstructedVolume, nil
}

func (rc *reconciler) updateStates(volumesNeedUpdate map[v1.UniqueVolumeName]*reconstructedVolume) error {
// Get the node status to retrieve volume device path information.
// updateDevicePath gets the node status to retrieve volume device path information.
func (rc *reconciler) updateDevicePath(volumesNeedUpdate map[v1.UniqueVolumeName]*reconstructedVolume) {
    node, fetchErr := rc.kubeClient.CoreV1().Nodes().Get(string(rc.nodeName), metav1.GetOptions{})
    if fetchErr != nil {
        glog.Errorf("updateStates in reconciler: could not get node status with error %v", fetchErr)
@@ -559,26 +531,42 @@ func (rc *reconciler) updateStates(volumesNeedUpdate map[v1.UniqueVolumeName]*re
        }
    }
}
}

    // Get the list of volumes from desired state and update OuterVolumeSpecName if the information is available
    volumesToMount := rc.desiredStateOfWorld.GetVolumesToMount()
    for _, volumeToMount := range volumesToMount {
        if volume, exists := volumesNeedUpdate[volumeToMount.VolumeName]; exists {
            volume.outerVolumeSpecName = volumeToMount.OuterVolumeSpecName
            volumesNeedUpdate[volumeToMount.VolumeName] = volume
            glog.V(4).Infof("Update OuterVolumeSpecName from desired state for volume (%q): %q",
                volumeToMount.VolumeName, volume.outerVolumeSpecName)
func getDeviceMountPath(volume *reconstructedVolume) (string, error) {
    volumeAttacher, err := volume.attachablePlugin.NewAttacher()
    if volumeAttacher == nil || err != nil {
        return "", err
    }
    deviceMountPath, err :=
        volumeAttacher.GetDeviceMountPath(volume.volumeSpec)
    if err != nil {
        return "", err
    }

    if volume.blockVolumeMapper != nil {
        deviceMountPath, err =
            volume.blockVolumeMapper.GetGlobalMapPath(volume.volumeSpec)
        if err != nil {
            return "", err
        }
    }
    return deviceMountPath, nil
}

func (rc *reconciler) updateStates(volumesNeedUpdate map[v1.UniqueVolumeName]*reconstructedVolume) error {
    // Get the node status to retrieve volume device path information.
    rc.updateDevicePath(volumesNeedUpdate)

    for _, volume := range volumesNeedUpdate {
        err := rc.actualStateOfWorld.MarkVolumeAsAttached(
            //TODO: the devicePath might not be correct for some volume plugins: see issue #54108
            volume.volumeName, volume.volumeSpec, "" /* nodeName */, volume.devicePath)
        if err != nil {
            glog.Errorf("Could not add volume information to actual state of world: %v", err)
            continue
        }

        err = rc.actualStateOfWorld.AddPodToVolume(
        err = rc.actualStateOfWorld.MarkVolumeAsMounted(
            volume.podName,
            types.UID(volume.podName),
            volume.volumeName,
@@ -590,22 +578,19 @@ func (rc *reconciler) updateStates(volumesNeedUpdate map[v1.UniqueVolumeName]*re
            glog.Errorf("Could not add pod to volume information to actual state of world: %v", err)
            continue
        }
        if volume.pluginIsAttachable {
            err = rc.actualStateOfWorld.MarkDeviceAsMounted(volume.volumeName)
        glog.V(4).Infof("Volume: %s (pod UID %s) is marked as mounted and added into the actual state", volume.volumeName, volume.podName)
        if volume.attachablePlugin != nil {
            deviceMountPath, err := getDeviceMountPath(volume)
            if err != nil {
                glog.Errorf("Could not find device mount path for volume %s", volume.volumeName)
                continue
            }
            err = rc.actualStateOfWorld.MarkDeviceAsMounted(volume.volumeName, volume.devicePath, deviceMountPath)
            if err != nil {
                glog.Errorf("Could not mark device is mounted to actual state of world: %v", err)
                continue
            }
            glog.Infof("Volume: %v is mounted", volume.volumeName)
        }

        _, err = rc.desiredStateOfWorld.AddPodToVolume(volume.podName,
            volume.pod,
            volume.volumeSpec,
            volume.outerVolumeSpecName,
            volume.volumeGidValue)
        if err != nil {
            glog.Errorf("Could not add pod to volume information to desired state of world: %v", err)
            glog.V(4).Infof("Volume: %s (pod UID %s) is marked device as mounted and added into the actual state", volume.volumeName, volume.podName)
        }
    }
    return nil
@@ -641,7 +626,7 @@ func getVolumesFromPodDir(podDir string) ([]podVolume, error) {
    for volumeMode, volumesDir := range volumesDirs {
        var volumesDirInfo []os.FileInfo
        if volumesDirInfo, err = ioutil.ReadDir(volumesDir); err != nil {
            // Just skip the loop becuase given volumesDir doesn't exist depending on volumeMode
            // Just skip the loop because given volumesDir doesn't exist depending on volumeMode
            continue
        }
        for _, volumeDir := range volumesDirInfo {
@@ -667,6 +652,6 @@ func getVolumesFromPodDir(podDir string) ([]podVolume, error) {
            }
        }
    }
    glog.V(10).Infof("Get volumes from pod directory %q %+v", podDir, volumes)
    glog.V(4).Infof("Get volumes from pod directory %q %+v", podDir, volumes)
    return volumes, nil
}
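`getVolumesFromPodDir` above iterates one subdirectory per volume mode and simply skips a mode whose directory does not exist rather than treating it as an error. A stdlib-only sketch of that scanning idiom; the paths are illustrative, not the real kubelet layout guarantees:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"path/filepath"
)

func main() {
	podDir := "/var/lib/kubelet/pods/example-uid" // hypothetical pod dir
	volumesDirs := map[string]string{
		"filesystem": filepath.Join(podDir, "volumes"),
		"block":      filepath.Join(podDir, "volumeDevices"),
	}
	for mode, dir := range volumesDirs {
		entries, err := ioutil.ReadDir(dir)
		if err != nil {
			// The directory may legitimately not exist for this volume mode.
			continue
		}
		for _, e := range entries {
			fmt.Printf("mode=%s plugin-dir=%s\n", mode, e.Name())
		}
	}
}
```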
60	vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler_test.go (generated, vendored)
@@ -36,8 +36,8 @@ import (
    "k8s.io/kubernetes/pkg/util/mount"
    "k8s.io/kubernetes/pkg/volume"
    volumetesting "k8s.io/kubernetes/pkg/volume/testing"
    "k8s.io/kubernetes/pkg/volume/util"
    "k8s.io/kubernetes/pkg/volume/util/operationexecutor"
    "k8s.io/kubernetes/pkg/volume/util/volumehelper"
)

const (
@@ -149,7 +149,7 @@ func Test_Run_Positive_VolumeAttachAndMount(t *testing.T) {
    }

    volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
    podName := volumehelper.GetUniquePodName(pod)
    podName := util.GetUniquePodName(pod)
    generatedVolumeName, err := dsw.AddPodToVolume(
        podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)

@@ -227,7 +227,7 @@ func Test_Run_Positive_VolumeMountControllerAttachEnabled(t *testing.T) {
    }

    volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
    podName := volumehelper.GetUniquePodName(pod)
    podName := util.GetUniquePodName(pod)
    generatedVolumeName, err := dsw.AddPodToVolume(
        podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)
    dsw.MarkVolumesReportedInUse([]v1.UniqueVolumeName{generatedVolumeName})
@@ -306,7 +306,7 @@ func Test_Run_Positive_VolumeAttachMountUnmountDetach(t *testing.T) {
    }

    volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
    podName := volumehelper.GetUniquePodName(pod)
    podName := util.GetUniquePodName(pod)
    generatedVolumeName, err := dsw.AddPodToVolume(
        podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)

@@ -396,7 +396,7 @@ func Test_Run_Positive_VolumeUnmountControllerAttachEnabled(t *testing.T) {
    }

    volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
    podName := volumehelper.GetUniquePodName(pod)
    podName := util.GetUniquePodName(pod)
    generatedVolumeName, err := dsw.AddPodToVolume(
        podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)

@@ -491,7 +491,7 @@ func Test_Run_Positive_VolumeAttachAndMap(t *testing.T) {
    volumeSpec := &volume.Spec{
        PersistentVolume: gcepv,
    }
    podName := volumehelper.GetUniquePodName(pod)
    podName := util.GetUniquePodName(pod)
    generatedVolumeName, err := dsw.AddPodToVolume(
        podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)

@@ -582,7 +582,7 @@ func Test_Run_Positive_BlockVolumeMapControllerAttachEnabled(t *testing.T) {
    volumeSpec := &volume.Spec{
        PersistentVolume: gcepv,
    }
    podName := volumehelper.GetUniquePodName(pod)
    podName := util.GetUniquePodName(pod)
    generatedVolumeName, err := dsw.AddPodToVolume(
        podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)
    dsw.MarkVolumesReportedInUse([]v1.UniqueVolumeName{generatedVolumeName})
@@ -674,7 +674,7 @@ func Test_Run_Positive_BlockVolumeAttachMapUnmapDetach(t *testing.T) {
    volumeSpec := &volume.Spec{
        PersistentVolume: gcepv,
    }
    podName := volumehelper.GetUniquePodName(pod)
    podName := util.GetUniquePodName(pod)
    generatedVolumeName, err := dsw.AddPodToVolume(
        podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)

@@ -776,7 +776,7 @@ func Test_Run_Positive_VolumeUnmapControllerAttachEnabled(t *testing.T) {
    volumeSpec := &volume.Spec{
        PersistentVolume: gcepv,
    }
    podName := volumehelper.GetUniquePodName(pod)
    podName := util.GetUniquePodName(pod)
    generatedVolumeName, err := dsw.AddPodToVolume(
        podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)

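Every test hunk above swaps `volumehelper.GetUniquePodName` for `util.GetUniquePodName`, reflecting the helper's move from `pkg/volume/util/volumehelper` into `pkg/volume/util` in this vendor update. A simplified stand-in sketch of what such a helper computes; the real upstream implementation may differ, this only mirrors the UID-based idea:

```go
package main

import "fmt"

type pod struct {
	Name      string
	Namespace string
	UID       string
}

type uniquePodName string

func getUniquePodName(p pod) uniquePodName {
	// Pod UIDs are unique across the cluster, so the UID alone suffices.
	return uniquePodName(p.UID)
}

func main() {
	p := pod{Name: "web-0", Namespace: "default", UID: "1234-abcd"}
	fmt.Println(getUniquePodName(p)) // 1234-abcd
}
```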
@@ -835,6 +835,8 @@ func Test_GenerateMapVolumeFunc_Plugin_Not_Found(t *testing.T) {
        },
    }

    // Enable BlockVolume feature gate
    utilfeature.DefaultFeatureGate.Set("BlockVolume=true")
    for name, tc := range testCases {
        t.Run(name, func(t *testing.T) {
            volumePluginMgr := &volume.VolumePluginMgr{}
@@ -854,14 +856,20 @@ func Test_GenerateMapVolumeFunc_Plugin_Not_Found(t *testing.T) {
                },
                Spec: v1.PodSpec{},
            }
            volumeToMount := operationexecutor.VolumeToMount{Pod: pod, VolumeSpec: &volume.Spec{}}
            err := oex.MapVolume(waitForAttachTimeout, volumeToMount, asw)
            volumeMode := v1.PersistentVolumeBlock
            tmpSpec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{VolumeMode: &volumeMode}}}
            volumeToMount := operationexecutor.VolumeToMount{
                Pod: pod,
                VolumeSpec: tmpSpec}
            err := oex.MountVolume(waitForAttachTimeout, volumeToMount, asw, false)
            // Assert
            if assert.Error(t, err) {
                assert.Contains(t, err.Error(), tc.expectedErrMsg)
            }
        })
    }
    // Rollback feature gate to false.
    utilfeature.DefaultFeatureGate.Set("BlockVolume=false")
}

func Test_GenerateUnmapVolumeFunc_Plugin_Not_Found(t *testing.T) {
@@ -882,6 +890,8 @@ func Test_GenerateUnmapVolumeFunc_Plugin_Not_Found(t *testing.T) {
        },
    }

    // Enable BlockVolume feature gate
    utilfeature.DefaultFeatureGate.Set("BlockVolume=true")
    for name, tc := range testCases {
        t.Run(name, func(t *testing.T) {
            volumePluginMgr := &volume.VolumePluginMgr{}
@@ -893,14 +903,20 @@ func Test_GenerateUnmapVolumeFunc_Plugin_Not_Found(t *testing.T) {
                nil, /* fakeRecorder */
                false, /* checkNodeCapabilitiesBeforeMount */
                nil))
            volumeToUnmount := operationexecutor.MountedVolume{PluginName: "fake-file-plugin"}
            err := oex.UnmapVolume(volumeToUnmount, asw)
            volumeMode := v1.PersistentVolumeBlock
            tmpSpec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{VolumeMode: &volumeMode}}}
            volumeToUnmount := operationexecutor.MountedVolume{
                PluginName: "fake-file-plugin",
                VolumeSpec: tmpSpec}
            err := oex.UnmountVolume(volumeToUnmount, asw)
            // Assert
            if assert.Error(t, err) {
                assert.Contains(t, err.Error(), tc.expectedErrMsg)
            }
        })
    }
    // Rollback feature gate to false.
    utilfeature.DefaultFeatureGate.Set("BlockVolume=false")
}

func Test_GenerateUnmapDeviceFunc_Plugin_Not_Found(t *testing.T) {
@@ -912,15 +928,17 @@ func Test_GenerateUnmapDeviceFunc_Plugin_Not_Found(t *testing.T) {
        "volumePlugin is nil": {
            volumePlugins: []volume.VolumePlugin{},
            expectErr: true,
            expectedErrMsg: "UnmapDevice.FindMapperPluginBySpec failed",
            expectedErrMsg: "UnmapDevice.FindMapperPluginByName failed",
        },
        "blockVolumePlugin is nil": {
            volumePlugins: volumetesting.NewFakeFileVolumePlugin(),
            expectErr: true,
            expectedErrMsg: "UnmapDevice.FindMapperPluginBySpec failed to find BlockVolumeMapper plugin. Volume plugin is nil.",
            expectedErrMsg: "UnmapDevice.FindMapperPluginByName failed to find BlockVolumeMapper plugin. Volume plugin is nil.",
        },
    }

    // Enable BlockVolume feature gate
    utilfeature.DefaultFeatureGate.Set("BlockVolume=true")
    for name, tc := range testCases {
        t.Run(name, func(t *testing.T) {
            volumePluginMgr := &volume.VolumePluginMgr{}
@@ -933,14 +951,18 @@ func Test_GenerateUnmapDeviceFunc_Plugin_Not_Found(t *testing.T) {
                false, /* checkNodeCapabilitiesBeforeMount */
                nil))
            var mounter mount.Interface
            deviceToDetach := operationexecutor.AttachedVolume{VolumeSpec: &volume.Spec{}}
            err := oex.UnmapDevice(deviceToDetach, asw, mounter)
            volumeMode := v1.PersistentVolumeBlock
            tmpSpec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{VolumeMode: &volumeMode}}}
            deviceToDetach := operationexecutor.AttachedVolume{VolumeSpec: tmpSpec, PluginName: "fake-file-plugin"}
            err := oex.UnmountDevice(deviceToDetach, asw, mounter)
            // Assert
            if assert.Error(t, err) {
                assert.Contains(t, err.Error(), tc.expectedErrMsg)
            }
        })
    }
    // Rollback feature gate to false.
    utilfeature.DefaultFeatureGate.Set("BlockVolume=false")
}
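The three tests above now wrap their table-driven runs in `utilfeature.DefaultFeatureGate.Set("BlockVolume=true")` and explicitly roll the gate back afterwards. A sketch of that enable-then-rollback pattern with a hypothetical gate type; the real k8s feature-gate API differs:

```go
package main

import "fmt"

type featureGate map[string]bool

// set flips a gate and returns a rollback closure that restores the
// previous value, mirroring the explicit Set("...=false") at test end.
func (g featureGate) set(name string, enabled bool) func() {
	prev := g[name]
	g[name] = enabled
	return func() { g[name] = prev }
}

func main() {
	gates := featureGate{"BlockVolume": false}

	rollback := gates.set("BlockVolume", true)
	defer rollback()

	fmt.Println("during test:", gates["BlockVolume"]) // true
}
```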
func waitForMount(
@@ -949,7 +971,7 @@ func waitForMount(
    volumeName v1.UniqueVolumeName,
    asw cache.ActualStateOfWorld) {
    err := retryWithExponentialBackOff(
        time.Duration(5*time.Millisecond),
        time.Duration(500*time.Millisecond),
        func() (bool, error) {
            mountedVolumes := asw.GetMountedVolumes()
            for _, mountedVolume := range mountedVolumes {
@@ -973,7 +995,7 @@ func waitForDetach(
    volumeName v1.UniqueVolumeName,
    asw cache.ActualStateOfWorld) {
    err := retryWithExponentialBackOff(
        time.Duration(5*time.Millisecond),
        time.Duration(500*time.Millisecond),
        func() (bool, error) {
            if asw.VolumeExists(volumeName) {
                return false, nil
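`waitForMount` and `waitForDetach` above poll a condition through `retryWithExponentialBackOff` with a 5 ms initial delay and a 500 ms cap. A comparable, self-contained sketch using `wait.ExponentialBackoff` from `k8s.io/apimachinery/pkg/util/wait`; the condition here is illustrative:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	deadline := time.Now().Add(40 * time.Millisecond)
	backoff := wait.Backoff{
		Duration: 5 * time.Millisecond, // initial delay, as in the tests
		Factor:   2.0,                  // double the delay each attempt
		Steps:    8,                    // give up after 8 attempts
	}
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		// Stand-in for "is the volume mounted/detached yet?"
		return time.Now().After(deadline), nil
	})
	fmt.Println("done, err =", err)
}
```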