Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-14 02:43:36 +00:00

Commit: vendor files
vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/BUILD (generated, vendored, new file, 84 lines)
@@ -0,0 +1,84 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = ["volume_manager.go"],
    importpath = "k8s.io/kubernetes/pkg/kubelet/volumemanager",
    deps = [
        "//pkg/kubelet/config:go_default_library",
        "//pkg/kubelet/container:go_default_library",
        "//pkg/kubelet/pod:go_default_library",
        "//pkg/kubelet/status:go_default_library",
        "//pkg/kubelet/util/format:go_default_library",
        "//pkg/kubelet/volumemanager/cache:go_default_library",
        "//pkg/kubelet/volumemanager/populator:go_default_library",
        "//pkg/kubelet/volumemanager/reconciler:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//pkg/volume/util/operationexecutor:go_default_library",
        "//pkg/volume/util/types:go_default_library",
        "//pkg/volume/util/volumehelper:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/tools/record:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["volume_manager_test.go"],
    importpath = "k8s.io/kubernetes/pkg/kubelet/volumemanager",
    library = ":go_default_library",
    deps = [
        "//pkg/kubelet/config:go_default_library",
        "//pkg/kubelet/configmap:go_default_library",
        "//pkg/kubelet/container/testing:go_default_library",
        "//pkg/kubelet/pod:go_default_library",
        "//pkg/kubelet/pod/testing:go_default_library",
        "//pkg/kubelet/secret:go_default_library",
        "//pkg/kubelet/status:go_default_library",
        "//pkg/kubelet/status/testing:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/testing:go_default_library",
        "//pkg/volume/util/types:go_default_library",
        "//pkg/volume/util/volumehelper:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
        "//vendor/k8s.io/client-go/tools/record:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//pkg/kubelet/volumemanager/cache:all-srcs",
        "//pkg/kubelet/volumemanager/populator:all-srcs",
        "//pkg/kubelet/volumemanager/reconciler:all-srcs",
    ],
    tags = ["automanaged"],
)
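Note: the importpath attribute above is also what Go consumers import; the Bazel label and the Go import path name the same library. A minimal, hypothetical consumer (not part of this commit) would look like:

// illustrative_consumer.go (hypothetical): a Bazel dependency on
// //pkg/kubelet/volumemanager:go_default_library corresponds to importing
// the importpath declared in the rule above.
package main

import _ "k8s.io/kubernetes/pkg/kubelet/volumemanager" // blank import: path illustration only

func main() {}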
vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/OWNERS (generated, vendored, new file, 10 lines)
@@ -0,0 +1,10 @@
approvers:
- saad-ali
reviewers:
- jsafrane
- gnufied
- rootfs
- jingxu97
- msau42
- verult
- davidz627
vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/BUILD (generated, vendored, new file, 56 lines)
@@ -0,0 +1,56 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = [
        "actual_state_of_world.go",
        "desired_state_of_world.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache",
    deps = [
        "//pkg/volume:go_default_library",
        "//pkg/volume/util/operationexecutor:go_default_library",
        "//pkg/volume/util/types:go_default_library",
        "//pkg/volume/util/volumehelper:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "actual_state_of_world_test.go",
        "desired_state_of_world_test.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache",
    library = ":go_default_library",
    deps = [
        "//pkg/volume:go_default_library",
        "//pkg/volume/testing:go_default_library",
        "//pkg/volume/util/types:go_default_library",
        "//pkg/volume/util/volumehelper:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
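Note: because the go_test rule sets library = ":go_default_library", the test sources compile into the same cache package as the library and may reference unexported identifiers. A hypothetical sketch (not part of this commit):

// internal_sketch_test.go (hypothetical): compiled into package cache by the
// go_test rule above, so unexported types are visible to it.
package cache

import "testing"

func TestUnexportedVisibility(t *testing.T) {
    var asw actualStateOfWorld // unexported type from actual_state_of_world.go
    _ = asw
}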
vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/actual_state_of_world.go (generated, vendored, new file, 695 lines)
@@ -0,0 +1,695 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Package cache implements data structures used by the kubelet volume manager to
keep track of attached volumes and the pods that mounted them.
*/
package cache

import (
    "fmt"
    "sync"

    "github.com/golang/glog"

    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/kubernetes/pkg/volume"
    "k8s.io/kubernetes/pkg/volume/util/operationexecutor"
    volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
    "k8s.io/kubernetes/pkg/volume/util/volumehelper"
)

// ActualStateOfWorld defines a set of thread-safe operations for the kubelet
// volume manager's actual state of the world cache.
// This cache contains volumes->pods i.e. a set of all volumes attached to this
// node and the pods that the manager believes have successfully mounted the
// volume.
// Note: This is distinct from the ActualStateOfWorld implemented by the
// attach/detach controller. They both keep track of different objects. This
// contains kubelet volume manager specific state.
type ActualStateOfWorld interface {
    // ActualStateOfWorld must implement the methods required to allow
    // operationexecutor to interact with it.
    operationexecutor.ActualStateOfWorldMounterUpdater

    // ActualStateOfWorld must implement the methods required to allow
    // operationexecutor to interact with it.
    operationexecutor.ActualStateOfWorldAttacherUpdater

    // AddPodToVolume adds the given pod to the given volume in the cache
    // indicating the specified volume has been successfully mounted to the
    // specified pod.
    // If a pod with the same unique name already exists under the specified
    // volume, reset the pod's remountRequired value.
    // If a volume with the name volumeName does not exist in the list of
    // attached volumes, an error is returned.
    AddPodToVolume(podName volumetypes.UniquePodName, podUID types.UID, volumeName v1.UniqueVolumeName, mounter volume.Mounter, blockVolumeMapper volume.BlockVolumeMapper, outerVolumeSpecName string, volumeGidValue string) error

    // MarkRemountRequired marks each volume that is successfully attached and
    // mounted for the specified pod as requiring remount (if the plugin for the
    // volume indicates it requires remounting on pod updates). Atomically
    // updated volumes depend on this to update the contents of the volume on
    // pod update.
    MarkRemountRequired(podName volumetypes.UniquePodName)

    // SetVolumeGloballyMounted sets the GloballyMounted value for the given
    // volume. When set to true this value indicates that the volume is mounted
    // to the underlying device at a global mount point. This global mount point
    // must be unmounted prior to detach.
    // If a volume with the name volumeName does not exist in the list of
    // attached volumes, an error is returned.
    SetVolumeGloballyMounted(volumeName v1.UniqueVolumeName, globallyMounted bool) error

    // DeletePodFromVolume removes the given pod from the given volume in the
    // cache indicating the volume has been successfully unmounted from the pod.
    // If a pod with the same unique name does not exist under the specified
    // volume, this is a no-op.
    // If a volume with the name volumeName does not exist in the list of
    // attached volumes, an error is returned.
    DeletePodFromVolume(podName volumetypes.UniquePodName, volumeName v1.UniqueVolumeName) error

    // DeleteVolume removes the given volume from the list of attached volumes
    // in the cache indicating the volume has been successfully detached from
    // this node.
    // If a volume with the name volumeName does not exist in the list of
    // attached volumes, this is a no-op.
    // If a volume with the name volumeName exists and its list of mountedPods
    // is not empty, an error is returned.
    DeleteVolume(volumeName v1.UniqueVolumeName) error

    // PodExistsInVolume returns true if the given pod exists in the list of
    // mountedPods for the given volume in the cache, indicating that the volume
    // is attached to this node and the pod has successfully mounted it.
    // If a pod with the same unique name does not exist under the specified
    // volume, false is returned.
    // If a volume with the name volumeName does not exist in the list of
    // attached volumes, a volumeNotAttachedError is returned indicating the
    // given volume is not yet attached.
    // If the given volumeName/podName combo exists but the value of
    // remountRequired is true, a remountRequiredError is returned indicating
    // the given volume has been successfully mounted to this pod but should be
    // remounted to reflect changes in the referencing pod. Atomically updated
    // volumes depend on this to update the contents of the volume.
    // All volume mounting calls should be idempotent so a second mount call for
    // volumes that do not need to update contents should not fail.
    PodExistsInVolume(podName volumetypes.UniquePodName, volumeName v1.UniqueVolumeName) (bool, string, error)

    // VolumeExists returns true if the given volume exists in the list of
    // attached volumes in the cache, indicating the volume is attached to this
    // node.
    VolumeExists(volumeName v1.UniqueVolumeName) bool

    // GetMountedVolumes generates and returns a list of volumes and the pods
    // they are successfully attached and mounted for based on the current
    // actual state of the world.
    GetMountedVolumes() []MountedVolume

    // GetMountedVolumesForPod generates and returns a list of volumes that are
    // successfully attached and mounted for the specified pod based on the
    // current actual state of the world.
    GetMountedVolumesForPod(podName volumetypes.UniquePodName) []MountedVolume

    // GetGloballyMountedVolumes generates and returns a list of all attached
    // volumes that are globally mounted. This list can be used to determine
    // which volumes should be reported as "in use" in the node's VolumesInUse
    // status field. Globally mounted here refers to the shared plugin mount
    // point for the attachable volume from which the pod specific mount points
    // are created (via bind mount).
    GetGloballyMountedVolumes() []AttachedVolume

    // GetUnmountedVolumes generates and returns a list of attached volumes that
    // have no mountedPods. This list can be used to determine which volumes are
    // no longer referenced and may be globally unmounted and detached.
    GetUnmountedVolumes() []AttachedVolume

    // GetPods generates and returns a map of pods, indexed by each pod's
    // unique name. This map can be used to determine which pods are currently
    // in the actual state of the world.
    GetPods() map[volumetypes.UniquePodName]bool
}
// MountedVolume represents a volume that has successfully been mounted to a pod.
type MountedVolume struct {
    operationexecutor.MountedVolume
}

// AttachedVolume represents a volume that is attached to a node.
type AttachedVolume struct {
    operationexecutor.AttachedVolume

    // GloballyMounted indicates that the volume is mounted to the underlying
    // device at a global mount point. This global mount point must be unmounted
    // prior to detach.
    GloballyMounted bool
}

// NewActualStateOfWorld returns a new instance of ActualStateOfWorld.
func NewActualStateOfWorld(
    nodeName types.NodeName,
    volumePluginMgr *volume.VolumePluginMgr) ActualStateOfWorld {
    return &actualStateOfWorld{
        nodeName:        nodeName,
        attachedVolumes: make(map[v1.UniqueVolumeName]attachedVolume),
        volumePluginMgr: volumePluginMgr,
    }
}
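For orientation, a minimal sketch of constructing and populating this cache, modeled on the unit tests further below (volumeSpec and t are assumed from surrounding test setup; this is illustrative, not part of the vendored file):

// Illustrative sketch: record an attach in the actual state of the world.
volumePluginMgr, plugin := volumetesting.GetTestVolumePluginMgr(t)
asw := NewActualStateOfWorld("mynode" /* nodeName */, volumePluginMgr)
volumeName, err := volumehelper.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)
if err != nil {
    t.Fatalf("GetUniqueVolumeNameFromSpec failed: %v", err)
}
if err := asw.MarkVolumeAsAttached(volumeName, volumeSpec, "" /* nodeName */, "fake/device/path"); err != nil {
    t.Fatalf("MarkVolumeAsAttached failed: %v", err)
}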
// IsVolumeNotAttachedError returns true if the specified error is a
// volumeNotAttachedError.
func IsVolumeNotAttachedError(err error) bool {
    _, ok := err.(volumeNotAttachedError)
    return ok
}

// IsRemountRequiredError returns true if the specified error is a
// remountRequiredError.
func IsRemountRequiredError(err error) bool {
    _, ok := err.(remountRequiredError)
    return ok
}
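These helpers exist because PodExistsInVolume (below) reports two distinct recoverable conditions through its error value. A hedged sketch of the intended caller-side pattern (asw, podName, and volumeName assumed from context):

// Illustrative sketch: interpret PodExistsInVolume's three-way result.
mounted, devicePath, err := asw.PodExistsInVolume(podName, volumeName)
switch {
case err != nil && IsVolumeNotAttachedError(err):
    // The volume is not attached yet: attach (or wait) before mounting.
case err != nil && IsRemountRequiredError(err):
    // The pod already mounted the volume, but the plugin requires a remount
    // to pick up changes in the referencing pod; mount calls are idempotent.
case err != nil:
    // Unexpected failure.
default:
    _ = mounted    // true if the pod has successfully mounted the volume
    _ = devicePath // device attach path, for attachable plugins
}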
type actualStateOfWorld struct {
    // nodeName is the name of this node. This value is passed to Attach/Detach
    nodeName types.NodeName

    // attachedVolumes is a map containing the set of volumes the kubelet volume
    // manager believes to be successfully attached to this node. Volume types
    // that do not implement an attacher interface are assumed to be in this
    // state by default.
    // The key in this map is the name of the volume and the value is an object
    // containing more information about the attached volume.
    attachedVolumes map[v1.UniqueVolumeName]attachedVolume

    // volumePluginMgr is the volume plugin manager used to create volume
    // plugin objects.
    volumePluginMgr *volume.VolumePluginMgr
    sync.RWMutex
}

// attachedVolume represents a volume the kubelet volume manager believes to be
// successfully attached to a node it is managing. Volume types that do not
// implement an attacher are assumed to be in this state.
type attachedVolume struct {
    // volumeName contains the unique identifier for this volume.
    volumeName v1.UniqueVolumeName

    // mountedPods is a map containing the set of pods that this volume has been
    // successfully mounted to. The key in this map is the name of the pod and
    // the value is a mountedPod object containing more information about the
    // pod.
    mountedPods map[volumetypes.UniquePodName]mountedPod

    // spec is the volume spec containing the specification for this volume.
    // Used to generate the volume plugin object, and passed to plugin methods.
    // In particular, the Unmount method uses spec.Name() as the volumeSpecName
    // in the mount path:
    // /var/lib/kubelet/pods/{podUID}/volumes/{escapeQualifiedPluginName}/{volumeSpecName}/
    spec *volume.Spec

    // pluginName is the unescaped qualified name of the volume plugin used to
    // attach and mount this volume. It is stored separately in case the full
    // volume spec (everything except the name) can not be reconstructed for a
    // volume that should be unmounted (which would be the case for a mount path
    // read from disk without a full volume spec).
    pluginName string

    // pluginIsAttachable indicates the volume plugin used to attach and mount
    // this volume implements the volume.Attacher interface
    pluginIsAttachable bool

    // globallyMounted indicates that the volume is mounted to the underlying
    // device at a global mount point. This global mount point must be unmounted
    // prior to detach.
    globallyMounted bool

    // devicePath contains the path on the node where the volume is attached for
    // attachable volumes
    devicePath string
}

// The mountedPod object represents a pod for which the kubelet volume manager
// believes the underlying volume has been successfully mounted.
type mountedPod struct {
    // the name of the pod
    podName volumetypes.UniquePodName

    // the UID of the pod
    podUID types.UID

    // mounter used to mount
    mounter volume.Mounter

    // mapper used to support block volumes
    blockVolumeMapper volume.BlockVolumeMapper

    // outerVolumeSpecName is the volume.Spec.Name() of the volume as referenced
    // directly in the pod. If the volume was referenced through a persistent
    // volume claim, this contains the volume.Spec.Name() of the persistent
    // volume claim
    outerVolumeSpecName string

    // remountRequired indicates the underlying volume has been successfully
    // mounted to this pod but it should be remounted to reflect changes in the
    // referencing pod.
    // Atomically updated volumes depend on this to update the contents of the
    // volume. All volume mounting calls should be idempotent so a second mount
    // call for volumes that do not need to update contents should not fail.
    remountRequired bool

    // volumeGidValue contains the value of the GID annotation, if present.
    volumeGidValue string
}

func (asw *actualStateOfWorld) MarkVolumeAsAttached(
    volumeName v1.UniqueVolumeName, volumeSpec *volume.Spec, _ types.NodeName, devicePath string) error {
    return asw.addVolume(volumeName, volumeSpec, devicePath)
}

func (asw *actualStateOfWorld) MarkVolumeAsDetached(
    volumeName v1.UniqueVolumeName, nodeName types.NodeName) {
    asw.DeleteVolume(volumeName)
}

func (asw *actualStateOfWorld) MarkVolumeAsMounted(
    podName volumetypes.UniquePodName,
    podUID types.UID,
    volumeName v1.UniqueVolumeName,
    mounter volume.Mounter,
    blockVolumeMapper volume.BlockVolumeMapper,
    outerVolumeSpecName string,
    volumeGidValue string) error {
    return asw.AddPodToVolume(
        podName,
        podUID,
        volumeName,
        mounter,
        blockVolumeMapper,
        outerVolumeSpecName,
        volumeGidValue)
}

func (asw *actualStateOfWorld) AddVolumeToReportAsAttached(volumeName v1.UniqueVolumeName, nodeName types.NodeName) {
    // no operation for kubelet side
}

func (asw *actualStateOfWorld) RemoveVolumeFromReportAsAttached(volumeName v1.UniqueVolumeName, nodeName types.NodeName) error {
    // no operation for kubelet side
    return nil
}

func (asw *actualStateOfWorld) MarkVolumeAsUnmounted(
    podName volumetypes.UniquePodName, volumeName v1.UniqueVolumeName) error {
    return asw.DeletePodFromVolume(podName, volumeName)
}

func (asw *actualStateOfWorld) MarkDeviceAsMounted(
    volumeName v1.UniqueVolumeName) error {
    return asw.SetVolumeGloballyMounted(volumeName, true /* globallyMounted */)
}

func (asw *actualStateOfWorld) MarkDeviceAsUnmounted(
    volumeName v1.UniqueVolumeName) error {
    return asw.SetVolumeGloballyMounted(volumeName, false /* globallyMounted */)
}

// addVolume adds the given volume to the cache indicating the specified
// volume is attached to this node. If no volume name is supplied, a unique
// volume name is generated from the volumeSpec and returned on success. If a
// volume with the same generated name already exists, this is a noop. If no
// volume plugin can support the given volumeSpec or more than one plugin can
// support it, an error is returned.
func (asw *actualStateOfWorld) addVolume(
    volumeName v1.UniqueVolumeName, volumeSpec *volume.Spec, devicePath string) error {
    asw.Lock()
    defer asw.Unlock()

    volumePlugin, err := asw.volumePluginMgr.FindPluginBySpec(volumeSpec)
    if err != nil || volumePlugin == nil {
        return fmt.Errorf(
            "failed to get Plugin from volumeSpec for volume %q err=%v",
            volumeSpec.Name(),
            err)
    }

    if len(volumeName) == 0 {
        volumeName, err = volumehelper.GetUniqueVolumeNameFromSpec(volumePlugin, volumeSpec)
        if err != nil {
            return fmt.Errorf(
                "failed to GetUniqueVolumeNameFromSpec for volumeSpec %q using volume plugin %q err=%v",
                volumeSpec.Name(),
                volumePlugin.GetPluginName(),
                err)
        }
    }

    pluginIsAttachable := false
    if _, ok := volumePlugin.(volume.AttachableVolumePlugin); ok {
        pluginIsAttachable = true
    }

    volumeObj, volumeExists := asw.attachedVolumes[volumeName]
    if !volumeExists {
        volumeObj = attachedVolume{
            volumeName:         volumeName,
            spec:               volumeSpec,
            mountedPods:        make(map[volumetypes.UniquePodName]mountedPod),
            pluginName:         volumePlugin.GetPluginName(),
            pluginIsAttachable: pluginIsAttachable,
            globallyMounted:    false,
            devicePath:         devicePath,
        }
    } else {
        // If the volume object already exists, update fields such as the device path.
        volumeObj.devicePath = devicePath
        glog.V(2).Infof("Volume %q is already added to attachedVolume list, update device path %q",
            volumeName,
            devicePath)
    }
    asw.attachedVolumes[volumeName] = volumeObj

    return nil
}

func (asw *actualStateOfWorld) AddPodToVolume(
    podName volumetypes.UniquePodName,
    podUID types.UID,
    volumeName v1.UniqueVolumeName,
    mounter volume.Mounter,
    blockVolumeMapper volume.BlockVolumeMapper,
    outerVolumeSpecName string,
    volumeGidValue string) error {
    asw.Lock()
    defer asw.Unlock()

    volumeObj, volumeExists := asw.attachedVolumes[volumeName]
    if !volumeExists {
        return fmt.Errorf(
            "no volume with the name %q exists in the list of attached volumes",
            volumeName)
    }

    podObj, podExists := volumeObj.mountedPods[podName]
    if !podExists {
        podObj = mountedPod{
            podName:             podName,
            podUID:              podUID,
            mounter:             mounter,
            blockVolumeMapper:   blockVolumeMapper,
            outerVolumeSpecName: outerVolumeSpecName,
            volumeGidValue:      volumeGidValue,
        }
    }

    // If the pod already exists, reset its remountRequired value.
    podObj.remountRequired = false
    asw.attachedVolumes[volumeName].mountedPods[podName] = podObj

    return nil
}

func (asw *actualStateOfWorld) MarkRemountRequired(
    podName volumetypes.UniquePodName) {
    asw.Lock()
    defer asw.Unlock()
    for volumeName, volumeObj := range asw.attachedVolumes {
        for mountedPodName, podObj := range volumeObj.mountedPods {
            if mountedPodName != podName {
                continue
            }

            volumePlugin, err :=
                asw.volumePluginMgr.FindPluginBySpec(volumeObj.spec)
            if err != nil || volumePlugin == nil {
                // Log and continue processing
                glog.Errorf(
                    "MarkRemountRequired failed to FindPluginBySpec for pod %q (podUid %q) volume: %q (volSpecName: %q)",
                    podObj.podName,
                    podObj.podUID,
                    volumeObj.volumeName,
                    volumeObj.spec.Name())
                continue
            }

            if volumePlugin.RequiresRemount() {
                podObj.remountRequired = true
                asw.attachedVolumes[volumeName].mountedPods[podName] = podObj
            }
        }
    }
}

func (asw *actualStateOfWorld) SetVolumeGloballyMounted(
    volumeName v1.UniqueVolumeName, globallyMounted bool) error {
    asw.Lock()
    defer asw.Unlock()

    volumeObj, volumeExists := asw.attachedVolumes[volumeName]
    if !volumeExists {
        return fmt.Errorf(
            "no volume with the name %q exists in the list of attached volumes",
            volumeName)
    }

    volumeObj.globallyMounted = globallyMounted
    asw.attachedVolumes[volumeName] = volumeObj
    return nil
}

func (asw *actualStateOfWorld) DeletePodFromVolume(
    podName volumetypes.UniquePodName, volumeName v1.UniqueVolumeName) error {
    asw.Lock()
    defer asw.Unlock()

    volumeObj, volumeExists := asw.attachedVolumes[volumeName]
    if !volumeExists {
        return fmt.Errorf(
            "no volume with the name %q exists in the list of attached volumes",
            volumeName)
    }

    _, podExists := volumeObj.mountedPods[podName]
    if podExists {
        delete(asw.attachedVolumes[volumeName].mountedPods, podName)
    }

    return nil
}

func (asw *actualStateOfWorld) DeleteVolume(volumeName v1.UniqueVolumeName) error {
    asw.Lock()
    defer asw.Unlock()

    volumeObj, volumeExists := asw.attachedVolumes[volumeName]
    if !volumeExists {
        return nil
    }

    if len(volumeObj.mountedPods) != 0 {
        return fmt.Errorf(
            "failed to DeleteVolume %q, it still has %v mountedPods",
            volumeName,
            len(volumeObj.mountedPods))
    }

    delete(asw.attachedVolumes, volumeName)
    return nil
}

func (asw *actualStateOfWorld) PodExistsInVolume(
    podName volumetypes.UniquePodName,
    volumeName v1.UniqueVolumeName) (bool, string, error) {
    asw.RLock()
    defer asw.RUnlock()

    volumeObj, volumeExists := asw.attachedVolumes[volumeName]
    if !volumeExists {
        return false, "", newVolumeNotAttachedError(volumeName)
    }

    podObj, podExists := volumeObj.mountedPods[podName]
    if podExists && podObj.remountRequired {
        return true, volumeObj.devicePath, newRemountRequiredError(volumeObj.volumeName, podObj.podName)
    }

    return podExists, volumeObj.devicePath, nil
}

func (asw *actualStateOfWorld) VolumeExists(
    volumeName v1.UniqueVolumeName) bool {
    asw.RLock()
    defer asw.RUnlock()

    _, volumeExists := asw.attachedVolumes[volumeName]
    return volumeExists
}

func (asw *actualStateOfWorld) GetMountedVolumes() []MountedVolume {
    asw.RLock()
    defer asw.RUnlock()
    mountedVolume := make([]MountedVolume, 0 /* len */, len(asw.attachedVolumes) /* cap */)
    for _, volumeObj := range asw.attachedVolumes {
        for _, podObj := range volumeObj.mountedPods {
            mountedVolume = append(
                mountedVolume,
                getMountedVolume(&podObj, &volumeObj))
        }
    }

    return mountedVolume
}

func (asw *actualStateOfWorld) GetMountedVolumesForPod(
    podName volumetypes.UniquePodName) []MountedVolume {
    asw.RLock()
    defer asw.RUnlock()
    mountedVolume := make([]MountedVolume, 0 /* len */, len(asw.attachedVolumes) /* cap */)
    for _, volumeObj := range asw.attachedVolumes {
        for mountedPodName, podObj := range volumeObj.mountedPods {
            if mountedPodName == podName {
                mountedVolume = append(
                    mountedVolume,
                    getMountedVolume(&podObj, &volumeObj))
            }
        }
    }

    return mountedVolume
}

func (asw *actualStateOfWorld) GetGloballyMountedVolumes() []AttachedVolume {
    asw.RLock()
    defer asw.RUnlock()
    globallyMountedVolumes := make(
        []AttachedVolume, 0 /* len */, len(asw.attachedVolumes) /* cap */)
    for _, volumeObj := range asw.attachedVolumes {
        if volumeObj.globallyMounted {
            globallyMountedVolumes = append(
                globallyMountedVolumes,
                asw.newAttachedVolume(&volumeObj))
        }
    }

    return globallyMountedVolumes
}

func (asw *actualStateOfWorld) GetUnmountedVolumes() []AttachedVolume {
    asw.RLock()
    defer asw.RUnlock()
    unmountedVolumes := make([]AttachedVolume, 0 /* len */, len(asw.attachedVolumes) /* cap */)
    for _, volumeObj := range asw.attachedVolumes {
        if len(volumeObj.mountedPods) == 0 {
            unmountedVolumes = append(
                unmountedVolumes,
                asw.newAttachedVolume(&volumeObj))
        }
    }

    return unmountedVolumes
}

func (asw *actualStateOfWorld) GetPods() map[volumetypes.UniquePodName]bool {
    asw.RLock()
    defer asw.RUnlock()

    podList := make(map[volumetypes.UniquePodName]bool)
    for _, volumeObj := range asw.attachedVolumes {
        for podName := range volumeObj.mountedPods {
            if !podList[podName] {
                podList[podName] = true
            }
        }
    }
    return podList
}

func (asw *actualStateOfWorld) newAttachedVolume(
    attachedVolume *attachedVolume) AttachedVolume {
    return AttachedVolume{
        AttachedVolume: operationexecutor.AttachedVolume{
            VolumeName:         attachedVolume.volumeName,
            VolumeSpec:         attachedVolume.spec,
            NodeName:           asw.nodeName,
            PluginIsAttachable: attachedVolume.pluginIsAttachable,
            DevicePath:         attachedVolume.devicePath},
        GloballyMounted: attachedVolume.globallyMounted}
}

// Compile-time check to ensure volumeNotAttachedError implements the error interface
var _ error = volumeNotAttachedError{}

// volumeNotAttachedError is an error returned when PodExistsInVolume() fails to
// find the specified volume in the list of attached volumes.
type volumeNotAttachedError struct {
    volumeName v1.UniqueVolumeName
}

func (err volumeNotAttachedError) Error() string {
    return fmt.Sprintf(
        "volumeName %q does not exist in the list of attached volumes",
        err.volumeName)
}

func newVolumeNotAttachedError(volumeName v1.UniqueVolumeName) error {
    return volumeNotAttachedError{
        volumeName: volumeName,
    }
}

// Compile-time check to ensure remountRequiredError implements the error interface
var _ error = remountRequiredError{}

// remountRequiredError is an error returned when PodExistsInVolume() found the
// volume/pod attached/mounted but remountRequired was true, indicating the
// given volume should be remounted to the pod to reflect changes in the
// referencing pod.
type remountRequiredError struct {
    volumeName v1.UniqueVolumeName
    podName    volumetypes.UniquePodName
}

func (err remountRequiredError) Error() string {
    return fmt.Sprintf(
        "volumeName %q is mounted to %q but should be remounted",
        err.volumeName, err.podName)
}

func newRemountRequiredError(
    volumeName v1.UniqueVolumeName, podName volumetypes.UniquePodName) error {
    return remountRequiredError{
        volumeName: volumeName,
        podName:    podName,
    }
}

// getMountedVolume constructs and returns a MountedVolume object from the given
// mountedPod and attachedVolume objects.
func getMountedVolume(
    mountedPod *mountedPod, attachedVolume *attachedVolume) MountedVolume {
    return MountedVolume{
        MountedVolume: operationexecutor.MountedVolume{
            PodName:             mountedPod.podName,
            VolumeName:          attachedVolume.volumeName,
            InnerVolumeSpecName: attachedVolume.spec.Name(),
            OuterVolumeSpecName: mountedPod.outerVolumeSpecName,
            PluginName:          attachedVolume.pluginName,
            PodUID:              mountedPod.podUID,
            Mounter:             mountedPod.mounter,
            BlockVolumeMapper:   mountedPod.blockVolumeMapper,
            VolumeGidValue:      mountedPod.volumeGidValue,
            VolumeSpec:          attachedVolume.spec}}
}
vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/actual_state_of_world_test.go (generated, vendored, new file, 548 lines)
@@ -0,0 +1,548 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
    "testing"

    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/kubernetes/pkg/volume"
    volumetesting "k8s.io/kubernetes/pkg/volume/testing"
    volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
    "k8s.io/kubernetes/pkg/volume/util/volumehelper"
)

var emptyVolumeName = v1.UniqueVolumeName("")

// Calls MarkVolumeAsAttached() once to add volume
// Verifies newly added volume exists in GetUnmountedVolumes()
// Verifies newly added volume doesn't exist in GetGloballyMountedVolumes()
func Test_MarkVolumeAsAttached_Positive_NewVolume(t *testing.T) {
    // Arrange
    volumePluginMgr, plugin := volumetesting.GetTestVolumePluginMgr(t)
    asw := NewActualStateOfWorld("mynode" /* nodeName */, volumePluginMgr)
    pod := &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Name: "pod1",
            UID:  "pod1uid",
        },
        Spec: v1.PodSpec{
            Volumes: []v1.Volume{
                {
                    Name: "volume-name",
                    VolumeSource: v1.VolumeSource{
                        GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
                            PDName: "fake-device1",
                        },
                    },
                },
            },
        },
    }
    volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
    devicePath := "fake/device/path"
    generatedVolumeName, _ := volumehelper.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)

    // Act
    err := asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)

    // Assert
    if err != nil {
        t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err)
    }

    verifyVolumeExistsAsw(t, generatedVolumeName, true /* shouldExist */, asw)
    verifyVolumeExistsInUnmountedVolumes(t, generatedVolumeName, asw)
    verifyVolumeDoesntExistInGloballyMountedVolumes(t, generatedVolumeName, asw)
}

// Calls MarkVolumeAsAttached() once to add volume, specifying a name --
// establishes that the supplied volume name is used to register the volume
// rather than the generated one.
// Verifies newly added volume exists in GetUnmountedVolumes()
// Verifies newly added volume doesn't exist in GetGloballyMountedVolumes()
func Test_MarkVolumeAsAttached_SuppliedVolumeName_Positive_NewVolume(t *testing.T) {
    // Arrange
    volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
    asw := NewActualStateOfWorld("mynode" /* nodeName */, volumePluginMgr)
    pod := &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Name: "pod1",
            UID:  "pod1uid",
        },
        Spec: v1.PodSpec{
            Volumes: []v1.Volume{
                {
                    Name: "volume-name",
                    VolumeSource: v1.VolumeSource{
                        GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
                            PDName: "fake-device1",
                        },
                    },
                },
            },
        },
    }
    volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
    devicePath := "fake/device/path"
    volumeName := v1.UniqueVolumeName("this-would-never-be-a-volume-name")

    // Act
    err := asw.MarkVolumeAsAttached(volumeName, volumeSpec, "" /* nodeName */, devicePath)

    // Assert
    if err != nil {
        t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err)
    }

    verifyVolumeExistsAsw(t, volumeName, true /* shouldExist */, asw)
    verifyVolumeExistsInUnmountedVolumes(t, volumeName, asw)
    verifyVolumeDoesntExistInGloballyMountedVolumes(t, volumeName, asw)
}

// Calls MarkVolumeAsAttached() twice to add the same volume
// Verifies second call doesn't fail
// Verifies newly added volume exists in GetUnmountedVolumes()
// Verifies newly added volume doesn't exist in GetGloballyMountedVolumes()
func Test_MarkVolumeAsAttached_Positive_ExistingVolume(t *testing.T) {
    // Arrange
    volumePluginMgr, plugin := volumetesting.GetTestVolumePluginMgr(t)
    devicePath := "fake/device/path"
    asw := NewActualStateOfWorld("mynode" /* nodeName */, volumePluginMgr)
    pod := &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Name: "pod1",
            UID:  "pod1uid",
        },
        Spec: v1.PodSpec{
            Volumes: []v1.Volume{
                {
                    Name: "volume-name",
                    VolumeSource: v1.VolumeSource{
                        GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
                            PDName: "fake-device1",
                        },
                    },
                },
            },
        },
    }
    volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
    generatedVolumeName, _ := volumehelper.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)

    err := asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
    if err != nil {
        t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err)
    }

    // Act
    err = asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)

    // Assert
    if err != nil {
        t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err)
    }

    verifyVolumeExistsAsw(t, generatedVolumeName, true /* shouldExist */, asw)
    verifyVolumeExistsInUnmountedVolumes(t, generatedVolumeName, asw)
    verifyVolumeDoesntExistInGloballyMountedVolumes(t, generatedVolumeName, asw)
}

// Populates data struct with a volume
// Calls AddPodToVolume() to add a pod to the volume
// Verifies the volume/pod combo exists using PodExistsInVolume()
func Test_AddPodToVolume_Positive_ExistingVolumeNewNode(t *testing.T) {
    // Arrange
    volumePluginMgr, plugin := volumetesting.GetTestVolumePluginMgr(t)
    asw := NewActualStateOfWorld("mynode" /* nodeName */, volumePluginMgr)
    devicePath := "fake/device/path"

    pod := &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Name: "pod1",
            UID:  "pod1uid",
        },
        Spec: v1.PodSpec{
            Volumes: []v1.Volume{
                {
                    Name: "volume-name",
                    VolumeSource: v1.VolumeSource{
                        GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
                            PDName: "fake-device1",
                        },
                    },
                },
            },
        },
    }
    volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
    generatedVolumeName, err := volumehelper.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)

    err = asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
    if err != nil {
        t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err)
    }
    podName := volumehelper.GetUniquePodName(pod)

    mounter, err := plugin.NewMounter(volumeSpec, pod, volume.VolumeOptions{})
    if err != nil {
        t.Fatalf("NewMounter failed. Expected: <no error> Actual: <%v>", err)
    }

    mapper, err := plugin.NewBlockVolumeMapper(volumeSpec, pod, volume.VolumeOptions{})
    if err != nil {
        t.Fatalf("NewBlockVolumeMapper failed. Expected: <no error> Actual: <%v>", err)
    }

    // Act
    err = asw.AddPodToVolume(
        podName, pod.UID, generatedVolumeName, mounter, mapper, volumeSpec.Name(), "" /* volumeGidValue */)

    // Assert
    if err != nil {
        t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
    }

    verifyVolumeExistsAsw(t, generatedVolumeName, true /* shouldExist */, asw)
    verifyVolumeDoesntExistInUnmountedVolumes(t, generatedVolumeName, asw)
    verifyVolumeDoesntExistInGloballyMountedVolumes(t, generatedVolumeName, asw)
    verifyPodExistsInVolumeAsw(t, podName, generatedVolumeName, "fake/device/path" /* expectedDevicePath */, asw)
}

// Populates data struct with a volume
// Calls AddPodToVolume() twice to add the same pod to the volume
// Verifies the volume/pod combo exists using PodExistsInVolume() and the second call
// did not fail.
func Test_AddPodToVolume_Positive_ExistingVolumeExistingNode(t *testing.T) {
    // Arrange
    volumePluginMgr, plugin := volumetesting.GetTestVolumePluginMgr(t)
    asw := NewActualStateOfWorld("mynode" /* nodeName */, volumePluginMgr)
    devicePath := "fake/device/path"

    pod := &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Name: "pod1",
            UID:  "pod1uid",
        },
        Spec: v1.PodSpec{
            Volumes: []v1.Volume{
                {
                    Name: "volume-name",
                    VolumeSource: v1.VolumeSource{
                        GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
                            PDName: "fake-device1",
                        },
                    },
                },
            },
        },
    }

    volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
    generatedVolumeName, err := volumehelper.GetUniqueVolumeNameFromSpec(
        plugin, volumeSpec)

    err = asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
    if err != nil {
        t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err)
    }
    podName := volumehelper.GetUniquePodName(pod)

    mounter, err := plugin.NewMounter(volumeSpec, pod, volume.VolumeOptions{})
    if err != nil {
        t.Fatalf("NewMounter failed. Expected: <no error> Actual: <%v>", err)
    }

    mapper, err := plugin.NewBlockVolumeMapper(volumeSpec, pod, volume.VolumeOptions{})
    if err != nil {
        t.Fatalf("NewBlockVolumeMapper failed. Expected: <no error> Actual: <%v>", err)
    }

    err = asw.AddPodToVolume(
        podName, pod.UID, generatedVolumeName, mounter, mapper, volumeSpec.Name(), "" /* volumeGidValue */)
    if err != nil {
        t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
    }

    // Act
    err = asw.AddPodToVolume(
        podName, pod.UID, generatedVolumeName, mounter, mapper, volumeSpec.Name(), "" /* volumeGidValue */)

    // Assert
    if err != nil {
        t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
    }

    verifyVolumeExistsAsw(t, generatedVolumeName, true /* shouldExist */, asw)
    verifyVolumeDoesntExistInUnmountedVolumes(t, generatedVolumeName, asw)
    verifyVolumeDoesntExistInGloballyMountedVolumes(t, generatedVolumeName, asw)
    verifyPodExistsInVolumeAsw(t, podName, generatedVolumeName, "fake/device/path" /* expectedDevicePath */, asw)
}

// Calls AddPodToVolume() to add a pod to an empty data struct
// Verifies call fails with "volume does not exist" error.
func Test_AddPodToVolume_Negative_VolumeDoesntExist(t *testing.T) {
    // Arrange
    volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
    asw := NewActualStateOfWorld("mynode" /* nodeName */, volumePluginMgr)

    pod := &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Name: "pod1",
            UID:  "pod1uid",
        },
        Spec: v1.PodSpec{
            Volumes: []v1.Volume{
                {
                    Name: "volume-name",
                    VolumeSource: v1.VolumeSource{
                        GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
                            PDName: "fake-device1",
                        },
                    },
                },
            },
        },
    }

    volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
    plugin, err := volumePluginMgr.FindPluginBySpec(volumeSpec)
    if err != nil {
        t.Fatalf(
            "volumePluginMgr.FindPluginBySpec failed to find volume plugin for %#v with: %v",
            volumeSpec,
            err)
    }

    blockplugin, err := volumePluginMgr.FindMapperPluginBySpec(volumeSpec)
    if err != nil {
        t.Fatalf(
            "volumePluginMgr.FindMapperPluginBySpec failed to find volume plugin for %#v with: %v",
            volumeSpec,
            err)
    }

    volumeName, err := volumehelper.GetUniqueVolumeNameFromSpec(
        plugin, volumeSpec)

    podName := volumehelper.GetUniquePodName(pod)

    mounter, err := plugin.NewMounter(volumeSpec, pod, volume.VolumeOptions{})
    if err != nil {
        t.Fatalf("NewMounter failed. Expected: <no error> Actual: <%v>", err)
    }

    mapper, err := blockplugin.NewBlockVolumeMapper(volumeSpec, pod, volume.VolumeOptions{})
    if err != nil {
        t.Fatalf("NewBlockVolumeMapper failed. Expected: <no error> Actual: <%v>", err)
    }

    // Act
    err = asw.AddPodToVolume(
        podName, pod.UID, volumeName, mounter, mapper, volumeSpec.Name(), "" /* volumeGidValue */)

    // Assert
    if err == nil {
        t.Fatalf("AddPodToVolume did not fail. Expected: <\"no volume with the name ... exists in the list of attached volumes\"> Actual: <no error>")
    }

    verifyVolumeExistsAsw(t, volumeName, false /* shouldExist */, asw)
    verifyVolumeDoesntExistInUnmountedVolumes(t, volumeName, asw)
    verifyVolumeDoesntExistInGloballyMountedVolumes(t, volumeName, asw)
    verifyPodDoesntExistInVolumeAsw(
        t,
        podName,
        volumeName,
        false, /* expectVolumeToExist */
        asw)
}

// Calls MarkVolumeAsAttached() once to add volume
// Calls MarkDeviceAsMounted() to mark volume as globally mounted.
// Verifies newly added volume exists in GetUnmountedVolumes()
// Verifies newly added volume exists in GetGloballyMountedVolumes()
func Test_MarkDeviceAsMounted_Positive_NewVolume(t *testing.T) {
    // Arrange
    volumePluginMgr, plugin := volumetesting.GetTestVolumePluginMgr(t)
    asw := NewActualStateOfWorld("mynode" /* nodeName */, volumePluginMgr)
    pod := &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Name: "pod1",
            UID:  "pod1uid",
        },
        Spec: v1.PodSpec{
            Volumes: []v1.Volume{
                {
                    Name: "volume-name",
                    VolumeSource: v1.VolumeSource{
                        GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
                            PDName: "fake-device1",
                        },
                    },
                },
            },
        },
    }
    volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
    devicePath := "fake/device/path"
    generatedVolumeName, err := volumehelper.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)

    err = asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
    if err != nil {
        t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err)
    }

    // Act
    err = asw.MarkDeviceAsMounted(generatedVolumeName)

    // Assert
    if err != nil {
        t.Fatalf("MarkDeviceAsMounted failed. Expected: <no error> Actual: <%v>", err)
    }

    verifyVolumeExistsAsw(t, generatedVolumeName, true /* shouldExist */, asw)
    verifyVolumeExistsInUnmountedVolumes(t, generatedVolumeName, asw)
    verifyVolumeExistsInGloballyMountedVolumes(t, generatedVolumeName, asw)
}

func verifyVolumeExistsInGloballyMountedVolumes(
    t *testing.T, expectedVolumeName v1.UniqueVolumeName, asw ActualStateOfWorld) {
    globallyMountedVolumes := asw.GetGloballyMountedVolumes()
    for _, volume := range globallyMountedVolumes {
        if volume.VolumeName == expectedVolumeName {
            return
        }
    }

    t.Fatalf(
        "Could not find volume %v in the list of GloballyMountedVolumes for actual state of world %+v",
        expectedVolumeName,
        globallyMountedVolumes)
}

func verifyVolumeDoesntExistInGloballyMountedVolumes(
    t *testing.T, volumeToCheck v1.UniqueVolumeName, asw ActualStateOfWorld) {
    globallyMountedVolumes := asw.GetGloballyMountedVolumes()
    for _, volume := range globallyMountedVolumes {
        if volume.VolumeName == volumeToCheck {
            t.Fatalf(
                "Found volume %v in the list of GloballyMountedVolumes. Expected it not to exist.",
                volumeToCheck)
        }
    }
}

func verifyVolumeExistsAsw(
    t *testing.T,
    expectedVolumeName v1.UniqueVolumeName,
    shouldExist bool,
    asw ActualStateOfWorld) {
    volumeExists := asw.VolumeExists(expectedVolumeName)
    if shouldExist != volumeExists {
        t.Fatalf(
            "VolumeExists(%q) response incorrect. Expected: <%v> Actual: <%v>",
            expectedVolumeName,
            shouldExist,
            volumeExists)
    }
}

func verifyVolumeExistsInUnmountedVolumes(
    t *testing.T, expectedVolumeName v1.UniqueVolumeName, asw ActualStateOfWorld) {
    unmountedVolumes := asw.GetUnmountedVolumes()
    for _, volume := range unmountedVolumes {
        if volume.VolumeName == expectedVolumeName {
            return
        }
    }

    t.Fatalf(
        "Could not find volume %v in the list of UnmountedVolumes for actual state of world %+v",
        expectedVolumeName,
        unmountedVolumes)
}

func verifyVolumeDoesntExistInUnmountedVolumes(
    t *testing.T, volumeToCheck v1.UniqueVolumeName, asw ActualStateOfWorld) {
    unmountedVolumes := asw.GetUnmountedVolumes()
    for _, volume := range unmountedVolumes {
        if volume.VolumeName == volumeToCheck {
            t.Fatalf(
                "Found volume %v in the list of UnmountedVolumes. Expected it not to exist.",
                volumeToCheck)
        }
    }
}

func verifyPodExistsInVolumeAsw(
    t *testing.T,
    expectedPodName volumetypes.UniquePodName,
    expectedVolumeName v1.UniqueVolumeName,
    expectedDevicePath string,
    asw ActualStateOfWorld) {
    podExistsInVolume, devicePath, err :=
        asw.PodExistsInVolume(expectedPodName, expectedVolumeName)
    if err != nil {
        t.Fatalf(
            "ASW PodExistsInVolume failed. Expected: <no error> Actual: <%v>", err)
    }

    if !podExistsInVolume {
        t.Fatalf(
            "ASW PodExistsInVolume result invalid. Expected: <true> Actual: <%v>",
            podExistsInVolume)
    }

    if devicePath != expectedDevicePath {
        t.Fatalf(
            "Invalid devicePath. Expected: <%q> Actual: <%q> ",
            expectedDevicePath,
            devicePath)
    }
}

func verifyPodDoesntExistInVolumeAsw(
    t *testing.T,
    podToCheck volumetypes.UniquePodName,
    volumeToCheck v1.UniqueVolumeName,
    expectVolumeToExist bool,
    asw ActualStateOfWorld) {
    podExistsInVolume, devicePath, err :=
        asw.PodExistsInVolume(podToCheck, volumeToCheck)
    if !expectVolumeToExist && err == nil {
        t.Fatalf(
            "ASW PodExistsInVolume did not return error. Expected: <error indicating volume does not exist> Actual: <%v>", err)
    }

    if expectVolumeToExist && err != nil {
        t.Fatalf(
            "ASW PodExistsInVolume failed. Expected: <no error> Actual: <%v>", err)
    }

    if podExistsInVolume {
        t.Fatalf(
            "ASW PodExistsInVolume result invalid. Expected: <false> Actual: <%v>",
            podExistsInVolume)
    }

    if devicePath != "" {
        t.Fatalf(
            "Invalid devicePath. Expected: <\"\"> Actual: <%q> ",
            devicePath)
    }
}
vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/desired_state_of_world.go (generated, vendored, new file, 356 lines)
@@ -0,0 +1,356 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
/*
|
||||
Package cache implements data structures used by the kubelet volume manager to
|
||||
keep track of attached volumes and the pods that mounted them.
|
||||
*/
|
||||
package cache
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
|
||||
"k8s.io/kubernetes/pkg/volume/util/types"
|
||||
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
|
||||
)
|
||||
|
||||
// DesiredStateOfWorld defines a set of thread-safe operations for the kubelet
// volume manager's desired state of the world cache.
// This cache contains a volumes->pods mapping, i.e. the set of all volumes
// that should be attached to this node and, for each volume, the pods that
// reference it and should mount it.
// Note: This is distinct from the DesiredStateOfWorld implemented by the
// attach/detach controller. They both keep track of different objects. This
// one contains kubelet volume manager specific state.
type DesiredStateOfWorld interface {
    // AddPodToVolume adds the given pod to the given volume in the cache,
    // indicating the specified pod should mount the specified volume.
    // A unique volumeName is generated from the volumeSpec and returned on
    // success.
    // If no volume plugin can support the given volumeSpec, or more than one
    // plugin can support it, an error is returned.
    // If a volume with the name volumeName does not exist in the list of
    // volumes that should be attached to this node, the volume is implicitly
    // added.
    // If a pod with the same unique name already exists under the specified
    // volume, this is a no-op.
    AddPodToVolume(podName types.UniquePodName, pod *v1.Pod, volumeSpec *volume.Spec, outerVolumeSpecName string, volumeGidValue string) (v1.UniqueVolumeName, error)

    // MarkVolumesReportedInUse sets the ReportedInUse value to true for the
    // reportedVolumes. For volumes not in the reportedVolumes list, the
    // ReportedInUse value is reset to false. The default ReportedInUse value
    // for a newly created volume is false.
    // When set to true, this value indicates that the volume was successfully
    // added to the VolumesInUse field in the node's status. The mount
    // operation needs to check this value before issuing the operation.
    // If a volume in the reportedVolumes list does not exist in the list of
    // volumes that should be attached to this node, it is skipped without error.
    MarkVolumesReportedInUse(reportedVolumes []v1.UniqueVolumeName)

    // DeletePodFromVolume removes the given pod from the given volume in the
    // cache, indicating the specified pod no longer requires the specified
    // volume.
    // If a pod with the same unique name does not exist under the specified
    // volume, this is a no-op.
    // If a volume with the name volumeName does not exist in the list of
    // attached volumes, this is a no-op.
    // If, after deleting the pod, the specified volume contains no other child
    // pods, the volume is also deleted.
    DeletePodFromVolume(podName types.UniquePodName, volumeName v1.UniqueVolumeName)

    // VolumeExists returns true if the given volume exists in the list of
    // volumes that should be attached to this node.
    // If a pod with the same unique name does not exist under the specified
    // volume, false is returned.
    VolumeExists(volumeName v1.UniqueVolumeName) bool

    // PodExistsInVolume returns true if the given pod exists in the list of
    // podsToMount for the given volume in the cache.
    // If a pod with the same unique name does not exist under the specified
    // volume, false is returned.
    // If a volume with the name volumeName does not exist in the list of
    // attached volumes, false is returned.
    PodExistsInVolume(podName types.UniquePodName, volumeName v1.UniqueVolumeName) bool

    // GetVolumesToMount generates and returns a list of volumes that should
    // be attached to this node and the pods they should be mounted to, based
    // on the current desired state of the world.
    GetVolumesToMount() []VolumeToMount

    // GetPods generates and returns a map of pods, indexed by each pod's
    // unique name. This map can be used to determine which pods are currently
    // in the desired state of the world.
    GetPods() map[types.UniquePodName]bool
}
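
// exampleDesiredStateUsage is a minimal, illustrative sketch (not part of the
// upstream API surface) of how a caller such as the populator or reconciler
// drives this cache end to end; the pod and spec arguments are assumed to be
// supplied by the kubelet.
func exampleDesiredStateUsage(dsw DesiredStateOfWorld, pod *v1.Pod, spec *volume.Spec) error {
    podName := volumehelper.GetUniquePodName(pod)

    // Register the pod/volume pair; the unique volume name is derived from
    // the spec (and, for non-attachable plugins, from the pod as well).
    volumeName, err := dsw.AddPodToVolume(
        podName, pod, spec, spec.Name(), "" /* volumeGidValue */)
    if err != nil {
        return err
    }

    // The pair is now visible through all of the query methods.
    if dsw.VolumeExists(volumeName) && dsw.PodExistsInVolume(podName, volumeName) {
        for range dsw.GetVolumesToMount() {
            // Each entry pairs a volume with one pod that should mount it.
        }
    }

    // Removing the last pod under a volume also removes the volume itself.
    dsw.DeletePodFromVolume(podName, volumeName)
    return nil
}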

// VolumeToMount represents a volume that is attached to this node and needs
// to be mounted to PodName.
type VolumeToMount struct {
    operationexecutor.VolumeToMount
}

// NewDesiredStateOfWorld returns a new instance of DesiredStateOfWorld.
func NewDesiredStateOfWorld(volumePluginMgr *volume.VolumePluginMgr) DesiredStateOfWorld {
    return &desiredStateOfWorld{
        volumesToMount:  make(map[v1.UniqueVolumeName]volumeToMount),
        volumePluginMgr: volumePluginMgr,
    }
}

type desiredStateOfWorld struct {
    // volumesToMount is a map containing the set of volumes that should be
    // attached to this node and mounted to the pods referencing them. The key
    // in the map is the name of the volume and the value is a volume object
    // containing more information about the volume.
    volumesToMount map[v1.UniqueVolumeName]volumeToMount
    // volumePluginMgr is the volume plugin manager used to create volume
    // plugin objects.
    volumePluginMgr *volume.VolumePluginMgr

    sync.RWMutex
}

// The volume object represents a volume that should be attached to this node,
// and mounted to podsToMount.
type volumeToMount struct {
    // volumeName contains the unique identifier for this volume.
    volumeName v1.UniqueVolumeName

    // podsToMount is a map containing the set of pods that reference this
    // volume and should mount it once it is attached. The key in the map is
    // the name of the pod and the value is a pod object containing more
    // information about the pod.
    podsToMount map[types.UniquePodName]podToMount

    // pluginIsAttachable indicates that the plugin for this volume implements
    // the volume.Attacher interface.
    pluginIsAttachable bool

    // volumeGidValue contains the value of the GID annotation, if present.
    volumeGidValue string

    // reportedInUse indicates that the volume was successfully added to the
    // VolumesInUse field in the node's status.
    reportedInUse bool
}

// The pod object represents a pod that references the underlying volume and
// should mount it once it is attached.
type podToMount struct {
    // podName contains the name of this pod.
    podName types.UniquePodName

    // Pod to mount the volume to. Used to create NewMounter.
    pod *v1.Pod

    // volume spec containing the specification for this volume. Used to
    // generate the volume plugin object, and passed to plugin methods.
    // For non-PVC volumes this is the same as defined in the pod object. For
    // PVC volumes it is from the dereferenced PV object.
    spec *volume.Spec

    // outerVolumeSpecName is the volume.Spec.Name() of the volume as
    // referenced directly in the pod. If the volume was referenced through a
    // persistent volume claim, this contains the volume.Spec.Name() of the
    // persistent volume claim.
    outerVolumeSpecName string
}

func (dsw *desiredStateOfWorld) AddPodToVolume(
    podName types.UniquePodName,
    pod *v1.Pod,
    volumeSpec *volume.Spec,
    outerVolumeSpecName string,
    volumeGidValue string) (v1.UniqueVolumeName, error) {
    dsw.Lock()
    defer dsw.Unlock()

    volumePlugin, err := dsw.volumePluginMgr.FindPluginBySpec(volumeSpec)
    if err != nil || volumePlugin == nil {
        return "", fmt.Errorf(
            "failed to get Plugin from volumeSpec for volume %q err=%v",
            volumeSpec.Name(),
            err)
    }

    var volumeName v1.UniqueVolumeName

    // The unique volume name used depends on whether the volume is attachable
    // or not.
    attachable := dsw.isAttachableVolume(volumeSpec)
    if attachable {
        // For attachable volumes, use the unique volume name as reported by
        // the plugin.
        volumeName, err =
            volumehelper.GetUniqueVolumeNameFromSpec(volumePlugin, volumeSpec)
        if err != nil {
            return "", fmt.Errorf(
                "failed to GetUniqueVolumeNameFromSpec for volumeSpec %q using volume plugin %q err=%v",
                volumeSpec.Name(),
                volumePlugin.GetPluginName(),
                err)
        }
    } else {
        // For non-attachable volumes, generate a unique name based on the pod
        // namespace and name and the name of the volume within the pod.
        volumeName = volumehelper.GetUniqueVolumeNameForNonAttachableVolume(podName, volumePlugin, volumeSpec)
    }

    volumeObj, volumeExists := dsw.volumesToMount[volumeName]
    if !volumeExists {
        volumeObj = volumeToMount{
            volumeName:         volumeName,
            podsToMount:        make(map[types.UniquePodName]podToMount),
            pluginIsAttachable: attachable,
            volumeGidValue:     volumeGidValue,
            reportedInUse:      false,
        }
        dsw.volumesToMount[volumeName] = volumeObj
    }

    // Create new podToMount object. If it already exists, it is refreshed
    // with updated values (this is required for volumes that require
    // remounting on pod update, like Downward API volumes).
    dsw.volumesToMount[volumeName].podsToMount[podName] = podToMount{
        podName:             podName,
        pod:                 pod,
        spec:                volumeSpec,
        outerVolumeSpecName: outerVolumeSpecName,
    }

    return volumeName, nil
}

func (dsw *desiredStateOfWorld) MarkVolumesReportedInUse(
    reportedVolumes []v1.UniqueVolumeName) {
    dsw.Lock()
    defer dsw.Unlock()

    reportedVolumesMap := make(
        map[v1.UniqueVolumeName]bool, len(reportedVolumes) /* capacity */)

    for _, reportedVolume := range reportedVolumes {
        reportedVolumesMap[reportedVolume] = true
    }

    for volumeName, volumeObj := range dsw.volumesToMount {
        _, volumeReported := reportedVolumesMap[volumeName]
        volumeObj.reportedInUse = volumeReported
        dsw.volumesToMount[volumeName] = volumeObj
    }
}
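
// markReportedInUseFromNodeStatus is an illustrative sketch (a hypothetical
// helper, not upstream code) of the intended feedback loop: the kubelet
// copies node.Status.VolumesInUse into this cache, and because unlisted
// volumes are reset to false, a later call with a different list implicitly
// clears earlier marks.
func markReportedInUseFromNodeStatus(dsw DesiredStateOfWorld, node *v1.Node) {
    dsw.MarkVolumesReportedInUse(node.Status.VolumesInUse)
}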

func (dsw *desiredStateOfWorld) DeletePodFromVolume(
    podName types.UniquePodName, volumeName v1.UniqueVolumeName) {
    dsw.Lock()
    defer dsw.Unlock()

    volumeObj, volumeExists := dsw.volumesToMount[volumeName]
    if !volumeExists {
        return
    }

    if _, podExists := volumeObj.podsToMount[podName]; !podExists {
        return
    }

    // Delete pod if it exists
    delete(dsw.volumesToMount[volumeName].podsToMount, podName)

    if len(dsw.volumesToMount[volumeName].podsToMount) == 0 {
        // Delete volume if no child pods left
        delete(dsw.volumesToMount, volumeName)
    }
}

func (dsw *desiredStateOfWorld) VolumeExists(
    volumeName v1.UniqueVolumeName) bool {
    dsw.RLock()
    defer dsw.RUnlock()

    _, volumeExists := dsw.volumesToMount[volumeName]
    return volumeExists
}

func (dsw *desiredStateOfWorld) PodExistsInVolume(
    podName types.UniquePodName, volumeName v1.UniqueVolumeName) bool {
    dsw.RLock()
    defer dsw.RUnlock()

    volumeObj, volumeExists := dsw.volumesToMount[volumeName]
    if !volumeExists {
        return false
    }

    _, podExists := volumeObj.podsToMount[podName]
    return podExists
}

func (dsw *desiredStateOfWorld) GetPods() map[types.UniquePodName]bool {
    dsw.RLock()
    defer dsw.RUnlock()

    podList := make(map[types.UniquePodName]bool)
    for _, volumeObj := range dsw.volumesToMount {
        for podName := range volumeObj.podsToMount {
            if !podList[podName] {
                podList[podName] = true
            }
        }
    }
    return podList
}

func (dsw *desiredStateOfWorld) GetVolumesToMount() []VolumeToMount {
    dsw.RLock()
    defer dsw.RUnlock()

    volumesToMount := make([]VolumeToMount, 0 /* len */, len(dsw.volumesToMount) /* cap */)
    for volumeName, volumeObj := range dsw.volumesToMount {
        for podName, podObj := range volumeObj.podsToMount {
            volumesToMount = append(
                volumesToMount,
                VolumeToMount{
                    VolumeToMount: operationexecutor.VolumeToMount{
                        VolumeName:          volumeName,
                        PodName:             podName,
                        Pod:                 podObj.pod,
                        VolumeSpec:          podObj.spec,
                        PluginIsAttachable:  volumeObj.pluginIsAttachable,
                        OuterVolumeSpecName: podObj.outerVolumeSpecName,
                        VolumeGidValue:      volumeObj.volumeGidValue,
                        ReportedInUse:       volumeObj.reportedInUse}})
        }
    }
    return volumesToMount
}

func (dsw *desiredStateOfWorld) isAttachableVolume(volumeSpec *volume.Spec) bool {
    attachableVolumePlugin, _ :=
        dsw.volumePluginMgr.FindAttachablePluginBySpec(volumeSpec)
    if attachableVolumePlugin != nil {
        volumeAttacher, err := attachableVolumePlugin.NewAttacher()
        if err == nil && volumeAttacher != nil {
            return true
        }
    }

    return false
}
382
vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/desired_state_of_world_test.go
generated
vendored
Normal file
@ -0,0 +1,382 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
    "testing"

    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/kubernetes/pkg/volume"
    volumetesting "k8s.io/kubernetes/pkg/volume/testing"
    volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
    "k8s.io/kubernetes/pkg/volume/util/volumehelper"
)

// Calls AddPodToVolume() to add a new pod to a new volume.
// Verifies the newly added pod/volume exists via
// PodExistsInVolume(), VolumeExists() and GetVolumesToMount().
func Test_AddPodToVolume_Positive_NewPodNewVolume(t *testing.T) {
    // Arrange
    volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
    dsw := NewDesiredStateOfWorld(volumePluginMgr)
    pod := &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Name: "pod3",
            UID:  "pod3uid",
        },
        Spec: v1.PodSpec{
            Volumes: []v1.Volume{
                {
                    Name: "volume-name",
                    VolumeSource: v1.VolumeSource{
                        GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
                            PDName: "fake-device1",
                        },
                    },
                },
            },
        },
    }

    volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
    podName := volumehelper.GetUniquePodName(pod)

    // Act
    generatedVolumeName, err := dsw.AddPodToVolume(
        podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)

    // Assert
    if err != nil {
        t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
    }

    verifyVolumeExistsDsw(t, generatedVolumeName, dsw)
    verifyVolumeExistsInVolumesToMount(
        t, generatedVolumeName, false /* expectReportedInUse */, dsw)
    verifyPodExistsInVolumeDsw(t, podName, generatedVolumeName, dsw)
}

// Calls AddPodToVolume() twice to add the same pod to the same volume.
// Verifies the pod/volume exists via
// PodExistsInVolume(), VolumeExists() and GetVolumesToMount(), and that no
// errors are returned.
func Test_AddPodToVolume_Positive_ExistingPodExistingVolume(t *testing.T) {
    // Arrange
    volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
    dsw := NewDesiredStateOfWorld(volumePluginMgr)
    pod := &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Name: "pod3",
            UID:  "pod3uid",
        },
        Spec: v1.PodSpec{
            Volumes: []v1.Volume{
                {
                    Name: "volume-name",
                    VolumeSource: v1.VolumeSource{
                        GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
                            PDName: "fake-device1",
                        },
                    },
                },
            },
        },
    }

    volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
    podName := volumehelper.GetUniquePodName(pod)

    // Act
    generatedVolumeName, err := dsw.AddPodToVolume(
        podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)

    // Assert
    if err != nil {
        t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
    }

    verifyVolumeExistsDsw(t, generatedVolumeName, dsw)
    verifyVolumeExistsInVolumesToMount(
        t, generatedVolumeName, false /* expectReportedInUse */, dsw)
    verifyPodExistsInVolumeDsw(t, podName, generatedVolumeName, dsw)
}

// Populates the data struct with a new volume/pod.
// Calls DeletePodFromVolume() to remove the pod.
// Verifies the newly added pod/volume are deleted.
func Test_DeletePodFromVolume_Positive_PodExistsVolumeExists(t *testing.T) {
    // Arrange
    volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
    dsw := NewDesiredStateOfWorld(volumePluginMgr)
    pod := &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Name: "pod3",
            UID:  "pod3uid",
        },
        Spec: v1.PodSpec{
            Volumes: []v1.Volume{
                {
                    Name: "volume-name",
                    VolumeSource: v1.VolumeSource{
                        GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
                            PDName: "fake-device1",
                        },
                    },
                },
            },
        },
    }

    volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
    podName := volumehelper.GetUniquePodName(pod)
    generatedVolumeName, err := dsw.AddPodToVolume(
        podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)
    if err != nil {
        t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
    }
    verifyVolumeExistsDsw(t, generatedVolumeName, dsw)
    verifyVolumeExistsInVolumesToMount(
        t, generatedVolumeName, false /* expectReportedInUse */, dsw)
    verifyPodExistsInVolumeDsw(t, podName, generatedVolumeName, dsw)

    // Act
    dsw.DeletePodFromVolume(podName, generatedVolumeName)

    // Assert
    verifyVolumeDoesntExist(t, generatedVolumeName, dsw)
    verifyVolumeDoesntExistInVolumesToMount(t, generatedVolumeName, dsw)
    verifyPodDoesntExistInVolumeDsw(t, podName, generatedVolumeName, dsw)
}

// Calls AddPodToVolume() to add three new volumes to the data struct.
// Verifies the newly added pods/volumes exist via PodExistsInVolume(),
// VolumeExists() and GetVolumesToMount().
// Marks only the second volume as reported in use.
// Verifies that only that volume is marked reported in use.
// Then marks only the third volume as reported in use.
// Verifies that only that volume is marked reported in use.
func Test_MarkVolumesReportedInUse_Positive_NewPodNewVolume(t *testing.T) {
    // Arrange
    volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
    dsw := NewDesiredStateOfWorld(volumePluginMgr)

    pod1 := &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Name: "pod1",
            UID:  "pod1uid",
        },
        Spec: v1.PodSpec{
            Volumes: []v1.Volume{
                {
                    Name: "volume1-name",
                    VolumeSource: v1.VolumeSource{
                        GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
                            PDName: "fake-device1",
                        },
                    },
                },
            },
        },
    }

    volume1Spec := &volume.Spec{Volume: &pod1.Spec.Volumes[0]}
    pod1Name := volumehelper.GetUniquePodName(pod1)

    pod2 := &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Name: "pod2",
            UID:  "pod2uid",
        },
        Spec: v1.PodSpec{
            Volumes: []v1.Volume{
                {
                    Name: "volume2-name",
                    VolumeSource: v1.VolumeSource{
                        GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
                            PDName: "fake-device2",
                        },
                    },
                },
            },
        },
    }

    volume2Spec := &volume.Spec{Volume: &pod2.Spec.Volumes[0]}
    pod2Name := volumehelper.GetUniquePodName(pod2)

    pod3 := &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Name: "pod3",
            UID:  "pod3uid",
        },
        Spec: v1.PodSpec{
            Volumes: []v1.Volume{
                {
                    Name: "volume3-name",
                    VolumeSource: v1.VolumeSource{
                        GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
                            PDName: "fake-device3",
                        },
                    },
                },
            },
        },
    }

    volume3Spec := &volume.Spec{Volume: &pod3.Spec.Volumes[0]}
    pod3Name := volumehelper.GetUniquePodName(pod3)

    generatedVolume1Name, err := dsw.AddPodToVolume(
        pod1Name, pod1, volume1Spec, volume1Spec.Name(), "" /* volumeGidValue */)
    if err != nil {
        t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
    }

    generatedVolume2Name, err := dsw.AddPodToVolume(
        pod2Name, pod2, volume2Spec, volume2Spec.Name(), "" /* volumeGidValue */)
    if err != nil {
        t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
    }

    generatedVolume3Name, err := dsw.AddPodToVolume(
        pod3Name, pod3, volume3Spec, volume3Spec.Name(), "" /* volumeGidValue */)
    if err != nil {
        t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
    }

    // Act
    volumesReportedInUse := []v1.UniqueVolumeName{generatedVolume2Name}
    dsw.MarkVolumesReportedInUse(volumesReportedInUse)

    // Assert
    verifyVolumeExistsDsw(t, generatedVolume1Name, dsw)
    verifyVolumeExistsInVolumesToMount(
        t, generatedVolume1Name, false /* expectReportedInUse */, dsw)
    verifyPodExistsInVolumeDsw(t, pod1Name, generatedVolume1Name, dsw)
    verifyVolumeExistsDsw(t, generatedVolume2Name, dsw)
    verifyVolumeExistsInVolumesToMount(
        t, generatedVolume2Name, true /* expectReportedInUse */, dsw)
    verifyPodExistsInVolumeDsw(t, pod2Name, generatedVolume2Name, dsw)
    verifyVolumeExistsDsw(t, generatedVolume3Name, dsw)
    verifyVolumeExistsInVolumesToMount(
        t, generatedVolume3Name, false /* expectReportedInUse */, dsw)
    verifyPodExistsInVolumeDsw(t, pod3Name, generatedVolume3Name, dsw)

    // Act
    volumesReportedInUse = []v1.UniqueVolumeName{generatedVolume3Name}
    dsw.MarkVolumesReportedInUse(volumesReportedInUse)

    // Assert
    verifyVolumeExistsDsw(t, generatedVolume1Name, dsw)
    verifyVolumeExistsInVolumesToMount(
        t, generatedVolume1Name, false /* expectReportedInUse */, dsw)
    verifyPodExistsInVolumeDsw(t, pod1Name, generatedVolume1Name, dsw)
    verifyVolumeExistsDsw(t, generatedVolume2Name, dsw)
    verifyVolumeExistsInVolumesToMount(
        t, generatedVolume2Name, false /* expectReportedInUse */, dsw)
    verifyPodExistsInVolumeDsw(t, pod2Name, generatedVolume2Name, dsw)
    verifyVolumeExistsDsw(t, generatedVolume3Name, dsw)
    verifyVolumeExistsInVolumesToMount(
        t, generatedVolume3Name, true /* expectReportedInUse */, dsw)
    verifyPodExistsInVolumeDsw(t, pod3Name, generatedVolume3Name, dsw)
}

func verifyVolumeExistsDsw(
    t *testing.T, expectedVolumeName v1.UniqueVolumeName, dsw DesiredStateOfWorld) {
    volumeExists := dsw.VolumeExists(expectedVolumeName)
    if !volumeExists {
        t.Fatalf(
            "VolumeExists(%q) failed. Expected: <true> Actual: <%v>",
            expectedVolumeName,
            volumeExists)
    }
}

func verifyVolumeDoesntExist(
    t *testing.T, expectedVolumeName v1.UniqueVolumeName, dsw DesiredStateOfWorld) {
    volumeExists := dsw.VolumeExists(expectedVolumeName)
    if volumeExists {
        t.Fatalf(
            "VolumeExists(%q) returned incorrect value. Expected: <false> Actual: <%v>",
            expectedVolumeName,
            volumeExists)
    }
}

func verifyVolumeExistsInVolumesToMount(
    t *testing.T,
    expectedVolumeName v1.UniqueVolumeName,
    expectReportedInUse bool,
    dsw DesiredStateOfWorld) {
    volumesToMount := dsw.GetVolumesToMount()
    for _, volume := range volumesToMount {
        if volume.VolumeName == expectedVolumeName {
            if volume.ReportedInUse != expectReportedInUse {
                t.Fatalf(
                    "Found volume %v in the list of VolumesToMount, but ReportedInUse incorrect. Expected: <%v> Actual: <%v>",
                    expectedVolumeName,
                    expectReportedInUse,
                    volume.ReportedInUse)
            }

            return
        }
    }

    t.Fatalf(
        "Could not find volume %v in the list of desired state of world volumes to mount %+v",
        expectedVolumeName,
        volumesToMount)
}

func verifyVolumeDoesntExistInVolumesToMount(
    t *testing.T, volumeToCheck v1.UniqueVolumeName, dsw DesiredStateOfWorld) {
    volumesToMount := dsw.GetVolumesToMount()
    for _, volume := range volumesToMount {
        if volume.VolumeName == volumeToCheck {
            t.Fatalf(
                "Found volume %v in the list of desired state of world volumes to mount. Expected it not to exist.",
                volumeToCheck)
        }
    }
}

func verifyPodExistsInVolumeDsw(
    t *testing.T,
    expectedPodName volumetypes.UniquePodName,
    expectedVolumeName v1.UniqueVolumeName,
    dsw DesiredStateOfWorld) {
    if podExistsInVolume := dsw.PodExistsInVolume(
        expectedPodName, expectedVolumeName); !podExistsInVolume {
        t.Fatalf(
            "DSW PodExistsInVolume returned incorrect value. Expected: <true> Actual: <%v>",
            podExistsInVolume)
    }
}

func verifyPodDoesntExistInVolumeDsw(
    t *testing.T,
    expectedPodName volumetypes.UniquePodName,
    expectedVolumeName v1.UniqueVolumeName,
    dsw DesiredStateOfWorld) {
    if podExistsInVolume := dsw.PodExistsInVolume(
        expectedPodName, expectedVolumeName); podExistsInVolume {
        t.Fatalf(
            "DSW PodExistsInVolume returned incorrect value. Expected: <false> Actual: <%v>",
            podExistsInVolume)
    }
}
72
vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/BUILD
generated
vendored
Normal file
@ -0,0 +1,72 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = ["desired_state_of_world_populator.go"],
    importpath = "k8s.io/kubernetes/pkg/kubelet/volumemanager/populator",
    deps = [
        "//pkg/features:go_default_library",
        "//pkg/kubelet/config:go_default_library",
        "//pkg/kubelet/container:go_default_library",
        "//pkg/kubelet/pod:go_default_library",
        "//pkg/kubelet/status:go_default_library",
        "//pkg/kubelet/util/format:go_default_library",
        "//pkg/kubelet/volumemanager/cache:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//pkg/volume/util/types:go_default_library",
        "//pkg/volume/util/volumehelper:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)

go_test(
    name = "go_default_test",
    srcs = ["desired_state_of_world_populator_test.go"],
    importpath = "k8s.io/kubernetes/pkg/kubelet/volumemanager/populator",
    library = ":go_default_library",
    deps = [
        "//pkg/kubelet/configmap:go_default_library",
        "//pkg/kubelet/container/testing:go_default_library",
        "//pkg/kubelet/pod:go_default_library",
        "//pkg/kubelet/pod/testing:go_default_library",
        "//pkg/kubelet/secret:go_default_library",
        "//pkg/kubelet/status:go_default_library",
        "//pkg/kubelet/status/testing:go_default_library",
        "//pkg/kubelet/volumemanager/cache:go_default_library",
        "//pkg/volume/testing:go_default_library",
        "//pkg/volume/util/types:go_default_library",
        "//pkg/volume/util/volumehelper:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
        "//vendor/k8s.io/client-go/testing:go_default_library",
    ],
)
527
vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go
generated
vendored
Normal file
@ -0,0 +1,527 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Package populator implements interfaces that monitor and keep the states of
the caches in sync with the "ground truth".
*/
package populator

import (
    "fmt"
    "sync"
    "time"

    "github.com/golang/glog"

    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/wait"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/kubernetes/pkg/features"
    "k8s.io/kubernetes/pkg/kubelet/config"
    kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
    "k8s.io/kubernetes/pkg/kubelet/pod"
    "k8s.io/kubernetes/pkg/kubelet/status"
    "k8s.io/kubernetes/pkg/kubelet/util/format"
    "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache"
    "k8s.io/kubernetes/pkg/volume"
    volumeutil "k8s.io/kubernetes/pkg/volume/util"
    volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
    "k8s.io/kubernetes/pkg/volume/util/volumehelper"
)

// DesiredStateOfWorldPopulator periodically loops through the list of active
// pods and ensures that each one exists in the desired state of the world
// cache if it has volumes. It also verifies that the pods in the desired
// state of the world cache still exist and, if not, removes them.
type DesiredStateOfWorldPopulator interface {
    Run(sourcesReady config.SourcesReady, stopCh <-chan struct{})

    // ReprocessPod removes the specified pod from the list of processedPods
    // (if it exists), forcing it to be reprocessed. This is required to
    // enable remounting volumes on pod updates (volumes like Downward API
    // volumes depend on this behavior to ensure volume content is updated).
    ReprocessPod(podName volumetypes.UniquePodName)

    // HasAddedPods returns whether the populator has, at least once after all
    // sources were ready, looped through the list of active pods and added
    // them to the desired state of the world cache. It does not return true
    // before all sources are ready because, before then, there is a chance
    // that many or all pods are missing from the list of active pods, and so
    // few to none of them would have been added.
    HasAddedPods() bool
}
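
// wireUpPopulatorSketch is an illustrative sketch (a hypothetical helper, not
// upstream code) of how the kubelet constructs and starts the populator; the
// two durations below are placeholder values, not the kubelet's defaults.
func wireUpPopulatorSketch(
    kubeClient clientset.Interface,
    podManager pod.Manager,
    podStatusProvider status.PodStatusProvider,
    dsw cache.DesiredStateOfWorld,
    runtime kubecontainer.Runtime,
    sourcesReady config.SourcesReady,
    stopCh <-chan struct{}) {
    dswp := NewDesiredStateOfWorldPopulator(
        kubeClient,
        100*time.Millisecond, /* loopSleepDuration (placeholder) */
        2*time.Second,        /* getPodStatusRetryDuration (placeholder) */
        podManager,
        podStatusProvider,
        dsw,
        runtime,
        false /* keepTerminatedPodVolumes */)

    // Run blocks until stopCh is closed, so it is started on its own goroutine.
    go dswp.Run(sourcesReady, stopCh)
}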

// NewDesiredStateOfWorldPopulator returns a new instance of
// DesiredStateOfWorldPopulator.
//
// kubeClient - used to fetch PV and PVC objects from the API server
// loopSleepDuration - the amount of time the populator loop sleeps between
//     successive executions
// podManager - the kubelet podManager that is the source of truth for the
//     pods that exist on this host
// desiredStateOfWorld - the cache to populate
func NewDesiredStateOfWorldPopulator(
    kubeClient clientset.Interface,
    loopSleepDuration time.Duration,
    getPodStatusRetryDuration time.Duration,
    podManager pod.Manager,
    podStatusProvider status.PodStatusProvider,
    desiredStateOfWorld cache.DesiredStateOfWorld,
    kubeContainerRuntime kubecontainer.Runtime,
    keepTerminatedPodVolumes bool) DesiredStateOfWorldPopulator {
    return &desiredStateOfWorldPopulator{
        kubeClient:                kubeClient,
        loopSleepDuration:         loopSleepDuration,
        getPodStatusRetryDuration: getPodStatusRetryDuration,
        podManager:                podManager,
        podStatusProvider:         podStatusProvider,
        desiredStateOfWorld:       desiredStateOfWorld,
        pods: processedPods{
            processedPods: make(map[volumetypes.UniquePodName]bool)},
        kubeContainerRuntime:     kubeContainerRuntime,
        keepTerminatedPodVolumes: keepTerminatedPodVolumes,
        hasAddedPods:             false,
        hasAddedPodsLock:         sync.RWMutex{},
    }
}

type desiredStateOfWorldPopulator struct {
    kubeClient                clientset.Interface
    loopSleepDuration         time.Duration
    getPodStatusRetryDuration time.Duration
    podManager                pod.Manager
    podStatusProvider         status.PodStatusProvider
    desiredStateOfWorld       cache.DesiredStateOfWorld
    pods                      processedPods
    kubeContainerRuntime      kubecontainer.Runtime
    timeOfLastGetPodStatus    time.Time
    keepTerminatedPodVolumes  bool
    hasAddedPods              bool
    hasAddedPodsLock          sync.RWMutex
}

type processedPods struct {
    processedPods map[volumetypes.UniquePodName]bool
    sync.RWMutex
}

func (dswp *desiredStateOfWorldPopulator) Run(sourcesReady config.SourcesReady, stopCh <-chan struct{}) {
    // Wait for the completion of a loop that started after sources are all
    // ready, then set hasAddedPods accordingly.
    wait.PollUntil(dswp.loopSleepDuration, func() (bool, error) {
        done := sourcesReady.AllReady()
        dswp.populatorLoopFunc()()
        return done, nil
    }, stopCh)
    dswp.hasAddedPodsLock.Lock()
    dswp.hasAddedPods = true
    dswp.hasAddedPodsLock.Unlock()
    wait.Until(dswp.populatorLoopFunc(), dswp.loopSleepDuration, stopCh)
}

func (dswp *desiredStateOfWorldPopulator) ReprocessPod(
    podName volumetypes.UniquePodName) {
    dswp.deleteProcessedPod(podName)
}
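
// reprocessOnPodUpdateSketch illustrates (hypothetically) why ReprocessPod
// exists: on a pod update the kubelet clears the pod's processed marker so
// the next populator pass re-adds its volumes, which is what keeps the
// content of volumes such as Downward API volumes fresh.
func reprocessOnPodUpdateSketch(dswp DesiredStateOfWorldPopulator, updatedPod *v1.Pod) {
    dswp.ReprocessPod(volumehelper.GetUniquePodName(updatedPod))
}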

func (dswp *desiredStateOfWorldPopulator) HasAddedPods() bool {
    dswp.hasAddedPodsLock.RLock()
    defer dswp.hasAddedPodsLock.RUnlock()
    return dswp.hasAddedPods
}

func (dswp *desiredStateOfWorldPopulator) populatorLoopFunc() func() {
    return func() {
        dswp.findAndAddNewPods()

        // findAndRemoveDeletedPods() calls out to the container runtime to
        // determine if the containers for a given pod are terminated. This is
        // an expensive operation, therefore we limit the rate that
        // findAndRemoveDeletedPods() is called independently of the main
        // populator loop.
        if time.Since(dswp.timeOfLastGetPodStatus) < dswp.getPodStatusRetryDuration {
            glog.V(5).Infof(
                "Skipping findAndRemoveDeletedPods(). Not permitted until %v (getPodStatusRetryDuration %v).",
                dswp.timeOfLastGetPodStatus.Add(dswp.getPodStatusRetryDuration),
                dswp.getPodStatusRetryDuration)

            return
        }

        dswp.findAndRemoveDeletedPods()
    }
}

func (dswp *desiredStateOfWorldPopulator) isPodTerminated(pod *v1.Pod) bool {
    podStatus, found := dswp.podStatusProvider.GetPodStatus(pod.UID)
    if !found {
        podStatus = pod.Status
    }
    return volumehelper.IsPodTerminated(pod, podStatus)
}

// Iterate through all pods and add to desired state of world if they don't
// exist but should
func (dswp *desiredStateOfWorldPopulator) findAndAddNewPods() {
    for _, pod := range dswp.podManager.GetPods() {
        if dswp.isPodTerminated(pod) {
            // Do not (re)add volumes for terminated pods
            continue
        }
        dswp.processPodVolumes(pod)
    }
}

// Iterate through all pods in desired state of world, and remove if they no
// longer exist
func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() {
    var runningPods []*kubecontainer.Pod

    runningPodsFetched := false
    for _, volumeToMount := range dswp.desiredStateOfWorld.GetVolumesToMount() {
        pod, podExists := dswp.podManager.GetPodByUID(volumeToMount.Pod.UID)
        if podExists {
            // Skip running pods
            if !dswp.isPodTerminated(pod) {
                continue
            }
            if dswp.keepTerminatedPodVolumes {
                continue
            }
        }

        // Once a pod has been deleted from kubelet pod manager, do not delete
        // it immediately from volume manager. Instead, check the kubelet
        // containerRuntime to verify that all containers in the pod have been
        // terminated.
        if !runningPodsFetched {
            var getPodsErr error
            runningPods, getPodsErr = dswp.kubeContainerRuntime.GetPods(false)
            if getPodsErr != nil {
                glog.Errorf(
                    "kubeContainerRuntime.findAndRemoveDeletedPods returned error %v.",
                    getPodsErr)
                continue
            }

            runningPodsFetched = true
            dswp.timeOfLastGetPodStatus = time.Now()
        }

        runningContainers := false
        for _, runningPod := range runningPods {
            if runningPod.ID == volumeToMount.Pod.UID {
                if len(runningPod.Containers) > 0 {
                    runningContainers = true
                }

                break
            }
        }

        if runningContainers {
            glog.V(5).Infof(
                "Pod %q has been removed from pod manager. However, it still has one or more containers in the non-exited state. Therefore, it will not be removed from volume manager.",
                format.Pod(volumeToMount.Pod))
            continue
        }

        glog.V(5).Infof(volumeToMount.GenerateMsgDetailed("Removing volume from desired state", ""))

        dswp.desiredStateOfWorld.DeletePodFromVolume(
            volumeToMount.PodName, volumeToMount.VolumeName)
        dswp.deleteProcessedPod(volumeToMount.PodName)
    }
}

// processPodVolumes processes the volumes in the given pod and adds them to
// the desired state of the world.
func (dswp *desiredStateOfWorldPopulator) processPodVolumes(pod *v1.Pod) {
    if pod == nil {
        return
    }

    uniquePodName := volumehelper.GetUniquePodName(pod)
    if dswp.podPreviouslyProcessed(uniquePodName) {
        return
    }

    allVolumesAdded := true
    mountsMap, devicesMap := dswp.makeVolumeMap(pod.Spec.Containers)

    // Process volume spec for each volume defined in pod
    for _, podVolume := range pod.Spec.Volumes {
        volumeSpec, volumeGidValue, err :=
            dswp.createVolumeSpec(podVolume, pod.Name, pod.Namespace, mountsMap, devicesMap)
        if err != nil {
            glog.Errorf(
                "Error processing volume %q for pod %q: %v",
                podVolume.Name,
                format.Pod(pod),
                err)
            allVolumesAdded = false
            continue
        }

        // Add volume to desired state of world
        _, err = dswp.desiredStateOfWorld.AddPodToVolume(
            uniquePodName, pod, volumeSpec, podVolume.Name, volumeGidValue)
        if err != nil {
            glog.Errorf(
                "Failed to add volume %q (specName: %q) for pod %q to desiredStateOfWorld. err=%v",
                podVolume.Name,
                volumeSpec.Name(),
                uniquePodName,
                err)
            allVolumesAdded = false
        }

        glog.V(10).Infof(
            "Added volume %q (volSpec=%q) for pod %q to desired state.",
            podVolume.Name,
            volumeSpec.Name(),
            uniquePodName)
    }

    // Some of the volume additions may have failed; if so, this pod must not
    // be marked as fully processed.
    if allVolumesAdded {
        dswp.markPodProcessed(uniquePodName)
    }
}

// podPreviouslyProcessed returns true if the volumes for this pod have
// already been processed by the populator
func (dswp *desiredStateOfWorldPopulator) podPreviouslyProcessed(
    podName volumetypes.UniquePodName) bool {
    dswp.pods.RLock()
    defer dswp.pods.RUnlock()

    _, exists := dswp.pods.processedPods[podName]
    return exists
}

// markPodProcessed records that the volumes for the specified pod have been
// processed by the populator
func (dswp *desiredStateOfWorldPopulator) markPodProcessed(
    podName volumetypes.UniquePodName) {
    dswp.pods.Lock()
    defer dswp.pods.Unlock()

    dswp.pods.processedPods[podName] = true
}

// deleteProcessedPod removes the specified pod from processedPods
func (dswp *desiredStateOfWorldPopulator) deleteProcessedPod(
    podName volumetypes.UniquePodName) {
    dswp.pods.Lock()
    defer dswp.pods.Unlock()

    delete(dswp.pods.processedPods, podName)
}

// createVolumeSpec creates and returns a mutable volume.Spec object for the
// specified volume. It dereferences any PVC to get the PV object, if needed.
// Returns an error if it is unable to obtain the volume at this time.
func (dswp *desiredStateOfWorldPopulator) createVolumeSpec(
    podVolume v1.Volume, podName string, podNamespace string, mountsMap map[string]bool, devicesMap map[string]bool) (*volume.Spec, string, error) {
    if pvcSource :=
        podVolume.VolumeSource.PersistentVolumeClaim; pvcSource != nil {
        glog.V(10).Infof(
            "Found PVC, ClaimName: %q/%q",
            podNamespace,
            pvcSource.ClaimName)

        // If podVolume is a PVC, fetch the real PV behind the claim
        pvName, pvcUID, err := dswp.getPVCExtractPV(
            podNamespace, pvcSource.ClaimName)
        if err != nil {
            return nil, "", fmt.Errorf(
                "error processing PVC %q/%q: %v",
                podNamespace,
                pvcSource.ClaimName,
                err)
        }

        glog.V(10).Infof(
            "Found bound PV for PVC (ClaimName %q/%q pvcUID %v): pvName=%q",
            podNamespace,
            pvcSource.ClaimName,
            pvcUID,
            pvName)

        // Fetch actual PV object
        volumeSpec, volumeGidValue, err :=
            dswp.getPVSpec(pvName, pvcSource.ReadOnly, pvcUID)
        if err != nil {
            return nil, "", fmt.Errorf(
                "error processing PVC %q/%q: %v",
                podNamespace,
                pvcSource.ClaimName,
                err)
        }

        glog.V(10).Infof(
            "Extracted volumeSpec (%v) from bound PV (pvName %q) and PVC (ClaimName %q/%q pvcUID %v)",
            volumeSpec.Name,
            pvName,
            podNamespace,
            pvcSource.ClaimName,
            pvcUID)

        // TODO: remove feature gate check after no longer needed
        if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
            volumeMode, err := volumehelper.GetVolumeMode(volumeSpec)
            if err != nil {
                return nil, "", err
            }
            // Error if a container has volumeMounts but the volumeMode of the
            // PVC isn't Filesystem
            if mountsMap[podVolume.Name] && volumeMode != v1.PersistentVolumeFilesystem {
                return nil, "", fmt.Errorf(
                    "Volume %q has volumeMode %q, but is specified in volumeMounts for pod %q/%q",
                    podVolume.Name,
                    volumeMode,
                    podNamespace,
                    podName)
            }
            // Error if a container has volumeDevices but the volumeMode of
            // the PVC isn't Block
            if devicesMap[podVolume.Name] && volumeMode != v1.PersistentVolumeBlock {
                return nil, "", fmt.Errorf(
                    "Volume %q has volumeMode %q, but is specified in volumeDevices for pod %q/%q",
                    podVolume.Name,
                    volumeMode,
                    podNamespace,
                    podName)
            }
        }
        return volumeSpec, volumeGidValue, nil
    }

    // Do not return the original volume object, since the source could mutate it
    clonedPodVolume := podVolume.DeepCopy()

    return volume.NewSpecFromVolume(clonedPodVolume), "", nil
}

// getPVCExtractPV fetches the PVC object with the given namespace and name
// from the API server, checks whether the PVC is being deleted, extracts the
// name of the PV it is pointing to, and returns it.
// An error is returned if the PVC object's phase is not "Bound".
func (dswp *desiredStateOfWorldPopulator) getPVCExtractPV(
    namespace string, claimName string) (string, types.UID, error) {
    pvc, err :=
        dswp.kubeClient.CoreV1().PersistentVolumeClaims(namespace).Get(claimName, metav1.GetOptions{})
    if err != nil || pvc == nil {
        return "", "", fmt.Errorf(
            "failed to fetch PVC %s/%s from API server. err=%v",
            namespace,
            claimName,
            err)
    }

    if utilfeature.DefaultFeatureGate.Enabled(features.PVCProtection) {
        // Pods that use a PVC that is being deleted must not be started.
        //
        // In case an old kubelet is running without this check or some
        // kubelets have this feature disabled, the worst that can happen is
        // that such a pod is scheduled. This was the default behavior in 1.8
        // and earlier and users should not be that surprised.
        // It should happen only in the very rare case when the scheduler
        // schedules a pod and the user deletes a PVC that's used by it at
        // the same time.
        if volumeutil.IsPVCBeingDeleted(pvc) {
            return "", "", fmt.Errorf(
                "can't start pod because PVC %s/%s is being deleted",
                namespace,
                claimName)
        }
    }

    if pvc.Status.Phase != v1.ClaimBound || pvc.Spec.VolumeName == "" {
        return "", "", fmt.Errorf(
            "PVC %s/%s has non-bound phase (%q) or empty pvc.Spec.VolumeName (%q)",
            namespace,
            claimName,
            pvc.Status.Phase,
            pvc.Spec.VolumeName)
    }

    return pvc.Spec.VolumeName, pvc.UID, nil
}

// getPVSpec fetches the PV object with the given name from the API server
// and returns a volume.Spec representing it.
// An error is returned if the call to fetch the PV object fails.
func (dswp *desiredStateOfWorldPopulator) getPVSpec(
    name string,
    pvcReadOnly bool,
    expectedClaimUID types.UID) (*volume.Spec, string, error) {
    pv, err := dswp.kubeClient.CoreV1().PersistentVolumes().Get(name, metav1.GetOptions{})
    if err != nil || pv == nil {
        return nil, "", fmt.Errorf(
            "failed to fetch PV %q from API server. err=%v", name, err)
    }

    if pv.Spec.ClaimRef == nil {
        return nil, "", fmt.Errorf(
            "found PV object %q but it has a nil pv.Spec.ClaimRef indicating it is not yet bound to the claim",
            name)
    }

    if pv.Spec.ClaimRef.UID != expectedClaimUID {
        return nil, "", fmt.Errorf(
            "found PV object %q but its pv.Spec.ClaimRef.UID (%q) does not point to claim.UID (%q)",
            name,
            pv.Spec.ClaimRef.UID,
            expectedClaimUID)
    }

    volumeGidValue := getPVVolumeGidAnnotationValue(pv)
    return volume.NewSpecFromPersistentVolume(pv, pvcReadOnly), volumeGidValue, nil
}

func (dswp *desiredStateOfWorldPopulator) makeVolumeMap(containers []v1.Container) (map[string]bool, map[string]bool) {
    volumeDevicesMap := make(map[string]bool)
    volumeMountsMap := make(map[string]bool)

    for _, container := range containers {
        if container.VolumeMounts != nil {
            for _, mount := range container.VolumeMounts {
                volumeMountsMap[mount.Name] = true
            }
        }
        // TODO: remove feature gate check after no longer needed
        if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) &&
            container.VolumeDevices != nil {
            for _, device := range container.VolumeDevices {
                volumeDevicesMap[device.Name] = true
            }
        }
    }

    return volumeMountsMap, volumeDevicesMap
}

func getPVVolumeGidAnnotationValue(pv *v1.PersistentVolume) string {
    if volumeGid, ok := pv.Annotations[volumehelper.VolumeGidAnnotationKey]; ok {
        return volumeGid
    }

    return ""
}
545
vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator_test.go
generated
vendored
Normal file
@ -0,0 +1,545 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package populator

import (
    "testing"
    "time"

    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    "k8s.io/client-go/kubernetes/fake"
    core "k8s.io/client-go/testing"
    "k8s.io/kubernetes/pkg/kubelet/configmap"
    containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
    kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
    podtest "k8s.io/kubernetes/pkg/kubelet/pod/testing"
    "k8s.io/kubernetes/pkg/kubelet/secret"
    "k8s.io/kubernetes/pkg/kubelet/status"
    statustest "k8s.io/kubernetes/pkg/kubelet/status/testing"
    "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache"
    volumetesting "k8s.io/kubernetes/pkg/volume/testing"
    "k8s.io/kubernetes/pkg/volume/util/types"
    "k8s.io/kubernetes/pkg/volume/util/volumehelper"
)

func TestFindAndAddNewPods_FindAndRemoveDeletedPods(t *testing.T) {
    // create dswp
    pv := &v1.PersistentVolume{
        ObjectMeta: metav1.ObjectMeta{
            Name: "dswp-test-volume-name",
        },
        Spec: v1.PersistentVolumeSpec{
            ClaimRef: &v1.ObjectReference{Namespace: "ns", Name: "file-bound"},
        },
    }
    pvc := &v1.PersistentVolumeClaim{
        Spec: v1.PersistentVolumeClaimSpec{
            VolumeName: "dswp-test-volume-name",
        },
        Status: v1.PersistentVolumeClaimStatus{
            Phase: v1.ClaimBound,
        },
    }
    dswp, fakePodManager, fakesDSW := createDswpWithVolume(t, pv, pvc)

    // create pod
    containers := []v1.Container{
        {
            VolumeMounts: []v1.VolumeMount{
                {
                    Name:      "dswp-test-volume-name",
                    MountPath: "/mnt",
                },
            },
        },
    }
    pod := createPodWithVolume("dswp-test-pod", "dswp-test-volume-name", "file-bound", containers)

    fakePodManager.AddPod(pod)

    podName := volumehelper.GetUniquePodName(pod)

    generatedVolumeName := "fake-plugin/" + pod.Spec.Volumes[0].Name

    dswp.findAndAddNewPods()

    if !dswp.pods.processedPods[podName] {
        t.Fatalf("Failed to record that the volumes for the specified pod: %s have been processed by the populator", podName)
    }

    expectedVolumeName := v1.UniqueVolumeName(generatedVolumeName)

    volumeExists := fakesDSW.VolumeExists(expectedVolumeName)
    if !volumeExists {
        t.Fatalf(
            "VolumeExists(%q) failed. Expected: <true> Actual: <%v>",
            expectedVolumeName,
            volumeExists)
    }

    if podExistsInVolume := fakesDSW.PodExistsInVolume(
        podName, expectedVolumeName); !podExistsInVolume {
        t.Fatalf(
            "DSW PodExistsInVolume returned incorrect value. Expected: <true> Actual: <%v>",
            podExistsInVolume)
    }

    verifyVolumeExistsInVolumesToMount(
        t, v1.UniqueVolumeName(generatedVolumeName), false /* expectReportedInUse */, fakesDSW)

    // Let the pod be terminated.
    podGet, exist := fakePodManager.GetPodByName(pod.Namespace, pod.Name)
    if !exist {
        t.Fatalf("Failed to get pod by pod name: %s and namespace: %s", pod.Name, pod.Namespace)
    }
    podGet.Status.Phase = v1.PodFailed

    // The pod is added to fakePodManager, but fakeRuntime cannot see it, so
    // findAndRemoveDeletedPods() removes the pod and the volumes it mounted.
    dswp.findAndRemoveDeletedPods()

    if dswp.pods.processedPods[podName] {
        t.Fatalf("Failed to remove pods from desired state of world since they no longer exist")
    }

    volumeExists = fakesDSW.VolumeExists(expectedVolumeName)
    if volumeExists {
        t.Fatalf(
            "VolumeExists(%q) failed. Expected: <false> Actual: <%v>",
            expectedVolumeName,
            volumeExists)
    }

    if podExistsInVolume := fakesDSW.PodExistsInVolume(
        podName, expectedVolumeName); podExistsInVolume {
        t.Fatalf(
            "DSW PodExistsInVolume returned incorrect value. Expected: <false> Actual: <%v>",
            podExistsInVolume)
    }

    volumesToMount := fakesDSW.GetVolumesToMount()
    for _, volume := range volumesToMount {
        if volume.VolumeName == expectedVolumeName {
            t.Fatalf(
                "Found volume %v in the list of desired state of world volumes to mount. Expected it not to exist.",
                expectedVolumeName)
        }
    }
}

func TestFindAndAddNewPods_FindAndRemoveDeletedPods_Valid_Block_VolumeDevices(t *testing.T) {
    // Enable BlockVolume feature gate
    utilfeature.DefaultFeatureGate.Set("BlockVolume=true")

    // create dswp
    mode := v1.PersistentVolumeBlock
    pv := &v1.PersistentVolume{
        ObjectMeta: metav1.ObjectMeta{
            Name: "dswp-test-volume-name",
        },
        Spec: v1.PersistentVolumeSpec{
            ClaimRef:   &v1.ObjectReference{Namespace: "ns", Name: "block-bound"},
            VolumeMode: &mode,
        },
    }
    pvc := &v1.PersistentVolumeClaim{
        Spec: v1.PersistentVolumeClaimSpec{
            VolumeName: "dswp-test-volume-name",
        },
        Status: v1.PersistentVolumeClaimStatus{
            Phase: v1.ClaimBound,
        },
    }
    dswp, fakePodManager, fakesDSW := createDswpWithVolume(t, pv, pvc)

    // create pod
    containers := []v1.Container{
        {
            VolumeDevices: []v1.VolumeDevice{
                {
                    Name:       "dswp-test-volume-name",
                    DevicePath: "/dev/sdb",
                },
            },
        },
    }
    pod := createPodWithVolume("dswp-test-pod", "dswp-test-volume-name", "block-bound", containers)

    fakePodManager.AddPod(pod)

    podName := volumehelper.GetUniquePodName(pod)

    generatedVolumeName := "fake-plugin/" + pod.Spec.Volumes[0].Name

    dswp.findAndAddNewPods()

    if !dswp.pods.processedPods[podName] {
        t.Fatalf("Failed to record that the volumes for the specified pod: %s have been processed by the populator", podName)
    }

    expectedVolumeName := v1.UniqueVolumeName(generatedVolumeName)

    volumeExists := fakesDSW.VolumeExists(expectedVolumeName)
    if !volumeExists {
        t.Fatalf(
            "VolumeExists(%q) failed. Expected: <true> Actual: <%v>",
            expectedVolumeName,
            volumeExists)
    }

    if podExistsInVolume := fakesDSW.PodExistsInVolume(
        podName, expectedVolumeName); !podExistsInVolume {
        t.Fatalf(
            "DSW PodExistsInVolume returned incorrect value. Expected: <true> Actual: <%v>",
            podExistsInVolume)
    }

    verifyVolumeExistsInVolumesToMount(
        t, v1.UniqueVolumeName(generatedVolumeName), false /* expectReportedInUse */, fakesDSW)

    // Let the pod be terminated.
    podGet, exist := fakePodManager.GetPodByName(pod.Namespace, pod.Name)
    if !exist {
        t.Fatalf("Failed to get pod by pod name: %s and namespace: %s", pod.Name, pod.Namespace)
    }
    podGet.Status.Phase = v1.PodFailed

    // The pod is added to fakePodManager, but fakeRuntime cannot see it, so
    // findAndRemoveDeletedPods() removes the pod and the volumes it mounted.
    dswp.findAndRemoveDeletedPods()

    if dswp.pods.processedPods[podName] {
        t.Fatalf("Failed to remove pods from desired state of world since they no longer exist")
    }

    volumeExists = fakesDSW.VolumeExists(expectedVolumeName)
    if volumeExists {
        t.Fatalf(
            "VolumeExists(%q) failed. Expected: <false> Actual: <%v>",
            expectedVolumeName,
            volumeExists)
    }

    if podExistsInVolume := fakesDSW.PodExistsInVolume(
        podName, expectedVolumeName); podExistsInVolume {
        t.Fatalf(
            "DSW PodExistsInVolume returned incorrect value. Expected: <false> Actual: <%v>",
            podExistsInVolume)
    }

    volumesToMount := fakesDSW.GetVolumesToMount()
    for _, volume := range volumesToMount {
        if volume.VolumeName == expectedVolumeName {
            t.Fatalf(
                "Found volume %v in the list of desired state of world volumes to mount. Expected it not to exist.",
                expectedVolumeName)
        }
    }

    // Roll the feature gate back to false.
    utilfeature.DefaultFeatureGate.Set("BlockVolume=false")
}
|
||||
|
||||
func TestCreateVolumeSpec_Valid_File_VolumeMounts(t *testing.T) {
    // create dswp
    mode := v1.PersistentVolumeFilesystem
    pv := &v1.PersistentVolume{
        ObjectMeta: metav1.ObjectMeta{
            Name: "dswp-test-volume-name",
        },
        Spec: v1.PersistentVolumeSpec{
            ClaimRef:   &v1.ObjectReference{Namespace: "ns", Name: "file-bound"},
            VolumeMode: &mode,
        },
    }
    pvc := &v1.PersistentVolumeClaim{
        Spec: v1.PersistentVolumeClaimSpec{
            VolumeName: "dswp-test-volume-name",
        },
        Status: v1.PersistentVolumeClaimStatus{
            Phase: v1.ClaimBound,
        },
    }
    dswp, fakePodManager, _ := createDswpWithVolume(t, pv, pvc)

    // create pod
    containers := []v1.Container{
        {
            VolumeMounts: []v1.VolumeMount{
                {
                    Name:      "dswp-test-volume-name",
                    MountPath: "/mnt",
                },
            },
        },
    }
    pod := createPodWithVolume("dswp-test-pod", "dswp-test-volume-name", "file-bound", containers)

    fakePodManager.AddPod(pod)
    mountsMap, devicesMap := dswp.makeVolumeMap(pod.Spec.Containers)
    volumeSpec, _, err :=
        dswp.createVolumeSpec(pod.Spec.Volumes[0], pod.Name, pod.Namespace, mountsMap, devicesMap)

    // Assert
    if volumeSpec == nil || err != nil {
        t.Fatalf("Failed to create volumeSpec with combination of filesystem mode and volumeMounts. err: %v", err)
    }
}

func TestCreateVolumeSpec_Valid_Block_VolumeDevices(t *testing.T) {
    // Enable BlockVolume feature gate
    utilfeature.DefaultFeatureGate.Set("BlockVolume=true")

    // create dswp
    mode := v1.PersistentVolumeBlock
    pv := &v1.PersistentVolume{
        ObjectMeta: metav1.ObjectMeta{
            Name: "dswp-test-volume-name",
        },
        Spec: v1.PersistentVolumeSpec{
            ClaimRef:   &v1.ObjectReference{Namespace: "ns", Name: "block-bound"},
            VolumeMode: &mode,
        },
    }
    pvc := &v1.PersistentVolumeClaim{
        Spec: v1.PersistentVolumeClaimSpec{
            VolumeName: "dswp-test-volume-name",
        },
        Status: v1.PersistentVolumeClaimStatus{
            Phase: v1.ClaimBound,
        },
    }
    dswp, fakePodManager, _ := createDswpWithVolume(t, pv, pvc)

    // create pod
    containers := []v1.Container{
        {
            VolumeDevices: []v1.VolumeDevice{
                {
                    Name:       "dswp-test-volume-name",
                    DevicePath: "/dev/sdb",
                },
            },
        },
    }
    pod := createPodWithVolume("dswp-test-pod", "dswp-test-volume-name", "block-bound", containers)

    fakePodManager.AddPod(pod)
    mountsMap, devicesMap := dswp.makeVolumeMap(pod.Spec.Containers)
    volumeSpec, _, err :=
        dswp.createVolumeSpec(pod.Spec.Volumes[0], pod.Name, pod.Namespace, mountsMap, devicesMap)

    // Assert
    if volumeSpec == nil || err != nil {
        t.Fatalf("Failed to create volumeSpec with combination of block mode and volumeDevices. err: %v", err)
    }

    // Roll the feature gate back to false.
    utilfeature.DefaultFeatureGate.Set("BlockVolume=false")
}

func TestCreateVolumeSpec_Invalid_File_VolumeDevices(t *testing.T) {
    // Enable BlockVolume feature gate
    utilfeature.DefaultFeatureGate.Set("BlockVolume=true")

    // create dswp
    mode := v1.PersistentVolumeFilesystem
    pv := &v1.PersistentVolume{
        ObjectMeta: metav1.ObjectMeta{
            Name: "dswp-test-volume-name",
        },
        Spec: v1.PersistentVolumeSpec{
            ClaimRef:   &v1.ObjectReference{Namespace: "ns", Name: "file-bound"},
            VolumeMode: &mode,
        },
    }
    pvc := &v1.PersistentVolumeClaim{
        Spec: v1.PersistentVolumeClaimSpec{
            VolumeName: "dswp-test-volume-name",
        },
        Status: v1.PersistentVolumeClaimStatus{
            Phase: v1.ClaimBound,
        },
    }
    dswp, fakePodManager, _ := createDswpWithVolume(t, pv, pvc)

    // create pod
    containers := []v1.Container{
        {
            VolumeDevices: []v1.VolumeDevice{
                {
                    Name:       "dswp-test-volume-name",
                    DevicePath: "/dev/sdb",
                },
            },
        },
    }
    pod := createPodWithVolume("dswp-test-pod", "dswp-test-volume-name", "file-bound", containers)

    fakePodManager.AddPod(pod)
    mountsMap, devicesMap := dswp.makeVolumeMap(pod.Spec.Containers)
    volumeSpec, _, err :=
        dswp.createVolumeSpec(pod.Spec.Volumes[0], pod.Name, pod.Namespace, mountsMap, devicesMap)

    // Assert
    if volumeSpec != nil || err == nil {
        t.Fatalf("Unexpected volumeMode and volumeMounts/volumeDevices combination is accepted")
    }

    // Roll the feature gate back to false.
    utilfeature.DefaultFeatureGate.Set("BlockVolume=false")
}

func TestCreateVolumeSpec_Invalid_Block_VolumeMounts(t *testing.T) {
    // Enable BlockVolume feature gate
    utilfeature.DefaultFeatureGate.Set("BlockVolume=true")

    // create dswp
    mode := v1.PersistentVolumeBlock
    pv := &v1.PersistentVolume{
        ObjectMeta: metav1.ObjectMeta{
            Name: "dswp-test-volume-name",
        },
        Spec: v1.PersistentVolumeSpec{
            ClaimRef:   &v1.ObjectReference{Namespace: "ns", Name: "block-bound"},
            VolumeMode: &mode,
        },
    }
    pvc := &v1.PersistentVolumeClaim{
        Spec: v1.PersistentVolumeClaimSpec{
            VolumeName: "dswp-test-volume-name",
        },
        Status: v1.PersistentVolumeClaimStatus{
            Phase: v1.ClaimBound,
        },
    }
    dswp, fakePodManager, _ := createDswpWithVolume(t, pv, pvc)

    // create pod
    containers := []v1.Container{
        {
            VolumeMounts: []v1.VolumeMount{
                {
                    Name:      "dswp-test-volume-name",
                    MountPath: "/mnt",
                },
            },
        },
    }
    pod := createPodWithVolume("dswp-test-pod", "dswp-test-volume-name", "block-bound", containers)

    fakePodManager.AddPod(pod)
    mountsMap, devicesMap := dswp.makeVolumeMap(pod.Spec.Containers)
    volumeSpec, _, err :=
        dswp.createVolumeSpec(pod.Spec.Volumes[0], pod.Name, pod.Namespace, mountsMap, devicesMap)

    // Assert
    if volumeSpec != nil || err == nil {
        t.Fatalf("Unexpected volumeMode and volumeMounts/volumeDevices combination is accepted")
    }

    // Roll the feature gate back to false.
    utilfeature.DefaultFeatureGate.Set("BlockVolume=false")
}

func verifyVolumeExistsInVolumesToMount(t *testing.T, expectedVolumeName v1.UniqueVolumeName, expectReportedInUse bool, dsw cache.DesiredStateOfWorld) {
    volumesToMount := dsw.GetVolumesToMount()
    for _, volume := range volumesToMount {
        if volume.VolumeName == expectedVolumeName {
            if volume.ReportedInUse != expectReportedInUse {
                t.Fatalf(
                    "Found volume %v in the list of VolumesToMount, but ReportedInUse incorrect. Expected: <%v> Actual: <%v>",
                    expectedVolumeName,
                    expectReportedInUse,
                    volume.ReportedInUse)
            }

            return
        }
    }

    t.Fatalf(
        "Could not find volume %v in the list of desired state of world volumes to mount %+v",
        expectedVolumeName,
        volumesToMount)
}

func createPodWithVolume(pod, pv, pvc string, containers []v1.Container) *v1.Pod {
    return &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Name:      pod,
            UID:       "dswp-test-pod-uid",
            Namespace: "dswp-test",
        },
        Spec: v1.PodSpec{
            Volumes: []v1.Volume{
                {
                    Name: pv,
                    VolumeSource: v1.VolumeSource{
                        GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
                            PDName: "dswp-test-fake-device",
                        },
                        PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
                            ClaimName: pvc,
                        },
                    },
                },
            },
            Containers: containers,
        },
        Status: v1.PodStatus{
            Phase: v1.PodRunning,
        },
    }
}

func createDswpWithVolume(t *testing.T, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) (*desiredStateOfWorldPopulator, kubepod.Manager, cache.DesiredStateOfWorld) {
    fakeVolumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
    fakeClient := &fake.Clientset{}
    fakeClient.AddReactor("get", "persistentvolumeclaims", func(action core.Action) (bool, runtime.Object, error) {
        return true, pvc, nil
    })
    fakeClient.AddReactor("get", "persistentvolumes", func(action core.Action) (bool, runtime.Object, error) {
        return true, pv, nil
    })

    fakeSecretManager := secret.NewFakeManager()
    fakeConfigMapManager := configmap.NewFakeManager()
    fakePodManager := kubepod.NewBasicPodManager(
        podtest.NewFakeMirrorClient(), fakeSecretManager, fakeConfigMapManager)

    fakesDSW := cache.NewDesiredStateOfWorld(fakeVolumePluginMgr)
    fakeRuntime := &containertest.FakeRuntime{}

    fakeStatusManager := status.NewManager(fakeClient, fakePodManager, &statustest.FakePodDeletionSafetyProvider{})

    dswp := &desiredStateOfWorldPopulator{
        kubeClient:                fakeClient,
        loopSleepDuration:         100 * time.Millisecond,
        getPodStatusRetryDuration: 2 * time.Second,
        podManager:                fakePodManager,
        podStatusProvider:         fakeStatusManager,
        desiredStateOfWorld:       fakesDSW,
        pods: processedPods{
            processedPods: make(map[types.UniquePodName]bool)},
        kubeContainerRuntime:     fakeRuntime,
        keepTerminatedPodVolumes: false,
    }
    return dswp, fakePodManager, fakesDSW
}

73
vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/BUILD
generated
vendored
Normal file
@ -0,0 +1,73 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = ["reconciler.go"],
    importpath = "k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler",
    deps = [
        "//pkg/features:go_default_library",
        "//pkg/kubelet/config:go_default_library",
        "//pkg/kubelet/volumemanager/cache:go_default_library",
        "//pkg/util/file:go_default_library",
        "//pkg/util/goroutinemap/exponentialbackoff:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/util/strings:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/util/nestedpendingoperations:go_default_library",
        "//pkg/volume/util/operationexecutor:go_default_library",
        "//pkg/volume/util/types:go_default_library",
        "//pkg/volume/util/volumehelper:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["reconciler_test.go"],
    importpath = "k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler",
    library = ":go_default_library",
    deps = [
        "//pkg/kubelet/volumemanager/cache:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/testing:go_default_library",
        "//pkg/volume/util/operationexecutor:go_default_library",
        "//pkg/volume/util/volumehelper:go_default_library",
        "//vendor/github.com/stretchr/testify/assert:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
        "//vendor/k8s.io/client-go/testing:go_default_library",
        "//vendor/k8s.io/client-go/tools/record:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
672
vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler.go
generated
vendored
Normal file
@ -0,0 +1,672 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package reconciler implements interfaces that attempt to reconcile the
// desired state of the world with the actual state of the world by triggering
// relevant actions (attach, detach, mount, unmount).
package reconciler

import (
    "fmt"
    "io/ioutil"
    "os"
    "path"
    "time"

    "github.com/golang/glog"
    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/wait"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/kubernetes/pkg/features"
    "k8s.io/kubernetes/pkg/kubelet/config"
    "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache"
    utilfile "k8s.io/kubernetes/pkg/util/file"
    "k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff"
    "k8s.io/kubernetes/pkg/util/mount"
    utilstrings "k8s.io/kubernetes/pkg/util/strings"
    volumepkg "k8s.io/kubernetes/pkg/volume"
    "k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations"
    "k8s.io/kubernetes/pkg/volume/util/operationexecutor"
    volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
    "k8s.io/kubernetes/pkg/volume/util/volumehelper"
)

// Reconciler runs a periodic loop to reconcile the desired state of the world
// with the actual state of the world by triggering attach, detach, mount, and
// unmount operations.
// Note: This is distinct from the Reconciler implemented by the attach/detach
// controller. This one reconciles state for the kubelet volume manager; that
// one reconciles state for the attach/detach controller.
type Reconciler interface {
    // Run starts the reconciliation loop, which executes periodically. It
    // checks whether volumes that should be mounted are mounted and volumes
    // that should be unmounted are unmounted, and triggers mount/unmount
    // operations to rectify any mismatch.
    // If attach/detach management is enabled, it also checks whether volumes
    // that should be attached are attached and volumes that should be
    // detached are detached, and triggers attach/detach operations as needed.
    Run(stopCh <-chan struct{})

    // StatesHasBeenSynced returns true only after the syncStates process has
    // synced states at least once after kubelet starts.
    StatesHasBeenSynced() bool
}

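// Illustrative sketch (not part of the vendored source): how a caller might
// run a Reconciler and gate on its first sync. The variables rc and stopCh
// are assumed to be provided by the surrounding component, as the kubelet
// volume manager does in volume_manager.go below.
//
//     stopCh := make(chan struct{})
//     go rc.Run(stopCh)
//     // Publishing the mounted-volume list is only safe once the
//     // reconstruct process has run at least once:
//     for !rc.StatesHasBeenSynced() {
//         time.Sleep(100 * time.Millisecond)
//     }
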
// NewReconciler returns a new instance of Reconciler.
//
// controllerAttachDetachEnabled - if true, indicates that the attach/detach
// controller is responsible for managing the attach/detach operations for
// this node, and therefore the volume manager should not manage them itself
// loopSleepDuration - the amount of time the reconciler loop sleeps between
// successive executions
// syncDuration - the amount of time the syncStates sleeps between
// successive executions
// waitForAttachTimeout - the amount of time the Mount function will wait for
// the volume to be attached
// nodeName - the Name for this node, used by Attach and Detach methods
// desiredStateOfWorld - cache containing the desired state of the world
// actualStateOfWorld - cache containing the actual state of the world
// populatorHasAddedPods - checker for whether the populator has finished
// adding pods to the desiredStateOfWorld cache at least once after sources
// are all ready (before sources are ready, pods are probably missing)
// operationExecutor - used to trigger attach/detach/mount/unmount operations
// safely (prevents more than one operation from being triggered on the same
// volume)
// mounter - mounter passed in from kubelet; passed down the unmount path
// volumePluginMgr - volume plugin manager passed from kubelet
func NewReconciler(
    kubeClient clientset.Interface,
    controllerAttachDetachEnabled bool,
    loopSleepDuration time.Duration,
    syncDuration time.Duration,
    waitForAttachTimeout time.Duration,
    nodeName types.NodeName,
    desiredStateOfWorld cache.DesiredStateOfWorld,
    actualStateOfWorld cache.ActualStateOfWorld,
    populatorHasAddedPods func() bool,
    operationExecutor operationexecutor.OperationExecutor,
    mounter mount.Interface,
    volumePluginMgr *volumepkg.VolumePluginMgr,
    kubeletPodsDir string) Reconciler {
    return &reconciler{
        kubeClient:                    kubeClient,
        controllerAttachDetachEnabled: controllerAttachDetachEnabled,
        loopSleepDuration:             loopSleepDuration,
        syncDuration:                  syncDuration,
        waitForAttachTimeout:          waitForAttachTimeout,
        nodeName:                      nodeName,
        desiredStateOfWorld:           desiredStateOfWorld,
        actualStateOfWorld:            actualStateOfWorld,
        populatorHasAddedPods:         populatorHasAddedPods,
        operationExecutor:             operationExecutor,
        mounter:                       mounter,
        volumePluginMgr:               volumePluginMgr,
        kubeletPodsDir:                kubeletPodsDir,
        timeOfLastSync:                time.Time{},
    }
}

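// Illustrative sketch (not part of the vendored source): a NewReconciler call
// using the same defaults that volume_manager.go passes below. The argument
// names (kubeClient, dsw, asw, oe, mounter, pluginMgr) are hypothetical
// placeholders for values owned by the caller.
//
//     rc := NewReconciler(
//         kubeClient,
//         true,                 /* controllerAttachDetachEnabled */
//         100*time.Millisecond, /* loopSleepDuration */
//         3*time.Minute,        /* syncDuration */
//         10*time.Minute,       /* waitForAttachTimeout */
//         nodeName,
//         dsw,
//         asw,
//         populator.HasAddedPods,
//         oe,
//         mounter,
//         pluginMgr,
//         "/var/lib/kubelet/pods")
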
type reconciler struct {
    kubeClient                    clientset.Interface
    controllerAttachDetachEnabled bool
    loopSleepDuration             time.Duration
    syncDuration                  time.Duration
    waitForAttachTimeout          time.Duration
    nodeName                      types.NodeName
    desiredStateOfWorld           cache.DesiredStateOfWorld
    actualStateOfWorld            cache.ActualStateOfWorld
    populatorHasAddedPods         func() bool
    operationExecutor             operationexecutor.OperationExecutor
    mounter                       mount.Interface
    volumePluginMgr               *volumepkg.VolumePluginMgr
    kubeletPodsDir                string
    timeOfLastSync                time.Time
}

func (rc *reconciler) Run(stopCh <-chan struct{}) {
    // Wait for the populator to indicate that it has actually populated the
    // desired state of the world, meaning it has completed a populate loop
    // that started after sources are all ready. After that, there is no need
    // to keep checking.
    wait.PollUntil(rc.loopSleepDuration, func() (bool, error) {
        rc.reconciliationLoopFunc(rc.populatorHasAddedPods())()
        return rc.populatorHasAddedPods(), nil
    }, stopCh)
    wait.Until(rc.reconciliationLoopFunc(true), rc.loopSleepDuration, stopCh)
}

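// Illustrative sketch (not part of the vendored source): the two-phase
// pattern Run uses from k8s.io/apimachinery/pkg/util/wait. PollUntil loops
// until its condition reports true (here: the populator has added pods),
// then Until runs the steady-state loop until stopCh closes. The function
// names doOneStartupPass, startupComplete, and doOneSteadyStatePass are
// hypothetical.
//
//     wait.PollUntil(period, func() (bool, error) {
//         doOneStartupPass()
//         return startupComplete(), nil
//     }, stopCh)
//     wait.Until(doOneSteadyStatePass, period, stopCh)
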
func (rc *reconciler) reconciliationLoopFunc(populatorHasAddedPods bool) func() {
    return func() {
        rc.reconcile()

        // Start the reconciler's reconstruct process only after the populator
        // has added pods, so that it runs against a desired state of the
        // world that is populated with pod volume information. Otherwise the
        // reconstruct process may add incomplete volume information and cause
        // confusion. In addition, if the desired state of the world has not
        // been populated yet, the reconstruct process may clean up pods'
        // volumes that are still in use, because the desired state of the
        // world does not yet contain a complete list of pods.
        if populatorHasAddedPods && time.Since(rc.timeOfLastSync) > rc.syncDuration {
            glog.V(5).Infof("Desired state of world has been populated with pods, starting reconstruct state function")
            rc.sync()
        }
    }
}

func (rc *reconciler) reconcile() {
    // Unmounts are triggered before mounts so that a volume that was
    // referenced by a pod that was deleted, and is now referenced by another
    // pod, is unmounted from the first pod before being mounted to the new
    // pod.

    // Ensure volumes that should be unmounted are unmounted.
    for _, mountedVolume := range rc.actualStateOfWorld.GetMountedVolumes() {
        if !rc.desiredStateOfWorld.PodExistsInVolume(mountedVolume.PodName, mountedVolume.VolumeName) {
            volumeHandler, err := operationexecutor.NewVolumeHandler(mountedVolume.VolumeSpec, rc.operationExecutor)
            if err != nil {
                glog.Errorf(mountedVolume.GenerateErrorDetailed("operationExecutor.NewVolumeHandler for UnmountVolume failed", err).Error())
                continue
            }
            err = volumeHandler.UnmountVolumeHandler(mountedVolume.MountedVolume, rc.actualStateOfWorld)
            if err != nil &&
                !nestedpendingoperations.IsAlreadyExists(err) &&
                !exponentialbackoff.IsExponentialBackoff(err) {
                // Ignore nestedpendingoperations.IsAlreadyExists and exponentialbackoff.IsExponentialBackoff errors, they are expected.
                // Log all other errors.
                glog.Errorf(mountedVolume.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.UnmountVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error())
            }
            if err == nil {
                glog.Infof(mountedVolume.GenerateMsgDetailed("operationExecutor.UnmountVolume started", ""))
            }
        }
    }

    // Ensure volumes that should be attached/mounted are attached/mounted.
    for _, volumeToMount := range rc.desiredStateOfWorld.GetVolumesToMount() {
        volMounted, devicePath, err := rc.actualStateOfWorld.PodExistsInVolume(volumeToMount.PodName, volumeToMount.VolumeName)
        volumeToMount.DevicePath = devicePath
        if cache.IsVolumeNotAttachedError(err) {
            if rc.controllerAttachDetachEnabled || !volumeToMount.PluginIsAttachable {
                // Volume is not attached (or does not implement attacher) and
                // kubelet attach is disabled; wait for the controller to
                // finish attaching the volume.
                glog.V(12).Infof(volumeToMount.GenerateMsgDetailed("Starting operationExecutor.VerifyControllerAttachedVolume", ""))
                err := rc.operationExecutor.VerifyControllerAttachedVolume(
                    volumeToMount.VolumeToMount,
                    rc.nodeName,
                    rc.actualStateOfWorld)
                if err != nil &&
                    !nestedpendingoperations.IsAlreadyExists(err) &&
                    !exponentialbackoff.IsExponentialBackoff(err) {
                    // Ignore nestedpendingoperations.IsAlreadyExists and exponentialbackoff.IsExponentialBackoff errors, they are expected.
                    // Log all other errors.
                    glog.Errorf(volumeToMount.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.VerifyControllerAttachedVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error())
                }
                if err == nil {
                    glog.Infof(volumeToMount.GenerateMsgDetailed("operationExecutor.VerifyControllerAttachedVolume started", ""))
                }
            } else {
                // Volume is not attached to the node, kubelet attach is
                // enabled, and the volume implements an attacher, so attach it.
                volumeToAttach := operationexecutor.VolumeToAttach{
                    VolumeName: volumeToMount.VolumeName,
                    VolumeSpec: volumeToMount.VolumeSpec,
                    NodeName:   rc.nodeName,
                }
                glog.V(12).Infof(volumeToAttach.GenerateMsgDetailed("Starting operationExecutor.AttachVolume", ""))
                err := rc.operationExecutor.AttachVolume(volumeToAttach, rc.actualStateOfWorld)
                if err != nil &&
                    !nestedpendingoperations.IsAlreadyExists(err) &&
                    !exponentialbackoff.IsExponentialBackoff(err) {
                    // Ignore nestedpendingoperations.IsAlreadyExists and exponentialbackoff.IsExponentialBackoff errors, they are expected.
                    // Log all other errors.
                    glog.Errorf(volumeToMount.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.AttachVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error())
                }
                if err == nil {
                    glog.Infof(volumeToMount.GenerateMsgDetailed("operationExecutor.AttachVolume started", ""))
                }
            }
        } else if !volMounted || cache.IsRemountRequiredError(err) {
            // Volume is not mounted, or is already mounted but requires remounting.
            remountingLogStr := ""
            isRemount := cache.IsRemountRequiredError(err)
            if isRemount {
                remountingLogStr = "Volume is already mounted to pod, but remount was requested."
            }
            volumeHandler, err := operationexecutor.NewVolumeHandler(volumeToMount.VolumeSpec, rc.operationExecutor)
            if err != nil {
                glog.Errorf(volumeToMount.GenerateErrorDetailed("operationExecutor.NewVolumeHandler for MountVolume failed", err).Error())
                continue
            }
            err = volumeHandler.MountVolumeHandler(rc.waitForAttachTimeout, volumeToMount.VolumeToMount, rc.actualStateOfWorld, isRemount, remountingLogStr)
            if err != nil &&
                !nestedpendingoperations.IsAlreadyExists(err) &&
                !exponentialbackoff.IsExponentialBackoff(err) {
                // Ignore nestedpendingoperations.IsAlreadyExists and exponentialbackoff.IsExponentialBackoff errors, they are expected.
                // Log all other errors.
                glog.Errorf(volumeToMount.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.MountVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error())
            }
            if err == nil {
                if remountingLogStr == "" {
                    glog.V(1).Infof(volumeToMount.GenerateMsgDetailed("operationExecutor.MountVolume started", remountingLogStr))
                } else {
                    glog.V(5).Infof(volumeToMount.GenerateMsgDetailed("operationExecutor.MountVolume started", remountingLogStr))
                }
            }
        }
    }

    // Ensure devices that should be detached/unmounted are detached/unmounted.
    for _, attachedVolume := range rc.actualStateOfWorld.GetUnmountedVolumes() {
        // Check IsOperationPending to avoid marking a volume as detached if it's in the process of mounting.
        if !rc.desiredStateOfWorld.VolumeExists(attachedVolume.VolumeName) &&
            !rc.operationExecutor.IsOperationPending(attachedVolume.VolumeName, nestedpendingoperations.EmptyUniquePodName) {
            if attachedVolume.GloballyMounted {
                volumeHandler, err := operationexecutor.NewVolumeHandler(attachedVolume.VolumeSpec, rc.operationExecutor)
                if err != nil {
                    glog.Errorf(attachedVolume.GenerateErrorDetailed("operationExecutor.NewVolumeHandler for UnmountDevice failed", err).Error())
                    continue
                }
                err = volumeHandler.UnmountDeviceHandler(attachedVolume.AttachedVolume, rc.actualStateOfWorld, rc.mounter)
                if err != nil &&
                    !nestedpendingoperations.IsAlreadyExists(err) &&
                    !exponentialbackoff.IsExponentialBackoff(err) {
                    // Ignore nestedpendingoperations.IsAlreadyExists and exponentialbackoff.IsExponentialBackoff errors, they are expected.
                    // Log all other errors.
                    glog.Errorf(attachedVolume.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.UnmountDevice failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error())
                }
                if err == nil {
                    glog.Infof(attachedVolume.GenerateMsgDetailed("operationExecutor.UnmountDevice started", ""))
                }
            } else {
                // Volume is attached to the node; detach it.
                if rc.controllerAttachDetachEnabled || !attachedVolume.PluginIsAttachable {
                    // Kubelet is not responsible for detaching, or this volume
                    // has a non-attachable volume plugin; just mark the volume
                    // as detached.
                    rc.actualStateOfWorld.MarkVolumeAsDetached(attachedVolume.VolumeName, attachedVolume.NodeName)
                    glog.Infof(attachedVolume.GenerateMsgDetailed("Volume detached", fmt.Sprintf("DevicePath %q", attachedVolume.DevicePath)))
                } else {
                    // Only detach if kubelet detach is enabled.
                    glog.V(12).Infof(attachedVolume.GenerateMsgDetailed("Starting operationExecutor.DetachVolume", ""))
                    err := rc.operationExecutor.DetachVolume(
                        attachedVolume.AttachedVolume, false /* verifySafeToDetach */, rc.actualStateOfWorld)
                    if err != nil &&
                        !nestedpendingoperations.IsAlreadyExists(err) &&
                        !exponentialbackoff.IsExponentialBackoff(err) {
                        // Ignore nestedpendingoperations.IsAlreadyExists and exponentialbackoff.IsExponentialBackoff errors, they are expected.
                        // Log all other errors.
                        glog.Errorf(attachedVolume.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.DetachVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error())
                    }
                    if err == nil {
                        glog.Infof(attachedVolume.GenerateMsgDetailed("operationExecutor.DetachVolume started", ""))
                    }
                }
            }
        }
    }
}

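// Illustrative sketch (not part of the vendored source): the error filtering
// above repeats for every triggered operation and could be factored into a
// helper along these lines. The name isExpectedError is hypothetical.
//
//     func isExpectedError(err error) bool {
//         return nestedpendingoperations.IsAlreadyExists(err) ||
//             exponentialbackoff.IsExponentialBackoff(err)
//     }
//
//     if err != nil && !isExpectedError(err) {
//         glog.Errorf(...) // log only unexpected errors
//     }
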
// sync tries to observe the real world by scanning all pods' volume
// directories from the disk. If the actual and desired states of the world
// are not consistent with the observed world, it means that some mounted
// volumes were left out, probably during a kubelet restart. This process
// reconstructs those volumes and updates the actual and desired states. In
// the following reconciler loop, those volumes will be cleaned up.
func (rc *reconciler) sync() {
    defer rc.updateLastSyncTime()
    rc.syncStates(rc.kubeletPodsDir)
}

func (rc *reconciler) updateLastSyncTime() {
    rc.timeOfLastSync = time.Now()
}

func (rc *reconciler) StatesHasBeenSynced() bool {
    return !rc.timeOfLastSync.IsZero()
}

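// Illustrative sketch (not part of the vendored source): because sync defers
// updateLastSyncTime, the throttle in reconciliationLoopFunc reduces to a
// plain elapsed-time check, shown here with the default from
// volume_manager.go.
//
//     rc.syncDuration = 3 * time.Minute // reconcilerSyncStatesSleepPeriod
//     if time.Since(rc.timeOfLastSync) > rc.syncDuration {
//         rc.sync() // runs at most once per syncDuration
//     }
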
type podVolume struct {
    podName        volumetypes.UniquePodName
    volumeSpecName string
    mountPath      string
    pluginName     string
    volumeMode     v1.PersistentVolumeMode
}

type reconstructedVolume struct {
    volumeName          v1.UniqueVolumeName
    podName             volumetypes.UniquePodName
    volumeSpec          *volumepkg.Spec
    outerVolumeSpecName string
    pod                 *v1.Pod
    pluginIsAttachable  bool
    volumeGidValue      string
    devicePath          string
    reportedInUse       bool
    mounter             volumepkg.Mounter
    blockVolumeMapper   volumepkg.BlockVolumeMapper
}

// syncStates scans the volume directories under the given pod directory. If a
// volume is not in either the actual or desired state of the world and has no
// pending operation, this function reconstructs the volume spec and puts it
// in both the actual and desired states of the world. If no running container
// is mounting the volume, the volume will be removed by the desired state of
// the world's populator and cleaned up by the reconciler.
func (rc *reconciler) syncStates(podsDir string) {
    // Get volume information by reading the pod's directory.
    podVolumes, err := getVolumesFromPodDir(podsDir)
    if err != nil {
        glog.Errorf("Cannot get volumes from disk %v", err)
        return
    }

    volumesNeedUpdate := make(map[v1.UniqueVolumeName]*reconstructedVolume)
    for _, volume := range podVolumes {
        reconstructedVolume, err := rc.reconstructVolume(volume)
        if err != nil {
            glog.Errorf("Could not construct volume information: %v", err)
            continue
        }
        // Check whether there is a pending operation for the given pod and
        // volume. The pending operation must be checked before the actual and
        // desired states to avoid a race during the check. For example, the
        // following could happen if the pending operation were checked after
        // the actual and desired states:
        // 1. The pod is checked and does not exist in either the actual or desired state.
        // 2. An operation for the given pod finishes and the actual state is updated.
        // 3. The pending-operation check then finds no pending operation for the given pod.
        // During the state reconstruction period, no new volume operations can
        // be issued. If the mounted path is not in a pending operation, nor in
        // the actual or desired states, the volume needs to be reconstructed
        // back into the states.
        pending := rc.operationExecutor.IsOperationPending(reconstructedVolume.volumeName, reconstructedVolume.podName)
        dswExist := rc.desiredStateOfWorld.PodExistsInVolume(reconstructedVolume.podName, reconstructedVolume.volumeName)
        aswExist, _, _ := rc.actualStateOfWorld.PodExistsInVolume(reconstructedVolume.podName, reconstructedVolume.volumeName)

        if !rc.StatesHasBeenSynced() {
            // In case this is the first state reconstruction after kubelet
            // starts: a persistent volume must have been mounted before the
            // kubelet restart, because no mount operation could have started
            // yet (node status is not updated before this very first
            // syncStates finishes, so VerifyControllerAttachedVolume would
            // fail). In this case, the volume should be put back into the
            // actual state now, whether or not the desired state has it. This
            // prevents the node status from being updated to empty for
            // attachable volumes, which could otherwise happen when a volume
            // discovered on disk is part of the desired state but is then
            // quickly deleted from it. If such a volume were not added to the
            // actual state, the node status updater would not see it in
            // either state, which in turn could cause the master controller
            // to detach the volume while it is still mounted.
            if aswExist || !reconstructedVolume.pluginIsAttachable {
                continue
            }
        } else {
            // Check pending first, since no new operations can be started at
            // this point. Otherwise there might be a race condition between
            // checking the actual states and the pending operations.
            if pending || dswExist || aswExist {
                continue
            }
        }

        glog.V(2).Infof(
            "Reconciler sync states: could not find pod information in desired or actual states or pending operation, update it in both states: %+v",
            reconstructedVolume)
        volumesNeedUpdate[reconstructedVolume.volumeName] = reconstructedVolume
    }

    if len(volumesNeedUpdate) > 0 {
        if err = rc.updateStates(volumesNeedUpdate); err != nil {
            glog.Errorf("Error occurred during reconstruct volume from disk: %v", err)
        }
    }
}

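// Illustrative sketch (not part of the vendored source): the decision above
// written as a pair of predicates. A scanned volume is reconstructed only
// when it is invisible to every state tracker.
//
//     // first sync after kubelet start:
//     reconstruct := !aswExist && reconstructedVolume.pluginIsAttachable
//     // later syncs:
//     reconstruct = !pending && !dswExist && !aswExist
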
// reconstructVolume rebuilds the volume spec and the reconstructedVolume data
// structure by reading the pod's volume directories.
func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume, error) {
    // plugin initializations
    plugin, err := rc.volumePluginMgr.FindPluginByName(volume.pluginName)
    if err != nil {
        return nil, err
    }
    attachablePlugin, err := rc.volumePluginMgr.FindAttachablePluginByName(volume.pluginName)
    if err != nil {
        return nil, err
    }

    // Create volumeSpec
    pod := &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            UID: types.UID(volume.podName),
        },
    }
    // TODO: remove feature gate check after no longer needed
    var mapperPlugin volumepkg.BlockVolumePlugin
    tmpSpec := &volumepkg.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{}}}
    if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
        mapperPlugin, err = rc.volumePluginMgr.FindMapperPluginByName(volume.pluginName)
        if err != nil {
            return nil, err
        }
        tmpSpec = &volumepkg.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{VolumeMode: &volume.volumeMode}}}
    }
    volumeHandler, err := operationexecutor.NewVolumeHandler(tmpSpec, rc.operationExecutor)
    if err != nil {
        return nil, err
    }
    volumeSpec, err := volumeHandler.ReconstructVolumeHandler(
        plugin,
        mapperPlugin,
        pod.UID,
        volume.podName,
        volume.volumeSpecName,
        volume.mountPath,
        volume.pluginName)
    if err != nil {
        return nil, err
    }

    volumeName, err := plugin.GetVolumeName(volumeSpec)
    if err != nil {
        return nil, err
    }
    var uniqueVolumeName v1.UniqueVolumeName
    if attachablePlugin != nil {
        uniqueVolumeName = volumehelper.GetUniqueVolumeName(volume.pluginName, volumeName)
    } else {
        uniqueVolumeName = volumehelper.GetUniqueVolumeNameForNonAttachableVolume(volume.podName, plugin, volumeSpec)
    }

    // Check for the existence of a mount point (filesystem volume) or a
    // symbolic link (block volume).
    isExist, checkErr := volumeHandler.CheckVolumeExistence(volume.mountPath, volumeSpec.Name(), rc.mounter, uniqueVolumeName, volume.podName, pod.UID, attachablePlugin)
    if checkErr != nil {
        return nil, checkErr
    }
    // If the mount or symlink does not exist, volume reconstruction must fail.
    if !isExist {
        return nil, fmt.Errorf("Volume: %q is not mounted", uniqueVolumeName)
    }

    volumeMounter, newMounterErr := plugin.NewMounter(
        volumeSpec,
        pod,
        volumepkg.VolumeOptions{})
    if newMounterErr != nil {
        return nil, fmt.Errorf(
            "reconstructVolume.NewMounter failed for volume %q (spec.Name: %q) pod %q (UID: %q) with: %v",
            uniqueVolumeName,
            volumeSpec.Name(),
            volume.podName,
            pod.UID,
            newMounterErr)
    }

    // TODO: remove feature gate check after no longer needed
    var volumeMapper volumepkg.BlockVolumeMapper
    if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
        var newMapperErr error
        if mapperPlugin != nil {
            volumeMapper, newMapperErr = mapperPlugin.NewBlockVolumeMapper(
                volumeSpec,
                pod,
                volumepkg.VolumeOptions{})
            if newMapperErr != nil {
                return nil, fmt.Errorf(
                    "reconstructVolume.NewBlockVolumeMapper failed for volume %q (spec.Name: %q) pod %q (UID: %q) with: %v",
                    uniqueVolumeName,
                    volumeSpec.Name(),
                    volume.podName,
                    pod.UID,
                    newMapperErr)
            }
        }
    }

    reconstructedVolume := &reconstructedVolume{
        volumeName: uniqueVolumeName,
        podName:    volume.podName,
        volumeSpec: volumeSpec,
        // volume.volumeSpecName is actually the InnerVolumeSpecName. It will
        // likely be updated in updateStates() by checking the desired state's
        // volumeToMount list and fetching the real OuterVolumeSpecName. If
        // the pod is deleted during this period and the desired state does
        // not have this information, it will not be used for volume cleanup.
        outerVolumeSpecName: volume.volumeSpecName,
        pod:                 pod,
        pluginIsAttachable:  attachablePlugin != nil,
        volumeGidValue:      "",
        devicePath:          "",
        mounter:             volumeMounter,
        blockVolumeMapper:   volumeMapper,
    }
    return reconstructedVolume, nil
}

func (rc *reconciler) updateStates(volumesNeedUpdate map[v1.UniqueVolumeName]*reconstructedVolume) error {
    // Get the node status to retrieve volume device path information.
    node, fetchErr := rc.kubeClient.CoreV1().Nodes().Get(string(rc.nodeName), metav1.GetOptions{})
    if fetchErr != nil {
        glog.Errorf("updateStates in reconciler: could not get node status with error %v", fetchErr)
    } else {
        for _, attachedVolume := range node.Status.VolumesAttached {
            if volume, exists := volumesNeedUpdate[attachedVolume.Name]; exists {
                volume.devicePath = attachedVolume.DevicePath
                volumesNeedUpdate[attachedVolume.Name] = volume
                glog.V(4).Infof("Update devicePath from node status for volume (%q): %q", attachedVolume.Name, volume.devicePath)
            }
        }
    }

    // Get the list of volumes from the desired state and update the
    // OuterVolumeSpecName if the information is available.
    volumesToMount := rc.desiredStateOfWorld.GetVolumesToMount()
    for _, volumeToMount := range volumesToMount {
        if volume, exists := volumesNeedUpdate[volumeToMount.VolumeName]; exists {
            volume.outerVolumeSpecName = volumeToMount.OuterVolumeSpecName
            volumesNeedUpdate[volumeToMount.VolumeName] = volume
            glog.V(4).Infof("Update OuterVolumeSpecName from desired state for volume (%q): %q",
                volumeToMount.VolumeName, volume.outerVolumeSpecName)
        }
    }
    for _, volume := range volumesNeedUpdate {
        err := rc.actualStateOfWorld.MarkVolumeAsAttached(
            volume.volumeName, volume.volumeSpec, "" /* nodeName */, volume.devicePath)
        if err != nil {
            glog.Errorf("Could not add volume information to actual state of world: %v", err)
            continue
        }

        err = rc.actualStateOfWorld.AddPodToVolume(
            volume.podName,
            types.UID(volume.podName),
            volume.volumeName,
            volume.mounter,
            volume.blockVolumeMapper,
            volume.outerVolumeSpecName,
            volume.volumeGidValue)
        if err != nil {
            glog.Errorf("Could not add pod to volume information to actual state of world: %v", err)
            continue
        }
        if volume.pluginIsAttachable {
            err = rc.actualStateOfWorld.MarkDeviceAsMounted(volume.volumeName)
            if err != nil {
                glog.Errorf("Could not mark device as mounted in actual state of world: %v", err)
                continue
            }
            glog.Infof("Volume: %v is mounted", volume.volumeName)
        }

        _, err = rc.desiredStateOfWorld.AddPodToVolume(volume.podName,
            volume.pod,
            volume.volumeSpec,
            volume.outerVolumeSpecName,
            volume.volumeGidValue)
        if err != nil {
            glog.Errorf("Could not add pod to volume information to desired state of world: %v", err)
        }
    }
    return nil
}

// getVolumesFromPodDir scans through the volume directories under the given
// pod directory. It returns a list of pod volume information including the
// pod's UID, the volume's plugin name, mount path, and volume spec name.
func getVolumesFromPodDir(podDir string) ([]podVolume, error) {
    podsDirInfo, err := ioutil.ReadDir(podDir)
    if err != nil {
        return nil, err
    }
    volumes := []podVolume{}
    for i := range podsDirInfo {
        if !podsDirInfo[i].IsDir() {
            continue
        }
        podName := podsDirInfo[i].Name()
        podDir := path.Join(podDir, podName)

        // Find filesystem volume information
        // ex. filesystem volume: /pods/{podUid}/volumes/{escapeQualifiedPluginName}/{volumeName}
        volumesDirs := map[v1.PersistentVolumeMode]string{
            v1.PersistentVolumeFilesystem: path.Join(podDir, config.DefaultKubeletVolumesDirName),
        }
        // TODO: remove feature gate check after no longer needed
        if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
            // Find block volume information
            // ex. block volume: /pods/{podUid}/volumeDevices/{escapeQualifiedPluginName}/{volumeName}
            volumesDirs[v1.PersistentVolumeBlock] = path.Join(podDir, config.DefaultKubeletVolumeDevicesDirName)
        }
        for volumeMode, volumesDir := range volumesDirs {
            var volumesDirInfo []os.FileInfo
            if volumesDirInfo, err = ioutil.ReadDir(volumesDir); err != nil {
                // Just skip this iteration because the given volumesDir may
                // not exist, depending on the volumeMode.
                continue
            }
            for _, volumeDir := range volumesDirInfo {
                pluginName := volumeDir.Name()
                volumePluginPath := path.Join(volumesDir, pluginName)
                volumePluginDirs, err := utilfile.ReadDirNoStat(volumePluginPath)
                if err != nil {
                    glog.Errorf("Could not read volume plugin directory %q: %v", volumePluginPath, err)
                    continue
                }
                unescapePluginName := utilstrings.UnescapeQualifiedNameForDisk(pluginName)
                for _, volumeName := range volumePluginDirs {
                    mountPath := path.Join(volumePluginPath, volumeName)
                    glog.V(5).Infof("podName: %v, mount path from volume plugin directory: %v", podName, mountPath)
                    volumes = append(volumes, podVolume{
                        podName:        volumetypes.UniquePodName(podName),
                        volumeSpecName: volumeName,
                        mountPath:      mountPath,
                        pluginName:     unescapePluginName,
                        volumeMode:     volumeMode,
                    })
                }
            }
        }
    }
    glog.V(10).Infof("Get volumes from pod directory %q %+v", podDir, volumes)
    return volumes, nil
}

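// Illustrative sketch (not part of the vendored source): an example of the
// on-disk layout this function scans, assuming the default kubelet root
// directory. The "~" in the plugin directory name is the on-disk escape for
// "/", which UnescapeQualifiedNameForDisk reverses.
//
//     /var/lib/kubelet/pods/<pod-uid>/volumes/kubernetes.io~secret/my-secret
//
// scans to:
//
//     podVolume{
//         podName:        "<pod-uid>",
//         volumeSpecName: "my-secret",
//         mountPath:      ".../volumes/kubernetes.io~secret/my-secret",
//         pluginName:     "kubernetes.io/secret",
//         volumeMode:     v1.PersistentVolumeFilesystem,
//     }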
1025
vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
443
vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/volume_manager.go
generated
vendored
Normal file
@ -0,0 +1,443 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package volumemanager

import (
    "fmt"
    "sort"
    "strconv"
    "time"

    "github.com/golang/glog"
    "k8s.io/api/core/v1"
    k8stypes "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/runtime"
    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/apimachinery/pkg/util/wait"
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/record"
    "k8s.io/kubernetes/pkg/kubelet/config"
    "k8s.io/kubernetes/pkg/kubelet/container"
    kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
    "k8s.io/kubernetes/pkg/kubelet/pod"
    "k8s.io/kubernetes/pkg/kubelet/status"
    "k8s.io/kubernetes/pkg/kubelet/util/format"
    "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache"
    "k8s.io/kubernetes/pkg/kubelet/volumemanager/populator"
    "k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler"
    "k8s.io/kubernetes/pkg/util/mount"
    "k8s.io/kubernetes/pkg/volume"
    "k8s.io/kubernetes/pkg/volume/util"
    "k8s.io/kubernetes/pkg/volume/util/operationexecutor"
    "k8s.io/kubernetes/pkg/volume/util/types"
    "k8s.io/kubernetes/pkg/volume/util/volumehelper"
)

const (
    // reconcilerLoopSleepPeriod is the amount of time the reconciler loop waits
    // between successive executions
    reconcilerLoopSleepPeriod time.Duration = 100 * time.Millisecond

    // reconcilerSyncStatesSleepPeriod is the amount of time the reconciler reconstruct process
    // waits between successive executions
    reconcilerSyncStatesSleepPeriod time.Duration = 3 * time.Minute

    // desiredStateOfWorldPopulatorLoopSleepPeriod is the amount of time the
    // DesiredStateOfWorldPopulator loop waits between successive executions
    desiredStateOfWorldPopulatorLoopSleepPeriod time.Duration = 100 * time.Millisecond

    // desiredStateOfWorldPopulatorGetPodStatusRetryDuration is the amount of
    // time the DesiredStateOfWorldPopulator loop waits between successive pod
    // cleanup calls (to prevent calling containerruntime.GetPodStatus too
    // frequently).
    desiredStateOfWorldPopulatorGetPodStatusRetryDuration time.Duration = 2 * time.Second

    // podAttachAndMountTimeout is the maximum amount of time the
    // WaitForAttachAndMount call will wait for all volumes in the specified pod
    // to be attached and mounted. Even though cloud operations can take several
    // minutes to complete, we set the timeout to 2 minutes because kubelet
    // will retry in the next sync iteration. This frees the associated
    // goroutine of the pod to process newer updates if needed (e.g., a delete
    // request to the pod).
    // Value is slightly offset from 2 minutes to make timeouts due to this
    // constant recognizable.
    podAttachAndMountTimeout time.Duration = 2*time.Minute + 3*time.Second

    // podAttachAndMountRetryInterval is the amount of time the GetVolumesForPod
    // call waits before retrying
    podAttachAndMountRetryInterval time.Duration = 300 * time.Millisecond

    // waitForAttachTimeout is the maximum amount of time an
    // operationexecutor.Mount call will wait for a volume to be attached.
    // Set to 10 minutes because we've seen attach operations take several
    // minutes to complete for some volume plugins in some cases. While this
    // operation is waiting it only blocks other operations on the same device;
    // other devices are not affected.
    waitForAttachTimeout time.Duration = 10 * time.Minute
)

// VolumeManager runs a set of asynchronous loops that figure out which volumes
|
||||
// need to be attached/mounted/unmounted/detached based on the pods scheduled on
|
||||
// this node and makes it so.
|
||||
type VolumeManager interface {
|
||||
// Starts the volume manager and all the asynchronous loops that it controls
|
||||
Run(sourcesReady config.SourcesReady, stopCh <-chan struct{})
|
||||
|
||||
// WaitForAttachAndMount processes the volumes referenced in the specified
|
||||
// pod and blocks until they are all attached and mounted (reflected in
|
||||
// actual state of the world).
|
||||
// An error is returned if all volumes are not attached and mounted within
|
||||
// the duration defined in podAttachAndMountTimeout.
|
||||
WaitForAttachAndMount(pod *v1.Pod) error
|
||||
|
||||
// GetMountedVolumesForPod returns a VolumeMap containing the volumes
|
||||
// referenced by the specified pod that are successfully attached and
|
||||
// mounted. The key in the map is the OuterVolumeSpecName (i.e.
|
||||
// pod.Spec.Volumes[x].Name). It returns an empty VolumeMap if pod has no
|
||||
// volumes.
|
||||
GetMountedVolumesForPod(podName types.UniquePodName) container.VolumeMap
|
||||
|
||||
// GetExtraSupplementalGroupsForPod returns a list of the extra
|
||||
// supplemental groups for the Pod. These extra supplemental groups come
|
||||
// from annotations on persistent volumes that the pod depends on.
|
||||
GetExtraSupplementalGroupsForPod(pod *v1.Pod) []int64
|
||||
|
||||
// GetVolumesInUse returns a list of all volumes that implement the volume.Attacher
|
||||
// interface and are currently in use according to the actual and desired
|
||||
// state of the world caches. A volume is considered "in use" as soon as it
|
||||
// is added to the desired state of world, indicating it *should* be
|
||||
// attached to this node and remains "in use" until it is removed from both
|
||||
// the desired state of the world and the actual state of the world, or it
|
||||
// has been unmounted (as indicated in actual state of world).
|
||||
GetVolumesInUse() []v1.UniqueVolumeName
|
||||
|
||||
// ReconcilerStatesHasBeenSynced returns true only after the actual states in reconciler
|
||||
// has been synced at least once after kubelet starts so that it is safe to update mounted
|
||||
// volume list retrieved from actual state.
|
||||
ReconcilerStatesHasBeenSynced() bool
|
||||
|
||||
// VolumeIsAttached returns true if the given volume is attached to this
|
||||
// node.
|
||||
VolumeIsAttached(volumeName v1.UniqueVolumeName) bool
|
||||
|
||||
// Marks the specified volume as having successfully been reported as "in
|
||||
// use" in the nodes's volume status.
|
||||
MarkVolumesAsReportedInUse(volumesReportedAsInUse []v1.UniqueVolumeName)
|
||||
}
|
||||
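
// A minimal usage sketch of the interface above, assuming a hypothetical
// caller that already holds a constructed manager (vm), a pod (myPod), and
// the usual sourcesReady/stopCh plumbing; every name here is illustrative:
//
//	go vm.Run(sourcesReady, stopCh)
//	if err := vm.WaitForAttachAndMount(myPod); err != nil {
//		return err // volumes did not become ready within podAttachAndMountTimeout
//	}
//	// Mounted volumes are keyed by pod.Spec.Volumes[x].Name.
//	mounts := vm.GetMountedVolumesForPod(volumehelper.GetUniquePodName(myPod))
//	_ = mounts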

// NewVolumeManager returns a new concrete instance implementing the
// VolumeManager interface.
//
// kubeClient - the kube API client used by DesiredStateOfWorldPopulator to
// communicate with the API server to fetch PV and PVC objects.
// volumePluginMgr - the volume plugin manager used to access volume plugins.
// Must be pre-initialized.
func NewVolumeManager(
	controllerAttachDetachEnabled bool,
	nodeName k8stypes.NodeName,
	podManager pod.Manager,
	podStatusProvider status.PodStatusProvider,
	kubeClient clientset.Interface,
	volumePluginMgr *volume.VolumePluginMgr,
	kubeContainerRuntime kubecontainer.Runtime,
	mounter mount.Interface,
	kubeletPodsDir string,
	recorder record.EventRecorder,
	checkNodeCapabilitiesBeforeMount bool,
	keepTerminatedPodVolumes bool) VolumeManager {

	vm := &volumeManager{
		kubeClient:          kubeClient,
		volumePluginMgr:     volumePluginMgr,
		desiredStateOfWorld: cache.NewDesiredStateOfWorld(volumePluginMgr),
		actualStateOfWorld:  cache.NewActualStateOfWorld(nodeName, volumePluginMgr),
		operationExecutor: operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
			kubeClient,
			volumePluginMgr,
			recorder,
			checkNodeCapabilitiesBeforeMount,
			util.NewBlockVolumePathHandler())),
	}

	vm.desiredStateOfWorldPopulator = populator.NewDesiredStateOfWorldPopulator(
		kubeClient,
		desiredStateOfWorldPopulatorLoopSleepPeriod,
		desiredStateOfWorldPopulatorGetPodStatusRetryDuration,
		podManager,
		podStatusProvider,
		vm.desiredStateOfWorld,
		kubeContainerRuntime,
		keepTerminatedPodVolumes)
	vm.reconciler = reconciler.NewReconciler(
		kubeClient,
		controllerAttachDetachEnabled,
		reconcilerLoopSleepPeriod,
		reconcilerSyncStatesSleepPeriod,
		waitForAttachTimeout,
		nodeName,
		vm.desiredStateOfWorld,
		vm.actualStateOfWorld,
		vm.desiredStateOfWorldPopulator.HasAddedPods,
		vm.operationExecutor,
		mounter,
		volumePluginMgr,
		kubeletPodsDir)

	return vm
}
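
// A construction sketch, assuming the caller (normally the kubelet) has
// pre-initialized every dependency; all variable names are placeholders (see
// newTestVolumeManager in the test file below for a concrete call):
//
//	vm := NewVolumeManager(
//		true, // controllerAttachDetachEnabled
//		nodeName, podManager, statusManager, kubeClient, plugMgr,
//		containerRuntime, mounter, kubeletPodsDir, recorder,
//		false, // checkNodeCapabilitiesBeforeMount
//		false /* keepTerminatedPodVolumes */)
//	go vm.Run(sourcesReady, stopCh)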

// volumeManager implements the VolumeManager interface.
type volumeManager struct {
	// kubeClient is the kube API client used by DesiredStateOfWorldPopulator to
	// communicate with the API server to fetch PV and PVC objects.
	kubeClient clientset.Interface

	// volumePluginMgr is the volume plugin manager used to access volume
	// plugins. It must be pre-initialized.
	volumePluginMgr *volume.VolumePluginMgr

	// desiredStateOfWorld is a data structure containing the desired state of
	// the world according to the volume manager: i.e. what volumes should be
	// attached and which pods are referencing the volumes.
	// The data structure is populated by the desired state of the world
	// populator using the kubelet pod manager.
	desiredStateOfWorld cache.DesiredStateOfWorld

	// actualStateOfWorld is a data structure containing the actual state of
	// the world according to the manager: i.e. which volumes are attached to
	// this node and what pods the volumes are mounted to.
	// The data structure is populated upon successful completion of attach,
	// detach, mount, and unmount actions triggered by the reconciler.
	actualStateOfWorld cache.ActualStateOfWorld

	// operationExecutor is used to start asynchronous attach, detach, mount,
	// and unmount operations.
	operationExecutor operationexecutor.OperationExecutor

	// reconciler runs an asynchronous periodic loop to reconcile the
	// desiredStateOfWorld with the actualStateOfWorld by triggering attach,
	// detach, mount, and unmount operations using the operationExecutor.
	reconciler reconciler.Reconciler

	// desiredStateOfWorldPopulator runs an asynchronous periodic loop to
	// populate the desiredStateOfWorld using the kubelet PodManager.
	desiredStateOfWorldPopulator populator.DesiredStateOfWorldPopulator
}

func (vm *volumeManager) Run(sourcesReady config.SourcesReady, stopCh <-chan struct{}) {
	defer runtime.HandleCrash()

	go vm.desiredStateOfWorldPopulator.Run(sourcesReady, stopCh)
	glog.V(2).Infof("The desired_state_of_world populator starts")

	glog.Infof("Starting Kubelet Volume Manager")
	go vm.reconciler.Run(stopCh)

	<-stopCh
	glog.Infof("Shutting down Kubelet Volume Manager")
}

func (vm *volumeManager) GetMountedVolumesForPod(podName types.UniquePodName) container.VolumeMap {
	podVolumes := make(container.VolumeMap)
	for _, mountedVolume := range vm.actualStateOfWorld.GetMountedVolumesForPod(podName) {
		podVolumes[mountedVolume.OuterVolumeSpecName] = container.VolumeInfo{
			Mounter:           mountedVolume.Mounter,
			BlockVolumeMapper: mountedVolume.BlockVolumeMapper,
			ReadOnly:          mountedVolume.VolumeSpec.ReadOnly,
		}
	}
	return podVolumes
}

func (vm *volumeManager) GetExtraSupplementalGroupsForPod(pod *v1.Pod) []int64 {
	podName := volumehelper.GetUniquePodName(pod)
	supplementalGroups := sets.NewString()

	for _, mountedVolume := range vm.actualStateOfWorld.GetMountedVolumesForPod(podName) {
		if mountedVolume.VolumeGidValue != "" {
			supplementalGroups.Insert(mountedVolume.VolumeGidValue)
		}
	}

	result := make([]int64, 0, supplementalGroups.Len())
	for _, group := range supplementalGroups.List() {
		iGroup, extra := getExtraSupplementalGid(group, pod)
		if !extra {
			continue
		}

		result = append(result, int64(iGroup))
	}

	return result
}

func (vm *volumeManager) GetVolumesInUse() []v1.UniqueVolumeName {
	// Report volumes in both the desired state of world and the actual state
	// of world, so that a volume is marked in use as soon as the decision is
	// made that it *should* be attached to this node, and stays marked until
	// it is safely unmounted.
	desiredVolumes := vm.desiredStateOfWorld.GetVolumesToMount()
	mountedVolumes := vm.actualStateOfWorld.GetGloballyMountedVolumes()
	volumesToReportInUse := make([]v1.UniqueVolumeName, 0, len(desiredVolumes)+len(mountedVolumes))
	desiredVolumesMap := make(map[v1.UniqueVolumeName]bool, len(desiredVolumes)+len(mountedVolumes))

	for _, volume := range desiredVolumes {
		if volume.PluginIsAttachable {
			if _, exists := desiredVolumesMap[volume.VolumeName]; !exists {
				desiredVolumesMap[volume.VolumeName] = true
				volumesToReportInUse = append(volumesToReportInUse, volume.VolumeName)
			}
		}
	}

	for _, volume := range mountedVolumes {
		if volume.PluginIsAttachable {
			if _, exists := desiredVolumesMap[volume.VolumeName]; !exists {
				volumesToReportInUse = append(volumesToReportInUse, volume.VolumeName)
			}
		}
	}

	sort.Slice(volumesToReportInUse, func(i, j int) bool {
		return string(volumesToReportInUse[i]) < string(volumesToReportInUse[j])
	})
	return volumesToReportInUse
}

func (vm *volumeManager) ReconcilerStatesHasBeenSynced() bool {
	return vm.reconciler.StatesHasBeenSynced()
}

func (vm *volumeManager) VolumeIsAttached(
	volumeName v1.UniqueVolumeName) bool {
	return vm.actualStateOfWorld.VolumeExists(volumeName)
}

func (vm *volumeManager) MarkVolumesAsReportedInUse(
	volumesReportedAsInUse []v1.UniqueVolumeName) {
	vm.desiredStateOfWorld.MarkVolumesReportedInUse(volumesReportedAsInUse)
}

func (vm *volumeManager) WaitForAttachAndMount(pod *v1.Pod) error {
	expectedVolumes := getExpectedVolumes(pod)
	if len(expectedVolumes) == 0 {
		// No volumes to verify
		return nil
	}

	glog.V(3).Infof("Waiting for volumes to attach and mount for pod %q", format.Pod(pod))
	uniquePodName := volumehelper.GetUniquePodName(pod)

	// Some pods expect to have Setup called over and over again to update.
	// Remount plugins for which this is true. (Atomically updating volumes,
	// like Downward API, depend on this to update the contents of the volume.)
	vm.desiredStateOfWorldPopulator.ReprocessPod(uniquePodName)
	vm.actualStateOfWorld.MarkRemountRequired(uniquePodName)

	err := wait.Poll(
		podAttachAndMountRetryInterval,
		podAttachAndMountTimeout,
		vm.verifyVolumesMountedFunc(uniquePodName, expectedVolumes))

	if err != nil {
		// Timeout expired
		unmountedVolumes :=
			vm.getUnmountedVolumes(uniquePodName, expectedVolumes)
		if len(unmountedVolumes) == 0 {
			return nil
		}

		return fmt.Errorf(
			"timeout expired waiting for volumes to attach/mount for pod %q/%q. list of unattached/unmounted volumes=%v",
			pod.Namespace,
			pod.Name,
			unmountedVolumes)
	}

	glog.V(3).Infof("All volumes are attached and mounted for pod %q", format.Pod(pod))
	return nil
}
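
// A rough retry budget implied by the wait.Poll call above:
// podAttachAndMountTimeout / podAttachAndMountRetryInterval = 123s / 0.3s,
// i.e. the mounted-volumes check runs roughly 410 times before giving up.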

// verifyVolumesMountedFunc returns a method that returns true when all expected
// volumes are mounted.
func (vm *volumeManager) verifyVolumesMountedFunc(podName types.UniquePodName, expectedVolumes []string) wait.ConditionFunc {
	return func() (done bool, err error) {
		return len(vm.getUnmountedVolumes(podName, expectedVolumes)) == 0, nil
	}
}

// getUnmountedVolumes fetches the current list of mounted volumes from
// the actual state of the world, and uses it to process the list of
// expectedVolumes. It returns a list of unmounted volumes.
func (vm *volumeManager) getUnmountedVolumes(podName types.UniquePodName, expectedVolumes []string) []string {
	mountedVolumes := sets.NewString()
	for _, mountedVolume := range vm.actualStateOfWorld.GetMountedVolumesForPod(podName) {
		mountedVolumes.Insert(mountedVolume.OuterVolumeSpecName)
	}
	return filterUnmountedVolumes(mountedVolumes, expectedVolumes)
}

// filterUnmountedVolumes adds each element of expectedVolumes that is not in
// mountedVolumes to a list of unmountedVolumes and returns it.
func filterUnmountedVolumes(mountedVolumes sets.String, expectedVolumes []string) []string {
	unmountedVolumes := []string{}
	for _, expectedVolume := range expectedVolumes {
		if !mountedVolumes.Has(expectedVolume) {
			unmountedVolumes = append(unmountedVolumes, expectedVolume)
		}
	}
	return unmountedVolumes
}
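
// For example, with mountedVolumes = {"vol1"} and
// expectedVolumes = []string{"vol1", "vol2"}, filterUnmountedVolumes
// returns []string{"vol2"}.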

// getExpectedVolumes returns a list of volumes that must be mounted in order to
// consider the volume setup step for this pod satisfied.
func getExpectedVolumes(pod *v1.Pod) []string {
	expectedVolumes := []string{}
	if pod == nil {
		return expectedVolumes
	}

	for _, podVolume := range pod.Spec.Volumes {
		expectedVolumes = append(expectedVolumes, podVolume.Name)
	}

	return expectedVolumes
}

// getExtraSupplementalGid returns the value of an extra supplemental GID as
// defined by an annotation on a volume and a boolean indicating whether the
// volume defined a GID that the pod doesn't already request.
func getExtraSupplementalGid(volumeGidValue string, pod *v1.Pod) (int64, bool) {
	if volumeGidValue == "" {
		return 0, false
	}

	gid, err := strconv.ParseInt(volumeGidValue, 10, 64)
	if err != nil {
		return 0, false
	}

	if pod.Spec.SecurityContext != nil {
		for _, existingGid := range pod.Spec.SecurityContext.SupplementalGroups {
			if gid == int64(existingGid) {
				return 0, false
			}
		}
	}

	return gid, true
}
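
// Worked examples of getExtraSupplementalGid, assuming a pod that already
// requests supplemental group 555 in its security context:
//
//	getExtraSupplementalGid("777", pod) // (777, true): new, parseable GID
//	getExtraSupplementalGid("555", pod) // (0, false): pod already requests it
//	getExtraSupplementalGid("abc", pod) // (0, false): not an integer
//	getExtraSupplementalGid("", pod)    // (0, false): empty annotation value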
342
vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/volume_manager_test.go
generated
vendored
Normal file
@ -0,0 +1,342 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package volumemanager

import (
	"os"
	"reflect"
	"strconv"
	"testing"
	"time"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/tools/record"
	utiltesting "k8s.io/client-go/util/testing"
	"k8s.io/kubernetes/pkg/kubelet/config"
	"k8s.io/kubernetes/pkg/kubelet/configmap"
	containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
	"k8s.io/kubernetes/pkg/kubelet/pod"
	kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
	podtest "k8s.io/kubernetes/pkg/kubelet/pod/testing"
	"k8s.io/kubernetes/pkg/kubelet/secret"
	"k8s.io/kubernetes/pkg/kubelet/status"
	statustest "k8s.io/kubernetes/pkg/kubelet/status/testing"
	"k8s.io/kubernetes/pkg/util/mount"
	"k8s.io/kubernetes/pkg/volume"
	volumetest "k8s.io/kubernetes/pkg/volume/testing"
	"k8s.io/kubernetes/pkg/volume/util/types"
	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)

const (
	testHostname = "test-hostname"
)

func TestGetMountedVolumesForPodAndGetVolumesInUse(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("volumeManagerTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	podManager := kubepod.NewBasicPodManager(podtest.NewFakeMirrorClient(), secret.NewFakeManager(), configmap.NewFakeManager())

	node, pod, pv, claim := createObjects()
	kubeClient := fake.NewSimpleClientset(node, pod, pv, claim)

	manager := newTestVolumeManager(tmpDir, podManager, kubeClient)

	stopCh := runVolumeManager(manager)
	defer close(stopCh)

	podManager.SetPods([]*v1.Pod{pod})

	// Fake node status update
	go simulateVolumeInUseUpdate(
		v1.UniqueVolumeName(node.Status.VolumesAttached[0].Name),
		stopCh,
		manager)

	err = manager.WaitForAttachAndMount(pod)
	if err != nil {
		t.Errorf("Expected success: %v", err)
	}

	expectedMounted := pod.Spec.Volumes[0].Name
	actualMounted := manager.GetMountedVolumesForPod(types.UniquePodName(pod.ObjectMeta.UID))
	if _, ok := actualMounted[expectedMounted]; !ok || (len(actualMounted) != 1) {
		t.Errorf("Expected %v to be mounted to pod but got %v", expectedMounted, actualMounted)
	}

	expectedInUse := []v1.UniqueVolumeName{v1.UniqueVolumeName(node.Status.VolumesAttached[0].Name)}
	actualInUse := manager.GetVolumesInUse()
	if !reflect.DeepEqual(expectedInUse, actualInUse) {
		t.Errorf("Expected %v to be in use but got %v", expectedInUse, actualInUse)
	}
}

func TestInitialPendingVolumesForPodAndGetVolumesInUse(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("volumeManagerTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	podManager := kubepod.NewBasicPodManager(podtest.NewFakeMirrorClient(), secret.NewFakeManager(), configmap.NewFakeManager())

	node, pod, pv, claim := createObjects()
	claim.Status = v1.PersistentVolumeClaimStatus{
		Phase: v1.ClaimPending,
	}

	kubeClient := fake.NewSimpleClientset(node, pod, pv, claim)

	manager := newTestVolumeManager(tmpDir, podManager, kubeClient)

	stopCh := runVolumeManager(manager)
	defer close(stopCh)

	podManager.SetPods([]*v1.Pod{pod})

	// Fake node status update
	go simulateVolumeInUseUpdate(
		v1.UniqueVolumeName(node.Status.VolumesAttached[0].Name),
		stopCh,
		manager)

	// delayed claim binding
	go delayClaimBecomesBound(kubeClient, claim.GetNamespace(), claim.ObjectMeta.Name)

	err = manager.WaitForAttachAndMount(pod)
	if err != nil {
		t.Errorf("Expected success: %v", err)
	}
}

func TestGetExtraSupplementalGroupsForPod(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("volumeManagerTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	podManager := kubepod.NewBasicPodManager(podtest.NewFakeMirrorClient(), secret.NewFakeManager(), configmap.NewFakeManager())

	node, pod, _, claim := createObjects()

	existingGid := pod.Spec.SecurityContext.SupplementalGroups[0]

	cases := []struct {
		gidAnnotation string
		expected      []int64
	}{
		{
			gidAnnotation: "777",
			expected:      []int64{777},
		},
		{
			gidAnnotation: strconv.FormatInt(int64(existingGid), 10),
			expected:      []int64{},
		},
		{
			gidAnnotation: "a",
			expected:      []int64{},
		},
		{
			gidAnnotation: "",
			expected:      []int64{},
		},
	}

	for _, tc := range cases {
		pv := &v1.PersistentVolume{
			ObjectMeta: metav1.ObjectMeta{
				Name: "pvA",
				Annotations: map[string]string{
					volumehelper.VolumeGidAnnotationKey: tc.gidAnnotation,
				},
			},
			Spec: v1.PersistentVolumeSpec{
				PersistentVolumeSource: v1.PersistentVolumeSource{
					GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
						PDName: "fake-device",
					},
				},
				ClaimRef: &v1.ObjectReference{
					Name: claim.ObjectMeta.Name,
				},
			},
		}
		kubeClient := fake.NewSimpleClientset(node, pod, pv, claim)

		manager := newTestVolumeManager(tmpDir, podManager, kubeClient)

		stopCh := runVolumeManager(manager)
		defer func() {
			close(stopCh)
		}()

		podManager.SetPods([]*v1.Pod{pod})

		// Fake node status update
		go simulateVolumeInUseUpdate(
			v1.UniqueVolumeName(node.Status.VolumesAttached[0].Name),
			stopCh,
			manager)

		err = manager.WaitForAttachAndMount(pod)
		if err != nil {
			t.Errorf("Expected success: %v", err)
			continue
		}

		actual := manager.GetExtraSupplementalGroupsForPod(pod)
		if !reflect.DeepEqual(tc.expected, actual) {
			t.Errorf("Expected supplemental groups %v, got %v", tc.expected, actual)
		}
	}
}

func newTestVolumeManager(tmpDir string, podManager pod.Manager, kubeClient clientset.Interface) VolumeManager {
	plug := &volumetest.FakeVolumePlugin{PluginName: "fake", Host: nil}
	fakeRecorder := &record.FakeRecorder{}
	plugMgr := &volume.VolumePluginMgr{}
	// TODO (#51147) inject mock prober
	plugMgr.InitPlugins([]volume.VolumePlugin{plug}, nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, kubeClient, nil))
	statusManager := status.NewManager(kubeClient, podManager, &statustest.FakePodDeletionSafetyProvider{})

	vm := NewVolumeManager(
		true,
		testHostname,
		podManager,
		statusManager,
		kubeClient,
		plugMgr,
		&containertest.FakeRuntime{},
		&mount.FakeMounter{},
		"",
		fakeRecorder,
		false, /* experimentalCheckNodeCapabilitiesBeforeMount */
		false /* keepTerminatedPodVolumes */)

	return vm
}

// createObjects returns objects for making a fake clientset. The pv is
// already attached to the node and bound to the claim used by the pod.
func createObjects() (*v1.Node, *v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
	node := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: testHostname},
		Status: v1.NodeStatus{
			VolumesAttached: []v1.AttachedVolume{
				{
					Name:       "fake/pvA",
					DevicePath: "fake/path",
				},
			}},
		Spec: v1.NodeSpec{ExternalID: testHostname},
	}
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "abc",
			Namespace: "nsA",
			UID:       "1234",
		},
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					Name: "vol1",
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "claimA",
						},
					},
				},
			},
			SecurityContext: &v1.PodSecurityContext{
				SupplementalGroups: []int64{555},
			},
		},
	}
	pv := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name: "pvA",
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: v1.PersistentVolumeSource{
				GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
					PDName: "fake-device",
				},
			},
			ClaimRef: &v1.ObjectReference{
				Name: "claimA",
			},
		},
	}
	claim := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "claimA",
			Namespace: "nsA",
		},
		Spec: v1.PersistentVolumeClaimSpec{
			VolumeName: "pvA",
		},
		Status: v1.PersistentVolumeClaimStatus{
			Phase: v1.ClaimBound,
		},
	}
	return node, pod, pv, claim
}

func simulateVolumeInUseUpdate(volumeName v1.UniqueVolumeName, stopCh <-chan struct{}, volumeManager VolumeManager) {
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			volumeManager.MarkVolumesAsReportedInUse(
				[]v1.UniqueVolumeName{volumeName})
		case <-stopCh:
			return
		}
	}
}

func delayClaimBecomesBound(
	kubeClient clientset.Interface,
	namespace, claimName string,
) {
	time.Sleep(500 * time.Millisecond)
	volumeClaim, _ :=
		kubeClient.CoreV1().PersistentVolumeClaims(namespace).Get(claimName, metav1.GetOptions{})
	volumeClaim.Status = v1.PersistentVolumeClaimStatus{
		Phase: v1.ClaimBound,
	}
	kubeClient.CoreV1().PersistentVolumeClaims(namespace).Update(volumeClaim)
}

func runVolumeManager(manager VolumeManager) chan struct{} {
	stopCh := make(chan struct{})
	//readyCh := make(chan bool, 1)
	//readyCh <- true
	sourcesReady := config.NewSourcesReady(func(_ sets.String) bool { return true })
	go manager.Run(sourcesReady, stopCh)
	return stopCh
}