Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-14 18:53:35 +00:00

Fresh dep ensure

This commit is contained in:

45 vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/BUILD (generated, vendored)
@@ -11,32 +11,36 @@ go_library(
srcs = ["attach_detach_controller.go"],
importpath = "k8s.io/kubernetes/pkg/controller/volume/attachdetach",
deps = [
"//pkg/cloudprovider:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/volume/attachdetach/cache:go_default_library",
"//pkg/controller/volume/attachdetach/metrics:go_default_library",
"//pkg/controller/volume/attachdetach/populator:go_default_library",
"//pkg/controller/volume/attachdetach/reconciler:go_default_library",
"//pkg/controller/volume/attachdetach/statusupdater:go_default_library",
"//pkg/controller/volume/attachdetach/util:go_default_library",
"//pkg/util/io:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/operationexecutor:go_default_library",
"//pkg/volume/util/volumepathhandler:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/authentication/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/api/authentication/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//staging/src/k8s.io/cloud-provider:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@@ -49,11 +53,11 @@ go_test(
"//pkg/controller/volume/attachdetach/cache:go_default_library",
"//pkg/controller/volume/attachdetach/testing:go_default_library",
"//pkg/volume:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
],
)

@@ -69,6 +73,7 @@ filegroup(
srcs = [
":package-srcs",
"//pkg/controller/volume/attachdetach/cache:all-srcs",
"//pkg/controller/volume/attachdetach/metrics:all-srcs",
"//pkg/controller/volume/attachdetach/populator:all-srcs",
"//pkg/controller/volume/attachdetach/reconciler:all-srcs",
"//pkg/controller/volume/attachdetach/statusupdater:all-srcs",
1 vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/OWNERS (generated, vendored)
@@ -1,2 +1,3 @@
approvers:
- saad-ali
- gnufied
194 vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/attach_detach_controller.go (generated, vendored)
@@ -23,12 +23,13 @@ import (
"net"
"time"

"github.com/golang/glog"
authenticationv1 "k8s.io/api/authentication/v1"
"k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
coreinformers "k8s.io/client-go/informers/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
@@ -36,14 +37,17 @@ import (
corelisters "k8s.io/client-go/listers/core/v1"
kcache "k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/client-go/util/workqueue"
cloudprovider "k8s.io/cloud-provider"
csiclient "k8s.io/csi-api/pkg/client/clientset/versioned"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/metrics"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/populator"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/util"
"k8s.io/kubernetes/pkg/util/io"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
@@ -93,6 +97,7 @@ type AttachDetachController interface {
// NewAttachDetachController returns a new instance of AttachDetachController.
func NewAttachDetachController(
kubeClient clientset.Interface,
csiClient csiclient.Interface,
podInformer coreinformers.PodInformer,
nodeInformer coreinformers.NodeInformer,
pvcInformer coreinformers.PersistentVolumeClaimInformer,
@@ -119,15 +124,18 @@ func NewAttachDetachController(
// deleted (probably can't do this with sharedInformer), etc.
adc := &attachDetachController{
kubeClient: kubeClient,
csiClient: csiClient,
pvcLister: pvcInformer.Lister(),
pvcsSynced: pvcInformer.Informer().HasSynced,
pvLister: pvInformer.Lister(),
pvsSynced: pvInformer.Informer().HasSynced,
podLister: podInformer.Lister(),
podsSynced: podInformer.Informer().HasSynced,
podIndexer: podInformer.Informer().GetIndexer(),
nodeLister: nodeInformer.Lister(),
nodesSynced: nodeInformer.Informer().HasSynced,
cloud: cloud,
pvcQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvcs"),
}

if err := adc.volumePluginMgr.InitPlugins(plugins, prober, adc); err != nil {
@@ -135,7 +143,7 @@ func NewAttachDetachController(
}

eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartLogging(klog.Infof)
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "attachdetach-controller"})
blkutil := volumepathhandler.NewBlockVolumePathHandler()
@@ -179,20 +187,63 @@ func NewAttachDetachController(
DeleteFunc: adc.podDelete,
})

// This custom indexer will index pods by its PVC keys. Then we don't need
// to iterate all pods every time to find pods which reference given PVC.
adc.podIndexer.AddIndexers(kcache.Indexers{
pvcKeyIndex: indexByPVCKey,
})

nodeInformer.Informer().AddEventHandler(kcache.ResourceEventHandlerFuncs{
AddFunc: adc.nodeAdd,
UpdateFunc: adc.nodeUpdate,
DeleteFunc: adc.nodeDelete,
})

pvcInformer.Informer().AddEventHandler(kcache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
adc.enqueuePVC(obj)
},
UpdateFunc: func(old, new interface{}) {
adc.enqueuePVC(new)
},
})

return adc, nil
}

const (
pvcKeyIndex string = "pvcKey"
)

// indexByPVCKey returns PVC keys for given pod. Note that the index is only
// used for attaching, so we are only interested in active pods with nodeName
// set.
func indexByPVCKey(obj interface{}) ([]string, error) {
pod, ok := obj.(*v1.Pod)
if !ok {
return []string{}, nil
}
if len(pod.Spec.NodeName) == 0 || volumeutil.IsPodTerminated(pod, pod.Status) {
return []string{}, nil
}
keys := []string{}
for _, podVolume := range pod.Spec.Volumes {
if pvcSource := podVolume.VolumeSource.PersistentVolumeClaim; pvcSource != nil {
keys = append(keys, fmt.Sprintf("%s/%s", pod.Namespace, pvcSource.ClaimName))
}
}
return keys, nil
}

type attachDetachController struct {
// kubeClient is the kube API client used by volumehost to communicate with
// the API server.
kubeClient clientset.Interface

// csiClient is the csi.storage.k8s.io API client used by volumehost to communicate with
// the API server.
csiClient csiclient.Interface

// pvcLister is the shared PVC lister used to fetch and store PVC
// objects from the API server. It is shared with other controllers and
// therefore the PVC objects in its store should be treated as immutable.
@@ -207,6 +258,7 @@ type attachDetachController struct {

podLister corelisters.PodLister
podsSynced kcache.InformerSynced
podIndexer kcache.Indexer

nodeLister corelisters.NodeLister
nodesSynced kcache.InformerSynced
@@ -251,13 +303,17 @@ type attachDetachController struct {

// recorder is used to record events in the API server
recorder record.EventRecorder

// pvcQueue is used to queue pvc objects
pvcQueue workqueue.RateLimitingInterface
}

func (adc *attachDetachController) Run(stopCh <-chan struct{}) {
defer runtime.HandleCrash()
defer adc.pvcQueue.ShutDown()

glog.Infof("Starting attach detach controller")
defer glog.Infof("Shutting down attach detach controller")
klog.Infof("Starting attach detach controller")
defer klog.Infof("Shutting down attach detach controller")

if !controller.WaitForCacheSync("attach detach", stopCh, adc.podsSynced, adc.nodesSynced, adc.pvcsSynced, adc.pvsSynced) {
return
@@ -265,20 +321,27 @@ func (adc *attachDetachController) Run(stopCh <-chan struct{}) {

err := adc.populateActualStateOfWorld()
if err != nil {
glog.Errorf("Error populating the actual state of world: %v", err)
klog.Errorf("Error populating the actual state of world: %v", err)
}
err = adc.populateDesiredStateOfWorld()
if err != nil {
glog.Errorf("Error populating the desired state of world: %v", err)
klog.Errorf("Error populating the desired state of world: %v", err)
}
go adc.reconciler.Run(stopCh)
go adc.desiredStateOfWorldPopulator.Run(stopCh)
go wait.Until(adc.pvcWorker, time.Second, stopCh)
metrics.Register(adc.pvcLister,
adc.pvLister,
adc.podLister,
adc.actualStateOfWorld,
adc.desiredStateOfWorld,
&adc.volumePluginMgr)

<-stopCh
}

func (adc *attachDetachController) populateActualStateOfWorld() error {
glog.V(5).Infof("Populating ActualStateOfworld")
klog.V(5).Infof("Populating ActualStateOfworld")
nodes, err := adc.nodeLister.List(labels.Everything())
if err != nil {
return err
@@ -295,7 +358,7 @@ func (adc *attachDetachController) populateActualStateOfWorld() error {
// scans the pods and updates their volumes in the ActualStateOfWorld too.
err = adc.actualStateOfWorld.MarkVolumeAsAttached(uniqueName, nil /* VolumeSpec */, nodeName, attachedVolume.DevicePath)
if err != nil {
glog.Errorf("Failed to mark the volume as attached: %v", err)
klog.Errorf("Failed to mark the volume as attached: %v", err)
continue
}
adc.processVolumesInUse(nodeName, node.Status.VolumesInUse)
@@ -328,7 +391,7 @@ func (adc *attachDetachController) getNodeVolumeDevicePath(
}

func (adc *attachDetachController) populateDesiredStateOfWorld() error {
glog.V(5).Infof("Populating DesiredStateOfworld")
klog.V(5).Infof("Populating DesiredStateOfworld")

pods, err := adc.podLister.List(labels.Everything())
if err != nil {
@@ -343,7 +406,7 @@ func (adc *attachDetachController) populateDesiredStateOfWorld() error {
// pod will be detached and the spec is irrelevant.
volumeSpec, err := util.CreateVolumeSpec(podVolume, podToAdd.Namespace, adc.pvcLister, adc.pvLister)
if err != nil {
glog.Errorf(
klog.Errorf(
"Error creating spec for volume %q, pod %q/%q: %v",
podVolume.Name,
podToAdd.Namespace,
@@ -354,7 +417,7 @@ func (adc *attachDetachController) populateDesiredStateOfWorld() error {
nodeName := types.NodeName(podToAdd.Spec.NodeName)
plugin, err := adc.volumePluginMgr.FindAttachablePluginBySpec(volumeSpec)
if err != nil || plugin == nil {
glog.V(10).Infof(
klog.V(10).Infof(
"Skipping volume %q for pod %q/%q: it does not implement attacher interface. err=%v",
podVolume.Name,
podToAdd.Namespace,
@@ -364,7 +427,7 @@ func (adc *attachDetachController) populateDesiredStateOfWorld() error {
}
volumeName, err := volumeutil.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)
if err != nil {
glog.Errorf(
klog.Errorf(
"Failed to find unique name for volume %q, pod %q/%q: %v",
podVolume.Name,
podToAdd.Namespace,
@@ -375,12 +438,12 @@ func (adc *attachDetachController) populateDesiredStateOfWorld() error {
if adc.actualStateOfWorld.VolumeNodeExists(volumeName, nodeName) {
devicePath, err := adc.getNodeVolumeDevicePath(volumeName, nodeName)
if err != nil {
glog.Errorf("Failed to find device path: %v", err)
klog.Errorf("Failed to find device path: %v", err)
continue
}
err = adc.actualStateOfWorld.MarkVolumeAsAttached(volumeName, volumeSpec, nodeName, devicePath)
if err != nil {
glog.Errorf("Failed to update volume spec for node %s: %v", nodeName, err)
klog.Errorf("Failed to update volume spec for node %s: %v", nodeName, err)
}
}
}
@@ -479,19 +542,96 @@ func (adc *attachDetachController) nodeDelete(obj interface{}) {
nodeName := types.NodeName(node.Name)
if err := adc.desiredStateOfWorld.DeleteNode(nodeName); err != nil {
// This might happen during drain, but we still want it to appear in our logs
glog.Infof("error removing node %q from desired-state-of-world: %v", nodeName, err)
klog.Infof("error removing node %q from desired-state-of-world: %v", nodeName, err)
}

adc.processVolumesInUse(nodeName, node.Status.VolumesInUse)
}

func (adc *attachDetachController) enqueuePVC(obj interface{}) {
key, err := kcache.DeletionHandlingMetaNamespaceKeyFunc(obj)
if err != nil {
runtime.HandleError(fmt.Errorf("Couldn't get key for object %+v: %v", obj, err))
return
}
adc.pvcQueue.Add(key)
}

// pvcWorker processes items from pvcQueue
func (adc *attachDetachController) pvcWorker() {
for adc.processNextItem() {
}
}

func (adc *attachDetachController) processNextItem() bool {
keyObj, shutdown := adc.pvcQueue.Get()
if shutdown {
return false
}
defer adc.pvcQueue.Done(keyObj)

if err := adc.syncPVCByKey(keyObj.(string)); err != nil {
// Rather than wait for a full resync, re-add the key to the
// queue to be processed.
adc.pvcQueue.AddRateLimited(keyObj)
runtime.HandleError(fmt.Errorf("Failed to sync pvc %q, will retry again: %v", keyObj.(string), err))
return true
}

// Finally, if no error occurs we Forget this item so it does not
// get queued again until another change happens.
adc.pvcQueue.Forget(keyObj)
return true
}

func (adc *attachDetachController) syncPVCByKey(key string) error {
klog.V(5).Infof("syncPVCByKey[%s]", key)
namespace, name, err := kcache.SplitMetaNamespaceKey(key)
if err != nil {
klog.V(4).Infof("error getting namespace & name of pvc %q to get pvc from informer: %v", key, err)
return nil
}
pvc, err := adc.pvcLister.PersistentVolumeClaims(namespace).Get(name)
if apierrors.IsNotFound(err) {
klog.V(4).Infof("error getting pvc %q from informer: %v", key, err)
return nil
}
if err != nil {
return err
}

if pvc.Status.Phase != v1.ClaimBound || pvc.Spec.VolumeName == "" {
// Skip unbound PVCs.
return nil
}

objs, err := adc.podIndexer.ByIndex(pvcKeyIndex, key)
if err != nil {
return err
}
for _, obj := range objs {
pod, ok := obj.(*v1.Pod)
if !ok {
continue
}
volumeActionFlag := util.DetermineVolumeAction(
pod,
adc.desiredStateOfWorld,
true /* default volume action */)

util.ProcessPodVolumes(pod, volumeActionFlag, /* addVolumes */
adc.desiredStateOfWorld, &adc.volumePluginMgr, adc.pvcLister, adc.pvLister)
}
return nil
}

// processVolumesInUse processes the list of volumes marked as "in-use"
// according to the specified Node's Status.VolumesInUse and updates the
// corresponding volume in the actual state of the world to indicate that it is
// mounted.
func (adc *attachDetachController) processVolumesInUse(
nodeName types.NodeName, volumesInUse []v1.UniqueVolumeName) {
glog.V(4).Infof("processVolumesInUse for node %q", nodeName)
klog.V(4).Infof("processVolumesInUse for node %q", nodeName)
for _, attachedVolume := range adc.actualStateOfWorld.GetAttachedVolumesForNode(nodeName) {
mounted := false
for _, volumeInUse := range volumesInUse {
@@ -502,8 +642,8 @@ func (adc *attachDetachController) processVolumesInUse(
}
err := adc.actualStateOfWorld.SetVolumeMountedByNode(attachedVolume.VolumeName, nodeName, mounted)
if err != nil {
glog.Warningf(
"SetVolumeMountedByNode(%q, %q, %q) returned an error: %v",
klog.Warningf(
"SetVolumeMountedByNode(%q, %q, %v) returned an error: %v",
attachedVolume.VolumeName, nodeName, mounted, err)
}
}
@@ -559,10 +699,6 @@ func (adc *attachDetachController) GetMounter(pluginName string) mount.Interface
return nil
}

func (adc *attachDetachController) GetWriter() io.Writer {
return nil
}

func (adc *attachDetachController) GetHostName() string {
return ""
}
@@ -593,6 +729,12 @@ func (adc *attachDetachController) GetServiceAccountTokenFunc() func(_, _ string
}
}

func (adc *attachDetachController) DeleteServiceAccountTokenFunc() func(types.UID) {
return func(types.UID) {
klog.Errorf("DeleteServiceAccountToken unsupported in attachDetachController")
}
}

func (adc *attachDetachController) GetExec(pluginName string) mount.Exec {
return mount.NewOsExec()
}
@@ -622,3 +764,7 @@ func (adc *attachDetachController) GetNodeName() types.NodeName {
func (adc *attachDetachController) GetEventRecorder() record.EventRecorder {
return adc.recorder
}

func (adc *attachDetachController) GetCSIClient() csiclient.Interface {
return adc.csiClient
}
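Editor's note: the pvcWorker/processNextItem/syncPVCByKey code above follows the standard client-go rate-limited workqueue worker pattern. The following is a hypothetical, minimal standalone sketch of that pattern (it is not part of the vendored code); the key string and the processKey helper are illustrative only, and it targets the pre-generics client-go workqueue API vendored here.

package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	queue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvcs")

	// Producers (e.g. informer event handlers) add namespace/name keys.
	queue.Add("default/my-claim")

	// Stand-in for syncPVCByKey: look the object up again and reconcile it.
	processKey := func(key string) error {
		fmt.Println("syncing", key)
		return nil
	}

	// Consumer loop: Get blocks until an item is available or the queue is
	// shut down; Done must always be called; failed keys are retried with
	// backoff via AddRateLimited, successful ones are dropped via Forget.
	processNext := func() bool {
		item, shutdown := queue.Get()
		if shutdown {
			return false
		}
		defer queue.Done(item)
		if err := processKey(item.(string)); err != nil {
			queue.AddRateLimited(item)
			return true
		}
		queue.Forget(item)
		return true
	}

	// ShutDown lets Get drain the remaining items and then report shutdown,
	// so the loop below processes the queued key and exits.
	queue.ShutDown()
	for processNext() {
	}
}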
@@ -40,6 +40,7 @@ func Test_NewAttachDetachController_Positive(t *testing.T) {
// Act
_, err := NewAttachDetachController(
fakeKubeClient,
nil, /* csiClient */
informerFactory.Core().V1().Pods(),
informerFactory.Core().V1().Nodes(),
informerFactory.Core().V1().PersistentVolumeClaims(),
@@ -214,6 +215,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2
// Create the controller
adcObj, err := NewAttachDetachController(
fakeKubeClient,
nil, /* csiClient */
informerFactory.Core().V1().Pods(),
informerFactory.Core().V1().Nodes(),
informerFactory.Core().V1().PersistentVolumeClaims(),
10 vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache/BUILD (generated, vendored)
@@ -18,9 +18,9 @@ go_library(
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/operationexecutor:go_default_library",
"//pkg/volume/util/types:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@@ -35,8 +35,8 @@ go_test(
"//pkg/controller/volume/attachdetach/testing:go_default_library",
"//pkg/volume/testing:go_default_library",
"//pkg/volume/util/types:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
],
)
@@ -26,7 +26,7 @@ import (
"sync"
"time"

"github.com/golang/glog"
"k8s.io/klog"

"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
@@ -304,7 +304,7 @@ func (asw *actualStateOfWorld) AddVolumeNode(
// Update the fields for volume object except the nodes attached to the volumes.
volumeObj.devicePath = devicePath
volumeObj.spec = volumeSpec
glog.V(2).Infof("Volume %q is already added to attachedVolume list to node %q, update device path %q",
klog.V(2).Infof("Volume %q is already added to attachedVolume list to node %q, update device path %q",
volumeName,
nodeName,
devicePath)
@@ -321,7 +321,7 @@ func (asw *actualStateOfWorld) AddVolumeNode(
detachRequestedTime: time.Time{},
}
} else {
glog.V(5).Infof("Volume %q is already added to attachedVolume list to the node %q",
klog.V(5).Infof("Volume %q is already added to attachedVolume list to the node %q",
volumeName,
nodeName)
}
@@ -347,7 +347,7 @@ func (asw *actualStateOfWorld) SetVolumeMountedByNode(

nodeObj.mountedByNode = mounted
volumeObj.nodesAttachedTo[nodeName] = nodeObj
glog.V(4).Infof("SetVolumeMountedByNode volume %v to the node %q mounted %t",
klog.V(4).Infof("SetVolumeMountedByNode volume %v to the node %q mounted %t",
volumeName,
nodeName,
mounted)
@@ -361,7 +361,7 @@ func (asw *actualStateOfWorld) ResetDetachRequestTime(

volumeObj, nodeObj, err := asw.getNodeAndVolume(volumeName, nodeName)
if err != nil {
glog.Errorf("Failed to ResetDetachRequestTime with error: %v", err)
klog.Errorf("Failed to ResetDetachRequestTime with error: %v", err)
return
}
nodeObj.detachRequestedTime = time.Time{}
@@ -381,7 +381,7 @@ func (asw *actualStateOfWorld) SetDetachRequestTime(
if nodeObj.detachRequestedTime.IsZero() {
nodeObj.detachRequestedTime = time.Now()
volumeObj.nodesAttachedTo[nodeName] = nodeObj
glog.V(4).Infof("Set detach request time to current time for volume %v on node %q",
klog.V(4).Infof("Set detach request time to current time for volume %v on node %q",
volumeName,
nodeName)
}
@@ -441,7 +441,7 @@ func (asw *actualStateOfWorld) addVolumeToReportAsAttached(
volumeName v1.UniqueVolumeName, nodeName types.NodeName) {
// In case the volume/node entry is no longer in attachedVolume list, skip the rest
if _, _, err := asw.getNodeAndVolume(volumeName, nodeName); err != nil {
glog.V(4).Infof("Volume %q is no longer attached to node %q", volumeName, nodeName)
klog.V(4).Infof("Volume %q is no longer attached to node %q", volumeName, nodeName)
return
}
nodeToUpdate, nodeToUpdateExists := asw.nodesToUpdateStatusFor[nodeName]
@@ -453,7 +453,7 @@ func (asw *actualStateOfWorld) addVolumeToReportAsAttached(
volumesToReportAsAttached: make(map[v1.UniqueVolumeName]v1.UniqueVolumeName),
}
asw.nodesToUpdateStatusFor[nodeName] = nodeToUpdate
glog.V(4).Infof("Add new node %q to nodesToUpdateStatusFor", nodeName)
klog.V(4).Infof("Add new node %q to nodesToUpdateStatusFor", nodeName)
}
_, nodeToUpdateVolumeExists :=
nodeToUpdate.volumesToReportAsAttached[volumeName]
@@ -461,7 +461,7 @@ func (asw *actualStateOfWorld) addVolumeToReportAsAttached(
nodeToUpdate.statusUpdateNeeded = true
nodeToUpdate.volumesToReportAsAttached[volumeName] = volumeName
asw.nodesToUpdateStatusFor[nodeName] = nodeToUpdate
glog.V(4).Infof("Report volume %q as attached to node %q", volumeName, nodeName)
klog.V(4).Infof("Report volume %q as attached to node %q", volumeName, nodeName)
}
}

@@ -488,7 +488,7 @@ func (asw *actualStateOfWorld) SetNodeStatusUpdateNeeded(nodeName types.NodeName
asw.Lock()
defer asw.Unlock()
if err := asw.updateNodeStatusUpdateNeeded(nodeName, true); err != nil {
glog.Warningf("Failed to update statusUpdateNeeded field in actual state of world: %v", err)
klog.Warningf("Failed to update statusUpdateNeeded field in actual state of world: %v", err)
}
}

@@ -559,6 +559,7 @@ func (asw *actualStateOfWorld) GetAttachedVolumesForNode(
attachedVolumes = append(
attachedVolumes,
getAttachedVolume(&volumeObj, &nodeObj))
break
}
}
}
@@ -622,7 +623,7 @@ func (asw *actualStateOfWorld) GetVolumesToReportAttached() map[types.NodeName][
// of this node will be updated, so set the flag statusUpdateNeeded to false indicating
// the current status is already updated.
if err := asw.updateNodeStatusUpdateNeeded(nodeName, false); err != nil {
glog.Errorf("Failed to update statusUpdateNeeded field when getting volumes: %v", err)
klog.Errorf("Failed to update statusUpdateNeeded field when getting volumes: %v", err)
}
}
51 vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/metrics/BUILD (generated, vendored, new file)
@@ -0,0 +1,51 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
name = "go_default_library",
srcs = ["metrics.go"],
importpath = "k8s.io/kubernetes/pkg/controller/volume/attachdetach/metrics",
visibility = ["//visibility:public"],
deps = [
"//pkg/controller/volume/attachdetach/cache:go_default_library",
"//pkg/controller/volume/attachdetach/util:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

go_test(
name = "go_default_test",
srcs = ["metrics_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/controller:go_default_library",
"//pkg/controller/volume/attachdetach/cache:go_default_library",
"//pkg/controller/volume/attachdetach/testing:go_default_library",
"//pkg/volume/testing:go_default_library",
"//pkg/volume/util/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
],
)

filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)

filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
203 vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/metrics/metrics.go (generated, vendored, new file)
@@ -0,0 +1,203 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package metrics

import (
"sync"

"github.com/prometheus/client_golang/prometheus"
"k8s.io/apimachinery/pkg/labels"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/util"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

const pluginNameNotAvailable = "N/A"

var (
inUseVolumeMetricDesc = prometheus.NewDesc(
prometheus.BuildFQName("", "storage_count", "attachable_volumes_in_use"),
"Measure number of volumes in use",
[]string{"node", "volume_plugin"}, nil)

totalVolumesMetricDesc = prometheus.NewDesc(
prometheus.BuildFQName("", "attachdetach_controller", "total_volumes"),
"Number of volumes in A/D Controller",
[]string{"plugin_name", "state"}, nil)

forcedDetachMetricCounter = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "attachdetach_controller_forced_detaches",
Help: "Number of times the A/D Controller performed a forced detach"})
)
var registerMetrics sync.Once

// Register registers metrics in A/D Controller.
func Register(pvcLister corelisters.PersistentVolumeClaimLister,
pvLister corelisters.PersistentVolumeLister,
podLister corelisters.PodLister,
asw cache.ActualStateOfWorld,
dsw cache.DesiredStateOfWorld,
pluginMgr *volume.VolumePluginMgr) {
registerMetrics.Do(func() {
prometheus.MustRegister(newAttachDetachStateCollector(pvcLister,
podLister,
pvLister,
asw,
dsw,
pluginMgr))
prometheus.MustRegister(forcedDetachMetricCounter)
})
}

type attachDetachStateCollector struct {
pvcLister corelisters.PersistentVolumeClaimLister
podLister corelisters.PodLister
pvLister corelisters.PersistentVolumeLister
asw cache.ActualStateOfWorld
dsw cache.DesiredStateOfWorld
volumePluginMgr *volume.VolumePluginMgr
}

// volumeCount is a map of maps used as a counter, e.g.:
// node 172.168.1.100.ec2.internal has 10 EBS and 3 glusterfs PVC in use:
// {"172.168.1.100.ec2.internal": {"aws-ebs": 10, "glusterfs": 3}}
// state actual_state_of_world contains a total of 10 EBS volumes:
// {"actual_state_of_world": {"aws-ebs": 10}}
type volumeCount map[string]map[string]int64

func (v volumeCount) add(typeKey, counterKey string) {
count, ok := v[typeKey]
if !ok {
count = map[string]int64{}
}
count[counterKey]++
v[typeKey] = count
}

func newAttachDetachStateCollector(
pvcLister corelisters.PersistentVolumeClaimLister,
podLister corelisters.PodLister,
pvLister corelisters.PersistentVolumeLister,
asw cache.ActualStateOfWorld,
dsw cache.DesiredStateOfWorld,
pluginMgr *volume.VolumePluginMgr) *attachDetachStateCollector {
return &attachDetachStateCollector{pvcLister, podLister, pvLister, asw, dsw, pluginMgr}
}

// Check if our collector implements necessary collector interface
var _ prometheus.Collector = &attachDetachStateCollector{}

func (collector *attachDetachStateCollector) Describe(ch chan<- *prometheus.Desc) {
ch <- inUseVolumeMetricDesc
ch <- totalVolumesMetricDesc
}

func (collector *attachDetachStateCollector) Collect(ch chan<- prometheus.Metric) {
nodeVolumeMap := collector.getVolumeInUseCount()
for nodeName, pluginCount := range nodeVolumeMap {
for pluginName, count := range pluginCount {
metric, err := prometheus.NewConstMetric(inUseVolumeMetricDesc,
prometheus.GaugeValue,
float64(count),
string(nodeName),
pluginName)
if err != nil {
klog.Warningf("Failed to create metric : %v", err)
}
ch <- metric
}
}

stateVolumeMap := collector.getTotalVolumesCount()
for stateName, pluginCount := range stateVolumeMap {
for pluginName, count := range pluginCount {
metric, err := prometheus.NewConstMetric(totalVolumesMetricDesc,
prometheus.GaugeValue,
float64(count),
pluginName,
string(stateName))
if err != nil {
klog.Warningf("Failed to create metric : %v", err)
}
ch <- metric
}
}
}

func (collector *attachDetachStateCollector) getVolumeInUseCount() volumeCount {
pods, err := collector.podLister.List(labels.Everything())
if err != nil {
klog.Errorf("Error getting pod list")
return nil
}

nodeVolumeMap := make(volumeCount)
for _, pod := range pods {
if len(pod.Spec.Volumes) <= 0 {
continue
}

if pod.Spec.NodeName == "" {
continue
}
for _, podVolume := range pod.Spec.Volumes {
volumeSpec, err := util.CreateVolumeSpec(podVolume, pod.Namespace, collector.pvcLister, collector.pvLister)
if err != nil {
continue
}
volumePlugin, err := collector.volumePluginMgr.FindPluginBySpec(volumeSpec)
if err != nil {
continue
}
pluginName := volumeutil.GetFullQualifiedPluginNameForVolume(volumePlugin.GetPluginName(), volumeSpec)
nodeVolumeMap.add(pod.Spec.NodeName, pluginName)
}
}
return nodeVolumeMap
}

func (collector *attachDetachStateCollector) getTotalVolumesCount() volumeCount {
stateVolumeMap := make(volumeCount)
for _, v := range collector.dsw.GetVolumesToAttach() {
if plugin, err := collector.volumePluginMgr.FindPluginBySpec(v.VolumeSpec); err == nil {
pluginName := pluginNameNotAvailable
if plugin != nil {
pluginName = volumeutil.GetFullQualifiedPluginNameForVolume(plugin.GetPluginName(), v.VolumeSpec)
}
stateVolumeMap.add("desired_state_of_world", pluginName)
}
}
for _, v := range collector.asw.GetAttachedVolumes() {
if plugin, err := collector.volumePluginMgr.FindPluginBySpec(v.VolumeSpec); err == nil {
pluginName := pluginNameNotAvailable
if plugin != nil {
pluginName = volumeutil.GetFullQualifiedPluginNameForVolume(plugin.GetPluginName(), v.VolumeSpec)
}
stateVolumeMap.add("actual_state_of_world", pluginName)
}
}
return stateVolumeMap
}

// RecordForcedDetachMetric register a forced detach metric.
func RecordForcedDetachMetric() {
forcedDetachMetricCounter.Inc()
}
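Editor's note: metrics.go above implements a custom prometheus.Collector whose values are computed at scrape time and emitted as const metrics, using a volumeCount-style nested map as the counter. The following is a hypothetical, self-contained sketch of that pattern (not part of the vendored code); the demoCollector type, the hard-coded counts, and the :9090 listen address are illustrative assumptions.

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

var demoDesc = prometheus.NewDesc(
	prometheus.BuildFQName("", "storage_count", "attachable_volumes_in_use"),
	"Measure number of volumes in use",
	[]string{"node", "volume_plugin"}, nil)

// demoCollector plays the role of attachDetachStateCollector; the counts are
// hard-coded here instead of being derived from listers and caches.
type demoCollector struct{}

func (c demoCollector) Describe(ch chan<- *prometheus.Desc) { ch <- demoDesc }

func (c demoCollector) Collect(ch chan<- prometheus.Metric) {
	// volumeCount-style nested map: node -> plugin -> count, rebuilt per scrape.
	counts := map[string]map[string]int64{
		"node-a": {"kubernetes.io/gce-pd": 2},
	}
	for node, perPlugin := range counts {
		for plugin, n := range perPlugin {
			ch <- prometheus.MustNewConstMetric(demoDesc, prometheus.GaugeValue, float64(n), node, plugin)
		}
	}
}

func main() {
	prometheus.MustRegister(demoCollector{})
	http.Handle("/metrics", promhttp.Handler())
	http.ListenAndServe(":9090", nil)
}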
180 vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/metrics/metrics_test.go (generated, vendored, new file)
@@ -0,0 +1,180 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package metrics

import (
"testing"

"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8stypes "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing"
volumetesting "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/kubernetes/pkg/volume/util/types"
)

func TestVolumesInUseMetricCollection(t *testing.T) {
fakeVolumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
fakeClient := &fake.Clientset{}

fakeInformerFactory := informers.NewSharedInformerFactory(fakeClient, controller.NoResyncPeriodFunc())
fakePodInformer := fakeInformerFactory.Core().V1().Pods()
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "metric-test-pod",
UID: "metric-test-pod-uid",
Namespace: "metric-test",
},
Spec: v1.PodSpec{
NodeName: "metric-test-host",
Volumes: []v1.Volume{
{
Name: "metric-test-volume-name",
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: "metric-test-pvc",
},
},
},
},
},
Status: v1.PodStatus{
Phase: v1.PodPhase("Running"),
},
}

fakePodInformer.Informer().GetStore().Add(pod)
pvcInformer := fakeInformerFactory.Core().V1().PersistentVolumeClaims()
pvInformer := fakeInformerFactory.Core().V1().PersistentVolumes()

pvc := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "metric-test-pvc",
Namespace: "metric-test",
UID: "metric-test-pvc-1",
},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany, v1.ReadWriteOnce},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse("2G"),
},
},
VolumeName: "test-metric-pv-1",
},
Status: v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimBound,
},
}
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
UID: "test-metric-pv-1",
Name: "test-metric-pv-1",
},
Spec: v1.PersistentVolumeSpec{
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse("5G"),
},
PersistentVolumeSource: v1.PersistentVolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{},
},
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce, v1.ReadOnlyMany},
// this one we're pretending is already bound
ClaimRef: &v1.ObjectReference{UID: "metric-test-pvc-1", Namespace: "metric-test"},
},
}
pvcInformer.Informer().GetStore().Add(pvc)
pvInformer.Informer().GetStore().Add(pv)
pvcLister := pvcInformer.Lister()
pvLister := pvInformer.Lister()

metricCollector := newAttachDetachStateCollector(
pvcLister,
fakePodInformer.Lister(),
pvLister,
nil,
nil,
fakeVolumePluginMgr)
nodeUseMap := metricCollector.getVolumeInUseCount()
if len(nodeUseMap) < 1 {
t.Errorf("Expected one volume in use got %d", len(nodeUseMap))
}
testNodeMetric := nodeUseMap["metric-test-host"]
pluginUseCount, ok := testNodeMetric["fake-plugin"]
if !ok {
t.Errorf("Expected fake plugin pvc got nothing")
}

if pluginUseCount < 1 {
t.Errorf("Expected at least in-use volume metric got %d", pluginUseCount)
}
}

func TestTotalVolumesMetricCollection(t *testing.T) {
fakeVolumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
dsw := cache.NewDesiredStateOfWorld(fakeVolumePluginMgr)
asw := cache.NewActualStateOfWorld(fakeVolumePluginMgr)
podName := "pod-uid"
volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := k8stypes.NodeName("node-name")

dsw.AddNode(nodeName, false)
_, err := dsw.AddPod(types.UniquePodName(podName), controllervolumetesting.NewPod(podName, podName), volumeSpec, nodeName)
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
asw.AddVolumeNode(volumeName, volumeSpec, nodeName, "")

metricCollector := newAttachDetachStateCollector(
nil,
nil,
nil,
asw,
dsw,
fakeVolumePluginMgr)

totalVolumesMap := metricCollector.getTotalVolumesCount()
if len(totalVolumesMap) != 2 {
t.Errorf("Expected 2 states, got %d", len(totalVolumesMap))
}

dswCount, ok := totalVolumesMap["desired_state_of_world"]
if !ok {
t.Errorf("Expected desired_state_of_world, got nothing")
}

fakePluginCount := dswCount["fake-plugin"]
if fakePluginCount != 1 {
t.Errorf("Expected 1 fake-plugin volume in DesiredStateOfWorld, got %d", fakePluginCount)
}

aswCount, ok := totalVolumesMap["actual_state_of_world"]
if !ok {
t.Errorf("Expected actual_state_of_world, got nothing")
}

fakePluginCount = aswCount["fake-plugin"]
if fakePluginCount != 1 {
t.Errorf("Expected 1 fake-plugin volume in ActualStateOfWorld, got %d", fakePluginCount)
}
}
24 vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/populator/BUILD (generated, vendored)
@@ -15,13 +15,13 @@ go_library(
"//pkg/controller/volume/attachdetach/util:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@@ -47,10 +47,10 @@ go_test(
"//pkg/controller/volume/attachdetach/cache:go_default_library",
"//pkg/volume/testing:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
],
)
@@ -22,7 +22,7 @@ import (
"fmt"
"time"

"github.com/golang/glog"
"k8s.io/klog"

"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
@@ -92,7 +92,7 @@ func (dswp *desiredStateOfWorldPopulator) populatorLoopFunc() func() {
// findAndAddActivePods is called periodically, independently of the main
// populator loop.
if time.Since(dswp.timeOfLastListPods) < dswp.listPodsRetryDuration {
glog.V(5).Infof(
klog.V(5).Infof(
"Skipping findAndAddActivePods(). Not permitted until %v (listPodsRetryDuration %v).",
dswp.timeOfLastListPods.Add(dswp.listPodsRetryDuration),
dswp.listPodsRetryDuration)
@@ -109,7 +109,7 @@ func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() {
for dswPodUID, dswPodToAdd := range dswp.desiredStateOfWorld.GetPodToAdd() {
dswPodKey, err := kcache.MetaNamespaceKeyFunc(dswPodToAdd.Pod)
if err != nil {
glog.Errorf("MetaNamespaceKeyFunc failed for pod %q (UID %q) with: %v", dswPodKey, dswPodUID, err)
klog.Errorf("MetaNamespaceKeyFunc failed for pod %q (UID %q) with: %v", dswPodKey, dswPodUID, err)
continue
}

@@ -124,7 +124,7 @@ func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() {
case errors.IsNotFound(err):
// if we can't find the pod, we need to delete it below
case err != nil:
glog.Errorf("podLister Get failed for pod %q (UID %q) with %v", dswPodKey, dswPodUID, err)
klog.Errorf("podLister Get failed for pod %q (UID %q) with %v", dswPodKey, dswPodUID, err)
continue
default:
volumeActionFlag := util.DetermineVolumeAction(
@@ -136,7 +136,7 @@ func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() {
informerPodUID := volutil.GetUniquePodName(informerPod)
// Check whether the unique identifier of the pod from dsw matches the one retrieved from pod informer
if informerPodUID == dswPodUID {
glog.V(10).Infof("Verified pod %q (UID %q) from dsw exists in pod informer.", dswPodKey, dswPodUID)
klog.V(10).Infof("Verified pod %q (UID %q) from dsw exists in pod informer.", dswPodKey, dswPodUID)
continue
}
}
@@ -144,7 +144,7 @@ func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() {

// the pod from dsw does not exist in pod informer, or it does not match the unique identifier retrieved
// from the informer, delete it from dsw
glog.V(1).Infof("Removing pod %q (UID %q) from dsw because it does not exist in pod informer.", dswPodKey, dswPodUID)
klog.V(1).Infof("Removing pod %q (UID %q) from dsw because it does not exist in pod informer.", dswPodKey, dswPodUID)
dswp.desiredStateOfWorld.DeletePod(dswPodUID, dswPodToAdd.VolumeName, dswPodToAdd.NodeName)
}
}
@@ -152,7 +152,7 @@ func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() {
func (dswp *desiredStateOfWorldPopulator) findAndAddActivePods() {
pods, err := dswp.podLister.List(labels.Everything())
if err != nil {
glog.Errorf("podLister List failed: %v", err)
klog.Errorf("podLister List failed: %v", err)
return
}
dswp.timeOfLastListPods = time.Now()
21 vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler/BUILD (generated, vendored)
@@ -12,16 +12,17 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler",
deps = [
"//pkg/controller/volume/attachdetach/cache:go_default_library",
"//pkg/controller/volume/attachdetach/metrics:go_default_library",
"//pkg/controller/volume/attachdetach/statusupdater:go_default_library",
"//pkg/kubelet/events:go_default_library",
"//pkg/util/goroutinemap/exponentialbackoff:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util/operationexecutor:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@@ -38,11 +39,11 @@ go_test(
"//pkg/volume/testing:go_default_library",
"//pkg/volume/util/operationexecutor:go_default_library",
"//pkg/volume/util/types:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
],
)
54
vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler/reconciler.go
generated
vendored
54
vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler/reconciler.go
generated
vendored
@ -24,12 +24,13 @@ import (
"strings"
"time"

"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/record"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/metrics"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater"
kevents "k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff"
@ -109,11 +110,11 @@ func (rc *reconciler) reconciliationLoopFunc() func() {
rc.reconcile()

if rc.disableReconciliationSync {
glog.V(5).Info("Skipping reconciling attached volumes still attached since it is disabled via the command line.")
klog.V(5).Info("Skipping reconciling attached volumes still attached since it is disabled via the command line.")
} else if rc.syncDuration < time.Second {
glog.V(5).Info("Skipping reconciling attached volumes still attached since it is set to less than one second via the command line.")
klog.V(5).Info("Skipping reconciling attached volumes still attached since it is set to less than one second via the command line.")
} else if time.Since(rc.timeOfLastSync) > rc.syncDuration {
glog.V(5).Info("Starting reconciling attached volumes still attached")
klog.V(5).Info("Starting reconciling attached volumes still attached")
rc.sync()
}
}
@ -151,10 +152,6 @@ func (rc *reconciler) isMultiAttachForbidden(volumeSpec *volume.Spec) bool {
// multi-attach. We trust in the individual volume implementations to not allow unsupported access modes
if volumeSpec.PersistentVolume != nil {
// Check for persistent volume types which do not fail when trying to multi-attach
if volumeSpec.PersistentVolume.Spec.VsphereVolume != nil {
return false
}

if len(volumeSpec.PersistentVolume.Spec.AccessModes) == 0 {
// No access mode specified so we don't know for sure. Let the attacher fail if needed
return false
@ -187,21 +184,21 @@ func (rc *reconciler) reconcile() {
// may pass while at the same time the volume leaves the pending state, resulting in
// double detach attempts
if rc.attacherDetacher.IsOperationPending(attachedVolume.VolumeName, "") {
glog.V(10).Infof("Operation for volume %q is already running. Can't start detach for %q", attachedVolume.VolumeName, attachedVolume.NodeName)
klog.V(10).Infof("Operation for volume %q is already running. Can't start detach for %q", attachedVolume.VolumeName, attachedVolume.NodeName)
continue
}

// Set the detach request time
elapsedTime, err := rc.actualStateOfWorld.SetDetachRequestTime(attachedVolume.VolumeName, attachedVolume.NodeName)
if err != nil {
glog.Errorf("Cannot trigger detach because it fails to set detach request time with error %v", err)
klog.Errorf("Cannot trigger detach because it fails to set detach request time with error %v", err)
continue
}
// Check whether timeout has reached the maximum waiting time
timeout := elapsedTime > rc.maxWaitForUnmountDuration
// Check whether volume is still mounted. Skip detach if it is still mounted unless timeout
if attachedVolume.MountedByNode && !timeout {
glog.V(12).Infof(attachedVolume.GenerateMsgDetailed("Cannot detach volume because it is still mounted", ""))
klog.V(12).Infof(attachedVolume.GenerateMsgDetailed("Cannot detach volume because it is still mounted", ""))
continue
}

@ -209,7 +206,7 @@ func (rc *reconciler) reconcile() {
// If it fails to update node status, skip detach volume
err = rc.actualStateOfWorld.RemoveVolumeFromReportAsAttached(attachedVolume.VolumeName, attachedVolume.NodeName)
if err != nil {
glog.V(5).Infof("RemoveVolumeFromReportAsAttached failed while removing volume %q from node %q with: %v",
klog.V(5).Infof("RemoveVolumeFromReportAsAttached failed while removing volume %q from node %q with: %v",
attachedVolume.VolumeName,
attachedVolume.NodeName,
err)
@ -219,26 +216,27 @@ func (rc *reconciler) reconcile() {
err = rc.nodeStatusUpdater.UpdateNodeStatuses()
if err != nil {
// Skip detaching this volume if unable to update node status
glog.Errorf(attachedVolume.GenerateErrorDetailed("UpdateNodeStatuses failed while attempting to report volume as attached", err).Error())
klog.Errorf(attachedVolume.GenerateErrorDetailed("UpdateNodeStatuses failed while attempting to report volume as attached", err).Error())
continue
}

// Trigger detach volume which requires verifing safe to detach step
// If timeout is true, skip verifySafeToDetach check
glog.V(5).Infof(attachedVolume.GenerateMsgDetailed("Starting attacherDetacher.DetachVolume", ""))
klog.V(5).Infof(attachedVolume.GenerateMsgDetailed("Starting attacherDetacher.DetachVolume", ""))
verifySafeToDetach := !timeout
err = rc.attacherDetacher.DetachVolume(attachedVolume.AttachedVolume, verifySafeToDetach, rc.actualStateOfWorld)
if err == nil {
if !timeout {
glog.Infof(attachedVolume.GenerateMsgDetailed("attacherDetacher.DetachVolume started", ""))
klog.Infof(attachedVolume.GenerateMsgDetailed("attacherDetacher.DetachVolume started", ""))
} else {
glog.Warningf(attachedVolume.GenerateMsgDetailed("attacherDetacher.DetachVolume started", fmt.Sprintf("This volume is not safe to detach, but maxWaitForUnmountDuration %v expired, force detaching", rc.maxWaitForUnmountDuration)))
metrics.RecordForcedDetachMetric()
klog.Warningf(attachedVolume.GenerateMsgDetailed("attacherDetacher.DetachVolume started", fmt.Sprintf("This volume is not safe to detach, but maxWaitForUnmountDuration %v expired, force detaching", rc.maxWaitForUnmountDuration)))
}
}
if err != nil && !exponentialbackoff.IsExponentialBackoff(err) {
// Ignore exponentialbackoff.IsExponentialBackoff errors, they are expected.
// Log all other errors.
glog.Errorf(attachedVolume.GenerateErrorDetailed("attacherDetacher.DetachVolume failed to start", err).Error())
klog.Errorf(attachedVolume.GenerateErrorDetailed("attacherDetacher.DetachVolume failed to start", err).Error())
}
}
}
@ -248,7 +246,7 @@ func (rc *reconciler) reconcile() {
// Update Node Status
err := rc.nodeStatusUpdater.UpdateNodeStatuses()
if err != nil {
glog.Warningf("UpdateNodeStatuses failed with: %v", err)
klog.Warningf("UpdateNodeStatuses failed with: %v", err)
}
}

@ -257,16 +255,16 @@ func (rc *reconciler) attachDesiredVolumes() {
for _, volumeToAttach := range rc.desiredStateOfWorld.GetVolumesToAttach() {
if rc.actualStateOfWorld.VolumeNodeExists(volumeToAttach.VolumeName, volumeToAttach.NodeName) {
// Volume/Node exists, touch it to reset detachRequestedTime
if glog.V(5) {
glog.Infof(volumeToAttach.GenerateMsgDetailed("Volume attached--touching", ""))
if klog.V(5) {
klog.Infof(volumeToAttach.GenerateMsgDetailed("Volume attached--touching", ""))
}
rc.actualStateOfWorld.ResetDetachRequestTime(volumeToAttach.VolumeName, volumeToAttach.NodeName)
continue
}
// Don't even try to start an operation if there is already one running
if rc.attacherDetacher.IsOperationPending(volumeToAttach.VolumeName, "") {
if glog.V(10) {
glog.Infof("Operation for volume %q is already running. Can't start attach for %q", volumeToAttach.VolumeName, volumeToAttach.NodeName)
if klog.V(10) {
klog.Infof("Operation for volume %q is already running. Can't start attach for %q", volumeToAttach.VolumeName, volumeToAttach.NodeName)
}
continue
}
@ -283,17 +281,17 @@ func (rc *reconciler) attachDesiredVolumes() {
}

// Volume/Node doesn't exist, spawn a goroutine to attach it
if glog.V(5) {
glog.Infof(volumeToAttach.GenerateMsgDetailed("Starting attacherDetacher.AttachVolume", ""))
if klog.V(5) {
klog.Infof(volumeToAttach.GenerateMsgDetailed("Starting attacherDetacher.AttachVolume", ""))
}
err := rc.attacherDetacher.AttachVolume(volumeToAttach.VolumeToAttach, rc.actualStateOfWorld)
if err == nil {
glog.Infof(volumeToAttach.GenerateMsgDetailed("attacherDetacher.AttachVolume started", ""))
klog.Infof(volumeToAttach.GenerateMsgDetailed("attacherDetacher.AttachVolume started", ""))
}
if err != nil && !exponentialbackoff.IsExponentialBackoff(err) {
// Ignore exponentialbackoff.IsExponentialBackoff errors, they are expected.
// Log all other errors.
glog.Errorf(volumeToAttach.GenerateErrorDetailed("attacherDetacher.AttachVolume failed to start", err).Error())
klog.Errorf(volumeToAttach.GenerateErrorDetailed("attacherDetacher.AttachVolume failed to start", err).Error())
}
}
}
@ -328,7 +326,7 @@ func (rc *reconciler) reportMultiAttachError(volumeToAttach cache.VolumeToAttach
// Log detailed message to system admin
nodeList := strings.Join(otherNodesStr, ", ")
detailedMsg := volumeToAttach.GenerateMsgDetailed("Multi-Attach error", fmt.Sprintf("Volume is already exclusively attached to node %s and can't be attached to another", nodeList))
glog.Warningf(detailedMsg)
klog.Warningf(detailedMsg)
return
}

@ -369,5 +367,5 @@ func (rc *reconciler) reportMultiAttachError(volumeToAttach cache.VolumeToAttach
podNames = append(podNames, pod.Namespace+"/"+pod.Name)
}
detailedMsg := volumeToAttach.GenerateMsgDetailed("Multi-Attach error", fmt.Sprintf("Volume is already used by pods %s on node %s", strings.Join(podNames, ", "), strings.Join(otherNodesStr, ", ")))
glog.Warningf(detailedMsg)
klog.Warningf(detailedMsg)
}
12
vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater/BUILD
generated
vendored
12
vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater/BUILD
generated
vendored
@ -15,12 +15,12 @@ go_library(
deps = [
"//pkg/controller/volume/attachdetach/cache:go_default_library",
"//pkg/util/node:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@ -19,7 +19,7 @@ limitations under the License.
package statusupdater

import (
"github.com/golang/glog"
"k8s.io/klog"

"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
@ -65,7 +65,7 @@ func (nsu *nodeStatusUpdater) UpdateNodeStatuses() error {
if errors.IsNotFound(err) {
// If node does not exist, its status cannot be updated.
// Do nothing so that there is no retry until node is created.
glog.V(2).Infof(
klog.V(2).Infof(
"Could not update node status. Failed to find node %q in NodeInformer cache. Error: '%v'",
nodeName,
err)
@ -73,7 +73,7 @@ func (nsu *nodeStatusUpdater) UpdateNodeStatuses() error {
} else if err != nil {
// For all other errors, log error and reset flag statusUpdateNeeded
// back to true to indicate this node status needs to be updated again.
glog.V(2).Infof("Error retrieving nodes from node lister. Error: %v", err)
klog.V(2).Infof("Error retrieving nodes from node lister. Error: %v", err)
nsu.actualStateOfWorld.SetNodeStatusUpdateNeeded(nodeName)
continue
}
@ -83,7 +83,7 @@ func (nsu *nodeStatusUpdater) UpdateNodeStatuses() error {
// to indicate this node status needs to be updated again
nsu.actualStateOfWorld.SetNodeStatusUpdateNeeded(nodeName)

glog.V(2).Infof(
klog.V(2).Infof(
"Could not update node status for %q; re-marking for update. %v",
nodeName,
err)
@ -103,6 +103,6 @@ func (nsu *nodeStatusUpdater) updateNodeStatus(nodeName types.NodeName, nodeObj
return err
}

glog.V(4).Infof("Updating status %q for node %q succeeded. VolumesAttached: %v", patchBytes, nodeName, attachedVolumes)
klog.V(4).Infof("Updating status %q for node %q succeeded. VolumesAttached: %v", patchBytes, nodeName, attachedVolumes)
return nil
}
16
vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing/BUILD
generated
vendored
16
vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing/BUILD
generated
vendored
@ -12,14 +12,14 @@ go_library(
deps = [
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/testing:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
24
vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing/testvolumespec.go
generated
vendored
24
vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing/testvolumespec.go
generated
vendored
@ -21,7 +21,6 @@ import (
"sync"
"time"

"github.com/golang/glog"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@ -29,6 +28,7 @@ import (
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
)
@ -237,7 +237,7 @@ func (plugin *TestPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
plugin.pluginLock.Lock()
defer plugin.pluginLock.Unlock()
if spec == nil {
glog.Errorf("GetVolumeName called with nil volume spec")
klog.Errorf("GetVolumeName called with nil volume spec")
plugin.ErrorEncountered = true
}
return spec.Name(), nil
@ -247,7 +247,7 @@ func (plugin *TestPlugin) CanSupport(spec *volume.Spec) bool {
plugin.pluginLock.Lock()
defer plugin.pluginLock.Unlock()
if spec == nil {
glog.Errorf("CanSupport called with nil volume spec")
klog.Errorf("CanSupport called with nil volume spec")
plugin.ErrorEncountered = true
}
return true
@ -261,7 +261,7 @@ func (plugin *TestPlugin) NewMounter(spec *volume.Spec, podRef *v1.Pod, opts vol
plugin.pluginLock.Lock()
defer plugin.pluginLock.Unlock()
if spec == nil {
glog.Errorf("NewMounter called with nil volume spec")
klog.Errorf("NewMounter called with nil volume spec")
plugin.ErrorEncountered = true
}
return nil, nil
@ -294,6 +294,10 @@ func (plugin *TestPlugin) NewAttacher() (volume.Attacher, error) {
return &attacher, nil
}

func (plugin *TestPlugin) NewDeviceMounter() (volume.DeviceMounter, error) {
return plugin.NewAttacher()
}

func (plugin *TestPlugin) NewDetacher() (volume.Detacher, error) {
detacher := testPluginDetacher{
detachedVolumeMap: plugin.detachedVolumeMap,
@ -302,6 +306,10 @@ func (plugin *TestPlugin) NewDetacher() (volume.Detacher, error) {
return &detacher, nil
}

func (plugin *TestPlugin) NewDeviceUnmounter() (volume.DeviceUnmounter, error) {
return plugin.NewDetacher()
}

func (plugin *TestPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
return []string{}, nil
}
@ -365,7 +373,7 @@ func (attacher *testPluginAttacher) Attach(spec *volume.Spec, nodeName types.Nod
defer attacher.pluginLock.Unlock()
if spec == nil {
*attacher.ErrorEncountered = true
glog.Errorf("Attach called with nil volume spec")
klog.Errorf("Attach called with nil volume spec")
return "", fmt.Errorf("Attach called with nil volume spec")
}
attacher.attachedVolumeMap[string(nodeName)] = append(attacher.attachedVolumeMap[string(nodeName)], spec.Name())
@ -381,7 +389,7 @@ func (attacher *testPluginAttacher) WaitForAttach(spec *volume.Spec, devicePath
defer attacher.pluginLock.Unlock()
if spec == nil {
*attacher.ErrorEncountered = true
glog.Errorf("WaitForAttach called with nil volume spec")
klog.Errorf("WaitForAttach called with nil volume spec")
return "", fmt.Errorf("WaitForAttach called with nil volume spec")
}
fakePath := fmt.Sprintf("%s/%s", devicePath, spec.Name())
@ -393,7 +401,7 @@ func (attacher *testPluginAttacher) GetDeviceMountPath(spec *volume.Spec) (strin
defer attacher.pluginLock.Unlock()
if spec == nil {
*attacher.ErrorEncountered = true
glog.Errorf("GetDeviceMountPath called with nil volume spec")
klog.Errorf("GetDeviceMountPath called with nil volume spec")
return "", fmt.Errorf("GetDeviceMountPath called with nil volume spec")
}
return "", nil
@ -404,7 +412,7 @@ func (attacher *testPluginAttacher) MountDevice(spec *volume.Spec, devicePath st
defer attacher.pluginLock.Unlock()
if spec == nil {
*attacher.ErrorEncountered = true
glog.Errorf("MountDevice called with nil volume spec")
klog.Errorf("MountDevice called with nil volume spec")
return fmt.Errorf("MountDevice called with nil volume spec")
}
return nil
8
vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/util/BUILD
generated
vendored
8
vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/util/BUILD
generated
vendored
@ -13,10 +13,10 @@ go_library(
"//pkg/controller/volume/attachdetach/cache:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
24
vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/util/util.go
generated
vendored
24
vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/util/util.go
generated
vendored
@ -19,10 +19,10 @@ package util
import (
"fmt"

"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
@ -32,7 +32,7 @@ import (
// specified volume. It dereference any PVC to get PV objects, if needed.
func CreateVolumeSpec(podVolume v1.Volume, podNamespace string, pvcLister corelisters.PersistentVolumeClaimLister, pvLister corelisters.PersistentVolumeLister) (*volume.Spec, error) {
if pvcSource := podVolume.VolumeSource.PersistentVolumeClaim; pvcSource != nil {
glog.V(10).Infof(
klog.V(10).Infof(
"Found PVC, ClaimName: %q/%q",
podNamespace,
pvcSource.ClaimName)
@ -48,7 +48,7 @@ func CreateVolumeSpec(podVolume v1.Volume, podNamespace string, pvcLister coreli
err)
}

glog.V(10).Infof(
klog.V(10).Infof(
"Found bound PV for PVC (ClaimName %q/%q pvcUID %v): pvName=%q",
podNamespace,
pvcSource.ClaimName,
@ -66,9 +66,9 @@ func CreateVolumeSpec(podVolume v1.Volume, podNamespace string, pvcLister coreli
err)
}

glog.V(10).Infof(
klog.V(10).Infof(
"Extracted volumeSpec (%v) from bound PV (pvName %q) and PVC (ClaimName %q/%q pvcUID %v)",
volumeSpec.Name,
volumeSpec.Name(),
pvName,
podNamespace,
pvcSource.ClaimName,
@ -166,7 +166,7 @@ func ProcessPodVolumes(pod *v1.Pod, addVolumes bool, desiredStateOfWorld cache.D
}

if len(pod.Spec.Volumes) <= 0 {
glog.V(10).Infof("Skipping processing of pod %q/%q: it has no volumes.",
klog.V(10).Infof("Skipping processing of pod %q/%q: it has no volumes.",
pod.Namespace,
pod.Name)
return
@ -174,7 +174,7 @@ func ProcessPodVolumes(pod *v1.Pod, addVolumes bool, desiredStateOfWorld cache.D

nodeName := types.NodeName(pod.Spec.NodeName)
if nodeName == "" {
glog.V(10).Infof(
klog.V(10).Infof(
"Skipping processing of pod %q/%q: it is not scheduled to a node.",
pod.Namespace,
pod.Name)
@ -183,7 +183,7 @@ func ProcessPodVolumes(pod *v1.Pod, addVolumes bool, desiredStateOfWorld cache.D
// If the node the pod is scheduled to does not exist in the desired
// state of the world data structure, that indicates the node is not
// yet managed by the controller. Therefore, ignore the pod.
glog.V(4).Infof(
klog.V(4).Infof(
"Skipping processing of pod %q/%q: it is scheduled to node %q which is not managed by the controller.",
pod.Namespace,
pod.Name,
@ -195,7 +195,7 @@ func ProcessPodVolumes(pod *v1.Pod, addVolumes bool, desiredStateOfWorld cache.D
for _, podVolume := range pod.Spec.Volumes {
volumeSpec, err := CreateVolumeSpec(podVolume, pod.Namespace, pvcLister, pvLister)
if err != nil {
glog.V(10).Infof(
klog.V(10).Infof(
"Error processing volume %q for pod %q/%q: %v",
podVolume.Name,
pod.Namespace,
@ -207,7 +207,7 @@ func ProcessPodVolumes(pod *v1.Pod, addVolumes bool, desiredStateOfWorld cache.D
attachableVolumePlugin, err :=
volumePluginMgr.FindAttachablePluginBySpec(volumeSpec)
if err != nil || attachableVolumePlugin == nil {
glog.V(10).Infof(
klog.V(10).Infof(
"Skipping volume %q for pod %q/%q: it does not implement attacher interface. err=%v",
podVolume.Name,
pod.Namespace,
@ -222,7 +222,7 @@ func ProcessPodVolumes(pod *v1.Pod, addVolumes bool, desiredStateOfWorld cache.D
_, err := desiredStateOfWorld.AddPod(
uniquePodName, pod, volumeSpec, nodeName)
if err != nil {
glog.V(10).Infof(
klog.V(10).Infof(
"Failed to add volume %q for pod %q/%q to desiredStateOfWorld. %v",
podVolume.Name,
pod.Namespace,
@ -235,7 +235,7 @@ func ProcessPodVolumes(pod *v1.Pod, addVolumes bool, desiredStateOfWorld cache.D
uniqueVolumeName, err := util.GetUniqueVolumeNameFromSpec(
attachableVolumePlugin, volumeSpec)
if err != nil {
glog.V(10).Infof(
klog.V(10).Infof(
"Failed to delete volume %q for pod %q/%q from desiredStateOfWorld. GetUniqueVolumeNameFromSpec failed with %v",
podVolume.Name,
pod.Namespace,
1
vendor/k8s.io/kubernetes/pkg/controller/volume/events/event.go
generated
vendored
1
vendor/k8s.io/kubernetes/pkg/controller/volume/events/event.go
generated
vendored
@ -30,4 +30,5 @@ const (
ProvisioningCleanupFailed = "ProvisioningCleanupFailed"
ProvisioningSucceeded = "ProvisioningSucceeded"
WaitForFirstConsumer = "WaitForFirstConsumer"
ExternalExpanding = "ExternalExpanding"
)
40
vendor/k8s.io/kubernetes/pkg/controller/volume/expand/BUILD
generated
vendored
40
vendor/k8s.io/kubernetes/pkg/controller/volume/expand/BUILD
generated
vendored
@ -1,9 +1,6 @@
package(default_visibility = ["//visibility:public"])

load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
name = "go_default_library",
@ -14,31 +11,32 @@ go_library(
],
importpath = "k8s.io/kubernetes/pkg/controller/volume/expand",
deps = [
"//pkg/cloudprovider:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/volume/events:go_default_library",
"//pkg/controller/volume/expand/cache:go_default_library",
"//pkg/util/goroutinemap/exponentialbackoff:go_default_library",
"//pkg/util/io:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/operationexecutor:go_default_library",
"//pkg/volume/util/volumepathhandler:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/authentication/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/api/authentication/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/cloud-provider:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
23
vendor/k8s.io/kubernetes/pkg/controller/volume/expand/cache/BUILD
generated
vendored
23
vendor/k8s.io/kubernetes/pkg/controller/volume/expand/cache/BUILD
generated
vendored
@ -11,16 +11,15 @@ go_library(
srcs = ["volume_resize_map.go"],
importpath = "k8s.io/kubernetes/pkg/controller/volume/expand/cache",
deps = [
"//pkg/util/strings:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/types:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@ -43,10 +42,10 @@ go_test(
embed = [":go_default_library"],
deps = [
"//pkg/volume/util/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
],
)
15
vendor/k8s.io/kubernetes/pkg/controller/volume/expand/cache/volume_resize_map.go
generated
vendored
15
vendor/k8s.io/kubernetes/pkg/controller/volume/expand/cache/volume_resize_map.go
generated
vendored
@ -21,14 +21,13 @@ import (
"fmt"
"sync"

"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
commontypes "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/strategicpatch"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/util/strings"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/types"
)
@ -78,7 +77,7 @@ func (pvcr *PVCWithResizeRequest) UniquePVCKey() types.UniquePVCName {

// QualifiedName returns namespace and name combination of the PVC
func (pvcr *PVCWithResizeRequest) QualifiedName() string {
return strings.JoinQualifiedName(pvcr.PVC.Namespace, pvcr.PVC.Name)
return util.GetPersistentVolumeClaimQualifiedName(pvcr.PVC)
}

// NewVolumeResizeMap returns new VolumeResizeMap which acts as a cache
@ -98,7 +97,7 @@ func NewVolumeResizeMap(kubeClient clientset.Interface) VolumeResizeMap {
// the PVC and hopefully after a no-op resize in volume plugin, PVC will be updated with right values as well.
func (resizeMap *volumeResizeMap) AddPVCUpdate(pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) {
if pv.Spec.ClaimRef == nil || pvc.Namespace != pv.Spec.ClaimRef.Namespace || pvc.Name != pv.Spec.ClaimRef.Name {
glog.V(4).Infof("Persistent Volume is not bound to PVC being updated : %s", util.ClaimToClaimKey(pvc))
klog.V(4).Infof("Persistent Volume is not bound to PVC being updated : %s", util.ClaimToClaimKey(pvc))
return
}

@ -113,7 +112,7 @@ func (resizeMap *volumeResizeMap) AddPVCUpdate(pvc *v1.PersistentVolumeClaim, pv
return
}

glog.V(4).Infof("Adding pvc %s with Size %s/%s for resizing", util.ClaimToClaimKey(pvc), pvcSize.String(), pvcStatusSize.String())
klog.V(4).Infof("Adding pvc %s with Size %s/%s for resizing", util.ClaimToClaimKey(pvc), pvcSize.String(), pvcStatusSize.String())

pvcRequest := &PVCWithResizeRequest{
PVC: pvc,
@ -145,7 +144,7 @@ func (resizeMap *volumeResizeMap) GetPVCsWithResizeRequest() []*PVCWithResizeReq
// deleting a pvc in this map doesn't affect operations that are already inflight.
func (resizeMap *volumeResizeMap) DeletePVC(pvc *v1.PersistentVolumeClaim) {
pvcUniqueName := types.UniquePVCName(pvc.UID)
glog.V(5).Infof("Removing PVC %v from resize map", pvcUniqueName)
klog.V(5).Infof("Removing PVC %v from resize map", pvcUniqueName)
resizeMap.Lock()
defer resizeMap.Unlock()
delete(resizeMap.pvcrs, pvcUniqueName)
@ -157,7 +156,7 @@ func (resizeMap *volumeResizeMap) MarkAsResized(pvcr *PVCWithResizeRequest, newS

err := resizeMap.updatePVCCapacityAndConditions(pvcr, newSize, emptyCondition)
if err != nil {
glog.V(4).Infof("Error updating PV spec capacity for volume %q with : %v", pvcr.QualifiedName(), err)
klog.V(4).Infof("Error updating PV spec capacity for volume %q with : %v", pvcr.QualifiedName(), err)
return err
}
return nil
@ -206,7 +205,7 @@ func (resizeMap *volumeResizeMap) UpdatePVSize(pvcr *PVCWithResizeRequest, newSi
_, updateErr := resizeMap.kubeClient.CoreV1().PersistentVolumes().Patch(pvClone.Name, commontypes.StrategicMergePatchType, patchBytes)

if updateErr != nil {
glog.V(4).Infof("Error updating pv %q with error : %v", pvClone.Name, updateErr)
klog.V(4).Infof("Error updating pv %q with error : %v", pvClone.Name, updateErr)
return updateErr
}
return nil
58
vendor/k8s.io/kubernetes/pkg/controller/volume/expand/expand_controller.go
generated
vendored
58
vendor/k8s.io/kubernetes/pkg/controller/volume/expand/expand_controller.go
generated
vendored
@ -24,7 +24,7 @@ import (
"net"
"time"

"github.com/golang/glog"
"k8s.io/klog"

authenticationv1 "k8s.io/api/authentication/v1"
"k8s.io/api/core/v1"
@ -37,12 +37,14 @@ import (
corelisters "k8s.io/client-go/listers/core/v1"
kcache "k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/cloudprovider"
cloudprovider "k8s.io/cloud-provider"
csiclientset "k8s.io/csi-api/pkg/client/clientset/versioned"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/volume/events"
"k8s.io/kubernetes/pkg/controller/volume/expand/cache"
"k8s.io/kubernetes/pkg/util/io"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
)
@ -116,15 +118,15 @@ func NewExpandController(
}

eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartLogging(klog.Infof)
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "volume_expand"})
expc.recorder = eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "volume_expand"})
blkutil := volumepathhandler.NewBlockVolumePathHandler()

expc.opExecutor = operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
kubeClient,
&expc.volumePluginMgr,
recorder,
expc.recorder,
false,
blkutil))

@ -141,14 +143,15 @@ func NewExpandController(
expc.resizeMap,
expc.pvcLister,
expc.pvLister,
&expc.volumePluginMgr,
kubeClient)
return expc, nil
}

func (expc *expandController) Run(stopCh <-chan struct{}) {
defer runtime.HandleCrash()
glog.Infof("Starting expand controller")
defer glog.Infof("Shutting down expand controller")
klog.Infof("Starting expand controller")
defer klog.Infof("Shutting down expand controller")

if !controller.WaitForCacheSync("expand", stopCh, expc.pvcsSynced, expc.pvSynced) {
return
@ -180,9 +183,9 @@ func (expc *expandController) deletePVC(obj interface{}) {
}

func (expc *expandController) pvcUpdate(oldObj, newObj interface{}) {
oldPvc, ok := oldObj.(*v1.PersistentVolumeClaim)
oldPVC, ok := oldObj.(*v1.PersistentVolumeClaim)

if oldPvc == nil || !ok {
if oldPVC == nil || !ok {
return
}

@ -193,7 +196,7 @@ func (expc *expandController) pvcUpdate(oldObj, newObj interface{}) {
}

newSize := newPVC.Spec.Resources.Requests[v1.ResourceStorage]
oldSize := oldPvc.Spec.Resources.Requests[v1.ResourceStorage]
oldSize := oldPVC.Spec.Resources.Requests[v1.ResourceStorage]

// We perform additional checks inside resizeMap.AddPVCUpdate function
// this check here exists to ensure - we do not consider every
@ -201,7 +204,23 @@ func (expc *expandController) pvcUpdate(oldObj, newObj interface{}) {
if newSize.Cmp(oldSize) > 0 {
pv, err := getPersistentVolume(newPVC, expc.pvLister)
if err != nil {
glog.V(5).Infof("Error getting Persistent Volume for pvc %q : %v", newPVC.UID, err)
klog.V(5).Infof("Error getting Persistent Volume for PVC %q : %v", newPVC.UID, err)
return
}

volumeSpec := volume.NewSpecFromPersistentVolume(pv, false)
volumePlugin, err := expc.volumePluginMgr.FindExpandablePluginBySpec(volumeSpec)
if err != nil || volumePlugin == nil {
err = fmt.Errorf("didn't find a plugin capable of expanding the volume; " +
"waiting for an external controller to process this PVC")
eventType := v1.EventTypeNormal
if err != nil {
eventType = v1.EventTypeWarning
}
expc.recorder.Event(newPVC, eventType, events.ExternalExpanding,
fmt.Sprintf("Ignoring the PVC: %v.", err))
klog.V(3).Infof("Ignoring the PVC %q (uid: %q) : %v.",
util.GetPersistentVolumeClaimQualifiedName(newPVC), newPVC.UID, err)
return
}
expc.resizeMap.AddPVCUpdate(newPVC, pv)
@ -268,10 +287,6 @@ func (expc *expandController) GetExec(pluginName string) mount.Exec {
return mount.NewOsExec()
}

func (expc *expandController) GetWriter() io.Writer {
return nil
}

func (expc *expandController) GetHostName() string {
return ""
}
@ -302,6 +317,12 @@ func (expc *expandController) GetServiceAccountTokenFunc() func(_, _ string, _ *
}
}

func (expc *expandController) DeleteServiceAccountTokenFunc() func(types.UID) {
return func(types.UID) {
klog.Errorf("DeleteServiceAccountToken unsupported in expandController")
}
}

func (expc *expandController) GetNodeLabels() (map[string]string, error) {
return nil, fmt.Errorf("GetNodeLabels unsupported in expandController")
}
@ -313,3 +334,8 @@ func (expc *expandController) GetNodeName() types.NodeName {
func (expc *expandController) GetEventRecorder() record.EventRecorder {
return expc.recorder
}

func (expc *expandController) GetCSIClient() csiclientset.Interface {
// No volume plugin in expand controller needs csi.storage.k8s.io
return nil
}
61
vendor/k8s.io/kubernetes/pkg/controller/volume/expand/pvc_populator.go
generated
vendored
61
vendor/k8s.io/kubernetes/pkg/controller/volume/expand/pvc_populator.go
generated
vendored
@ -21,15 +21,23 @@ limitations under the License.
package expand

import (
"fmt"
"time"

"github.com/golang/glog"
"k8s.io/klog"

"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/controller/volume/events"
"k8s.io/kubernetes/pkg/controller/volume/expand/cache"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
)

// PVCPopulator iterates through PVCs and checks if for bound PVCs
@ -39,11 +47,13 @@ type PVCPopulator interface {
}

type pvcPopulator struct {
loopPeriod time.Duration
resizeMap cache.VolumeResizeMap
pvcLister corelisters.PersistentVolumeClaimLister
pvLister corelisters.PersistentVolumeLister
kubeClient clientset.Interface
loopPeriod time.Duration
resizeMap cache.VolumeResizeMap
pvcLister corelisters.PersistentVolumeClaimLister
pvLister corelisters.PersistentVolumeLister
kubeClient clientset.Interface
volumePluginMgr *volume.VolumePluginMgr
recorder record.EventRecorder
}

func NewPVCPopulator(
@ -51,14 +61,19 @@ func NewPVCPopulator(
resizeMap cache.VolumeResizeMap,
pvcLister corelisters.PersistentVolumeClaimLister,
pvLister corelisters.PersistentVolumeLister,
volumePluginMgr *volume.VolumePluginMgr,
kubeClient clientset.Interface) PVCPopulator {
populator := &pvcPopulator{
loopPeriod: loopPeriod,
pvcLister: pvcLister,
pvLister: pvLister,
resizeMap: resizeMap,
kubeClient: kubeClient,
loopPeriod: loopPeriod,
pvcLister: pvcLister,
pvLister: pvLister,
resizeMap: resizeMap,
volumePluginMgr: volumePluginMgr,
kubeClient: kubeClient,
}
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
populator.recorder = eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "volume_expand"})
return populator
}

@ -69,7 +84,7 @@ func (populator *pvcPopulator) Run(stopCh <-chan struct{}) {
func (populator *pvcPopulator) Sync() {
pvcs, err := populator.pvcLister.List(labels.Everything())
if err != nil {
glog.Errorf("Listing PVCs failed in populator : %v", err)
klog.Errorf("Listing PVCs failed in populator : %v", err)
return
}

@ -77,9 +92,29 @@ func (populator *pvcPopulator) Sync() {
pv, err := getPersistentVolume(pvc, populator.pvLister)

if err != nil {
glog.V(5).Infof("Error getting persistent volume for pvc %q : %v", pvc.UID, err)
klog.V(5).Infof("Error getting persistent volume for PVC %q : %v", pvc.UID, err)
continue
}

// Filter PVCs for which the corresponding volume plugins don't allow expansion.
pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
pvcStatusSize := pvc.Status.Capacity[v1.ResourceStorage]
volumeSpec := volume.NewSpecFromPersistentVolume(pv, false)
volumePlugin, err := populator.volumePluginMgr.FindExpandablePluginBySpec(volumeSpec)
if (err != nil || volumePlugin == nil) && pvcStatusSize.Cmp(pvcSize) < 0 {
err = fmt.Errorf("didn't find a plugin capable of expanding the volume; " +
"waiting for an external controller to process this PVC")
eventType := v1.EventTypeNormal
if err != nil {
eventType = v1.EventTypeWarning
}
populator.recorder.Event(pvc, eventType, events.ExternalExpanding,
fmt.Sprintf("Ignoring the PVC: %v.", err))
klog.V(3).Infof("Ignoring the PVC %q (uid: %q) : %v.",
util.GetPersistentVolumeClaimQualifiedName(pvc), pvc.UID, err)
continue
}

// We are only going to add PVCs which are:
// - bound
// - pvc.Spec.Size > pvc.Status.Size
11
vendor/k8s.io/kubernetes/pkg/controller/volume/expand/sync_volume_resize.go
generated
vendored
11
vendor/k8s.io/kubernetes/pkg/controller/volume/expand/sync_volume_resize.go
generated
vendored
@ -19,11 +19,11 @@ package expand
import (
"time"

"github.com/golang/glog"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/controller/volume/expand/cache"
"k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff"
"k8s.io/kubernetes/pkg/volume/util"
@ -66,7 +66,7 @@ func (rc *syncResize) Sync() {
uniqueVolumeKey := v1.UniqueVolumeName(pvcWithResizeRequest.UniquePVCKey())
updatedClaim, err := markPVCResizeInProgress(pvcWithResizeRequest, rc.kubeClient)
if err != nil {
glog.V(5).Infof("Error setting PVC %s in progress with error : %v", pvcWithResizeRequest.QualifiedName(), err)
klog.V(5).Infof("Error setting PVC %s in progress with error : %v", pvcWithResizeRequest.QualifiedName(), err)
continue
}
if updatedClaim != nil {
@ -74,16 +74,15 @@ func (rc *syncResize) Sync() {
}

if rc.opsExecutor.IsOperationPending(uniqueVolumeKey, "") {
glog.V(10).Infof("Operation for PVC %v is already pending", pvcWithResizeRequest.QualifiedName())
klog.V(10).Infof("Operation for PVC %v is already pending", pvcWithResizeRequest.QualifiedName())
continue
}
glog.V(5).Infof("Starting opsExecutor.ExpandVolume for volume %s", pvcWithResizeRequest.QualifiedName())
growFuncError := rc.opsExecutor.ExpandVolume(pvcWithResizeRequest, rc.resizeMap)
if growFuncError != nil && !exponentialbackoff.IsExponentialBackoff(growFuncError) {
glog.Errorf("Error growing pvc %s with %v", pvcWithResizeRequest.QualifiedName(), growFuncError)
klog.Errorf("Error growing pvc %s with %v", pvcWithResizeRequest.QualifiedName(), growFuncError)
}
if growFuncError == nil {
glog.V(5).Infof("Started opsExecutor.ExpandVolume for volume %s", pvcWithResizeRequest.QualifiedName())
klog.V(5).Infof("Started opsExecutor.ExpandVolume for volume %s", pvcWithResizeRequest.QualifiedName())
}
}
}
103
vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/BUILD
generated
vendored
103
vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/BUILD
generated
vendored
@ -13,6 +13,7 @@ go_library(
"pv_controller.go",
"pv_controller_base.go",
"scheduler_assume_cache.go",
"scheduler_bind_cache_metrics.go",
"scheduler_binder.go",
"scheduler_binder_cache.go",
"scheduler_binder_fake.go",
@ -21,43 +22,44 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/controller/volume/persistentvolume",
deps = [
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/volume/events:go_default_library",
"//pkg/controller/volume/persistentvolume/metrics:go_default_library",
"//pkg/features:go_default_library",
"//pkg/util/goroutinemap:go_default_library",
"//pkg/util/goroutinemap/exponentialbackoff:go_default_library",
"//pkg/util/io:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/recyclerclient:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/authentication/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/storage/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/informers/storage/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/storage/v1:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/client-go/tools/reference:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
"//staging/src/k8s.io/api/authentication/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/informers/storage/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/listers/storage/v1:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/client-go/tools/reference:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//staging/src/k8s.io/cloud-provider:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library",
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@ -68,6 +70,7 @@ go_test(
"delete_test.go",
"framework_test.go",
"index_test.go",
"main_test.go",
"provision_test.go",
"pv_controller_test.go",
"recycle_test.go",
@ -80,31 +83,33 @@ go_test(
"//pkg/api/testapi:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/features:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/recyclerclient:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/storage/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/storage/v1:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/client-go/tools/reference:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/listers/storage/v1:go_default_library",
"//staging/src/k8s.io/client-go/testing:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/client-go/tools/reference:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
6
vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/OWNERS
generated
vendored
6
vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/OWNERS
generated
vendored
@ -3,3 +3,9 @@ approvers:
- saad-ali
- thockin
- msau42 # for volume scheduling
reviewers:
- jsafrane
- saad-ali
- thockin
- msau42 # for volume scheduling
- lichuqiang # for volume scheduling
319
vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/binder_test.go
generated
vendored
319
vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/binder_test.go
generated
vendored
@ -23,6 +23,8 @@ import (
storage "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
"k8s.io/kubernetes/pkg/features"
)

// Test single call to syncClaim and syncVolume methods.
@ -35,8 +37,6 @@ func TestSync(t *testing.T) {
"foo": "true",
"bar": "false",
}
modeBlock := v1.PersistentVolumeBlock
modeFile := v1.PersistentVolumeFilesystem

tests := []controllerTest{
// [Unit test set 1] User did not care which PV they get.
@ -45,7 +45,7 @@ func TestSync(t *testing.T) {
{
// syncClaim binds to a matching unbound volume.
"1-1 - successful bind",
newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume1-1", "1Gi", "uid1-1", "claim1-1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
newClaimArray("claim1-1", "uid1-1", "1Gi", "", v1.ClaimPending, nil),
newClaimArray("claim1-1", "uid1-1", "1Gi", "volume1-1", v1.ClaimBound, nil, annBoundByController, annBindCompleted),
@ -54,8 +54,8 @@ func TestSync(t *testing.T) {
{
// syncClaim does not do anything when there is no matching volume.
"1-2 - noop",
newVolumeArray("volume1-2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume1-2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume1-2", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume1-2", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newClaimArray("claim1-2", "uid1-2", "10Gi", "", v1.ClaimPending, nil),
newClaimArray("claim1-2", "uid1-2", "10Gi", "", v1.ClaimPending, nil),
[]string{"Normal FailedBinding"},
@ -65,8 +65,8 @@ func TestSync(t *testing.T) {
// syncClaim resets claim.Status to Pending when there is no
// matching volume.
"1-3 - reset to Pending",
newVolumeArray("volume1-3", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume1-3", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume1-3", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume1-3", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newClaimArray("claim1-3", "uid1-3", "10Gi", "", v1.ClaimBound, nil),
newClaimArray("claim1-3", "uid1-3", "10Gi", "", v1.ClaimPending, nil),
[]string{"Normal FailedBinding"},
@ -76,11 +76,11 @@ func TestSync(t *testing.T) {
// syncClaim binds claims to the smallest matching volume
"1-4 - smallest volume",
[]*v1.PersistentVolume{
newVolume("volume1-4_1", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-4_2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-4_1", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-4_2", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
},
[]*v1.PersistentVolume{
newVolume("volume1-4_1", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-4_1", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-4_2", "1Gi", "uid1-4", "claim1-4", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
},
newClaimArray("claim1-4", "uid1-4", "1Gi", "", v1.ClaimPending, nil),
@ -92,12 +92,12 @@ func TestSync(t *testing.T) {
// name), even though a smaller one is available.
"1-5 - prebound volume by name - success",
[]*v1.PersistentVolume{
newVolume("volume1-5_1", "10Gi", "", "claim1-5", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-5_2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-5_1", "10Gi", "", "claim1-5", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-5_2", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
},
[]*v1.PersistentVolume{
newVolume("volume1-5_1", "10Gi", "uid1-5", "claim1-5", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-5_2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-5_2", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
},
newClaimArray("claim1-5", "uid1-5", "1Gi", "", v1.ClaimPending, nil),
withExpectedCapacity("10Gi", newClaimArray("claim1-5", "uid1-5", "1Gi", "volume1-5_1", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
@ -108,12 +108,12 @@ func TestSync(t *testing.T) {
// UID), even though a smaller one is available.
"1-6 - prebound volume by UID - success",
[]*v1.PersistentVolume{
newVolume("volume1-6_1", "10Gi", "uid1-6", "claim1-6", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-6_2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-6_1", "10Gi", "uid1-6", "claim1-6", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-6_2", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
},
[]*v1.PersistentVolume{
newVolume("volume1-6_1", "10Gi", "uid1-6", "claim1-6", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-6_2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-6_2", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
},
newClaimArray("claim1-6", "uid1-6", "1Gi", "", v1.ClaimPending, nil),
withExpectedCapacity("10Gi", newClaimArray("claim1-6", "uid1-6", "1Gi", "volume1-6_1", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
@ -123,8 +123,8 @@ func TestSync(t *testing.T) {
// syncClaim does not bind claim to a volume prebound to a claim with
// same name and different UID
"1-7 - prebound volume to different claim",
newVolumeArray("volume1-7", "10Gi", "uid1-777", "claim1-7", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume1-7", "10Gi", "uid1-777", "claim1-7", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume1-7", "10Gi", "uid1-777", "claim1-7", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume1-7", "10Gi", "uid1-777", "claim1-7", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newClaimArray("claim1-7", "uid1-7", "1Gi", "", v1.ClaimPending, nil),
newClaimArray("claim1-7", "uid1-7", "1Gi", "", v1.ClaimPending, nil),
[]string{"Normal FailedBinding"},
@ -134,7 +134,7 @@ func TestSync(t *testing.T) {
// syncClaim completes binding - simulates controller crash after
// PV.ClaimRef is saved
"1-8 - complete bind after crash - PV bound",
newVolumeArray("volume1-8", "1Gi", "uid1-8", "claim1-8", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
newVolumeArray("volume1-8", "1Gi", "uid1-8", "claim1-8", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
newVolumeArray("volume1-8", "1Gi", "uid1-8", "claim1-8", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
newClaimArray("claim1-8", "uid1-8", "1Gi", "", v1.ClaimPending, nil),
newClaimArray("claim1-8", "uid1-8", "1Gi", "volume1-8", v1.ClaimBound, nil, annBoundByController, annBindCompleted),
@ -163,7 +163,7 @@ func TestSync(t *testing.T) {
{
// syncClaim binds a claim only when the label selector matches the volume
"1-11 - bind when selector matches",
withLabels(labels, newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withLabels(labels, newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
withLabels(labels, newVolumeArray("volume1-1", "1Gi", "uid1-1", "claim1-1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withLabelSelector(labels, newClaimArray("claim1-1", "uid1-1", "1Gi", "", v1.ClaimPending, nil)),
withLabelSelector(labels, newClaimArray("claim1-1", "uid1-1", "1Gi", "volume1-1", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
@ -172,8 +172,8 @@ func TestSync(t *testing.T) {
{
// syncClaim does not bind a claim when the label selector doesn't match
"1-12 - do not bind when selector does not match",
newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
withLabelSelector(labels, newClaimArray("claim1-1", "uid1-1", "1Gi", "", v1.ClaimPending, nil)),
withLabelSelector(labels, newClaimArray("claim1-1", "uid1-1", "1Gi", "", v1.ClaimPending, nil)),
[]string{"Normal FailedBinding"},
@ -182,8 +182,8 @@ func TestSync(t *testing.T) {
{
// syncClaim does not do anything when binding is delayed
"1-13 - delayed binding",
newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classWait),
newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classWait),
newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classWait),
newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classWait),
newClaimArray("claim1-1", "uid1-1", "1Gi", "", v1.ClaimPending, &classWait),
newClaimArray("claim1-1", "uid1-1", "1Gi", "", v1.ClaimPending, &classWait),
[]string{"Normal WaitForFirstConsumer"},
@ -192,7 +192,7 @@ func TestSync(t *testing.T) {
{
// syncClaim binds when binding is delayed but PV is prebound to PVC
"1-14 - successful prebound PV",
newVolumeArray("volume1-1", "1Gi", "", "claim1-1", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classWait),
newVolumeArray("volume1-1", "1Gi", "", "claim1-1", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classWait),
newVolumeArray("volume1-1", "1Gi", "uid1-1", "claim1-1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classWait),
newClaimArray("claim1-1", "uid1-1", "1Gi", "", v1.ClaimPending, &classWait),
newClaimArray("claim1-1", "uid1-1", "1Gi", "volume1-1", v1.ClaimBound, &classWait, annBoundByController, annBindCompleted),
@ -203,12 +203,12 @@ func TestSync(t *testing.T) {
// even if there is smaller volume available
"1-15 - successful prebound PVC",
[]*v1.PersistentVolume{
newVolume("volume1-15_1", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-15_2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-15_1", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-15_2", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
},
[]*v1.PersistentVolume{
newVolume("volume1-15_1", "10Gi", "uid1-15", "claim1-15", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
newVolume("volume1-15_2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-15_2", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
},
newClaimArray("claim1-15", "uid1-15", "1Gi", "volume1-15_1", v1.ClaimPending, nil),
withExpectedCapacity("10Gi", newClaimArray("claim1-15", "uid1-15", "1Gi", "volume1-15_1", v1.ClaimBound, nil, annBindCompleted)),
@ -218,12 +218,35 @@ func TestSync(t *testing.T) {
// syncClaim does not bind pre-bound PVC to PV with different AccessMode
"1-16 - successful prebound PVC",
// PV has ReadWriteOnce
newVolumeArray("volume1-16", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume1-16", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume1-16", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume1-16", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
claimWithAccessMode([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}, newClaimArray("claim1-16", "uid1-16", "1Gi", "volume1-16", v1.ClaimPending, nil)),
claimWithAccessMode([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}, newClaimArray("claim1-16", "uid1-16", "1Gi", "volume1-16", v1.ClaimPending, nil)),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim does not bind PVC to non-available PV if it's not pre-bind
"1-17 - skip non-available PV if it's not pre-bind",
[]*v1.PersistentVolume{
newVolume("volume1-17-pending", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-17-failed", "1Gi", "", "", v1.VolumeFailed, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-17-released", "1Gi", "", "", v1.VolumeReleased, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-17-empty", "1Gi", "", "", "", v1.PersistentVolumeReclaimRetain, classEmpty),
},
[]*v1.PersistentVolume{
newVolume("volume1-17-pending", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-17-failed", "1Gi", "", "", v1.VolumeFailed, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-17-released", "1Gi", "", "", v1.VolumeReleased, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-17-empty", "1Gi", "", "", "", v1.PersistentVolumeReclaimRetain, classEmpty),
},
[]*v1.PersistentVolumeClaim{
newClaim("claim1-17", "uid1-17", "1Gi", "", v1.ClaimPending, nil),
},
[]*v1.PersistentVolumeClaim{
newClaim("claim1-17", "uid1-17", "1Gi", "", v1.ClaimPending, nil),
},
noevents, noerrors, testSyncClaim,
},

// [Unit test set 2] User asked for a specific PV.
// Test the binding when pv.ClaimRef is already set by controller or
@ -251,7 +274,7 @@ func TestSync(t *testing.T) {
|
||||
// syncClaim with claim pre-bound to a PV that exists and is
|
||||
// unbound. Check it gets bound and no annBoundByController is set.
|
||||
"2-3 - claim prebound to unbound volume",
|
||||
newVolumeArray("volume2-3", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
newVolumeArray("volume2-3", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
newVolumeArray("volume2-3", "1Gi", "uid2-3", "claim2-3", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
|
||||
newClaimArray("claim2-3", "uid2-3", "1Gi", "volume2-3", v1.ClaimPending, nil),
|
||||
newClaimArray("claim2-3", "uid2-3", "1Gi", "volume2-3", v1.ClaimBound, nil, annBindCompleted),
|
||||
@ -261,7 +284,7 @@ func TestSync(t *testing.T) {
|
||||
// claim with claim pre-bound to a PV that is pre-bound to the claim
|
||||
// by name. Check it gets bound and no annBoundByController is set.
|
||||
"2-4 - claim prebound to prebound volume by name",
|
||||
newVolumeArray("volume2-4", "1Gi", "", "claim2-4", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
newVolumeArray("volume2-4", "1Gi", "", "claim2-4", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
newVolumeArray("volume2-4", "1Gi", "uid2-4", "claim2-4", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
newClaimArray("claim2-4", "uid2-4", "1Gi", "volume2-4", v1.ClaimPending, nil),
|
||||
newClaimArray("claim2-4", "uid2-4", "1Gi", "volume2-4", v1.ClaimBound, nil, annBindCompleted),
|
||||
@ -272,7 +295,7 @@ func TestSync(t *testing.T) {
|
||||
// claim by UID. Check it gets bound and no annBoundByController is
|
||||
// set.
|
||||
"2-5 - claim prebound to prebound volume by UID",
|
||||
newVolumeArray("volume2-5", "1Gi", "uid2-5", "claim2-5", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
newVolumeArray("volume2-5", "1Gi", "uid2-5", "claim2-5", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
newVolumeArray("volume2-5", "1Gi", "uid2-5", "claim2-5", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
newClaimArray("claim2-5", "uid2-5", "1Gi", "volume2-5", v1.ClaimPending, nil),
|
||||
newClaimArray("claim2-5", "uid2-5", "1Gi", "volume2-5", v1.ClaimBound, nil, annBindCompleted),
|
||||
@ -303,7 +326,7 @@ func TestSync(t *testing.T) {
|
||||
// unbound, but does not match the selector. Check it gets bound
|
||||
// and no annBoundByController is set.
|
||||
"2-8 - claim prebound to unbound volume that does not match the selector",
|
||||
newVolumeArray("volume2-8", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
newVolumeArray("volume2-8", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
newVolumeArray("volume2-8", "1Gi", "uid2-8", "claim2-8", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
|
||||
withLabelSelector(labels, newClaimArray("claim2-8", "uid2-8", "1Gi", "volume2-8", v1.ClaimPending, nil)),
|
||||
withLabelSelector(labels, newClaimArray("claim2-8", "uid2-8", "1Gi", "volume2-8", v1.ClaimBound, nil, annBindCompleted)),
|
||||
@ -314,8 +337,8 @@ func TestSync(t *testing.T) {
|
||||
// unbound, but its size is smaller than requested.
|
||||
//Check that the claim status is reset to Pending
|
||||
"2-9 - claim prebound to unbound volume that size is smaller than requested",
|
||||
newVolumeArray("volume2-9", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
newVolumeArray("volume2-9", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
newVolumeArray("volume2-9", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
newVolumeArray("volume2-9", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
newClaimArray("claim2-9", "uid2-9", "2Gi", "volume2-9", v1.ClaimBound, nil),
|
||||
newClaimArray("claim2-9", "uid2-9", "2Gi", "volume2-9", v1.ClaimPending, nil),
|
||||
[]string{"Warning VolumeMismatch"}, noerrors, testSyncClaim,
|
||||
@ -324,8 +347,8 @@ func TestSync(t *testing.T) {
|
||||
// syncClaim with claim pre-bound to a PV that exists and is
|
||||
// unbound, but its class does not match. Check that the claim status is reset to Pending
|
||||
"2-10 - claim prebound to unbound volume that class is different",
|
||||
newVolumeArray("volume2-10", "1Gi", "1", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold),
|
||||
newVolumeArray("volume2-10", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold),
|
||||
newVolumeArray("volume2-10", "1Gi", "1", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classGold),
|
||||
newVolumeArray("volume2-10", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classGold),
|
||||
newClaimArray("claim2-10", "uid2-10", "1Gi", "volume2-10", v1.ClaimBound, nil),
|
||||
newClaimArray("claim2-10", "uid2-10", "1Gi", "volume2-10", v1.ClaimPending, nil),
|
||||
[]string{"Warning VolumeMismatch"}, noerrors, testSyncClaim,
|
||||
@ -356,7 +379,7 @@ func TestSync(t *testing.T) {
|
||||
// syncClaim with claim bound to unbound volume. Check it's bound.
|
||||
// Also check that Pending phase is set to Bound
|
||||
"3-3 - bound claim with unbound volume",
|
||||
newVolumeArray("volume3-3", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
newVolumeArray("volume3-3", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
newVolumeArray("volume3-3", "10Gi", "uid3-3", "claim3-3", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
|
||||
newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", v1.ClaimPending, nil, annBoundByController, annBindCompleted),
|
||||
newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", v1.ClaimBound, nil, annBoundByController, annBindCompleted),
|
||||
@ -366,8 +389,8 @@ func TestSync(t *testing.T) {
|
||||
// syncClaim with claim bound to volume with missing (or different)
|
||||
// volume.Spec.ClaimRef.UID. Check that the claim is marked as lost.
|
||||
"3-4 - bound claim with prebound volume",
|
||||
newVolumeArray("volume3-4", "10Gi", "claim3-4-x", "claim3-4", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
newVolumeArray("volume3-4", "10Gi", "claim3-4-x", "claim3-4", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
newVolumeArray("volume3-4", "10Gi", "claim3-4-x", "claim3-4", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
newVolumeArray("volume3-4", "10Gi", "claim3-4-x", "claim3-4", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
newClaimArray("claim3-4", "uid3-4", "10Gi", "volume3-4", v1.ClaimPending, nil, annBoundByController, annBindCompleted),
|
||||
newClaimArray("claim3-4", "uid3-4", "10Gi", "volume3-4", v1.ClaimLost, nil, annBoundByController, annBindCompleted),
|
||||
[]string{"Warning ClaimMisbound"}, noerrors, testSyncClaim,
|
||||
@ -377,7 +400,7 @@ func TestSync(t *testing.T) {
|
||||
// controller does not do anything. Also check that Pending phase is
|
||||
// set to Bound
|
||||
"3-5 - bound claim with bound volume",
|
||||
newVolumeArray("volume3-5", "10Gi", "uid3-5", "claim3-5", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
newVolumeArray("volume3-5", "10Gi", "uid3-5", "claim3-5", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
newVolumeArray("volume3-5", "10Gi", "uid3-5", "claim3-5", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
newClaimArray("claim3-5", "uid3-5", "10Gi", "volume3-5", v1.ClaimPending, nil, annBindCompleted),
|
||||
newClaimArray("claim3-5", "uid3-5", "10Gi", "volume3-5", v1.ClaimBound, nil, annBindCompleted),
|
||||
@ -388,8 +411,8 @@ func TestSync(t *testing.T) {
|
||||
// claim. Check that the claim is marked as lost.
|
||||
// TODO: test that an event is emitted
|
||||
"3-6 - bound claim with bound volume",
|
||||
newVolumeArray("volume3-6", "10Gi", "uid3-6-x", "claim3-6-x", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
newVolumeArray("volume3-6", "10Gi", "uid3-6-x", "claim3-6-x", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
newVolumeArray("volume3-6", "10Gi", "uid3-6-x", "claim3-6-x", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
newVolumeArray("volume3-6", "10Gi", "uid3-6-x", "claim3-6-x", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
newClaimArray("claim3-6", "uid3-6", "10Gi", "volume3-6", v1.ClaimPending, nil, annBindCompleted),
|
||||
newClaimArray("claim3-6", "uid3-6", "10Gi", "volume3-6", v1.ClaimLost, nil, annBindCompleted),
|
||||
[]string{"Warning ClaimMisbound"}, noerrors, testSyncClaim,
|
||||
@ -399,7 +422,7 @@ func TestSync(t *testing.T) {
|
||||
// even if the claim's selector doesn't match the volume. Also
|
||||
// check that Pending phase is set to Bound
|
||||
"3-7 - bound claim with unbound volume where selector doesn't match",
|
||||
newVolumeArray("volume3-3", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
newVolumeArray("volume3-3", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
newVolumeArray("volume3-3", "10Gi", "uid3-3", "claim3-3", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
|
||||
withLabelSelector(labels, newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", v1.ClaimPending, nil, annBoundByController, annBindCompleted)),
|
||||
withLabelSelector(labels, newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
|
||||
@ -409,7 +432,7 @@ func TestSync(t *testing.T) {
|
||||
{
|
||||
// syncVolume with pending volume. Check it's marked as Available.
|
||||
"4-1 - pending volume",
|
||||
newVolumeArray("volume4-1", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
newVolumeArray("volume4-1", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
newVolumeArray("volume4-1", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
noclaims,
|
||||
noclaims,
|
||||
@ -419,7 +442,7 @@ func TestSync(t *testing.T) {
|
||||
// syncVolume with prebound pending volume. Check it's marked as
|
||||
// Available.
|
||||
"4-2 - pending prebound volume",
|
||||
newVolumeArray("volume4-2", "10Gi", "", "claim4-2", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
newVolumeArray("volume4-2", "10Gi", "", "claim4-2", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
newVolumeArray("volume4-2", "10Gi", "", "claim4-2", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
noclaims,
|
||||
noclaims,
|
||||
@ -503,11 +526,11 @@ func TestSync(t *testing.T) {
|
||||
// smaller PV available
|
||||
"13-1 - binding to class",
|
||||
[]*v1.PersistentVolume{
|
||||
newVolume("volume13-1-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
newVolume("volume13-1-2", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold),
|
||||
newVolume("volume13-1-1", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
newVolume("volume13-1-2", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classGold),
|
||||
},
|
||||
[]*v1.PersistentVolume{
|
||||
newVolume("volume13-1-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
newVolume("volume13-1-1", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
newVolume("volume13-1-2", "10Gi", "uid13-1", "claim13-1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classGold, annBoundByController),
|
||||
},
|
||||
newClaimArray("claim13-1", "uid13-1", "1Gi", "", v1.ClaimPending, &classGold),
|
||||
@ -519,11 +542,11 @@ func TestSync(t *testing.T) {
|
||||
// smaller PV with a class available
|
||||
"13-2 - binding without a class",
|
||||
[]*v1.PersistentVolume{
|
||||
newVolume("volume13-2-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold),
|
||||
newVolume("volume13-2-2", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
newVolume("volume13-2-1", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classGold),
|
||||
newVolume("volume13-2-2", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
},
|
||||
[]*v1.PersistentVolume{
|
||||
newVolume("volume13-2-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold),
|
||||
newVolume("volume13-2-1", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classGold),
|
||||
newVolume("volume13-2-2", "10Gi", "uid13-2", "claim13-2", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
|
||||
},
|
||||
newClaimArray("claim13-2", "uid13-2", "1Gi", "", v1.ClaimPending, nil),
|
||||
@ -535,11 +558,11 @@ func TestSync(t *testing.T) {
|
||||
// smaller PV with different class available
|
||||
"13-3 - binding to specific a class",
|
||||
[]*v1.PersistentVolume{
|
||||
newVolume("volume13-3-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classSilver),
|
||||
newVolume("volume13-3-2", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold),
|
||||
newVolume("volume13-3-1", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classSilver),
|
||||
newVolume("volume13-3-2", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classGold),
|
||||
},
|
||||
[]*v1.PersistentVolume{
|
||||
newVolume("volume13-3-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classSilver),
|
||||
newVolume("volume13-3-1", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classSilver),
|
||||
newVolume("volume13-3-2", "10Gi", "uid13-3", "claim13-3", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classGold, annBoundByController),
|
||||
},
|
||||
newClaimArray("claim13-3", "uid13-3", "1Gi", "", v1.ClaimPending, &classGold),
|
||||
@ -550,7 +573,7 @@ func TestSync(t *testing.T) {
|
||||
// syncVolume binds claim requesting class "" to claim to PV with
|
||||
// class=""
|
||||
"13-4 - empty class",
|
||||
newVolumeArray("volume13-4", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
newVolumeArray("volume13-4", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
newVolumeArray("volume13-4", "1Gi", "uid13-4", "claim13-4", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
|
||||
newClaimArray("claim13-4", "uid13-4", "1Gi", "", v1.ClaimPending, &classEmpty),
|
||||
newClaimArray("claim13-4", "uid13-4", "1Gi", "volume13-4", v1.ClaimBound, &classEmpty, annBoundByController, annBindCompleted),
|
||||
@ -560,65 +583,14 @@ func TestSync(t *testing.T) {
|
||||
// syncVolume binds claim requesting class nil to claim to PV with
|
||||
// class = ""
|
||||
"13-5 - nil class",
|
||||
newVolumeArray("volume13-5", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
newVolumeArray("volume13-5", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
|
||||
newVolumeArray("volume13-5", "1Gi", "uid13-5", "claim13-5", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
|
||||
newClaimArray("claim13-5", "uid13-5", "1Gi", "", v1.ClaimPending, nil),
|
||||
newClaimArray("claim13-5", "uid13-5", "1Gi", "volume13-5", v1.ClaimBound, nil, annBoundByController, annBindCompleted),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
|
||||
// All of these should bind as feature set is not enabled for BlockVolume
|
||||
// meaning volumeMode will be ignored and dropped
|
||||
{
|
||||
// syncVolume binds a requested block claim to a block volume
|
||||
"14-1 - binding to volumeMode block",
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-1", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-1", "10Gi", "uid14-1", "claim14-1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
|
||||
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-1", "uid14-1", "10Gi", "", v1.ClaimPending, nil)),
|
||||
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-1", "uid14-1", "10Gi", "volume14-1", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
// syncVolume binds a requested filesystem claim to a filesystem volume
|
||||
"14-2 - binding to volumeMode filesystem",
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-2", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-2", "10Gi", "uid14-2", "claim14-2", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-2", "uid14-2", "10Gi", "", v1.ClaimPending, nil)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-2", "uid14-2", "10Gi", "volume14-2", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
// syncVolume binds an unspecified volumemode for claim to a specified filesystem volume
|
||||
"14-3 - binding to volumeMode filesystem using default for claim",
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-3", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-3", "10Gi", "uid14-3", "claim14-3", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
|
||||
withClaimVolumeMode(nil, newClaimArray("claim14-3", "uid14-3", "10Gi", "", v1.ClaimPending, nil)),
|
||||
withClaimVolumeMode(nil, newClaimArray("claim14-3", "uid14-3", "10Gi", "volume14-3", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
// syncVolume binds a requested filesystem claim to an unspecified volumeMode for volume
|
||||
"14-4 - binding to unspecified volumeMode using requested filesystem for claim",
|
||||
withVolumeVolumeMode(nil, newVolumeArray("volume14-4", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(nil, newVolumeArray("volume14-4", "10Gi", "uid14-4", "claim14-4", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-4", "uid14-4", "10Gi", "", v1.ClaimPending, nil)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-4", "uid14-4", "10Gi", "volume14-4", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
// syncVolume binds a requested filesystem claim to an unspecified volumeMode for volume
|
||||
"14-5 - binding different volumeModes should be ignored",
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-5", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-5", "10Gi", "uid14-5", "claim14-5", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-5", "uid14-5", "10Gi", "", v1.ClaimPending, nil)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-5", "uid14-5", "10Gi", "volume14-5", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
}
|
||||
|
||||
utilfeature.DefaultFeatureGate.Set("VolumeScheduling=true")
|
||||
defer utilfeature.DefaultFeatureGate.Set("VolumeScheduling=false")
|
||||
|
||||
runSyncTests(t, tests, []*storage.StorageClass{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: classWait},
|
||||
@ -627,7 +599,69 @@ func TestSync(t *testing.T) {
|
||||
}, []*v1.Pod{})
|
||||
}
|
||||
|
||||
func TestSyncAlphaBlockVolume(t *testing.T) {
|
||||
func TestSyncBlockVolumeDisabled(t *testing.T) {
|
||||
modeBlock := v1.PersistentVolumeBlock
|
||||
modeFile := v1.PersistentVolumeFilesystem
|
||||
// All of these should bind as feature set is not enabled for BlockVolume
|
||||
// meaning volumeMode will be ignored and dropped
|
||||
tests := []controllerTest{
|
||||
{
|
||||
// syncVolume binds a requested block claim to a block volume
|
||||
"14-1 - binding to volumeMode block",
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-1", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-1", "10Gi", "uid14-1", "claim14-1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
|
||||
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-1", "uid14-1", "10Gi", "", v1.ClaimPending, nil)),
|
||||
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-1", "uid14-1", "10Gi", "volume14-1", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
// syncVolume binds a requested filesystem claim to a filesystem volume
|
||||
"14-2 - binding to volumeMode filesystem",
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-2", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-2", "10Gi", "uid14-2", "claim14-2", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-2", "uid14-2", "10Gi", "", v1.ClaimPending, nil)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-2", "uid14-2", "10Gi", "volume14-2", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
// syncVolume binds an unspecified volumemode for claim to a specified filesystem volume
|
||||
"14-3 - binding to volumeMode filesystem using default for claim",
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-3", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-3", "10Gi", "uid14-3", "claim14-3", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
|
||||
withClaimVolumeMode(nil, newClaimArray("claim14-3", "uid14-3", "10Gi", "", v1.ClaimPending, nil)),
|
||||
withClaimVolumeMode(nil, newClaimArray("claim14-3", "uid14-3", "10Gi", "volume14-3", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
// syncVolume binds a requested filesystem claim to an unspecified volumeMode for volume
|
||||
"14-4 - binding to unspecified volumeMode using requested filesystem for claim",
|
||||
withVolumeVolumeMode(nil, newVolumeArray("volume14-4", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(nil, newVolumeArray("volume14-4", "10Gi", "uid14-4", "claim14-4", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-4", "uid14-4", "10Gi", "", v1.ClaimPending, nil)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-4", "uid14-4", "10Gi", "volume14-4", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
// syncVolume binds a requested filesystem claim to an unspecified volumeMode for volume
|
||||
"14-5 - binding different volumeModes should be ignored",
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-5", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-5", "10Gi", "uid14-5", "claim14-5", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-5", "uid14-5", "10Gi", "", v1.ClaimPending, nil)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-5", "uid14-5", "10Gi", "volume14-5", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
}
|
||||
|
||||
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BlockVolume, false)()
|
||||
runSyncTests(t, tests, []*storage.StorageClass{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: classWait},
|
||||
VolumeBindingMode: &modeWait,
|
||||
},
|
||||
}, []*v1.Pod{})
|
||||
}
|
||||
|
||||
func TestSyncBlockVolume(t *testing.T) {
|
||||
modeBlock := v1.PersistentVolumeBlock
|
||||
modeFile := v1.PersistentVolumeFilesystem
|
||||
|
||||
@ -637,7 +671,7 @@ func TestSyncAlphaBlockVolume(t *testing.T) {
|
||||
{
|
||||
// syncVolume binds a requested block claim to a block volume
|
||||
"14-1 - binding to volumeMode block",
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-1", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-1", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-1", "10Gi", "uid14-1", "claim14-1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
|
||||
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-1", "uid14-1", "10Gi", "", v1.ClaimPending, nil)),
|
||||
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-1", "uid14-1", "10Gi", "volume14-1", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
|
||||
@ -646,7 +680,7 @@ func TestSyncAlphaBlockVolume(t *testing.T) {
|
||||
{
|
||||
// syncVolume binds a requested filesystem claim to a filesystem volume
|
||||
"14-2 - binding to volumeMode filesystem",
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-2", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-2", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-2", "10Gi", "uid14-2", "claim14-2", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-2", "uid14-2", "10Gi", "", v1.ClaimPending, nil)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-2", "uid14-2", "10Gi", "volume14-2", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
|
||||
@ -655,8 +689,8 @@ func TestSyncAlphaBlockVolume(t *testing.T) {
|
||||
{
|
||||
// failed syncVolume do not bind to an unspecified volumemode for claim to a specified filesystem volume
|
||||
"14-3 - do not bind pv volumeMode filesystem and pvc volumeMode block",
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-3", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-3", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-3", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-3", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-3", "uid14-3", "10Gi", "", v1.ClaimPending, nil)),
|
||||
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-3", "uid14-3", "10Gi", "", v1.ClaimPending, nil)),
|
||||
[]string{"Normal FailedBinding"},
|
||||
@ -665,8 +699,8 @@ func TestSyncAlphaBlockVolume(t *testing.T) {
|
||||
{
|
||||
// failed syncVolume do not bind a requested filesystem claim to an unspecified volumeMode for volume
|
||||
"14-4 - do not bind pv volumeMode block and pvc volumeMode filesystem",
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-4", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-4", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-4", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-4", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-4", "uid14-4", "10Gi", "", v1.ClaimPending, nil)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-4", "uid14-4", "10Gi", "", v1.ClaimPending, nil)),
|
||||
[]string{"Normal FailedBinding"},
|
||||
@ -675,8 +709,8 @@ func TestSyncAlphaBlockVolume(t *testing.T) {
|
||||
{
|
||||
// failed syncVolume do not bind when matching class but not matching volumeModes
|
||||
"14-5 - do not bind when matching class but not volumeMode",
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold)),
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold)),
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classGold)),
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classGold)),
|
||||
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-5", "uid14-5", "10Gi", "", v1.ClaimPending, &classGold)),
|
||||
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-5", "uid14-5", "10Gi", "", v1.ClaimPending, &classGold)),
|
||||
[]string{"Warning ProvisioningFailed"},
|
||||
@ -685,8 +719,8 @@ func TestSyncAlphaBlockVolume(t *testing.T) {
|
||||
{
|
||||
// failed syncVolume do not bind when matching volumeModes but class does not match
|
||||
"14-5-1 - do not bind when matching volumeModes but class does not match",
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5-1", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold)),
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5-1", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold)),
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5-1", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classGold)),
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5-1", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classGold)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-5-1", "uid14-5-1", "10Gi", "", v1.ClaimPending, &classSilver)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-5-1", "uid14-5-1", "10Gi", "", v1.ClaimPending, &classSilver)),
|
||||
[]string{"Warning ProvisioningFailed"},
|
||||
@ -695,8 +729,8 @@ func TestSyncAlphaBlockVolume(t *testing.T) {
|
||||
{
|
||||
// failed syncVolume do not bind when pvc is prebound to pv with matching volumeModes but class does not match
|
||||
"14-5-2 - do not bind when pvc is prebound to pv with matching volumeModes but class does not match",
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5-2", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold)),
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5-2", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold)),
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5-2", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classGold)),
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5-2", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classGold)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-5-2", "uid14-5-2", "10Gi", "volume14-5-2", v1.ClaimPending, &classSilver)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-5-2", "uid14-5-2", "10Gi", "volume14-5-2", v1.ClaimPending, &classSilver)),
|
||||
[]string{"Warning VolumeMismatch"},
|
||||
@ -705,7 +739,7 @@ func TestSyncAlphaBlockVolume(t *testing.T) {
|
||||
{
|
||||
// syncVolume bind when pv is prebound and volumeModes match
|
||||
"14-7 - bind when pv volume is prebound and volumeModes match",
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-7", "10Gi", "", "claim14-7", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-7", "10Gi", "", "claim14-7", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-7", "10Gi", "uid14-7", "claim14-7", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-7", "uid14-7", "10Gi", "", v1.ClaimPending, nil)),
|
||||
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-7", "uid14-7", "10Gi", "volume14-7", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
|
||||
@ -714,8 +748,8 @@ func TestSyncAlphaBlockVolume(t *testing.T) {
|
||||
{
|
||||
// failed syncVolume do not bind when pvc is prebound to pv with mismatching volumeModes
|
||||
"14-8 - do not bind when pvc is prebound to pv with mismatching volumeModes",
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-8", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-8", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-8", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-8", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-8", "uid14-8", "10Gi", "volume14-8", v1.ClaimPending, nil)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-8", "uid14-8", "10Gi", "volume14-8", v1.ClaimPending, nil)),
|
||||
[]string{"Warning VolumeMismatch"},
|
||||
@ -724,8 +758,8 @@ func TestSyncAlphaBlockVolume(t *testing.T) {
|
||||
{
|
||||
// failed syncVolume do not bind when pvc is prebound to pv with mismatching volumeModes
|
||||
"14-8-1 - do not bind when pvc is prebound to pv with mismatching volumeModes",
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-8-1", "10Gi", "", "claim14-8-1", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-8-1", "10Gi", "", "claim14-8-1", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-8-1", "10Gi", "", "claim14-8-1", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-8-1", "10Gi", "", "claim14-8-1", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-8-1", "uid14-8-1", "10Gi", "", v1.ClaimPending, nil)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-8-1", "uid14-8-1", "10Gi", "", v1.ClaimPending, nil)),
|
||||
[]string{"Normal FailedBinding"},
|
||||
@ -734,7 +768,7 @@ func TestSyncAlphaBlockVolume(t *testing.T) {
|
||||
{
|
||||
// syncVolume binds when pvc is prebound to pv with matching volumeModes block
|
||||
"14-9 - bind when pvc is prebound to pv with matching volumeModes block",
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-9", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-9", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-9", "10Gi", "uid14-9", "claim14-9", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
|
||||
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-9", "uid14-9", "10Gi", "volume14-9", v1.ClaimPending, nil)),
|
||||
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-9", "uid14-9", "10Gi", "volume14-9", v1.ClaimBound, nil, annBindCompleted)),
|
||||
@ -743,7 +777,7 @@ func TestSyncAlphaBlockVolume(t *testing.T) {
|
||||
{
|
||||
// syncVolume binds when pv is prebound to pvc with matching volumeModes block
|
||||
"14-10 - bind when pv is prebound to pvc with matching volumeModes block",
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-10", "10Gi", "", "claim14-10", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-10", "10Gi", "", "claim14-10", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-10", "10Gi", "uid14-10", "claim14-10", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-10", "uid14-10", "10Gi", "", v1.ClaimPending, nil)),
|
||||
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-10", "uid14-10", "10Gi", "volume14-10", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
|
||||
@ -752,7 +786,7 @@ func TestSyncAlphaBlockVolume(t *testing.T) {
|
||||
{
|
||||
// syncVolume binds when pvc is prebound to pv with matching volumeModes filesystem
|
||||
"14-11 - bind when pvc is prebound to pv with matching volumeModes filesystem",
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-11", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-11", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-11", "10Gi", "uid14-11", "claim14-11", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-11", "uid14-11", "10Gi", "volume14-11", v1.ClaimPending, nil)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-11", "uid14-11", "10Gi", "volume14-11", v1.ClaimBound, nil, annBindCompleted)),
|
||||
@ -761,7 +795,7 @@ func TestSyncAlphaBlockVolume(t *testing.T) {
|
||||
{
|
||||
// syncVolume binds when pv is prebound to pvc with matching volumeModes filesystem
|
||||
"14-12 - bind when pv is prebound to pvc with matching volumeModes filesystem",
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-12", "10Gi", "", "claim14-12", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-12", "10Gi", "", "claim14-12", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-12", "10Gi", "uid14-12", "claim14-12", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-12", "uid14-12", "10Gi", "", v1.ClaimPending, nil)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-12", "uid14-12", "10Gi", "volume14-12", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
|
||||
@ -770,8 +804,8 @@ func TestSyncAlphaBlockVolume(t *testing.T) {
|
||||
{
|
||||
// syncVolume output warning when pv is prebound to pvc with mismatching volumeMode
|
||||
"14-13 - output warning when pv is prebound to pvc with different volumeModes",
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-13", "10Gi", "uid14-13", "claim14-13", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-13", "10Gi", "uid14-13", "claim14-13", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-13", "10Gi", "uid14-13", "claim14-13", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-13", "10Gi", "uid14-13", "claim14-13", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
|
||||
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-13", "uid14-13", "10Gi", "", v1.ClaimPending, nil)),
|
||||
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-13", "uid14-13", "10Gi", "", v1.ClaimPending, nil)),
|
||||
[]string{"Warning VolumeMismatch"},
|
||||
@ -780,8 +814,8 @@ func TestSyncAlphaBlockVolume(t *testing.T) {
|
||||
{
|
||||
// syncVolume output warning when pv is prebound to pvc with mismatching volumeMode
|
||||
"14-13-1 - output warning when pv is prebound to pvc with different volumeModes",
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-13-1", "10Gi", "uid14-13-1", "claim14-13-1", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-13-1", "10Gi", "uid14-13-1", "claim14-13-1", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-13-1", "10Gi", "uid14-13-1", "claim14-13-1", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-13-1", "10Gi", "uid14-13-1", "claim14-13-1", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-13-1", "uid14-13-1", "10Gi", "", v1.ClaimPending, nil)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-13-1", "uid14-13-1", "10Gi", "", v1.ClaimPending, nil)),
|
||||
[]string{"Warning VolumeMismatch"},
|
||||
@ -790,8 +824,8 @@ func TestSyncAlphaBlockVolume(t *testing.T) {
|
||||
{
|
||||
// syncVolume waits for syncClaim without warning when pv is prebound to pvc with matching volumeMode block
|
||||
"14-14 - wait for synClaim without warning when pv is prebound to pvc with matching volumeModes block",
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-14", "10Gi", "uid14-14", "claim14-14", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-14", "10Gi", "uid14-14", "claim14-14", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-14", "10Gi", "uid14-14", "claim14-14", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-14", "10Gi", "uid14-14", "claim14-14", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
|
||||
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-14", "uid14-14", "10Gi", "", v1.ClaimPending, nil)),
|
||||
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-14", "uid14-14", "10Gi", "", v1.ClaimPending, nil)),
|
||||
noevents, noerrors, testSyncVolume,
|
||||
@ -799,20 +833,15 @@ func TestSyncAlphaBlockVolume(t *testing.T) {
|
||||
{
|
||||
// syncVolume waits for syncClaim without warning when pv is prebound to pvc with matching volumeMode file
|
||||
"14-14-1 - wait for synClaim without warning when pv is prebound to pvc with matching volumeModes file",
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-14-1", "10Gi", "uid14-14-1", "claim14-14-1", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-14-1", "10Gi", "uid14-14-1", "claim14-14-1", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-14-1", "10Gi", "uid14-14-1", "claim14-14-1", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-14-1", "10Gi", "uid14-14-1", "claim14-14-1", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-14-1", "uid14-14-1", "10Gi", "", v1.ClaimPending, nil)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-14-1", "uid14-14-1", "10Gi", "", v1.ClaimPending, nil)),
|
||||
noevents, noerrors, testSyncVolume,
|
||||
},
|
||||
}
|
||||
|
||||
err := utilfeature.DefaultFeatureGate.Set("BlockVolume=true")
|
||||
if err != nil {
|
||||
t.Errorf("Failed to enable feature gate for BlockVolume: %v", err)
|
||||
return
|
||||
}
|
||||
defer utilfeature.DefaultFeatureGate.Set("BlockVolume=false")
|
||||
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BlockVolume, true)()
|
||||
|
||||
runSyncTests(t, tests, []*storage.StorageClass{}, []*v1.Pod{})
|
||||
}
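Editor's note: a minimal sketch, not part of the vendored diff, of the feature-gate handling this hunk switches to. It assumes the utilfeaturetesting helper imported above; the wrapper name runWithBlockVolumeGate is hypothetical.

func runWithBlockVolumeGate(t *testing.T, enabled bool, body func()) {
	// SetFeatureGateDuringTest flips the gate and returns a restore func;
	// deferring the returned func resets the gate even if body fails the test.
	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BlockVolume, enabled)()
	body()
}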
76
vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/framework_test.go
generated
vendored
@ -27,7 +27,7 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/klog"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
storage "k8s.io/api/storage/v1"
|
||||
@ -38,6 +38,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/diff"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/client-go/informers"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
@ -48,6 +49,7 @@ import (
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
vol "k8s.io/kubernetes/pkg/volume"
|
||||
"k8s.io/kubernetes/pkg/volume/util/recyclerclient"
|
||||
)
|
||||
@ -159,7 +161,7 @@ func (r *volumeReactor) React(action core.Action) (handled bool, ret runtime.Obj
|
||||
r.lock.Lock()
|
||||
defer r.lock.Unlock()
|
||||
|
||||
glog.V(4).Infof("reactor got operation %q on %q", action.GetVerb(), action.GetResource())
|
||||
klog.V(4).Infof("reactor got operation %q on %q", action.GetVerb(), action.GetResource())
|
||||
|
||||
// Inject error when requested
|
||||
err = r.injectReactError(action)
|
||||
@ -179,11 +181,17 @@ func (r *volumeReactor) React(action core.Action) (handled bool, ret runtime.Obj
|
||||
return true, nil, fmt.Errorf("Cannot create volume %s: volume already exists", volume.Name)
|
||||
}
|
||||
|
||||
// mimic apiserver defaulting
|
||||
if volume.Spec.VolumeMode == nil && utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
|
||||
volume.Spec.VolumeMode = new(v1.PersistentVolumeMode)
|
||||
*volume.Spec.VolumeMode = v1.PersistentVolumeFilesystem
|
||||
}
|
||||
|
||||
// Store the updated object to appropriate places.
|
||||
r.volumes[volume.Name] = volume
|
||||
r.changedObjects = append(r.changedObjects, volume)
|
||||
r.changedSinceLastSync++
|
||||
glog.V(4).Infof("created volume %s", volume.Name)
|
||||
klog.V(4).Infof("created volume %s", volume.Name)
|
||||
return true, volume, nil
|
||||
|
||||
case action.Matches("update", "persistentvolumes"):
|
||||
@ -209,7 +217,7 @@ func (r *volumeReactor) React(action core.Action) (handled bool, ret runtime.Obj
|
||||
r.volumes[volume.Name] = volume
|
||||
r.changedObjects = append(r.changedObjects, volume)
|
||||
r.changedSinceLastSync++
|
||||
glog.V(4).Infof("saved updated volume %s", volume.Name)
|
||||
klog.V(4).Infof("saved updated volume %s", volume.Name)
|
||||
return true, volume, nil
|
||||
|
||||
case action.Matches("update", "persistentvolumeclaims"):
|
||||
@ -235,23 +243,23 @@ func (r *volumeReactor) React(action core.Action) (handled bool, ret runtime.Obj
|
||||
r.claims[claim.Name] = claim
|
||||
r.changedObjects = append(r.changedObjects, claim)
|
||||
r.changedSinceLastSync++
|
||||
glog.V(4).Infof("saved updated claim %s", claim.Name)
|
||||
klog.V(4).Infof("saved updated claim %s", claim.Name)
|
||||
return true, claim, nil
|
||||
|
||||
case action.Matches("get", "persistentvolumes"):
|
||||
name := action.(core.GetAction).GetName()
|
||||
volume, found := r.volumes[name]
|
||||
if found {
|
||||
glog.V(4).Infof("GetVolume: found %s", volume.Name)
|
||||
klog.V(4).Infof("GetVolume: found %s", volume.Name)
|
||||
return true, volume, nil
|
||||
} else {
|
||||
glog.V(4).Infof("GetVolume: volume %s not found", name)
|
||||
klog.V(4).Infof("GetVolume: volume %s not found", name)
|
||||
return true, nil, fmt.Errorf("Cannot find volume %s", name)
|
||||
}
|
||||
|
||||
case action.Matches("delete", "persistentvolumes"):
|
||||
name := action.(core.DeleteAction).GetName()
|
||||
glog.V(4).Infof("deleted volume %s", name)
|
||||
klog.V(4).Infof("deleted volume %s", name)
|
||||
_, found := r.volumes[name]
|
||||
if found {
|
||||
delete(r.volumes, name)
|
||||
@ -263,7 +271,7 @@ func (r *volumeReactor) React(action core.Action) (handled bool, ret runtime.Obj
|
||||
|
||||
case action.Matches("delete", "persistentvolumeclaims"):
|
||||
name := action.(core.DeleteAction).GetName()
|
||||
glog.V(4).Infof("deleted claim %s", name)
|
||||
klog.V(4).Infof("deleted claim %s", name)
|
||||
_, found := r.volumes[name]
|
||||
if found {
|
||||
delete(r.claims, name)
|
||||
@ -286,11 +294,11 @@ func (r *volumeReactor) injectReactError(action core.Action) error {
|
||||
}
|
||||
|
||||
for i, expected := range r.errors {
|
||||
glog.V(4).Infof("trying to match %q %q with %q %q", expected.verb, expected.resource, action.GetVerb(), action.GetResource())
|
||||
klog.V(4).Infof("trying to match %q %q with %q %q", expected.verb, expected.resource, action.GetVerb(), action.GetResource())
|
||||
if action.Matches(expected.verb, expected.resource) {
|
||||
// That's the action we're waiting for, remove it from injectedErrors
|
||||
r.errors = append(r.errors[:i], r.errors[i+1:]...)
|
||||
glog.V(4).Infof("reactor found matching error at index %d: %q %q, returning %v", i, expected.verb, expected.resource, expected.error)
|
||||
klog.V(4).Infof("reactor found matching error at index %d: %q %q, returning %v", i, expected.verb, expected.resource, expected.error)
|
||||
return expected.error
|
||||
}
|
||||
}
|
||||
@ -379,14 +387,14 @@ func checkEvents(t *testing.T, expectedEvents []string, ctrl *PersistentVolumeCo
|
||||
select {
|
||||
case event, ok := <-fakeRecorder.Events:
|
||||
if ok {
|
||||
glog.V(5).Infof("event recorder got event %s", event)
|
||||
klog.V(5).Infof("event recorder got event %s", event)
|
||||
gotEvents = append(gotEvents, event)
|
||||
} else {
|
||||
glog.V(5).Infof("event recorder finished")
|
||||
klog.V(5).Infof("event recorder finished")
|
||||
finished = true
|
||||
}
|
||||
case _, _ = <-timer.C:
|
||||
glog.V(5).Infof("event recorder timeout")
|
||||
klog.V(5).Infof("event recorder timeout")
|
||||
finished = true
|
||||
}
|
||||
}
|
||||
@ -426,10 +434,10 @@ func (r *volumeReactor) popChange() interface{} {
|
||||
switch obj.(type) {
|
||||
case *v1.PersistentVolume:
|
||||
vol, _ := obj.(*v1.PersistentVolume)
|
||||
glog.V(4).Infof("reactor queue: %s", vol.Name)
|
||||
klog.V(4).Infof("reactor queue: %s", vol.Name)
|
||||
case *v1.PersistentVolumeClaim:
|
||||
claim, _ := obj.(*v1.PersistentVolumeClaim)
|
||||
glog.V(4).Infof("reactor queue: %s", claim.Name)
|
||||
klog.V(4).Infof("reactor queue: %s", claim.Name)
|
||||
}
|
||||
}
|
||||
|
||||
@ -630,6 +638,7 @@ func newTestController(kubeClient clientset.Interface, informerFactory informers
|
||||
|
||||
// newVolume returns a new volume with given attributes
|
||||
func newVolume(name, capacity, boundToClaimUID, boundToClaimName string, phase v1.PersistentVolumePhase, reclaimPolicy v1.PersistentVolumeReclaimPolicy, class string, annotations ...string) *v1.PersistentVolume {
|
||||
fs := v1.PersistentVolumeFilesystem
|
||||
volume := v1.PersistentVolume{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
@ -645,6 +654,7 @@ func newVolume(name, capacity, boundToClaimUID, boundToClaimName string, phase v
|
||||
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce, v1.ReadOnlyMany},
|
||||
PersistentVolumeReclaimPolicy: reclaimPolicy,
|
||||
StorageClassName: class,
|
||||
VolumeMode: &fs,
|
||||
},
|
||||
Status: v1.PersistentVolumeStatus{
|
||||
Phase: phase,
|
||||
@ -740,6 +750,7 @@ func newVolumeArray(name, capacity, boundToClaimUID, boundToClaimName string, ph
|
||||
|
||||
// newClaim returns a new claim with given attributes
|
||||
func newClaim(name, claimUID, capacity, boundToVolume string, phase v1.PersistentVolumeClaimPhase, class *string, annotations ...string) *v1.PersistentVolumeClaim {
|
||||
fs := v1.PersistentVolumeFilesystem
|
||||
claim := v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
@ -756,6 +767,7 @@ func newClaim(name, claimUID, capacity, boundToVolume string, phase v1.Persisten
|
||||
},
|
||||
VolumeName: boundToVolume,
|
||||
StorageClassName: class,
|
||||
VolumeMode: &fs,
|
||||
},
|
||||
Status: v1.PersistentVolumeClaimStatus{
|
||||
Phase: phase,
|
||||
@ -898,7 +910,7 @@ func wrapTestWithInjectedOperation(toWrap testCall, injectBeforeOperation func(c
|
||||
// Inject a hook before async operation starts
|
||||
ctrl.preOperationHook = func(operationName string) {
|
||||
// Inside the hook, run the function to inject
|
||||
glog.V(4).Infof("reactor: scheduleOperation reached, injecting call")
|
||||
klog.V(4).Infof("reactor: scheduleOperation reached, injecting call")
|
||||
injectBeforeOperation(ctrl, reactor)
|
||||
}
|
||||
|
||||
@ -945,7 +957,7 @@ func evaluateTestResults(ctrl *PersistentVolumeController, reactor *volumeReacto
|
||||
// 3. Compare resulting volumes and claims with expected volumes and claims.
|
||||
func runSyncTests(t *testing.T, tests []controllerTest, storageClasses []*storage.StorageClass, pods []*v1.Pod) {
|
||||
for _, test := range tests {
|
||||
glog.V(4).Infof("starting test %q", test.name)
|
||||
klog.V(4).Infof("starting test %q", test.name)
|
||||
|
||||
// Initialize the controller
|
||||
client := &fake.Clientset{}
|
||||
@ -1008,7 +1020,7 @@ func runSyncTests(t *testing.T, tests []controllerTest, storageClasses []*storag
|
||||
// Some limit of calls is enforced to prevent endless loops.
|
||||
func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*storage.StorageClass, defaultStorageClass string) {
|
||||
for _, test := range tests {
|
||||
glog.V(4).Infof("starting multisync test %q", test.name)
|
||||
klog.V(4).Infof("starting multisync test %q", test.name)
|
||||
|
||||
// Initialize the controller
|
||||
client := &fake.Clientset{}
|
||||
@ -1046,7 +1058,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s
|
||||
counter := 0
|
||||
for {
|
||||
counter++
|
||||
glog.V(4).Infof("test %q: iteration %d", test.name, counter)
|
||||
klog.V(4).Infof("test %q: iteration %d", test.name, counter)
|
||||
|
||||
if counter > 100 {
|
||||
t.Errorf("Test %q failed: too many iterations", test.name)
|
||||
@ -1064,7 +1076,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s
|
||||
// Simulate "periodic sync" of everything (until it produces
|
||||
// no changes).
|
||||
firstSync = false
|
||||
glog.V(4).Infof("test %q: simulating periodical sync of all claims and volumes", test.name)
|
||||
klog.V(4).Infof("test %q: simulating periodical sync of all claims and volumes", test.name)
|
||||
reactor.syncAll()
|
||||
} else {
|
||||
// Last sync did not produce any updates, the test reached
|
||||
@ -1085,7 +1097,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s
|
||||
if err != nil {
|
||||
if err == versionConflictError {
|
||||
// Ignore version errors
|
||||
glog.V(4).Infof("test intentionaly ignores version error.")
|
||||
klog.V(4).Infof("test intentionaly ignores version error.")
|
||||
} else {
|
||||
t.Errorf("Error calling syncClaim: %v", err)
|
||||
// Finish the loop on the first error
|
||||
@ -1102,7 +1114,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s
|
||||
if err != nil {
|
||||
if err == versionConflictError {
|
||||
// Ignore version errors
|
||||
glog.V(4).Infof("test intentionaly ignores version error.")
|
||||
klog.V(4).Infof("test intentionaly ignores version error.")
|
||||
} else {
|
||||
t.Errorf("Error calling syncVolume: %v", err)
|
||||
// Finish the loop on the first error
|
||||
@ -1114,7 +1126,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s
|
||||
}
|
||||
}
|
||||
evaluateTestResults(ctrl, reactor, test, t)
|
||||
glog.V(4).Infof("test %q finished after %d iterations", test.name, counter)
|
||||
klog.V(4).Infof("test %q finished after %d iterations", test.name, counter)
|
||||
}
|
||||
}
|
||||
|
||||
@ -1185,7 +1197,7 @@ func (plugin *mockVolumePlugin) NewUnmounter(name string, podUID types.UID) (vol
|
||||
func (plugin *mockVolumePlugin) NewProvisioner(options vol.VolumeOptions) (vol.Provisioner, error) {
|
||||
if len(plugin.provisionCalls) > 0 {
|
||||
// mockVolumePlugin directly implements Provisioner interface
|
||||
glog.V(4).Infof("mock plugin NewProvisioner called, returning mock provisioner")
|
||||
klog.V(4).Infof("mock plugin NewProvisioner called, returning mock provisioner")
|
||||
plugin.provisionOptions = options
|
||||
return plugin, nil
|
||||
} else {
|
||||
@ -1201,7 +1213,7 @@ func (plugin *mockVolumePlugin) Provision(selectedNode *v1.Node, allowedTopologi
|
||||
var pv *v1.PersistentVolume
|
||||
call := plugin.provisionCalls[plugin.provisionCallCounter]
|
||||
if !reflect.DeepEqual(call.expectedParameters, plugin.provisionOptions.Parameters) {
|
||||
glog.Errorf("invalid provisioner call, expected options: %+v, got: %+v", call.expectedParameters, plugin.provisionOptions.Parameters)
|
||||
klog.Errorf("invalid provisioner call, expected options: %+v, got: %+v", call.expectedParameters, plugin.provisionOptions.Parameters)
|
||||
return nil, fmt.Errorf("Mock plugin error: invalid provisioner call")
|
||||
}
|
||||
if call.ret == nil {
|
||||
@ -1222,11 +1234,15 @@ func (plugin *mockVolumePlugin) Provision(selectedNode *v1.Node, allowedTopologi
|
||||
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{},
|
||||
},
|
||||
},
|
||||
Status: v1.PersistentVolumeStatus{
|
||||
Phase: v1.VolumeAvailable,
|
||||
},
|
||||
}
|
||||
pv.Spec.VolumeMode = plugin.provisionOptions.PVC.Spec.VolumeMode
|
||||
}
|
||||
|
||||
plugin.provisionCallCounter++
|
||||
glog.V(4).Infof("mock plugin Provision call nr. %d, returning %v: %v", plugin.provisionCallCounter, pv, call.ret)
|
||||
klog.V(4).Infof("mock plugin Provision call nr. %d, returning %v: %v", plugin.provisionCallCounter, pv, call.ret)
|
||||
return pv, call.ret
|
||||
}
|
||||
|
||||
@ -1235,7 +1251,7 @@ func (plugin *mockVolumePlugin) Provision(selectedNode *v1.Node, allowedTopologi
|
||||
func (plugin *mockVolumePlugin) NewDeleter(spec *vol.Spec) (vol.Deleter, error) {
|
||||
if len(plugin.deleteCalls) > 0 {
|
||||
// mockVolumePlugin directly implements Deleter interface
|
||||
glog.V(4).Infof("mock plugin NewDeleter called, returning mock deleter")
|
||||
klog.V(4).Infof("mock plugin NewDeleter called, returning mock deleter")
|
||||
return plugin, nil
|
||||
} else {
|
||||
return nil, fmt.Errorf("Mock plugin error: no deleteCalls configured")
|
||||
@ -1248,7 +1264,7 @@ func (plugin *mockVolumePlugin) Delete() error {
|
||||
}
|
||||
ret := plugin.deleteCalls[plugin.deleteCallCounter]
|
||||
plugin.deleteCallCounter++
|
||||
glog.V(4).Infof("mock plugin Delete call nr. %d, returning %v", plugin.deleteCallCounter, ret)
|
||||
klog.V(4).Infof("mock plugin Delete call nr. %d, returning %v", plugin.deleteCallCounter, ret)
|
||||
return ret
|
||||
}
|
||||
|
||||
@ -1274,6 +1290,6 @@ func (plugin *mockVolumePlugin) Recycle(pvName string, spec *vol.Spec, eventReco
|
||||
}
|
||||
ret := plugin.recycleCalls[plugin.recycleCallCounter]
|
||||
plugin.recycleCallCounter++
|
||||
glog.V(4).Infof("mock plugin Recycle call nr. %d, returning %v", plugin.recycleCallCounter, ret)
|
||||
klog.V(4).Infof("mock plugin Recycle call nr. %d, returning %v", plugin.recycleCallCounter, ret)
|
||||
return ret
|
||||
}
48
vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/index.go
generated
vendored
@ -158,13 +158,13 @@ func findMatchingVolume(
|
||||
|
||||
volumeQty := volume.Spec.Capacity[v1.ResourceStorage]
|
||||
|
||||
// check if volumeModes do not match (Alpha and feature gate protected)
|
||||
isMisMatch, err := checkVolumeModeMisMatches(&claim.Spec, &volume.Spec)
|
||||
// check if volumeModes do not match (feature gate protected)
|
||||
isMismatch, err := checkVolumeModeMismatches(&claim.Spec, &volume.Spec)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error checking if volumeMode was a mismatch: %v", err)
|
||||
}
|
||||
// filter out mismatching volumeModes
|
||||
if isMisMatch {
|
||||
if isMismatch {
|
||||
continue
|
||||
}
|
||||
|
||||
@ -211,11 +211,18 @@ func findMatchingVolume(
|
||||
}
|
||||
|
||||
// filter out:
|
||||
// - volumes in non-available phase
|
||||
// - volumes bound to another claim
|
||||
// - volumes whose labels don't match the claim's selector, if specified
|
||||
// - volumes in Class that is not requested
|
||||
// - volumes whose NodeAffinity does not match the node
|
||||
if volume.Spec.ClaimRef != nil {
|
||||
if volume.Status.Phase != v1.VolumeAvailable {
|
||||
// We ignore volumes in non-available phase, because volumes that
|
||||
// satisfy matching criteria will be updated to available; binding
|
||||
// them now has a high chance of encountering unnecessary failures
|
||||
// due to API conflicts.
|
||||
continue
|
||||
} else if volume.Spec.ClaimRef != nil {
|
||||
continue
|
||||
} else if selector != nil && !selector.Matches(labels.Set(volume.Labels)) {
|
||||
continue
|
||||
@ -251,25 +258,24 @@ func findMatchingVolume(
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// checkVolumeModeMatches is a convenience method that checks volumeMode for PersistentVolume
// and PersistentVolumeClaims along with making sure that the Alpha feature gate BlockVolume is
// enabled.
// This is Alpha and could change in the future.
func checkVolumeModeMisMatches(pvcSpec *v1.PersistentVolumeClaimSpec, pvSpec *v1.PersistentVolumeSpec) (bool, error) {
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
if pvSpec.VolumeMode != nil && pvcSpec.VolumeMode != nil {
requestedVolumeMode := *pvcSpec.VolumeMode
pvVolumeMode := *pvSpec.VolumeMode
return requestedVolumeMode != pvVolumeMode, nil
} else {
// This also should retrun an error, this means that
// the defaulting has failed.
return true, fmt.Errorf("api defaulting for volumeMode failed")
}
} else {
// feature gate is disabled
// checkVolumeModeMismatches is a convenience method that checks volumeMode for PersistentVolume
// and PersistentVolumeClaims
func checkVolumeModeMismatches(pvcSpec *v1.PersistentVolumeClaimSpec, pvSpec *v1.PersistentVolumeSpec) (bool, error) {
if !utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
return false, nil
}

// In HA upgrades, we cannot guarantee that the apiserver is on a version >= controller-manager.
// So we default a nil volumeMode to filesystem
requestedVolumeMode := v1.PersistentVolumeFilesystem
if pvcSpec.VolumeMode != nil {
requestedVolumeMode = *pvcSpec.VolumeMode
}
pvVolumeMode := v1.PersistentVolumeFilesystem
if pvSpec.VolumeMode != nil {
pvVolumeMode = *pvSpec.VolumeMode
}
return requestedVolumeMode != pvVolumeMode, nil
}
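Editor's note: a brief usage sketch, not part of the vendored file, of the rewritten helper above. With the BlockVolume gate enabled, a nil volumeMode on either side is treated as Filesystem, so only a genuine Block/Filesystem disagreement counts as a mismatch; the variable names are illustrative.

block := v1.PersistentVolumeBlock
pvcSpec := v1.PersistentVolumeClaimSpec{VolumeMode: &block} // claim asks for a raw block device
pvSpec := v1.PersistentVolumeSpec{}                         // PV mode left nil, defaults to Filesystem
if mismatch, err := checkVolumeModeMismatches(&pvcSpec, &pvSpec); err == nil && mismatch {
	// findMatchingVolume skips this PV for the claim
}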
|
||||
|
||||
// findBestMatchForClaim is a convenience method that finds a volume by the claim's AccessModes and requests for Storage
461
vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/index_test.go
generated
vendored
@ -24,13 +24,16 @@ import (
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
ref "k8s.io/client-go/tools/reference"
|
||||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/kubernetes/pkg/volume/util"
|
||||
)
|
||||
|
||||
func makePVC(size string, modfn func(*v1.PersistentVolumeClaim)) *v1.PersistentVolumeClaim {
|
||||
fs := v1.PersistentVolumeFilesystem
|
||||
pvc := v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "claim01",
|
||||
@ -43,6 +46,7 @@ func makePVC(size string, modfn func(*v1.PersistentVolumeClaim)) *v1.PersistentV
|
||||
v1.ResourceName(v1.ResourceStorage): resource.MustParse(size),
|
||||
},
|
||||
},
|
||||
VolumeMode: &fs,
|
||||
},
|
||||
}
|
||||
if modfn != nil {
|
||||
@ -195,6 +199,7 @@ func TestMatchVolume(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestMatchingWithBoundVolumes(t *testing.T) {
|
||||
fs := v1.PersistentVolumeFilesystem
|
||||
volumeIndex := newPersistentVolumeOrderedIndex()
|
||||
// two similar volumes, one is bound
|
||||
pv1 := &v1.PersistentVolume{
|
||||
@ -211,7 +216,11 @@ func TestMatchingWithBoundVolumes(t *testing.T) {
|
||||
},
|
||||
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce, v1.ReadOnlyMany},
|
||||
// this one we're pretending is already bound
|
||||
ClaimRef: &v1.ObjectReference{UID: "abc123"},
|
||||
ClaimRef: &v1.ObjectReference{UID: "abc123"},
|
||||
VolumeMode: &fs,
|
||||
},
|
||||
Status: v1.PersistentVolumeStatus{
|
||||
Phase: v1.VolumeBound,
|
||||
},
|
||||
}
|
||||
|
||||
@ -228,6 +237,10 @@ func TestMatchingWithBoundVolumes(t *testing.T) {
|
||||
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{},
|
||||
},
|
||||
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce, v1.ReadOnlyMany},
|
||||
VolumeMode: &fs,
|
||||
},
|
||||
Status: v1.PersistentVolumeStatus{
|
||||
Phase: v1.VolumeAvailable,
|
||||
},
|
||||
}
|
||||
|
||||
@ -246,6 +259,7 @@ func TestMatchingWithBoundVolumes(t *testing.T) {
|
||||
v1.ResourceName(v1.ResourceStorage): resource.MustParse("1G"),
|
||||
},
|
||||
},
|
||||
VolumeMode: &fs,
|
||||
},
|
||||
}
|
||||
|
||||
@ -320,6 +334,7 @@ func TestAllPossibleAccessModes(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestFindingVolumeWithDifferentAccessModes(t *testing.T) {
|
||||
fs := v1.PersistentVolumeFilesystem
|
||||
gce := &v1.PersistentVolume{
|
||||
ObjectMeta: metav1.ObjectMeta{UID: "001", Name: "gce"},
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
@ -329,6 +344,10 @@ func TestFindingVolumeWithDifferentAccessModes(t *testing.T) {
|
||||
v1.ReadWriteOnce,
|
||||
v1.ReadOnlyMany,
|
||||
},
|
||||
VolumeMode: &fs,
|
||||
},
|
||||
Status: v1.PersistentVolumeStatus{
|
||||
Phase: v1.VolumeAvailable,
|
||||
},
|
||||
}
|
||||
|
||||
@ -340,6 +359,10 @@ func TestFindingVolumeWithDifferentAccessModes(t *testing.T) {
|
||||
AccessModes: []v1.PersistentVolumeAccessMode{
|
||||
v1.ReadWriteOnce,
|
||||
},
|
||||
VolumeMode: &fs,
|
||||
},
|
||||
Status: v1.PersistentVolumeStatus{
|
||||
Phase: v1.VolumeAvailable,
|
||||
},
|
||||
}
|
||||
|
||||
@ -353,6 +376,10 @@ func TestFindingVolumeWithDifferentAccessModes(t *testing.T) {
|
||||
v1.ReadOnlyMany,
|
||||
v1.ReadWriteMany,
|
||||
},
|
||||
VolumeMode: &fs,
|
||||
},
|
||||
Status: v1.PersistentVolumeStatus{
|
||||
Phase: v1.VolumeAvailable,
|
||||
},
|
||||
}
|
||||
|
||||
@ -364,6 +391,7 @@ func TestFindingVolumeWithDifferentAccessModes(t *testing.T) {
|
||||
Spec: v1.PersistentVolumeClaimSpec{
|
||||
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
|
||||
Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("1G")}},
|
||||
VolumeMode: &fs,
|
||||
},
|
||||
}
|
||||
|
||||
@ -423,6 +451,7 @@ func TestFindingVolumeWithDifferentAccessModes(t *testing.T) {
|
||||
}
|
||||
|
||||
func createTestVolumes() []*v1.PersistentVolume {
|
||||
fs := v1.PersistentVolumeFilesystem
|
||||
// these volumes are deliberately out-of-order to test indexing and sorting
|
||||
return []*v1.PersistentVolume{
|
||||
{
|
||||
@ -441,6 +470,10 @@ func createTestVolumes() []*v1.PersistentVolume {
|
||||
v1.ReadWriteOnce,
|
||||
v1.ReadOnlyMany,
|
||||
},
|
||||
VolumeMode: &fs,
|
||||
},
|
||||
Status: v1.PersistentVolumeStatus{
|
||||
Phase: v1.VolumeAvailable,
|
||||
},
|
||||
},
|
||||
{
|
||||
@ -460,7 +493,11 @@ func createTestVolumes() []*v1.PersistentVolume {
|
||||
v1.ReadOnlyMany,
|
||||
},
|
||||
// this one we're pretending is already bound
|
||||
ClaimRef: &v1.ObjectReference{UID: "def456"},
|
||||
ClaimRef: &v1.ObjectReference{UID: "def456"},
|
||||
VolumeMode: &fs,
|
||||
},
|
||||
Status: v1.PersistentVolumeStatus{
|
||||
Phase: v1.VolumeBound,
|
||||
},
|
||||
},
|
||||
{
|
||||
@ -473,13 +510,17 @@ func createTestVolumes() []*v1.PersistentVolume {
|
||||
v1.ResourceName(v1.ResourceStorage): resource.MustParse("5G"),
|
||||
},
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
Glusterfs: &v1.GlusterfsVolumeSource{},
|
||||
Glusterfs: &v1.GlusterfsPersistentVolumeSource{},
|
||||
},
|
||||
AccessModes: []v1.PersistentVolumeAccessMode{
|
||||
v1.ReadWriteOnce,
|
||||
v1.ReadOnlyMany,
|
||||
v1.ReadWriteMany,
|
||||
},
|
||||
VolumeMode: &fs,
|
||||
},
|
||||
Status: v1.PersistentVolumeStatus{
|
||||
Phase: v1.VolumeAvailable,
|
||||
},
|
||||
},
|
||||
{
|
||||
@ -499,7 +540,11 @@ func createTestVolumes() []*v1.PersistentVolume {
|
||||
v1.ReadOnlyMany,
|
||||
},
|
||||
// this one we're pretending is already bound
|
||||
ClaimRef: &v1.ObjectReference{UID: "abc123"},
|
||||
ClaimRef: &v1.ObjectReference{UID: "abc123"},
|
||||
VolumeMode: &fs,
|
||||
},
|
||||
Status: v1.PersistentVolumeStatus{
|
||||
Phase: v1.VolumeBound,
|
||||
},
|
||||
},
|
||||
{
|
||||
@ -512,13 +557,17 @@ func createTestVolumes() []*v1.PersistentVolume {
|
||||
v1.ResourceName(v1.ResourceStorage): resource.MustParse("10G"),
|
||||
},
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
Glusterfs: &v1.GlusterfsVolumeSource{},
|
||||
Glusterfs: &v1.GlusterfsPersistentVolumeSource{},
|
||||
},
|
||||
AccessModes: []v1.PersistentVolumeAccessMode{
|
||||
v1.ReadWriteOnce,
|
||||
v1.ReadOnlyMany,
|
||||
v1.ReadWriteMany,
|
||||
},
|
||||
VolumeMode: &fs,
|
||||
},
|
||||
Status: v1.PersistentVolumeStatus{
|
||||
Phase: v1.VolumeAvailable,
|
||||
},
|
||||
},
|
||||
{
|
||||
@ -537,6 +586,10 @@ func createTestVolumes() []*v1.PersistentVolume {
|
||||
v1.ReadWriteOnce,
|
||||
v1.ReadOnlyMany,
|
||||
},
|
||||
VolumeMode: &fs,
|
||||
},
|
||||
Status: v1.PersistentVolumeStatus{
|
||||
Phase: v1.VolumeAvailable,
|
||||
},
|
||||
},
|
||||
{
|
||||
@ -549,13 +602,17 @@ func createTestVolumes() []*v1.PersistentVolume {
|
||||
v1.ResourceName(v1.ResourceStorage): resource.MustParse("1G"),
|
||||
},
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
Glusterfs: &v1.GlusterfsVolumeSource{},
|
||||
Glusterfs: &v1.GlusterfsPersistentVolumeSource{},
|
||||
},
|
||||
AccessModes: []v1.PersistentVolumeAccessMode{
|
||||
v1.ReadWriteOnce,
|
||||
v1.ReadOnlyMany,
|
||||
v1.ReadWriteMany,
|
||||
},
|
||||
VolumeMode: &fs,
|
||||
},
|
||||
Status: v1.PersistentVolumeStatus{
|
||||
Phase: v1.VolumeAvailable,
|
||||
},
|
||||
},
|
||||
{
|
||||
@ -576,6 +633,10 @@ func createTestVolumes() []*v1.PersistentVolume {
|
||||
AccessModes: []v1.PersistentVolumeAccessMode{
|
||||
v1.ReadWriteOnce,
|
||||
},
|
||||
VolumeMode: &fs,
|
||||
},
|
||||
Status: v1.PersistentVolumeStatus{
|
||||
Phase: v1.VolumeAvailable,
|
||||
},
|
||||
},
|
||||
{
|
||||
@ -597,6 +658,10 @@ func createTestVolumes() []*v1.PersistentVolume {
|
||||
v1.ReadWriteOnce,
|
||||
},
|
||||
StorageClassName: classSilver,
|
||||
VolumeMode: &fs,
|
||||
},
|
||||
Status: v1.PersistentVolumeStatus{
|
||||
Phase: v1.VolumeAvailable,
|
||||
},
|
||||
},
|
||||
{
|
||||
@ -615,6 +680,10 @@ func createTestVolumes() []*v1.PersistentVolume {
|
||||
v1.ReadWriteOnce,
|
||||
},
|
||||
StorageClassName: classSilver,
|
||||
VolumeMode: &fs,
|
||||
},
|
||||
Status: v1.PersistentVolumeStatus{
|
||||
Phase: v1.VolumeAvailable,
|
||||
},
|
||||
},
|
||||
{
|
||||
@ -633,6 +702,10 @@ func createTestVolumes() []*v1.PersistentVolume {
|
||||
v1.ReadWriteOnce,
|
||||
},
|
||||
StorageClassName: classGold,
|
||||
VolumeMode: &fs,
|
||||
},
|
||||
Status: v1.PersistentVolumeStatus{
|
||||
Phase: v1.VolumeAvailable,
|
||||
},
|
||||
},
|
||||
{
|
||||
@ -653,6 +726,10 @@ func createTestVolumes() []*v1.PersistentVolume {
|
||||
v1.ReadWriteMany,
|
||||
},
|
||||
StorageClassName: classLarge,
|
||||
VolumeMode: &fs,
|
||||
},
|
||||
Status: v1.PersistentVolumeStatus{
|
||||
Phase: v1.VolumeAvailable,
|
||||
},
|
||||
},
|
||||
{
|
||||
@ -673,6 +750,10 @@ func createTestVolumes() []*v1.PersistentVolume {
|
||||
v1.ReadWriteMany,
|
||||
},
|
||||
StorageClassName: classLarge,
|
||||
VolumeMode: &fs,
|
||||
},
|
||||
Status: v1.PersistentVolumeStatus{
|
||||
Phase: v1.VolumeAvailable,
|
||||
},
|
||||
},
|
||||
{
|
||||
@ -693,6 +774,10 @@ func createTestVolumes() []*v1.PersistentVolume {
|
||||
},
|
||||
StorageClassName: classWait,
|
||||
NodeAffinity: getVolumeNodeAffinity("key1", "value1"),
|
||||
VolumeMode: &fs,
|
||||
},
|
||||
Status: v1.PersistentVolumeStatus{
|
||||
Phase: v1.VolumeAvailable,
|
||||
},
|
||||
},
|
||||
{
|
||||
@ -713,6 +798,10 @@ func createTestVolumes() []*v1.PersistentVolume {
|
||||
},
|
||||
StorageClassName: classWait,
|
||||
NodeAffinity: getVolumeNodeAffinity("key1", "value1"),
|
||||
VolumeMode: &fs,
|
||||
},
|
||||
Status: v1.PersistentVolumeStatus{
|
||||
Phase: v1.VolumeAvailable,
|
||||
},
|
||||
},
|
||||
{
|
||||
@ -734,6 +823,10 @@ func createTestVolumes() []*v1.PersistentVolume {
|
||||
StorageClassName: classWait,
|
||||
ClaimRef: &v1.ObjectReference{Name: "claim02", Namespace: "myns"},
|
||||
NodeAffinity: getVolumeNodeAffinity("key1", "value1"),
|
||||
VolumeMode: &fs,
|
||||
},
|
||||
Status: v1.PersistentVolumeStatus{
|
||||
Phase: v1.VolumeAvailable,
|
||||
},
|
||||
},
|
||||
{
|
||||
@ -754,12 +847,110 @@ func createTestVolumes() []*v1.PersistentVolume {
|
||||
},
|
||||
StorageClassName: classWait,
|
||||
NodeAffinity: getVolumeNodeAffinity("key1", "value3"),
|
||||
VolumeMode: &fs,
|
||||
},
|
||||
Status: v1.PersistentVolumeStatus{
|
||||
Phase: v1.VolumeAvailable,
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "affinity-pv4-pending",
|
||||
Name: "affinity004-pending",
|
||||
},
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceName(v1.ResourceStorage): resource.MustParse("200G"),
|
||||
},
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
Local: &v1.LocalVolumeSource{},
|
||||
},
|
||||
AccessModes: []v1.PersistentVolumeAccessMode{
|
||||
v1.ReadWriteOnce,
|
||||
v1.ReadOnlyMany,
|
||||
},
|
||||
StorageClassName: classWait,
|
||||
NodeAffinity: getVolumeNodeAffinity("key1", "value4"),
|
||||
VolumeMode: &fs,
|
||||
},
|
||||
Status: v1.PersistentVolumeStatus{
|
||||
Phase: v1.VolumePending,
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "affinity-pv4-failed",
|
||||
Name: "affinity004-failed",
|
||||
},
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceName(v1.ResourceStorage): resource.MustParse("200G"),
|
||||
},
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
Local: &v1.LocalVolumeSource{},
|
||||
},
|
||||
AccessModes: []v1.PersistentVolumeAccessMode{
|
||||
v1.ReadWriteOnce,
|
||||
v1.ReadOnlyMany,
|
||||
},
|
||||
StorageClassName: classWait,
|
||||
NodeAffinity: getVolumeNodeAffinity("key1", "value4"),
|
||||
VolumeMode: &fs,
|
||||
},
|
||||
Status: v1.PersistentVolumeStatus{
|
||||
Phase: v1.VolumeFailed,
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "affinity-pv4-released",
|
||||
Name: "affinity004-released",
|
||||
},
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceName(v1.ResourceStorage): resource.MustParse("200G"),
|
||||
},
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
Local: &v1.LocalVolumeSource{},
|
||||
},
|
||||
AccessModes: []v1.PersistentVolumeAccessMode{
|
||||
v1.ReadWriteOnce,
|
||||
v1.ReadOnlyMany,
|
||||
},
|
||||
StorageClassName: classWait,
|
||||
NodeAffinity: getVolumeNodeAffinity("key1", "value4"),
|
||||
VolumeMode: &fs,
|
||||
},
|
||||
Status: v1.PersistentVolumeStatus{
|
||||
Phase: v1.VolumeReleased,
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "affinity-pv4-empty",
|
||||
Name: "affinity004-empty",
|
||||
},
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceName(v1.ResourceStorage): resource.MustParse("200G"),
|
||||
},
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
Local: &v1.LocalVolumeSource{},
|
||||
},
|
||||
AccessModes: []v1.PersistentVolumeAccessMode{
|
||||
v1.ReadWriteOnce,
|
||||
v1.ReadOnlyMany,
|
||||
},
|
||||
StorageClassName: classWait,
|
||||
NodeAffinity: getVolumeNodeAffinity("key1", "value4"),
|
||||
VolumeMode: &fs,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func testVolume(name, size string) *v1.PersistentVolume {
|
||||
fs := v1.PersistentVolumeFilesystem
|
||||
return &v1.PersistentVolume{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
@ -769,6 +960,10 @@ func testVolume(name, size string) *v1.PersistentVolume {
|
||||
Capacity: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse(size)},
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{}},
|
||||
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
|
||||
VolumeMode: &fs,
|
||||
},
|
||||
Status: v1.PersistentVolumeStatus{
|
||||
Phase: v1.VolumeAvailable,
|
||||
},
|
||||
}
|
||||
}
|
||||
@ -811,6 +1006,9 @@ func createVolumeModeBlockTestVolume() *v1.PersistentVolume {
|
||||
},
|
||||
VolumeMode: &blockMode,
|
||||
},
|
||||
Status: v1.PersistentVolumeStatus{
|
||||
Phase: v1.VolumeAvailable,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@ -834,6 +1032,32 @@ func createVolumeModeFilesystemTestVolume() *v1.PersistentVolume {
|
||||
},
|
||||
VolumeMode: &filesystemMode,
|
||||
},
|
||||
Status: v1.PersistentVolumeStatus{
|
||||
Phase: v1.VolumeAvailable,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func createVolumeModeNilTestVolume() *v1.PersistentVolume {
|
||||
return &v1.PersistentVolume{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "local-1",
|
||||
Name: "nil-mode",
|
||||
},
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceName(v1.ResourceStorage): resource.MustParse("10G"),
|
||||
},
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
Local: &v1.LocalVolumeSource{},
|
||||
},
|
||||
AccessModes: []v1.PersistentVolumeAccessMode{
|
||||
v1.ReadWriteOnce,
|
||||
},
|
||||
},
|
||||
Status: v1.PersistentVolumeStatus{
|
||||
Phase: v1.VolumeAvailable,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@ -843,28 +1067,7 @@ func createTestVolOrderedIndex(pv *v1.PersistentVolume) persistentVolumeOrderedI
|
||||
return volFile
|
||||
}
|
||||
|
||||
func toggleFeature(toggleFlag bool, featureName string, t *testing.T) {
|
||||
var valueStr string
|
||||
if toggleFlag {
|
||||
// Enable feature
|
||||
valueStr = featureName + "=true"
|
||||
err := utilfeature.DefaultFeatureGate.Set(valueStr)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to enable feature gate for %s: %v", featureName, err)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
// Disable feature
|
||||
valueStr = featureName + "=false"
|
||||
err := utilfeature.DefaultFeatureGate.Set(valueStr)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to disable feature gate for %s: %v", featureName, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAlphaVolumeModeCheck(t *testing.T) {
|
||||
func TestVolumeModeCheck(t *testing.T) {
|
||||
|
||||
blockMode := v1.PersistentVolumeBlock
|
||||
filesystemMode := v1.PersistentVolumeFilesystem
|
||||
@ -872,55 +1075,85 @@ func TestAlphaVolumeModeCheck(t *testing.T) {
|
||||
// If feature gate is enabled, VolumeMode will always be defaulted
|
||||
// If feature gate is disabled, VolumeMode is dropped by API and ignored
|
||||
scenarios := map[string]struct {
|
||||
isExpectedMisMatch bool
|
||||
isExpectedMismatch bool
|
||||
vol *v1.PersistentVolume
|
||||
pvc *v1.PersistentVolumeClaim
|
||||
enableBlock bool
|
||||
}{
|
||||
"feature enabled - pvc block and pv filesystem": {
|
||||
isExpectedMisMatch: true,
|
||||
isExpectedMismatch: true,
|
||||
vol: createVolumeModeFilesystemTestVolume(),
|
||||
pvc: makeVolumeModePVC("8G", &blockMode, nil),
|
||||
enableBlock: true,
|
||||
},
|
||||
"feature enabled - pvc filesystem and pv block": {
|
||||
isExpectedMisMatch: true,
|
||||
isExpectedMismatch: true,
|
||||
vol: createVolumeModeBlockTestVolume(),
|
||||
pvc: makeVolumeModePVC("8G", &filesystemMode, nil),
|
||||
enableBlock: true,
|
||||
},
|
||||
"feature enabled - pvc block and pv block": {
|
||||
isExpectedMisMatch: false,
|
||||
isExpectedMismatch: false,
|
||||
vol: createVolumeModeBlockTestVolume(),
|
||||
pvc: makeVolumeModePVC("8G", &blockMode, nil),
|
||||
enableBlock: true,
|
||||
},
|
||||
"feature enabled - pvc filesystem and pv filesystem": {
|
||||
isExpectedMisMatch: false,
|
||||
isExpectedMismatch: false,
|
||||
vol: createVolumeModeFilesystemTestVolume(),
|
||||
pvc: makeVolumeModePVC("8G", &filesystemMode, nil),
|
||||
enableBlock: true,
|
||||
},
|
||||
"feature enabled - pvc filesystem and pv nil": {
|
||||
isExpectedMismatch: false,
|
||||
vol: createVolumeModeNilTestVolume(),
|
||||
pvc: makeVolumeModePVC("8G", &filesystemMode, nil),
|
||||
enableBlock: true,
|
||||
},
|
||||
"feature enabled - pvc nil and pv filesytem": {
|
||||
isExpectedMismatch: false,
|
||||
vol: createVolumeModeFilesystemTestVolume(),
|
||||
pvc: makeVolumeModePVC("8G", nil, nil),
|
||||
enableBlock: true,
|
||||
},
|
||||
"feature enabled - pvc nil and pv nil": {
|
||||
isExpectedMismatch: false,
|
||||
vol: createVolumeModeNilTestVolume(),
|
||||
pvc: makeVolumeModePVC("8G", nil, nil),
|
||||
enableBlock: true,
|
||||
},
|
||||
"feature enabled - pvc nil and pv block": {
|
||||
isExpectedMismatch: true,
|
||||
vol: createVolumeModeBlockTestVolume(),
|
||||
pvc: makeVolumeModePVC("8G", nil, nil),
|
||||
enableBlock: true,
|
||||
},
|
||||
"feature enabled - pvc block and pv nil": {
|
||||
isExpectedMismatch: true,
|
||||
vol: createVolumeModeNilTestVolume(),
|
||||
pvc: makeVolumeModePVC("8G", &blockMode, nil),
|
||||
enableBlock: true,
|
||||
},
|
||||
"feature disabled - pvc block and pv filesystem": {
|
||||
isExpectedMisMatch: false,
|
||||
isExpectedMismatch: false,
|
||||
vol: createVolumeModeFilesystemTestVolume(),
|
||||
pvc: makeVolumeModePVC("8G", &blockMode, nil),
|
||||
enableBlock: false,
|
||||
},
|
||||
"feature disabled - pvc filesystem and pv block": {
|
||||
isExpectedMisMatch: false,
|
||||
isExpectedMismatch: false,
|
||||
vol: createVolumeModeBlockTestVolume(),
|
||||
pvc: makeVolumeModePVC("8G", &filesystemMode, nil),
|
||||
enableBlock: false,
|
||||
},
|
||||
"feature disabled - pvc block and pv block": {
|
||||
isExpectedMisMatch: false,
|
||||
isExpectedMismatch: false,
|
||||
vol: createVolumeModeBlockTestVolume(),
|
||||
pvc: makeVolumeModePVC("8G", &blockMode, nil),
|
||||
enableBlock: false,
|
||||
},
|
||||
"feature disabled - pvc filesystem and pv filesystem": {
|
||||
isExpectedMisMatch: false,
|
||||
isExpectedMismatch: false,
|
||||
vol: createVolumeModeFilesystemTestVolume(),
|
||||
pvc: makeVolumeModePVC("8G", &filesystemMode, nil),
|
||||
enableBlock: false,
|
||||
@ -928,25 +1161,23 @@ func TestAlphaVolumeModeCheck(t *testing.T) {
|
||||
}
|
||||
|
||||
for name, scenario := range scenarios {
|
||||
toggleFeature(scenario.enableBlock, "BlockVolume", t)
|
||||
expectedMisMatch, err := checkVolumeModeMisMatches(&scenario.pvc.Spec, &scenario.vol.Spec)
|
||||
recover := utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BlockVolume, scenario.enableBlock)
|
||||
expectedMismatch, err := checkVolumeModeMismatches(&scenario.pvc.Spec, &scenario.vol.Spec)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected failure for checkVolumeModeMisMatches: %v", err)
|
||||
t.Errorf("Unexpected failure for checkVolumeModeMismatches: %v", err)
|
||||
}
|
||||
// expected to match but either got an error or no returned pvmatch
|
||||
if expectedMisMatch && !scenario.isExpectedMisMatch {
|
||||
if expectedMismatch && !scenario.isExpectedMismatch {
|
||||
t.Errorf("Unexpected failure for scenario, expected not to mismatch on modes but did: %s", name)
|
||||
}
|
||||
if !expectedMisMatch && scenario.isExpectedMisMatch {
|
||||
if !expectedMismatch && scenario.isExpectedMismatch {
|
||||
t.Errorf("Unexpected failure for scenario, did not mismatch on mode when expected to mismatch: %s", name)
|
||||
}
|
||||
recover()
|
||||
}
|
||||
|
||||
// make sure feature gate is turned off
|
||||
toggleFeature(false, "BlockVolume", t)
|
||||
}
|
||||
|
||||
func TestAlphaFilteringVolumeModes(t *testing.T) {
|
||||
func TestFilteringVolumeModes(t *testing.T) {
|
||||
blockMode := v1.PersistentVolumeBlock
|
||||
filesystemMode := v1.PersistentVolumeFilesystem
|
||||
|
||||
@ -1021,7 +1252,7 @@ func TestAlphaFilteringVolumeModes(t *testing.T) {
|
||||
}
|
||||
|
||||
for name, scenario := range scenarios {
|
||||
toggleFeature(scenario.enableBlock, "BlockVolume", t)
|
||||
recover := utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BlockVolume, scenario.enableBlock)
|
||||
pvmatch, err := scenario.vol.findBestMatchForClaim(scenario.pvc, false)
|
||||
// expected to match but either got an error or no returned pvmatch
|
||||
if pvmatch == nil && scenario.isExpectedMatch {
|
||||
@ -1037,13 +1268,12 @@ func TestAlphaFilteringVolumeModes(t *testing.T) {
|
||||
if err != nil && !scenario.isExpectedMatch {
|
||||
t.Errorf("Unexpected failure for scenario: %s - %+v", name, err)
|
||||
}
|
||||
recover()
|
||||
}
|
||||
|
||||
// make sure feature gate is turned off
|
||||
toggleFeature(false, "BlockVolume", t)
|
||||
}
|
||||
|
||||
func TestAlphaStorageObjectInUseProtectionFiltering(t *testing.T) {
|
||||
func TestStorageObjectInUseProtectionFiltering(t *testing.T) {
|
||||
fs := v1.PersistentVolumeFilesystem
|
||||
pv := &v1.PersistentVolume{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pv1",
|
||||
@ -1053,6 +1283,10 @@ func TestAlphaStorageObjectInUseProtectionFiltering(t *testing.T) {
|
||||
Capacity: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("1G")},
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{}},
|
||||
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
|
||||
VolumeMode: &fs,
|
||||
},
|
||||
Status: v1.PersistentVolumeStatus{
|
||||
Phase: v1.VolumeAvailable,
|
||||
},
|
||||
}
|
||||
|
||||
@ -1068,6 +1302,7 @@ func TestAlphaStorageObjectInUseProtectionFiltering(t *testing.T) {
|
||||
Spec: v1.PersistentVolumeClaimSpec{
|
||||
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
|
||||
Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("1G")}},
|
||||
VolumeMode: &fs,
|
||||
},
|
||||
}
|
||||
|
||||
@ -1078,43 +1313,45 @@ func TestAlphaStorageObjectInUseProtectionFiltering(t *testing.T) {
|
||||
enableStorageObjectInUseProtection bool
|
||||
}{
|
||||
"feature enabled - pv deletionTimeStamp not set": {
|
||||
isExpectedMatch: true,
|
||||
vol: pv,
|
||||
pvc: pvc,
|
||||
isExpectedMatch: true,
|
||||
vol: pv,
|
||||
pvc: pvc,
|
||||
enableStorageObjectInUseProtection: true,
|
||||
},
|
||||
"feature enabled - pv deletionTimeStamp set": {
|
||||
isExpectedMatch: false,
|
||||
vol: pvToDelete,
|
||||
pvc: pvc,
|
||||
isExpectedMatch: false,
|
||||
vol: pvToDelete,
|
||||
pvc: pvc,
|
||||
enableStorageObjectInUseProtection: true,
|
||||
},
|
||||
"feature disabled - pv deletionTimeStamp not set": {
|
||||
isExpectedMatch: true,
|
||||
vol: pv,
|
||||
pvc: pvc,
|
||||
isExpectedMatch: true,
|
||||
vol: pv,
|
||||
pvc: pvc,
|
||||
enableStorageObjectInUseProtection: false,
|
||||
},
|
||||
"feature disabled - pv deletionTimeStamp set": {
|
||||
isExpectedMatch: true,
|
||||
vol: pvToDelete,
|
||||
pvc: pvc,
|
||||
isExpectedMatch: true,
|
||||
vol: pvToDelete,
|
||||
pvc: pvc,
|
||||
enableStorageObjectInUseProtection: false,
|
||||
},
|
||||
}
|
||||
|
||||
for name, testCase := range satisfyingTestCases {
|
||||
toggleFeature(testCase.enableStorageObjectInUseProtection, "StorageObjectInUseProtection", t)
|
||||
err := checkVolumeSatisfyClaim(testCase.vol, testCase.pvc)
|
||||
// expected to match but got an error
|
||||
if err != nil && testCase.isExpectedMatch {
|
||||
t.Errorf("%s: expected to match but got an error: %v", name, err)
|
||||
}
|
||||
// not expected to match but did
|
||||
if err == nil && !testCase.isExpectedMatch {
|
||||
t.Errorf("%s: not expected to match but did", name)
|
||||
}
|
||||
t.Run(name, func(t *testing.T) {
|
||||
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.StorageObjectInUseProtection, testCase.enableStorageObjectInUseProtection)()
|
||||
|
||||
err := checkVolumeSatisfyClaim(testCase.vol, testCase.pvc)
|
||||
// expected to match but got an error
|
||||
if err != nil && testCase.isExpectedMatch {
|
||||
t.Errorf("%s: expected to match but got an error: %v", name, err)
|
||||
}
|
||||
// not expected to match but did
|
||||
if err == nil && !testCase.isExpectedMatch {
|
||||
t.Errorf("%s: not expected to match but did", name)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
filteringTestCases := map[string]struct {
|
||||
@ -1124,54 +1361,55 @@ func TestAlphaStorageObjectInUseProtectionFiltering(t *testing.T) {
|
||||
enableStorageObjectInUseProtection bool
|
||||
}{
|
||||
"feature enabled - pv deletionTimeStamp not set": {
|
||||
isExpectedMatch: true,
|
||||
vol: createTestVolOrderedIndex(pv),
|
||||
pvc: pvc,
|
||||
isExpectedMatch: true,
|
||||
vol: createTestVolOrderedIndex(pv),
|
||||
pvc: pvc,
|
||||
enableStorageObjectInUseProtection: true,
|
||||
},
|
||||
"feature enabled - pv deletionTimeStamp set": {
|
||||
isExpectedMatch: false,
|
||||
vol: createTestVolOrderedIndex(pvToDelete),
|
||||
pvc: pvc,
|
||||
isExpectedMatch: false,
|
||||
vol: createTestVolOrderedIndex(pvToDelete),
|
||||
pvc: pvc,
|
||||
enableStorageObjectInUseProtection: true,
|
||||
},
|
||||
"feature disabled - pv deletionTimeStamp not set": {
|
||||
isExpectedMatch: true,
|
||||
vol: createTestVolOrderedIndex(pv),
|
||||
pvc: pvc,
|
||||
isExpectedMatch: true,
|
||||
vol: createTestVolOrderedIndex(pv),
|
||||
pvc: pvc,
|
||||
enableStorageObjectInUseProtection: false,
|
||||
},
|
||||
"feature disabled - pv deletionTimeStamp set": {
|
||||
isExpectedMatch: true,
|
||||
vol: createTestVolOrderedIndex(pvToDelete),
|
||||
pvc: pvc,
|
||||
isExpectedMatch: true,
|
||||
vol: createTestVolOrderedIndex(pvToDelete),
|
||||
pvc: pvc,
|
||||
enableStorageObjectInUseProtection: false,
|
||||
},
|
||||
}
|
||||
for name, testCase := range filteringTestCases {
|
||||
toggleFeature(testCase.enableStorageObjectInUseProtection, "StorageObjectInUseProtection", t)
|
||||
pvmatch, err := testCase.vol.findBestMatchForClaim(testCase.pvc, false)
|
||||
// expected to match but either got an error or no returned pvmatch
|
||||
if pvmatch == nil && testCase.isExpectedMatch {
|
||||
t.Errorf("Unexpected failure for testcase, no matching volume: %s", name)
|
||||
}
|
||||
if err != nil && testCase.isExpectedMatch {
|
||||
t.Errorf("Unexpected failure for testcase: %s - %+v", name, err)
|
||||
}
|
||||
// expected to not match but either got an error or a returned pvmatch
|
||||
if pvmatch != nil && !testCase.isExpectedMatch {
|
||||
t.Errorf("Unexpected failure for testcase, expected no matching volume: %s", name)
|
||||
}
|
||||
if err != nil && !testCase.isExpectedMatch {
|
||||
t.Errorf("Unexpected failure for testcase: %s - %+v", name, err)
|
||||
}
|
||||
}
|
||||
t.Run(name, func(t *testing.T) {
|
||||
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.StorageObjectInUseProtection, testCase.enableStorageObjectInUseProtection)()
|
||||
|
||||
// make sure feature gate is turned off
|
||||
toggleFeature(false, "StorageObjectInUseProtection", t)
|
||||
pvmatch, err := testCase.vol.findBestMatchForClaim(testCase.pvc, false)
|
||||
// expected to match but either got an error or no returned pvmatch
|
||||
if pvmatch == nil && testCase.isExpectedMatch {
|
||||
t.Errorf("Unexpected failure for testcase, no matching volume: %s", name)
|
||||
}
|
||||
if err != nil && testCase.isExpectedMatch {
|
||||
t.Errorf("Unexpected failure for testcase: %s - %+v", name, err)
|
||||
}
|
||||
// expected to not match but either got an error or a returned pvmatch
|
||||
if pvmatch != nil && !testCase.isExpectedMatch {
|
||||
t.Errorf("Unexpected failure for testcase, expected no matching volume: %s", name)
|
||||
}
|
||||
if err != nil && !testCase.isExpectedMatch {
|
||||
t.Errorf("Unexpected failure for testcase: %s - %+v", name, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestFindingPreboundVolumes(t *testing.T) {
|
||||
fs := v1.PersistentVolumeFilesystem
|
||||
claim := &v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "claim01",
|
||||
@ -1181,6 +1419,7 @@ func TestFindingPreboundVolumes(t *testing.T) {
|
||||
Spec: v1.PersistentVolumeClaimSpec{
|
||||
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
|
||||
Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("1Gi")}},
|
||||
VolumeMode: &fs,
|
||||
},
|
||||
}
|
||||
claimRef, err := ref.GetReference(scheme.Scheme, claim)
|
||||
@ -1274,6 +1513,11 @@ func TestFindMatchVolumeWithNode(t *testing.T) {
|
||||
Labels: map[string]string{"key1": "value3"},
|
||||
},
|
||||
}
|
||||
node4 := &v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{"key1": "value4"},
|
||||
},
|
||||
}
|
||||
|
||||
scenarios := map[string]struct {
|
||||
expectedMatch string
|
||||
@ -1341,6 +1585,15 @@ func TestFindMatchVolumeWithNode(t *testing.T) {
|
||||
}),
|
||||
node: node3,
|
||||
},
|
||||
"fail-nonavaiable": {
|
||||
expectedMatch: "",
|
||||
claim: makePVC("100G", func(pvc *v1.PersistentVolumeClaim) {
|
||||
pvc.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
|
||||
pvc.Spec.StorageClassName = &classWait
|
||||
pvc.Name = "claim04"
|
||||
}),
|
||||
node: node4,
|
||||
},
|
||||
"success-bad-and-good-node-affinity": {
|
||||
expectedMatch: "affinity-pv3",
|
||||
claim: makePVC("100G", func(pvc *v1.PersistentVolumeClaim) {
29
vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/main_test.go
generated
vendored
Normal file
@ -0,0 +1,29 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package persistentvolume
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
|
||||
_ "k8s.io/kubernetes/pkg/features"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run)
|
||||
}
13
vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics/BUILD
generated
vendored
@ -1,18 +1,14 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
)
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["metrics.go"],
|
||||
importpath = "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/klog:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@ -27,4 +23,5 @@ filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
37
vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics/metrics.go
generated
vendored
@ -21,8 +21,8 @@ import (
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"k8s.io/klog"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -56,6 +56,8 @@ type PVCLister interface {
|
||||
func Register(pvLister PVLister, pvcLister PVCLister) {
|
||||
registerMetrics.Do(func() {
|
||||
prometheus.MustRegister(newPVAndPVCCountCollector(pvLister, pvcLister))
|
||||
prometheus.MustRegister(volumeOperationMetric)
|
||||
prometheus.MustRegister(volumeOperationErrorsMetric)
|
||||
})
|
||||
}
|
||||
|
||||
@ -89,6 +91,19 @@ var (
|
||||
prometheus.BuildFQName("", pvControllerSubsystem, unboundPVCKey),
|
||||
"Gauge measuring number of persistent volume claim currently unbound",
|
||||
[]string{namespaceLabel}, nil)
|
||||
|
||||
volumeOperationMetric = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "volume_operation_total_seconds",
|
||||
Help: "Total volume operation time",
|
||||
},
|
||||
[]string{"plugin_name", "operation_name"})
|
||||
volumeOperationErrorsMetric = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "volume_operation_total_errors",
|
||||
Help: "Total volume operation erros",
|
||||
},
|
||||
[]string{"plugin_name", "operation_name"})
|
||||
)
|
||||
|
||||
func (collector *pvAndPVCCountCollector) Describe(ch chan<- *prometheus.Desc) {
|
||||
@ -124,7 +139,7 @@ func (collector *pvAndPVCCountCollector) pvCollect(ch chan<- prometheus.Metric)
|
||||
float64(number),
|
||||
storageClassName)
|
||||
if err != nil {
|
||||
glog.Warningf("Create bound pv number metric failed: %v", err)
|
||||
klog.Warningf("Create bound pv number metric failed: %v", err)
|
||||
continue
|
||||
}
|
||||
ch <- metric
|
||||
@ -136,7 +151,7 @@ func (collector *pvAndPVCCountCollector) pvCollect(ch chan<- prometheus.Metric)
|
||||
float64(number),
|
||||
storageClassName)
|
||||
if err != nil {
|
||||
glog.Warningf("Create unbound pv number metric failed: %v", err)
|
||||
klog.Warningf("Create unbound pv number metric failed: %v", err)
|
||||
continue
|
||||
}
|
||||
ch <- metric
|
||||
@ -164,7 +179,7 @@ func (collector *pvAndPVCCountCollector) pvcCollect(ch chan<- prometheus.Metric)
|
||||
float64(number),
|
||||
namespace)
|
||||
if err != nil {
|
||||
glog.Warningf("Create bound pvc number metric failed: %v", err)
|
||||
klog.Warningf("Create bound pvc number metric failed: %v", err)
|
||||
continue
|
||||
}
|
||||
ch <- metric
|
||||
@ -176,9 +191,21 @@ func (collector *pvAndPVCCountCollector) pvcCollect(ch chan<- prometheus.Metric)
|
||||
float64(number),
|
||||
namespace)
|
||||
if err != nil {
|
||||
glog.Warningf("Create unbound pvc number metric failed: %v", err)
|
||||
klog.Warningf("Create unbound pvc number metric failed: %v", err)
|
||||
continue
|
||||
}
|
||||
ch <- metric
|
||||
}
|
||||
}
|
||||
|
||||
// RecordVolumeOperationMetric records the latency and errors of volume operations.
|
||||
func RecordVolumeOperationMetric(pluginName, opName string, timeTaken float64, err error) {
|
||||
if pluginName == "" {
|
||||
pluginName = "N/A"
|
||||
}
|
||||
if err != nil {
|
||||
volumeOperationErrorsMetric.WithLabelValues(pluginName, opName).Inc()
|
||||
return
|
||||
}
|
||||
volumeOperationMetric.WithLabelValues(pluginName, opName).Observe(timeTaken)
|
||||
}
|
||||
|
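RecordVolumeOperationMetric above either observes the latency histogram or bumps the error counter, depending on err. A minimal caller sketch; only the function signature and import path come from the code above, while the plugin name, operation name, and provisionVolume helper are hypothetical:

package main

import (
	"time"

	"k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics"
)

// provisionVolume stands in for a real provision or delete operation.
func provisionVolume() error { return nil }

func main() {
	start := time.Now()
	err := provisionVolume()
	// On success this records the latency in volume_operation_total_seconds;
	// on failure it increments volume_operation_total_errors instead.
	metrics.RecordVolumeOperationMetric("kubernetes.io/mock-plugin", "provision", time.Since(start).Seconds(), err)
}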
40
vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/provision_test.go
generated
vendored
@ -34,6 +34,7 @@ var class2Parameters = map[string]string{
|
||||
"param2": "value2",
|
||||
}
|
||||
var deleteReclaimPolicy = v1.PersistentVolumeReclaimDelete
|
||||
var modeImmediate = storage.VolumeBindingImmediate
|
||||
var storageClasses = []*storage.StorageClass{
|
||||
{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
@ -44,9 +45,10 @@ var storageClasses = []*storage.StorageClass{
|
||||
Name: "gold",
|
||||
},
|
||||
|
||||
Provisioner: mockPluginName,
|
||||
Parameters: class1Parameters,
|
||||
ReclaimPolicy: &deleteReclaimPolicy,
|
||||
Provisioner: mockPluginName,
|
||||
Parameters: class1Parameters,
|
||||
ReclaimPolicy: &deleteReclaimPolicy,
|
||||
VolumeBindingMode: &modeImmediate,
|
||||
},
|
||||
{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
@ -55,9 +57,10 @@ var storageClasses = []*storage.StorageClass{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "silver",
|
||||
},
|
||||
Provisioner: mockPluginName,
|
||||
Parameters: class2Parameters,
|
||||
ReclaimPolicy: &deleteReclaimPolicy,
|
||||
Provisioner: mockPluginName,
|
||||
Parameters: class2Parameters,
|
||||
ReclaimPolicy: &deleteReclaimPolicy,
|
||||
VolumeBindingMode: &modeImmediate,
|
||||
},
|
||||
{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
@ -66,9 +69,10 @@ var storageClasses = []*storage.StorageClass{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "external",
|
||||
},
|
||||
Provisioner: "vendor.com/my-volume",
|
||||
Parameters: class1Parameters,
|
||||
ReclaimPolicy: &deleteReclaimPolicy,
|
||||
Provisioner: "vendor.com/my-volume",
|
||||
Parameters: class1Parameters,
|
||||
ReclaimPolicy: &deleteReclaimPolicy,
|
||||
VolumeBindingMode: &modeImmediate,
|
||||
},
|
||||
{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
@ -77,9 +81,10 @@ var storageClasses = []*storage.StorageClass{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "unknown-internal",
|
||||
},
|
||||
Provisioner: "kubernetes.io/unknown",
|
||||
Parameters: class1Parameters,
|
||||
ReclaimPolicy: &deleteReclaimPolicy,
|
||||
Provisioner: "kubernetes.io/unknown",
|
||||
Parameters: class1Parameters,
|
||||
ReclaimPolicy: &deleteReclaimPolicy,
|
||||
VolumeBindingMode: &modeImmediate,
|
||||
},
|
||||
{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
@ -88,10 +93,11 @@ var storageClasses = []*storage.StorageClass{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "unsupported-mountoptions",
|
||||
},
|
||||
Provisioner: mockPluginName,
|
||||
Parameters: class1Parameters,
|
||||
ReclaimPolicy: &deleteReclaimPolicy,
|
||||
MountOptions: []string{"foo"},
|
||||
Provisioner: mockPluginName,
|
||||
Parameters: class1Parameters,
|
||||
ReclaimPolicy: &deleteReclaimPolicy,
|
||||
MountOptions: []string{"foo"},
|
||||
VolumeBindingMode: &modeImmediate,
|
||||
},
|
||||
}
|
||||
|
||||
@ -166,7 +172,7 @@ func TestProvisionSync(t *testing.T) {
|
||||
{
|
||||
// No provisioning if there is a matching volume available
|
||||
"11-6 - provisioning when there is a volume available",
|
||||
newVolumeArray("volume11-6", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold),
|
||||
newVolumeArray("volume11-6", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classGold),
|
||||
newVolumeArray("volume11-6", "1Gi", "uid11-6", "claim11-6", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classGold, annBoundByController),
|
||||
newClaimArray("claim11-6", "uid11-6", "1Gi", "", v1.ClaimPending, &classGold),
|
||||
newClaimArray("claim11-6", "uid11-6", "1Gi", "volume11-6", v1.ClaimBound, &classGold, annBoundByController, annBindCompleted),
|
||||
|
424
vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller.go
generated
vendored
File diff suppressed because it is too large
98
vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller_base.go
generated
vendored
@ -38,13 +38,13 @@ import (
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
cloudprovider "k8s.io/cloud-provider"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
"k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics"
|
||||
"k8s.io/kubernetes/pkg/util/goroutinemap"
|
||||
vol "k8s.io/kubernetes/pkg/volume"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/klog"
|
||||
)
|
||||
|
||||
// This file contains the controller base functionality, i.e. framework to
|
||||
@ -73,18 +73,18 @@ func NewController(p ControllerParameters) (*PersistentVolumeController, error)
|
||||
eventRecorder := p.EventRecorder
|
||||
if eventRecorder == nil {
|
||||
broadcaster := record.NewBroadcaster()
|
||||
broadcaster.StartLogging(glog.Infof)
|
||||
broadcaster.StartLogging(klog.Infof)
|
||||
broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: p.KubeClient.CoreV1().Events("")})
|
||||
eventRecorder = broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "persistentvolume-controller"})
|
||||
}
|
||||
|
||||
controller := &PersistentVolumeController{
|
||||
volumes: newPersistentVolumeOrderedIndex(),
|
||||
claims: cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc),
|
||||
kubeClient: p.KubeClient,
|
||||
eventRecorder: eventRecorder,
|
||||
runningOperations: goroutinemap.NewGoRoutineMap(true /* exponentialBackOffOnError */),
|
||||
cloud: p.Cloud,
|
||||
volumes: newPersistentVolumeOrderedIndex(),
|
||||
claims: cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc),
|
||||
kubeClient: p.KubeClient,
|
||||
eventRecorder: eventRecorder,
|
||||
runningOperations: goroutinemap.NewGoRoutineMap(true /* exponentialBackOffOnError */),
|
||||
cloud: p.Cloud,
|
||||
enableDynamicProvisioning: p.EnableDynamicProvisioning,
|
||||
clusterName: p.ClusterName,
|
||||
createProvisionedPVRetryCount: createProvisionedPVRetryCount,
|
||||
@ -134,27 +134,27 @@ func NewController(p ControllerParameters) (*PersistentVolumeController, error)
|
||||
func (ctrl *PersistentVolumeController) initializeCaches(volumeLister corelisters.PersistentVolumeLister, claimLister corelisters.PersistentVolumeClaimLister) {
|
||||
volumeList, err := volumeLister.List(labels.Everything())
|
||||
if err != nil {
|
||||
glog.Errorf("PersistentVolumeController can't initialize caches: %v", err)
|
||||
klog.Errorf("PersistentVolumeController can't initialize caches: %v", err)
|
||||
return
|
||||
}
|
||||
for _, volume := range volumeList {
|
||||
volumeClone := volume.DeepCopy()
|
||||
if _, err = ctrl.storeVolumeUpdate(volumeClone); err != nil {
|
||||
glog.Errorf("error updating volume cache: %v", err)
|
||||
klog.Errorf("error updating volume cache: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
claimList, err := claimLister.List(labels.Everything())
|
||||
if err != nil {
|
||||
glog.Errorf("PersistentVolumeController can't initialize caches: %v", err)
|
||||
klog.Errorf("PersistentVolumeController can't initialize caches: %v", err)
|
||||
return
|
||||
}
|
||||
for _, claim := range claimList {
|
||||
if _, err = ctrl.storeClaimUpdate(claim.DeepCopy()); err != nil {
|
||||
glog.Errorf("error updating claim cache: %v", err)
|
||||
klog.Errorf("error updating claim cache: %v", err)
|
||||
}
|
||||
}
|
||||
glog.V(4).Infof("controller initialized")
|
||||
klog.V(4).Infof("controller initialized")
|
||||
}
|
||||
|
||||
// enqueueWork adds volume or claim to given work queue.
|
||||
@ -165,10 +165,10 @@ func (ctrl *PersistentVolumeController) enqueueWork(queue workqueue.Interface, o
|
||||
}
|
||||
objName, err := controller.KeyFunc(obj)
|
||||
if err != nil {
|
||||
glog.Errorf("failed to get key from object: %v", err)
|
||||
klog.Errorf("failed to get key from object: %v", err)
|
||||
return
|
||||
}
|
||||
glog.V(5).Infof("enqueued %q for sync", objName)
|
||||
klog.V(5).Infof("enqueued %q for sync", objName)
|
||||
queue.Add(objName)
|
||||
}
|
||||
|
||||
@ -187,7 +187,7 @@ func (ctrl *PersistentVolumeController) updateVolume(volume *v1.PersistentVolume
|
||||
// is an old version.
|
||||
new, err := ctrl.storeVolumeUpdate(volume)
|
||||
if err != nil {
|
||||
glog.Errorf("%v", err)
|
||||
klog.Errorf("%v", err)
|
||||
}
|
||||
if !new {
|
||||
return
|
||||
@ -198,9 +198,9 @@ func (ctrl *PersistentVolumeController) updateVolume(volume *v1.PersistentVolume
|
||||
if errors.IsConflict(err) {
|
||||
// Version conflict error happens quite often and the controller
|
||||
// recovers from it easily.
|
||||
glog.V(3).Infof("could not sync volume %q: %+v", volume.Name, err)
|
||||
klog.V(3).Infof("could not sync volume %q: %+v", volume.Name, err)
|
||||
} else {
|
||||
glog.Errorf("could not sync volume %q: %+v", volume.Name, err)
|
||||
klog.Errorf("could not sync volume %q: %+v", volume.Name, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -208,7 +208,7 @@ func (ctrl *PersistentVolumeController) updateVolume(volume *v1.PersistentVolume
|
||||
// deleteVolume runs in worker thread and handles "volume deleted" event.
|
||||
func (ctrl *PersistentVolumeController) deleteVolume(volume *v1.PersistentVolume) {
|
||||
_ = ctrl.volumes.store.Delete(volume)
|
||||
glog.V(4).Infof("volume %q deleted", volume.Name)
|
||||
klog.V(4).Infof("volume %q deleted", volume.Name)
|
||||
|
||||
if volume.Spec.ClaimRef == nil {
|
||||
return
|
||||
@ -217,7 +217,7 @@ func (ctrl *PersistentVolumeController) deleteVolume(volume *v1.PersistentVolume
|
||||
// claim here in response to volume deletion prevents the claim from
|
||||
// waiting until the next sync period for its Lost status.
|
||||
claimKey := claimrefToClaimKey(volume.Spec.ClaimRef)
|
||||
glog.V(5).Infof("deleteVolume[%s]: scheduling sync of claim %q", volume.Name, claimKey)
|
||||
klog.V(5).Infof("deleteVolume[%s]: scheduling sync of claim %q", volume.Name, claimKey)
|
||||
ctrl.claimQueue.Add(claimKey)
|
||||
}
|
||||
|
||||
@ -228,7 +228,7 @@ func (ctrl *PersistentVolumeController) updateClaim(claim *v1.PersistentVolumeCl
|
||||
// an old version.
|
||||
new, err := ctrl.storeClaimUpdate(claim)
|
||||
if err != nil {
|
||||
glog.Errorf("%v", err)
|
||||
klog.Errorf("%v", err)
|
||||
}
|
||||
if !new {
|
||||
return
|
||||
@ -238,9 +238,9 @@ func (ctrl *PersistentVolumeController) updateClaim(claim *v1.PersistentVolumeCl
|
||||
if errors.IsConflict(err) {
|
||||
// Version conflict error happens quite often and the controller
|
||||
// recovers from it easily.
|
||||
glog.V(3).Infof("could not sync claim %q: %+v", claimToClaimKey(claim), err)
|
||||
klog.V(3).Infof("could not sync claim %q: %+v", claimToClaimKey(claim), err)
|
||||
} else {
|
||||
glog.Errorf("could not sync volume %q: %+v", claimToClaimKey(claim), err)
|
||||
klog.Errorf("could not sync volume %q: %+v", claimToClaimKey(claim), err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -248,17 +248,17 @@ func (ctrl *PersistentVolumeController) updateClaim(claim *v1.PersistentVolumeCl
|
||||
// deleteClaim runs in worker thread and handles "claim deleted" event.
|
||||
func (ctrl *PersistentVolumeController) deleteClaim(claim *v1.PersistentVolumeClaim) {
|
||||
_ = ctrl.claims.Delete(claim)
|
||||
glog.V(4).Infof("claim %q deleted", claimToClaimKey(claim))
|
||||
klog.V(4).Infof("claim %q deleted", claimToClaimKey(claim))
|
||||
|
||||
volumeName := claim.Spec.VolumeName
|
||||
if volumeName == "" {
|
||||
glog.V(5).Infof("deleteClaim[%q]: volume not bound", claimToClaimKey(claim))
|
||||
klog.V(5).Infof("deleteClaim[%q]: volume not bound", claimToClaimKey(claim))
|
||||
return
|
||||
}
|
||||
// sync the volume when its claim is deleted. Explicitly sync'ing the
|
||||
// volume here in response to claim deletion prevents the volume from
|
||||
// waiting until the next sync period for its Release.
|
||||
glog.V(5).Infof("deleteClaim[%q]: scheduling sync of volume %s", claimToClaimKey(claim), volumeName)
|
||||
klog.V(5).Infof("deleteClaim[%q]: scheduling sync of volume %s", claimToClaimKey(claim), volumeName)
|
||||
ctrl.volumeQueue.Add(volumeName)
|
||||
}
|
||||
|
||||
@ -268,8 +268,8 @@ func (ctrl *PersistentVolumeController) Run(stopCh <-chan struct{}) {
|
||||
defer ctrl.claimQueue.ShutDown()
|
||||
defer ctrl.volumeQueue.ShutDown()
|
||||
|
||||
glog.Infof("Starting persistent volume controller")
|
||||
defer glog.Infof("Shutting down persistent volume controller")
|
||||
klog.Infof("Starting persistent volume controller")
|
||||
defer klog.Infof("Shutting down persistent volume controller")
|
||||
|
||||
if !controller.WaitForCacheSync("persistent volume", stopCh, ctrl.volumeListerSynced, ctrl.claimListerSynced, ctrl.classListerSynced, ctrl.podListerSynced, ctrl.NodeListerSynced) {
|
||||
return
|
||||
@ -296,11 +296,11 @@ func (ctrl *PersistentVolumeController) volumeWorker() {
|
||||
}
|
||||
defer ctrl.volumeQueue.Done(keyObj)
|
||||
key := keyObj.(string)
|
||||
glog.V(5).Infof("volumeWorker[%s]", key)
|
||||
klog.V(5).Infof("volumeWorker[%s]", key)
|
||||
|
||||
_, name, err := cache.SplitMetaNamespaceKey(key)
|
||||
if err != nil {
|
||||
glog.V(4).Infof("error getting name of volume %q to get volume from informer: %v", key, err)
|
||||
klog.V(4).Infof("error getting name of volume %q to get volume from informer: %v", key, err)
|
||||
return false
|
||||
}
|
||||
volume, err := ctrl.volumeLister.Get(name)
|
||||
@ -311,7 +311,7 @@ func (ctrl *PersistentVolumeController) volumeWorker() {
|
||||
return false
|
||||
}
|
||||
if !errors.IsNotFound(err) {
|
||||
glog.V(2).Infof("error getting volume %q from informer: %v", key, err)
|
||||
klog.V(2).Infof("error getting volume %q from informer: %v", key, err)
|
||||
return false
|
||||
}
|
||||
|
||||
@ -319,18 +319,18 @@ func (ctrl *PersistentVolumeController) volumeWorker() {
|
||||
// "delete"
|
||||
volumeObj, found, err := ctrl.volumes.store.GetByKey(key)
|
||||
if err != nil {
|
||||
glog.V(2).Infof("error getting volume %q from cache: %v", key, err)
|
||||
klog.V(2).Infof("error getting volume %q from cache: %v", key, err)
|
||||
return false
|
||||
}
|
||||
if !found {
|
||||
// The controller has already processed the delete event and
|
||||
// deleted the volume from its cache
|
||||
glog.V(2).Infof("deletion of volume %q was already processed", key)
|
||||
klog.V(2).Infof("deletion of volume %q was already processed", key)
|
||||
return false
|
||||
}
|
||||
volume, ok := volumeObj.(*v1.PersistentVolume)
|
||||
if !ok {
|
||||
glog.Errorf("expected volume, got %+v", volumeObj)
|
||||
klog.Errorf("expected volume, got %+v", volumeObj)
|
||||
return false
|
||||
}
|
||||
ctrl.deleteVolume(volume)
|
||||
@ -338,7 +338,7 @@ func (ctrl *PersistentVolumeController) volumeWorker() {
|
||||
}
|
||||
for {
|
||||
if quit := workFunc(); quit {
|
||||
glog.Infof("volume worker queue shutting down")
|
||||
klog.Infof("volume worker queue shutting down")
|
||||
return
|
||||
}
|
||||
}
|
||||
@ -354,11 +354,11 @@ func (ctrl *PersistentVolumeController) claimWorker() {
|
||||
}
|
||||
defer ctrl.claimQueue.Done(keyObj)
|
||||
key := keyObj.(string)
|
||||
glog.V(5).Infof("claimWorker[%s]", key)
|
||||
klog.V(5).Infof("claimWorker[%s]", key)
|
||||
|
||||
namespace, name, err := cache.SplitMetaNamespaceKey(key)
|
||||
if err != nil {
|
||||
glog.V(4).Infof("error getting namespace & name of claim %q to get claim from informer: %v", key, err)
|
||||
klog.V(4).Infof("error getting namespace & name of claim %q to get claim from informer: %v", key, err)
|
||||
return false
|
||||
}
|
||||
claim, err := ctrl.claimLister.PersistentVolumeClaims(namespace).Get(name)
|
||||
@ -369,25 +369,25 @@ func (ctrl *PersistentVolumeController) claimWorker() {
|
||||
return false
|
||||
}
|
||||
if !errors.IsNotFound(err) {
|
||||
glog.V(2).Infof("error getting claim %q from informer: %v", key, err)
|
||||
klog.V(2).Infof("error getting claim %q from informer: %v", key, err)
|
||||
return false
|
||||
}
|
||||
|
||||
// The claim is not in informer cache, the event must have been "delete"
|
||||
claimObj, found, err := ctrl.claims.GetByKey(key)
|
||||
if err != nil {
|
||||
glog.V(2).Infof("error getting claim %q from cache: %v", key, err)
|
||||
klog.V(2).Infof("error getting claim %q from cache: %v", key, err)
|
||||
return false
|
||||
}
|
||||
if !found {
|
||||
// The controller has already processed the delete event and
|
||||
// deleted the claim from its cache
|
||||
glog.V(2).Infof("deletion of claim %q was already processed", key)
|
||||
klog.V(2).Infof("deletion of claim %q was already processed", key)
|
||||
return false
|
||||
}
|
||||
claim, ok := claimObj.(*v1.PersistentVolumeClaim)
|
||||
if !ok {
|
||||
glog.Errorf("expected claim, got %+v", claimObj)
|
||||
klog.Errorf("expected claim, got %+v", claimObj)
|
||||
return false
|
||||
}
|
||||
ctrl.deleteClaim(claim)
|
||||
@ -395,7 +395,7 @@ func (ctrl *PersistentVolumeController) claimWorker() {
|
||||
}
|
||||
for {
|
||||
if quit := workFunc(); quit {
|
||||
glog.Infof("claim worker queue shutting down")
|
||||
klog.Infof("claim worker queue shutting down")
|
||||
return
|
||||
}
|
||||
}
|
||||
@ -405,11 +405,11 @@ func (ctrl *PersistentVolumeController) claimWorker() {
|
||||
// all consumers of PV/PVC shared informer to have a short resync period,
|
||||
// therefore we do our own.
|
||||
func (ctrl *PersistentVolumeController) resync() {
|
||||
glog.V(4).Infof("resyncing PV controller")
|
||||
klog.V(4).Infof("resyncing PV controller")
|
||||
|
||||
pvcs, err := ctrl.claimLister.List(labels.NewSelector())
|
||||
if err != nil {
|
||||
glog.Warningf("cannot list claims: %s", err)
|
||||
klog.Warningf("cannot list claims: %s", err)
|
||||
return
|
||||
}
|
||||
for _, pvc := range pvcs {
|
||||
@ -418,7 +418,7 @@ func (ctrl *PersistentVolumeController) resync() {
|
||||
|
||||
pvs, err := ctrl.volumeLister.List(labels.NewSelector())
|
||||
if err != nil {
|
||||
glog.Warningf("cannot list persistent volumes: %s", err)
|
||||
klog.Warningf("cannot list persistent volumes: %s", err)
|
||||
return
|
||||
}
|
||||
for _, pv := range pvs {
|
||||
@ -504,7 +504,7 @@ func storeObjectUpdate(store cache.Store, obj interface{}, className string) (bo
|
||||
|
||||
if !found {
|
||||
// This is a new object
|
||||
glog.V(4).Infof("storeObjectUpdate: adding %s %q, version %s", className, objName, objAccessor.GetResourceVersion())
|
||||
klog.V(4).Infof("storeObjectUpdate: adding %s %q, version %s", className, objName, objAccessor.GetResourceVersion())
|
||||
if err = store.Add(obj); err != nil {
|
||||
return false, fmt.Errorf("Error adding %s %q to controller cache: %v", className, objName, err)
|
||||
}
|
||||
@ -528,11 +528,11 @@ func storeObjectUpdate(store cache.Store, obj interface{}, className string) (bo
|
||||
// Throw away only older version, let the same version pass - we do want to
|
||||
// get periodic sync events.
|
||||
if oldObjResourceVersion > objResourceVersion {
|
||||
glog.V(4).Infof("storeObjectUpdate: ignoring %s %q version %s", className, objName, objAccessor.GetResourceVersion())
|
||||
klog.V(4).Infof("storeObjectUpdate: ignoring %s %q version %s", className, objName, objAccessor.GetResourceVersion())
|
||||
return false, nil
|
||||
}
|
||||
|
||||
glog.V(4).Infof("storeObjectUpdate updating %s %q with version %s", className, objName, objAccessor.GetResourceVersion())
|
||||
klog.V(4).Infof("storeObjectUpdate updating %s %q with version %s", className, objName, objAccessor.GetResourceVersion())
|
||||
if err = store.Update(obj); err != nil {
|
||||
return false, fmt.Errorf("Error updating %s %q in controller cache: %v", className, objName, err)
|
||||
}
|
||||
|
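storeObjectUpdate above drops informer events that carry an older resourceVersion than the cached copy, but lets equal versions through so periodic resyncs still trigger sync events. A self-contained sketch of just that comparison rule; the helper name and sample values are illustrative:

package main

import (
	"fmt"
	"strconv"
)

// shouldReplace mirrors the storeObjectUpdate rule: keep the cached copy when
// its resourceVersion is newer, and accept equal or newer incoming versions.
func shouldReplace(cachedRV, incomingRV string) (bool, error) {
	cached, err := strconv.ParseInt(cachedRV, 10, 64)
	if err != nil {
		return false, fmt.Errorf("parse cached resourceVersion %q: %v", cachedRV, err)
	}
	incoming, err := strconv.ParseInt(incomingRV, 10, 64)
	if err != nil {
		return false, fmt.Errorf("parse incoming resourceVersion %q: %v", incomingRV, err)
	}
	return incoming >= cached, nil
}

func main() {
	for _, rv := range []string{"41", "42", "43"} {
		ok, _ := shouldReplace("42", rv)
		fmt.Printf("cached=42 incoming=%s -> replace=%v\n", rv, ok)
	}
}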
111
vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller_test.go
generated
vendored
@ -20,18 +20,26 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
storagev1 "k8s.io/api/storage/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
core "k8s.io/client-go/testing"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/klog"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
)
|
||||
|
||||
var (
|
||||
classNotHere = "not-here"
|
||||
classNoMode = "no-mode"
|
||||
classImmediateMode = "immediate-mode"
|
||||
classWaitMode = "wait-mode"
|
||||
)
|
||||
|
||||
// Test the real controller methods (add/update/delete claim/volume) with
|
||||
@ -96,7 +104,7 @@ func TestControllerSync(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
glog.V(4).Infof("starting test %q", test.name)
|
||||
klog.V(4).Infof("starting test %q", test.name)
|
||||
|
||||
// Initialize the controller
|
||||
client := &fake.Clientset{}
|
||||
@ -140,7 +148,7 @@ func TestControllerSync(t *testing.T) {
|
||||
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
glog.V(4).Infof("controller synced, starting test")
|
||||
klog.V(4).Infof("controller synced, starting test")
|
||||
|
||||
// Call the tested function
|
||||
err = test.test(ctrl, reactor, test)
|
||||
@ -237,12 +245,21 @@ func addVolumeAnnotation(volume *v1.PersistentVolume, annName, annValue string)
|
||||
return volume
|
||||
}
|
||||
|
||||
func makePVCClass(scName *string) *v1.PersistentVolumeClaim {
|
||||
return &v1.PersistentVolumeClaim{
|
||||
func makePVCClass(scName *string, hasSelectNodeAnno bool) *v1.PersistentVolumeClaim {
|
||||
claim := &v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Annotations: map[string]string{},
|
||||
},
|
||||
Spec: v1.PersistentVolumeClaimSpec{
|
||||
StorageClassName: scName,
|
||||
},
|
||||
}
|
||||
|
||||
if hasSelectNodeAnno {
|
||||
claim.Annotations[annSelectedNode] = "node-name"
|
||||
}
|
||||
|
||||
return claim
|
||||
}
|
||||
|
||||
func makeStorageClass(scName string, mode *storagev1.VolumeBindingMode) *storagev1.StorageClass {
|
||||
@ -255,42 +272,36 @@ func makeStorageClass(scName string, mode *storagev1.VolumeBindingMode) *storage
|
||||
}
|
||||
|
||||
func TestDelayBinding(t *testing.T) {
|
||||
var (
|
||||
classNotHere = "not-here"
|
||||
classNoMode = "no-mode"
|
||||
classImmediateMode = "immediate-mode"
|
||||
classWaitMode = "wait-mode"
|
||||
|
||||
modeImmediate = storagev1.VolumeBindingImmediate
|
||||
modeWait = storagev1.VolumeBindingWaitForFirstConsumer
|
||||
)
|
||||
|
||||
tests := map[string]struct {
|
||||
pvc *v1.PersistentVolumeClaim
|
||||
shouldDelay bool
|
||||
shouldFail bool
|
||||
}{
|
||||
"nil-class": {
|
||||
pvc: makePVCClass(nil),
|
||||
pvc: makePVCClass(nil, false),
|
||||
shouldDelay: false,
|
||||
},
|
||||
"class-not-found": {
|
||||
pvc: makePVCClass(&classNotHere),
|
||||
pvc: makePVCClass(&classNotHere, false),
|
||||
shouldDelay: false,
|
||||
},
|
||||
"no-mode-class": {
|
||||
pvc: makePVCClass(&classNoMode),
|
||||
pvc: makePVCClass(&classNoMode, false),
|
||||
shouldDelay: false,
|
||||
shouldFail: true,
|
||||
},
|
||||
"immediate-mode-class": {
|
||||
pvc: makePVCClass(&classImmediateMode),
|
||||
pvc: makePVCClass(&classImmediateMode, false),
|
||||
shouldDelay: false,
|
||||
},
|
||||
"wait-mode-class": {
|
||||
pvc: makePVCClass(&classWaitMode),
|
||||
pvc: makePVCClass(&classWaitMode, false),
|
||||
shouldDelay: true,
|
||||
},
|
||||
"wait-mode-class-with-selectedNode": {
|
||||
pvc: makePVCClass(&classWaitMode, true),
|
||||
shouldDelay: false,
|
||||
},
|
||||
}
|
||||
|
||||
classes := []*storagev1.StorageClass{
|
||||
@ -312,22 +323,8 @@ func TestDelayBinding(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// When volumeScheduling feature gate is disabled, should always be delayed
|
||||
name := "volumeScheduling-feature-disabled"
|
||||
shouldDelay, err := ctrl.shouldDelayBinding(makePVCClass(&classWaitMode))
|
||||
if err != nil {
|
||||
t.Errorf("Test %q returned error: %v", name, err)
|
||||
}
|
||||
if shouldDelay {
|
||||
t.Errorf("Test %q returned true, expected false", name)
|
||||
}
|
||||
|
||||
// Enable volumeScheduling feature gate
|
||||
utilfeature.DefaultFeatureGate.Set("VolumeScheduling=true")
|
||||
defer utilfeature.DefaultFeatureGate.Set("VolumeScheduling=false")
|
||||
|
||||
for name, test := range tests {
|
||||
shouldDelay, err = ctrl.shouldDelayBinding(test.pvc)
|
||||
shouldDelay, err := ctrl.shouldDelayBinding(test.pvc)
|
||||
if err != nil && !test.shouldFail {
|
||||
t.Errorf("Test %q returned error: %v", name, err)
|
||||
}
|
||||
@ -338,39 +335,21 @@ func TestDelayBinding(t *testing.T) {
|
||||
t.Errorf("Test %q returned unexpected %v", name, test.shouldDelay)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// When dynamicProvisioningScheduling feature gate is disabled, should be delayed,
|
||||
// even if the pvc has selectedNode annotation.
|
||||
provisionedClaim := makePVCClass(&classWaitMode)
|
||||
provisionedClaim.Annotations = map[string]string{annSelectedNode: "node-name"}
|
||||
name = "dynamicProvisioningScheduling-feature-disabled"
|
||||
shouldDelay, err = ctrl.shouldDelayBinding(provisionedClaim)
|
||||
if err != nil {
|
||||
t.Errorf("Test %q returned error: %v", name, err)
|
||||
}
|
||||
if !shouldDelay {
|
||||
t.Errorf("Test %q returned false, expected true", name)
|
||||
func TestDelayBindingDisabled(t *testing.T) {
|
||||
// When volumeScheduling feature gate is disabled, should always be immediate
|
||||
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeScheduling, false)()
|
||||
|
||||
client := &fake.Clientset{}
|
||||
informerFactory := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
|
||||
classInformer := informerFactory.Storage().V1().StorageClasses()
|
||||
ctrl := &PersistentVolumeController{
|
||||
classLister: classInformer.Lister(),
|
||||
}
|
||||
|
||||
// Enable DynamicProvisioningScheduling feature gate
|
||||
utilfeature.DefaultFeatureGate.Set("DynamicProvisioningScheduling=true")
|
||||
defer utilfeature.DefaultFeatureGate.Set("DynamicProvisioningScheduling=false")
|
||||
|
||||
// When the pvc does not have selectedNode annotation, should be delayed,
|
||||
// even if dynamicProvisioningScheduling feature gate is enabled.
|
||||
name = "dynamicProvisioningScheduling-feature-enabled, selectedNode-annotation-not-set"
|
||||
shouldDelay, err = ctrl.shouldDelayBinding(makePVCClass(&classWaitMode))
|
||||
if err != nil {
|
||||
t.Errorf("Test %q returned error: %v", name, err)
|
||||
}
|
||||
if !shouldDelay {
|
||||
t.Errorf("Test %q returned false, expected true", name)
|
||||
}
|
||||
|
||||
// Should not be delayed when dynamicProvisioningScheduling feature gate is enabled,
|
||||
// and the pvc has selectedNode annotation.
|
||||
name = "dynamicProvisioningScheduling-feature-enabled, selectedNode-annotation-set"
|
||||
shouldDelay, err = ctrl.shouldDelayBinding(provisionedClaim)
|
||||
name := "volumeScheduling-feature-disabled"
|
||||
shouldDelay, err := ctrl.shouldDelayBinding(makePVCClass(&classWaitMode, false))
|
||||
if err != nil {
|
||||
t.Errorf("Test %q returned error: %v", name, err)
|
||||
}
|
||||
|
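TestDelayBinding and TestDelayBindingDisabled above exercise a small rule: binding is delayed only for WaitForFirstConsumer classes, never once the scheduler has written the selected-node annotation, and never when the VolumeScheduling gate is off. A stripped-down sketch of that decision, with plain arguments standing in for the real StorageClass and PVC fields:

package main

import "fmt"

// shouldDelayBinding is a simplified sketch of the rule the tests above check;
// real code reads the mode from the StorageClass and the annotation from the PVC.
func shouldDelayBinding(volumeSchedulingEnabled bool, bindingMode string, hasSelectedNode bool) bool {
	if !volumeSchedulingEnabled {
		return false // gate off: always bind immediately
	}
	if bindingMode != "WaitForFirstConsumer" {
		return false
	}
	return !hasSelectedNode // delay until the scheduler picks a node
}

func main() {
	fmt.Println(shouldDelayBinding(true, "Immediate", false))             // false
	fmt.Println(shouldDelayBinding(true, "WaitForFirstConsumer", false))  // true
	fmt.Println(shouldDelayBinding(true, "WaitForFirstConsumer", true))   // false
	fmt.Println(shouldDelayBinding(false, "WaitForFirstConsumer", false)) // false
}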
10
vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/recycle_test.go
generated
vendored
@ -229,6 +229,16 @@ func TestRecycleSync(t *testing.T) {
|
||||
// recycler simulates one recycle() call that succeeds.
|
||||
wrapTestWithReclaimCalls(operationRecycle, []error{nil}, testSyncVolume),
|
||||
},
|
||||
{
|
||||
// volume is used by a completed pod, pod using claim with the same name bound to different pv is running, should recycle
|
||||
"6-14 - seemingly used by running pod",
|
||||
newVolumeArray("volume6-14", "1Gi", "uid6-14", "completedClaim", v1.VolumeBound, v1.PersistentVolumeReclaimRecycle, classEmpty, annBoundByController),
|
||||
newVolumeArray("volume6-14", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRecycle, classEmpty),
|
||||
newClaimArray("completedClaim", "uid6-14-x", "10Gi", "", v1.ClaimBound, nil),
|
||||
newClaimArray("completedClaim", "uid6-14-x", "10Gi", "", v1.ClaimBound, nil),
|
||||
noevents, noerrors,
|
||||
wrapTestWithReclaimCalls(operationRecycle, []error{nil}, testSyncVolume),
|
||||
},
|
||||
}
|
||||
runSyncTests(t, tests, []*storage.StorageClass{}, pods)
|
||||
}
|
||||
|
@ -21,7 +21,7 @@ import (
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/klog"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
@ -83,7 +83,8 @@ func (e *errObjectName) Error() string {
|
||||
// Restore() sets the latest object pointer back to the informer object.
|
||||
// Get/List() always returns the latest object pointer.
|
||||
type assumeCache struct {
|
||||
mutex sync.Mutex
|
||||
// Synchronizes updates to store
|
||||
rwMutex sync.RWMutex
|
||||
|
||||
// describes the object stored
|
||||
description string
|
||||
@ -151,37 +152,37 @@ func (c *assumeCache) add(obj interface{}) {
|
||||
|
||||
name, err := cache.MetaNamespaceKeyFunc(obj)
|
||||
if err != nil {
|
||||
glog.Errorf("add failed: %v", &errObjectName{err})
|
||||
klog.Errorf("add failed: %v", &errObjectName{err})
|
||||
return
|
||||
}
|
||||
|
||||
c.mutex.Lock()
|
||||
defer c.mutex.Unlock()
|
||||
c.rwMutex.Lock()
|
||||
defer c.rwMutex.Unlock()
|
||||
|
||||
if objInfo, _ := c.getObjInfo(name); objInfo != nil {
|
||||
newVersion, err := c.getObjVersion(name, obj)
|
||||
if err != nil {
|
||||
glog.Errorf("add: couldn't get object version: %v", err)
|
||||
klog.Errorf("add: couldn't get object version: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
storedVersion, err := c.getObjVersion(name, objInfo.latestObj)
|
||||
if err != nil {
|
||||
glog.Errorf("add: couldn't get stored object version: %v", err)
|
||||
klog.Errorf("add: couldn't get stored object version: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Only update object if version is newer.
|
||||
// This is so we don't override assumed objects due to informer resync.
|
||||
if newVersion <= storedVersion {
|
||||
glog.V(10).Infof("Skip adding %v %v to assume cache because version %v is not newer than %v", c.description, name, newVersion, storedVersion)
|
||||
klog.V(10).Infof("Skip adding %v %v to assume cache because version %v is not newer than %v", c.description, name, newVersion, storedVersion)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
objInfo := &objInfo{name: name, latestObj: obj, apiObj: obj}
|
||||
c.store.Update(objInfo)
|
||||
glog.V(10).Infof("Adding %v %v to assume cache: %+v ", c.description, name, obj)
|
||||
klog.V(10).Infof("Adding %v %v to assume cache: %+v ", c.description, name, obj)
|
||||
}
|
||||
|
||||
func (c *assumeCache) update(oldObj interface{}, newObj interface{}) {
|
||||
@ -195,17 +196,17 @@ func (c *assumeCache) delete(obj interface{}) {
|
||||
|
||||
name, err := cache.MetaNamespaceKeyFunc(obj)
|
||||
if err != nil {
|
||||
glog.Errorf("delete failed: %v", &errObjectName{err})
|
||||
klog.Errorf("delete failed: %v", &errObjectName{err})
|
||||
return
|
||||
}
|
||||
|
||||
c.mutex.Lock()
|
||||
defer c.mutex.Unlock()
|
||||
c.rwMutex.Lock()
|
||||
defer c.rwMutex.Unlock()
|
||||
|
||||
objInfo := &objInfo{name: name}
|
||||
err = c.store.Delete(objInfo)
|
||||
if err != nil {
|
||||
glog.Errorf("delete: failed to delete %v %v: %v", c.description, name, err)
|
||||
klog.Errorf("delete: failed to delete %v %v: %v", c.description, name, err)
|
||||
}
|
||||
}
|
||||
|
||||
@ -239,8 +240,8 @@ func (c *assumeCache) getObjInfo(name string) (*objInfo, error) {
|
||||
}
|
||||
|
||||
func (c *assumeCache) Get(objName string) (interface{}, error) {
|
||||
c.mutex.Lock()
|
||||
defer c.mutex.Unlock()
|
||||
c.rwMutex.RLock()
|
||||
defer c.rwMutex.RUnlock()
|
||||
|
||||
objInfo, err := c.getObjInfo(objName)
|
||||
if err != nil {
|
||||
@ -250,20 +251,20 @@ func (c *assumeCache) Get(objName string) (interface{}, error) {
|
||||
}
|
||||
|
||||
func (c *assumeCache) List(indexObj interface{}) []interface{} {
|
||||
c.mutex.Lock()
|
||||
defer c.mutex.Unlock()
|
||||
c.rwMutex.RLock()
|
||||
defer c.rwMutex.RUnlock()
|
||||
|
||||
allObjs := []interface{}{}
|
||||
objs, err := c.store.Index(c.indexName, &objInfo{latestObj: indexObj})
|
||||
if err != nil {
|
||||
glog.Errorf("list index error: %v", err)
|
||||
klog.Errorf("list index error: %v", err)
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, obj := range objs {
|
||||
objInfo, ok := obj.(*objInfo)
|
||||
if !ok {
|
||||
glog.Errorf("list error: %v", &errWrongType{"objInfo", obj})
|
||||
klog.Errorf("list error: %v", &errWrongType{"objInfo", obj})
|
||||
continue
|
||||
}
|
||||
allObjs = append(allObjs, objInfo.latestObj)
|
||||
@ -277,8 +278,8 @@ func (c *assumeCache) Assume(obj interface{}) error {
|
||||
return &errObjectName{err}
|
||||
}
|
||||
|
||||
c.mutex.Lock()
|
||||
defer c.mutex.Unlock()
|
||||
c.rwMutex.Lock()
|
||||
defer c.rwMutex.Unlock()
|
||||
|
||||
objInfo, err := c.getObjInfo(name)
|
||||
if err != nil {
|
||||
@ -301,21 +302,21 @@ func (c *assumeCache) Assume(obj interface{}) error {
|
||||
|
||||
// Only update the cached object
|
||||
objInfo.latestObj = obj
|
||||
glog.V(4).Infof("Assumed %v %q, version %v", c.description, name, newVersion)
|
||||
klog.V(4).Infof("Assumed %v %q, version %v", c.description, name, newVersion)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *assumeCache) Restore(objName string) {
|
||||
c.mutex.Lock()
|
||||
defer c.mutex.Unlock()
|
||||
c.rwMutex.Lock()
|
||||
defer c.rwMutex.Unlock()
|
||||
|
||||
objInfo, err := c.getObjInfo(objName)
|
||||
if err != nil {
|
||||
// This could be expected if object got deleted
|
||||
glog.V(5).Infof("Restore %v %q warning: %v", c.description, objName, err)
|
||||
klog.V(5).Infof("Restore %v %q warning: %v", c.description, objName, err)
|
||||
} else {
|
||||
objInfo.latestObj = objInfo.apiObj
|
||||
glog.V(4).Infof("Restored %v %q", c.description, objName)
|
||||
klog.V(4).Infof("Restored %v %q", c.description, objName)
|
||||
}
|
||||
}
|
||||
|
||||
@ -365,7 +366,7 @@ func (c *pvAssumeCache) ListPVs(storageClassName string) []*v1.PersistentVolume
|
||||
for _, obj := range objs {
|
||||
pv, ok := obj.(*v1.PersistentVolume)
|
||||
if !ok {
|
||||
glog.Errorf("ListPVs: %v", &errWrongType{"v1.PersistentVolume", obj})
|
||||
klog.Errorf("ListPVs: %v", &errWrongType{"v1.PersistentVolume", obj})
|
||||
}
|
||||
pvs = append(pvs, pv)
|
||||
}
|
||||
|
@ -456,7 +456,7 @@ func TestAssumeUpdatePVCCache(t *testing.T) {
|
||||
|
||||
// Assume PVC
|
||||
newPVC := pvc.DeepCopy()
|
||||
newPVC.Annotations["volume.alpha.kubernetes.io/selected-node"] = "test-node"
|
||||
newPVC.Annotations[annSelectedNode] = "test-node"
|
||||
if err := cache.Assume(newPVC); err != nil {
|
||||
t.Fatalf("failed to assume PVC: %v", err)
|
||||
}
|
||||
|
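The assume cache changed above keeps two pointers per object: apiObj, the copy last delivered by the informer, and latestObj, which Assume() may temporarily replace with a scheduler-modified copy until Restore() rolls it back; the diff also moves it from a plain Mutex to an RWMutex so Get/List only take read locks. A toy single-object version of that idea, with illustrative types and field names:

package main

import (
	"fmt"
	"sync"
)

// miniAssumeCache is a stripped-down sketch of the Assume/Restore pattern: the
// informer copy is authoritative, the latest pointer may hold an optimistic,
// scheduler-assumed version until it is restored.
type miniAssumeCache struct {
	rwMutex sync.RWMutex
	apiObj  string
	latest  string
}

func (c *miniAssumeCache) Assume(obj string) {
	c.rwMutex.Lock()
	defer c.rwMutex.Unlock()
	c.latest = obj // optimistic, in-memory only
}

func (c *miniAssumeCache) Restore() {
	c.rwMutex.Lock()
	defer c.rwMutex.Unlock()
	c.latest = c.apiObj // roll back to what the informer delivered
}

func (c *miniAssumeCache) Get() string {
	c.rwMutex.RLock() // readers only need the read lock, as in the RWMutex change above
	defer c.rwMutex.RUnlock()
	return c.latest
}

func main() {
	c := &miniAssumeCache{apiObj: "pv-from-informer", latest: "pv-from-informer"}
	c.Assume("pv-with-assumed-claimref")
	fmt.Println(c.Get())
	c.Restore()
	fmt.Println(c.Get())
}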
60
vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/scheduler_bind_cache_metrics.go
generated
vendored
Normal file
@ -0,0 +1,60 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package persistentvolume
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
// VolumeSchedulerSubsystem - subsystem name used by scheduler
|
||||
const VolumeSchedulerSubsystem = "scheduler_volume"
|
||||
|
||||
var (
|
||||
VolumeBindingRequestSchedulerBinderCache = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Subsystem: VolumeSchedulerSubsystem,
|
||||
Name: "binder_cache_requests_total",
|
||||
Help: "Total number for request volume binding cache",
|
||||
},
|
||||
[]string{"operation"},
|
||||
)
|
||||
VolumeSchedulingStageLatency = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Subsystem: VolumeSchedulerSubsystem,
|
||||
Name: "scheduling_duration_seconds",
|
||||
Help: "Volume scheduling stage latency",
|
||||
Buckets: prometheus.ExponentialBuckets(1000, 2, 15),
|
||||
},
|
||||
[]string{"operation"},
|
||||
)
|
||||
VolumeSchedulingStageFailed = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Subsystem: VolumeSchedulerSubsystem,
|
||||
Name: "scheduling_stage_error_total",
|
||||
Help: "Volume scheduling stage error count",
|
||||
},
|
||||
[]string{"operation"},
|
||||
)
|
||||
)
|
||||
|
||||
// RegisterVolumeSchedulingMetrics is used for scheduler, because the volume binding cache is a library
|
||||
// used by scheduler process.
|
||||
func RegisterVolumeSchedulingMetrics() {
|
||||
prometheus.MustRegister(VolumeBindingRequestSchedulerBinderCache)
|
||||
prometheus.MustRegister(VolumeSchedulingStageLatency)
|
||||
prometheus.MustRegister(VolumeSchedulingStageFailed)
|
||||
}
|
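These collectors are consumed by the binder changes below, which wrap each stage (predicate, assume, bind) in a deferred Observe plus a conditional error-count increment. A compact sketch of that instrumentation pattern using the same subsystem and metric names; the runStage helper is hypothetical, the real code inlines the defer in each method and uses explicit histogram buckets:

package main

import (
	"errors"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var stageLatency = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Subsystem: "scheduler_volume",
		Name:      "scheduling_duration_seconds",
		Help:      "Volume scheduling stage latency",
	},
	[]string{"operation"},
)

var stageFailed = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Subsystem: "scheduler_volume",
		Name:      "scheduling_stage_error_total",
		Help:      "Volume scheduling stage error count",
	},
	[]string{"operation"},
)

// runStage always observes the stage latency and bumps the error counter only
// when the stage fails, mirroring the deferred blocks added to the binder.
func runStage(operation string, fn func() error) error {
	start := time.Now()
	var err error
	defer func() {
		stageLatency.WithLabelValues(operation).Observe(time.Since(start).Seconds())
		if err != nil {
			stageFailed.WithLabelValues(operation).Inc()
		}
	}()
	err = fn()
	return err
}

func main() {
	prometheus.MustRegister(stageLatency, stageFailed)
	_ = runStage("predicate", func() error { return nil })
	_ = runStage("bind", func() error { return errors.New("timed out waiting for PVCs") })
}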
354
vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/scheduler_binder.go
generated
vendored
@ -19,18 +19,18 @@ package persistentvolume
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/klog"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
coreinformers "k8s.io/client-go/informers/core/v1"
|
||||
storageinformers "k8s.io/client-go/informers/storage/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
volumeutil "k8s.io/kubernetes/pkg/volume/util"
|
||||
)
|
||||
|
||||
@ -44,16 +44,19 @@ import (
|
||||
// a. Invokes all predicate functions, parallelized across nodes. FindPodVolumes() is invoked here.
|
||||
// b. Invokes all priority functions. Future/TBD
|
||||
// c. Selects the best node for the Pod.
|
||||
// d. Cache the node selection for the Pod. (Assume phase)
|
||||
// d. Cache the node selection for the Pod. AssumePodVolumes() is invoked here.
|
||||
// i. If PVC binding is required, cache in-memory only:
|
||||
// * Updated PV objects for prebinding to the corresponding PVCs.
|
||||
// * For the pod, which PVs need API updates.
|
||||
// AssumePodVolumes() is invoked here. Then BindPodVolumes() is called asynchronously by the
|
||||
// scheduler. After BindPodVolumes() is complete, the Pod is added back to the scheduler queue
|
||||
// to be processed again until all PVCs are bound.
|
||||
// ii. If PVC binding is not required, cache the Pod->Node binding in the scheduler's pod cache,
|
||||
// and asynchronously bind the Pod to the Node. This is handled in the scheduler and not here.
|
||||
// 2. Once the assume operation is done, the scheduler processes the next Pod in the scheduler queue
|
||||
// * For manual binding: update PV objects for prebinding to the corresponding PVCs.
|
||||
// * For dynamic provisioning: update PVC object with a selected node from c)
|
||||
// * For the pod, which PVCs and PVs need API updates.
|
||||
// ii. Afterwards, the main scheduler caches the Pod->Node binding in the scheduler's pod cache,
|
||||
// This is handled in the scheduler and not here.
|
||||
// e. Asynchronously bind volumes and pod in a separate goroutine
|
||||
// i. BindPodVolumes() is called first. It makes all the necessary API updates and waits for
|
||||
// PV controller to fully bind and provision the PVCs. If binding fails, the Pod is sent
|
||||
// back through the scheduler.
|
||||
// ii. After BindPodVolumes() is complete, then the scheduler does the final Pod->Node binding.
|
||||
// 2. Once all the assume operations are done in d), the scheduler processes the next Pod in the scheduler queue
|
||||
// while the actual binding operation occurs in the background.
|
||||
type SchedulerVolumeBinder interface {
|
||||
// FindPodVolumes checks if all of a Pod's PVCs can be satisfied by the node.
|
||||
@ -73,18 +76,18 @@ type SchedulerVolumeBinder interface {
|
||||
// 2. Take the PVCs that need provisioning and update the PVC cache with related
|
||||
// annotations set.
|
||||
//
|
||||
// It returns true if all volumes are fully bound, and returns true if any volume binding/provisioning
|
||||
// API operation needs to be done afterwards.
|
||||
// It returns true if all volumes are fully bound
|
||||
//
|
||||
// This function will modify assumedPod with the node name.
|
||||
// This function is called serially.
|
||||
AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (allFullyBound bool, bindingRequired bool, err error)
|
||||
AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (allFullyBound bool, err error)
|
||||
|
||||
// BindPodVolumes will:
|
||||
// 1. Initiate the volume binding by making the API call to prebind the PV
|
||||
// to its matching PVC.
|
||||
// 2. Trigger the volume provisioning by making the API call to set related
|
||||
// annotations on the PVC
|
||||
// 3. Wait for PVCs to be completely bound by the PV controller
|
||||
//
|
||||
// This function can be called in parallel.
|
||||
BindPodVolumes(assumedPod *v1.Pod) error
|
||||
@ -102,6 +105,9 @@ type volumeBinder struct {
|
||||
// Stores binding decisions that were made in FindPodVolumes for use in AssumePodVolumes.
|
||||
// AssumePodVolumes modifies the bindings again for use in BindPodVolumes.
|
||||
podBindingCache PodBindingCache
|
||||
|
||||
// Amount of time to wait for the bind operation to succeed
|
||||
bindTimeout time.Duration
|
||||
}
|
||||
|
||||
// NewVolumeBinder sets up all the caches needed for the scheduler to make volume binding decisions.
|
||||
@ -109,7 +115,8 @@ func NewVolumeBinder(
|
||||
kubeClient clientset.Interface,
|
||||
pvcInformer coreinformers.PersistentVolumeClaimInformer,
|
||||
pvInformer coreinformers.PersistentVolumeInformer,
|
||||
storageClassInformer storageinformers.StorageClassInformer) SchedulerVolumeBinder {
|
||||
storageClassInformer storageinformers.StorageClassInformer,
|
||||
bindTimeout time.Duration) SchedulerVolumeBinder {
|
||||
|
||||
// TODO: find better way...
|
||||
ctrl := &PersistentVolumeController{
|
||||
@ -122,6 +129,7 @@ func NewVolumeBinder(
|
||||
pvcCache: NewPVCAssumeCache(pvcInformer.Informer()),
|
||||
pvCache: NewPVAssumeCache(pvInformer.Informer()),
|
||||
podBindingCache: NewPodBindingCache(),
|
||||
bindTimeout: bindTimeout,
|
||||
}
|
||||
|
||||
return b
|
||||
@ -136,11 +144,18 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume
|
||||
podName := getPodName(pod)
|
||||
|
||||
// Warning: Below log needs high verbosity as it can be printed several times (#60933).
|
||||
glog.V(5).Infof("FindPodVolumes for pod %q, node %q", podName, node.Name)
|
||||
klog.V(5).Infof("FindPodVolumes for pod %q, node %q", podName, node.Name)
|
||||
|
||||
// Initialize to true for pods that don't have volumes
|
||||
unboundVolumesSatisfied = true
|
||||
boundVolumesSatisfied = true
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
VolumeSchedulingStageLatency.WithLabelValues("predicate").Observe(time.Since(start).Seconds())
|
||||
if err != nil {
|
||||
VolumeSchedulingStageFailed.WithLabelValues("predicate").Inc()
|
||||
}
|
||||
}()
|
||||
|
||||
// The pod's volumes need to be processed in one call to avoid the race condition where
|
||||
// volumes can get bound/provisioned in between calls.
|
||||
@ -151,7 +166,7 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume
|
||||
|
||||
// Immediate claims should be bound
|
||||
if len(unboundClaimsImmediate) > 0 {
|
||||
return false, false, fmt.Errorf("pod has unbound PersistentVolumeClaims")
|
||||
return false, false, fmt.Errorf("pod has unbound immediate PersistentVolumeClaims")
|
||||
}
|
||||
|
||||
// Check PV node affinity on bound volumes
|
||||
@ -169,13 +184,11 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume
|
||||
return false, false, err
|
||||
}
|
||||
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicProvisioningScheduling) {
|
||||
// Try to provision for unbound volumes
|
||||
if !unboundVolumesSatisfied {
|
||||
unboundVolumesSatisfied, err = b.checkVolumeProvisions(pod, claimsToProvision, node)
|
||||
if err != nil {
|
||||
return false, false, err
|
||||
}
|
||||
// Try to provision for unbound volumes
|
||||
if !unboundVolumesSatisfied {
|
||||
unboundVolumesSatisfied, err = b.checkVolumeProvisions(pod, claimsToProvision, node)
|
||||
if err != nil {
|
||||
return false, false, err
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -187,25 +200,34 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume
|
||||
// in podBindingCache for the chosen node, and:
|
||||
// 1. Update the pvCache with the new prebound PV.
|
||||
// 2. Update the pvcCache with the new PVCs with annotations set
|
||||
// It will update podBindingCache again with the PVs and PVCs that need an API update.
|
||||
func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (allFullyBound, bindingRequired bool, err error) {
|
||||
// 3. Update podBindingCache again with cached API updates for PVs and PVCs.
|
||||
func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (allFullyBound bool, err error) {
|
||||
podName := getPodName(assumedPod)
|
||||
|
||||
glog.V(4).Infof("AssumePodVolumes for pod %q, node %q", podName, nodeName)
|
||||
klog.V(4).Infof("AssumePodVolumes for pod %q, node %q", podName, nodeName)
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
VolumeSchedulingStageLatency.WithLabelValues("assume").Observe(time.Since(start).Seconds())
|
||||
if err != nil {
|
||||
VolumeSchedulingStageFailed.WithLabelValues("assume").Inc()
|
||||
}
|
||||
}()
|
||||
|
||||
if allBound := b.arePodVolumesBound(assumedPod); allBound {
|
||||
glog.V(4).Infof("AssumePodVolumes for pod %q, node %q: all PVCs bound and nothing to do", podName, nodeName)
|
||||
return true, false, nil
|
||||
klog.V(4).Infof("AssumePodVolumes for pod %q, node %q: all PVCs bound and nothing to do", podName, nodeName)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
assumedPod.Spec.NodeName = nodeName
|
||||
// Assume PV
|
||||
claimsToBind := b.podBindingCache.GetBindings(assumedPod, nodeName)
|
||||
newBindings := []*bindingInfo{}
|
||||
|
||||
claimsToBind := b.podBindingCache.GetBindings(assumedPod, nodeName)
|
||||
claimsToProvision := b.podBindingCache.GetProvisionedPVCs(assumedPod, nodeName)
|
||||
|
||||
// Assume PV
|
||||
newBindings := []*bindingInfo{}
|
||||
for _, binding := range claimsToBind {
|
||||
newPV, dirty, err := b.ctrl.getBindVolumeToClaim(binding.pv, binding.pvc)
|
||||
glog.V(5).Infof("AssumePodVolumes: getBindVolumeToClaim for pod %q, PV %q, PVC %q. newPV %p, dirty %v, err: %v",
|
||||
klog.V(5).Infof("AssumePodVolumes: getBindVolumeToClaim for pod %q, PV %q, PVC %q. newPV %p, dirty %v, err: %v",
|
||||
podName,
|
||||
binding.pv.Name,
|
||||
binding.pvc.Name,
|
||||
@ -214,29 +236,20 @@ func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (al
|
||||
err)
|
||||
if err != nil {
|
||||
b.revertAssumedPVs(newBindings)
|
||||
return false, true, err
|
||||
return false, err
|
||||
}
|
||||
// TODO: can we assume everytime?
|
||||
if dirty {
|
||||
err = b.pvCache.Assume(newPV)
|
||||
if err != nil {
|
||||
b.revertAssumedPVs(newBindings)
|
||||
return false, true, err
|
||||
return false, err
|
||||
}
|
||||
|
||||
newBindings = append(newBindings, &bindingInfo{pv: newPV, pvc: binding.pvc})
|
||||
}
|
||||
}
|
||||
|
||||
// Don't update cached bindings if no API updates are needed. This can happen if we
|
||||
// previously updated the PV object and are waiting for the PV controller to finish binding.
|
||||
if len(newBindings) != 0 {
|
||||
bindingRequired = true
|
||||
b.podBindingCache.UpdateBindings(assumedPod, nodeName, newBindings)
|
||||
newBindings = append(newBindings, &bindingInfo{pv: newPV, pvc: binding.pvc})
|
||||
}
|
||||
|
||||
// Assume PVCs
|
||||
claimsToProvision := b.podBindingCache.GetProvisionedPVCs(assumedPod, nodeName)
|
||||
|
||||
newProvisionedPVCs := []*v1.PersistentVolumeClaim{}
|
||||
for _, claim := range claimsToProvision {
|
||||
// The claims from method args can be pointing to watcher cache. We must not
|
||||
@ -253,50 +266,45 @@ func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (al
|
||||
newProvisionedPVCs = append(newProvisionedPVCs, claimClone)
|
||||
}
|
||||
|
||||
if len(newProvisionedPVCs) != 0 {
|
||||
bindingRequired = true
|
||||
b.podBindingCache.UpdateProvisionedPVCs(assumedPod, nodeName, newProvisionedPVCs)
|
||||
}
|
||||
// Update cache with the assumed pvcs and pvs
|
||||
// Even if length is zero, update the cache with an empty slice to indicate that no
|
||||
// operations are needed
|
||||
b.podBindingCache.UpdateBindings(assumedPod, nodeName, newBindings)
|
||||
b.podBindingCache.UpdateProvisionedPVCs(assumedPod, nodeName, newProvisionedPVCs)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// BindPodVolumes gets the cached bindings and PVCs to provision in podBindingCache
|
||||
// and makes the API update for those PVs/PVCs.
|
||||
func (b *volumeBinder) BindPodVolumes(assumedPod *v1.Pod) error {
|
||||
// BindPodVolumes gets the cached bindings and PVCs to provision in podBindingCache,
|
||||
// makes the API update for those PVs/PVCs, and waits for the PVCs to be completely bound
|
||||
// by the PV controller.
|
||||
func (b *volumeBinder) BindPodVolumes(assumedPod *v1.Pod) (err error) {
|
||||
podName := getPodName(assumedPod)
|
||||
glog.V(4).Infof("BindPodVolumes for pod %q", podName)
|
||||
klog.V(4).Infof("BindPodVolumes for pod %q, node %q", podName, assumedPod.Spec.NodeName)
|
||||
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
VolumeSchedulingStageLatency.WithLabelValues("bind").Observe(time.Since(start).Seconds())
|
||||
if err != nil {
|
||||
VolumeSchedulingStageFailed.WithLabelValues("bind").Inc()
|
||||
}
|
||||
}()
|
||||
|
||||
bindings := b.podBindingCache.GetBindings(assumedPod, assumedPod.Spec.NodeName)
|
||||
claimsToProvision := b.podBindingCache.GetProvisionedPVCs(assumedPod, assumedPod.Spec.NodeName)
|
||||
|
||||
// Do the actual prebinding. Let the PV controller take care of the rest
|
||||
// There is no API rollback if the actual binding fails
|
||||
for i, bindingInfo := range bindings {
|
||||
glog.V(5).Infof("BindPodVolumes: Pod %q, binding PV %q to PVC %q", podName, bindingInfo.pv.Name, bindingInfo.pvc.Name)
|
||||
_, err := b.ctrl.updateBindVolumeToClaim(bindingInfo.pv, bindingInfo.pvc, false)
|
||||
if err != nil {
|
||||
// only revert assumed cached updates for volumes we haven't successfully bound
|
||||
b.revertAssumedPVs(bindings[i:])
|
||||
// Revert all of the assumed cached updates for claims,
|
||||
// since no actual API update will be done
|
||||
b.revertAssumedPVCs(claimsToProvision)
|
||||
return err
|
||||
}
|
||||
// Start API operations
|
||||
err = b.bindAPIUpdate(podName, bindings, claimsToProvision)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Update claims objects to trigger volume provisioning. Let the PV controller take care of the rest
|
||||
// PV controller is expected to signal back by removing related annotations if actual provisioning fails
|
||||
for i, claim := range claimsToProvision {
|
||||
if _, err := b.ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(claim); err != nil {
|
||||
glog.V(4).Infof("updating PersistentVolumeClaim[%s] failed: %v", getPVCName(claim), err)
|
||||
// only revert assumed cached updates for claims we haven't successfully updated
|
||||
b.revertAssumedPVCs(claimsToProvision[i:])
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
return wait.Poll(time.Second, b.bindTimeout, func() (bool, error) {
|
||||
// Get cached values every time in case the pod gets deleted
|
||||
bindings = b.podBindingCache.GetBindings(assumedPod, assumedPod.Spec.NodeName)
|
||||
claimsToProvision = b.podBindingCache.GetProvisionedPVCs(assumedPod, assumedPod.Spec.NodeName)
|
||||
return b.checkBindings(assumedPod, bindings, claimsToProvision)
|
||||
})
|
||||
}
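The reworked BindPodVolumes above no longer returns right after issuing the API writes; it keeps polling the binder's caches until the PV controller reports every PVC fully bound or the bind timeout expires. The following is a minimal standalone sketch of that poll-until-done pattern using only the standard library; the function and variable names are illustrative and this is not the vendored implementation, which uses wait.Poll and the podBindingCache.

// Minimal sketch of the poll-until-bound pattern, with the Kubernetes-specific
// types replaced by a plain checker function. Names are illustrative only.
package main

import (
	"errors"
	"fmt"
	"time"
)

// pollBinding calls check every interval until it reports done, returns an
// error, or timeout elapses. This mirrors the role the poll loop plays in
// BindPodVolumes: retry the cheap cache check, never the API writes.
func pollBinding(interval, timeout time.Duration, check func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for {
		done, err := check()
		if err != nil {
			return err // unrecoverable: scheduling must be retried
		}
		if done {
			return nil
		}
		if time.Now().After(deadline) {
			return errors.New("timed out waiting for volume binding")
		}
		time.Sleep(interval)
	}
}

func main() {
	bound := 0
	err := pollBinding(10*time.Millisecond, time.Second, func() (bool, error) {
		bound++ // pretend the PV controller finishes after a few checks
		return bound >= 3, nil
	})
	fmt.Println("bind result:", err)
}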
|
||||
|
||||
func getPodName(pod *v1.Pod) string {
|
||||
@ -307,46 +315,162 @@ func getPVCName(pvc *v1.PersistentVolumeClaim) string {
|
||||
return pvc.Namespace + "/" + pvc.Name
|
||||
}
|
||||
|
||||
func (b *volumeBinder) isVolumeBound(namespace string, vol *v1.Volume, checkFullyBound bool) (bool, *v1.PersistentVolumeClaim, error) {
|
||||
// bindAPIUpdate gets the cached bindings and PVCs to provision in podBindingCache
|
||||
// and makes the API update for those PVs/PVCs.
|
||||
func (b *volumeBinder) bindAPIUpdate(podName string, bindings []*bindingInfo, claimsToProvision []*v1.PersistentVolumeClaim) error {
|
||||
if bindings == nil {
|
||||
return fmt.Errorf("failed to get cached bindings for pod %q", podName)
|
||||
}
|
||||
if claimsToProvision == nil {
|
||||
return fmt.Errorf("failed to get cached claims to provision for pod %q", podName)
|
||||
}
|
||||
|
||||
lastProcessedBinding := 0
|
||||
lastProcessedProvisioning := 0
|
||||
defer func() {
|
||||
// only revert assumed cached updates for volumes we haven't successfully bound
|
||||
if lastProcessedBinding < len(bindings) {
|
||||
b.revertAssumedPVs(bindings[lastProcessedBinding:])
|
||||
}
|
||||
// only revert assumed cached updates for claims we haven't updated,
|
||||
if lastProcessedProvisioning < len(claimsToProvision) {
|
||||
b.revertAssumedPVCs(claimsToProvision[lastProcessedProvisioning:])
|
||||
}
|
||||
}()
|
||||
|
||||
var (
|
||||
binding *bindingInfo
|
||||
claim *v1.PersistentVolumeClaim
|
||||
)
|
||||
|
||||
// Do the actual prebinding. Let the PV controller take care of the rest
|
||||
// There is no API rollback if the actual binding fails
|
||||
for _, binding = range bindings {
|
||||
klog.V(5).Infof("bindAPIUpdate: Pod %q, binding PV %q to PVC %q", podName, binding.pv.Name, binding.pvc.Name)
|
||||
// TODO: does it hurt if we make an api call and nothing needs to be updated?
|
||||
if _, err := b.ctrl.updateBindVolumeToClaim(binding.pv, binding.pvc, false); err != nil {
|
||||
return err
|
||||
}
|
||||
lastProcessedBinding++
|
||||
}
|
||||
|
||||
// Update claims objects to trigger volume provisioning. Let the PV controller take care of the rest
|
||||
// PV controller is expected to signal back by removing related annotations if actual provisioning fails
|
||||
for _, claim = range claimsToProvision {
|
||||
klog.V(5).Infof("bindAPIUpdate: Pod %q, PVC %q", podName, getPVCName(claim))
|
||||
if _, err := b.ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(claim); err != nil {
|
||||
return err
|
||||
}
|
||||
lastProcessedProvisioning++
|
||||
}
|
||||
|
||||
return nil
|
||||
}
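bindAPIUpdate issues a sequence of API writes and, on any failure, reverts only the assumed-cache entries whose writes were never attempted, using a deferred cleanup keyed on "last processed" counters. The sketch below shows that pattern in isolation, with the API calls replaced by a fallible apply function; all names here are illustrative, not from the vendored code.

// Standalone sketch of the "revert the unprocessed tail" pattern.
package main

import (
	"errors"
	"fmt"
)

func applyAll(items []string, apply func(string) error, revert func([]string)) error {
	lastProcessed := 0
	defer func() {
		// Anything not successfully applied is rolled back in the cache.
		if lastProcessed < len(items) {
			revert(items[lastProcessed:])
		}
	}()
	for _, it := range items {
		if err := apply(it); err != nil {
			return err
		}
		lastProcessed++
	}
	return nil
}

func main() {
	err := applyAll([]string{"pv-a", "pv-b", "pv-c"},
		func(name string) error {
			if name == "pv-b" {
				return errors.New("conflict updating " + name)
			}
			fmt.Println("updated", name)
			return nil
		},
		func(tail []string) { fmt.Println("reverting assumed state for", tail) },
	)
	fmt.Println("err:", err)
}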
|
||||
|
||||
// checkBindings runs through all the PVCs in the Pod and checks:
|
||||
// * if the PVC is fully bound
|
||||
// * if there are any conditions that require binding to fail and be retried
|
||||
//
|
||||
// It returns true when all of the Pod's PVCs are fully bound, and an error if
|
||||
// binding (and scheduling) needs to be retried
|
||||
func (b *volumeBinder) checkBindings(pod *v1.Pod, bindings []*bindingInfo, claimsToProvision []*v1.PersistentVolumeClaim) (bool, error) {
|
||||
podName := getPodName(pod)
|
||||
if bindings == nil {
|
||||
return false, fmt.Errorf("failed to get cached bindings for pod %q", podName)
|
||||
}
|
||||
if claimsToProvision == nil {
|
||||
return false, fmt.Errorf("failed to get cached claims to provision for pod %q", podName)
|
||||
}
|
||||
|
||||
for _, binding := range bindings {
|
||||
// Check for any conditions that might require scheduling retry
|
||||
|
||||
// Check if pv still exists
|
||||
pv, err := b.pvCache.GetPV(binding.pv.Name)
|
||||
if err != nil || pv == nil {
|
||||
return false, fmt.Errorf("failed to check pv binding: %v", err)
|
||||
}
|
||||
|
||||
// Check if pv.ClaimRef got dropped by unbindVolume()
|
||||
if pv.Spec.ClaimRef == nil || pv.Spec.ClaimRef.UID == "" {
|
||||
return false, fmt.Errorf("ClaimRef got reset for pv %q", pv.Name)
|
||||
}
|
||||
|
||||
// Check if pvc is fully bound
|
||||
if isBound, _, err := b.isPVCBound(binding.pvc.Namespace, binding.pvc.Name); !isBound || err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// TODO: what if pvc is bound to the wrong pv? It means our assume cache should be reverted.
|
||||
// Or will pv controller cleanup the pv.ClaimRef?
|
||||
}
|
||||
|
||||
for _, claim := range claimsToProvision {
|
||||
bound, pvc, err := b.isPVCBound(claim.Namespace, claim.Name)
|
||||
if err != nil || pvc == nil {
|
||||
return false, fmt.Errorf("failed to check pvc binding: %v", err)
|
||||
}
|
||||
|
||||
// Check if selectedNode annotation is still set
|
||||
if pvc.Annotations == nil {
|
||||
return false, fmt.Errorf("selectedNode annotation reset for PVC %q", pvc.Name)
|
||||
}
|
||||
selectedNode := pvc.Annotations[annSelectedNode]
|
||||
if selectedNode != pod.Spec.NodeName {
|
||||
return false, fmt.Errorf("selectedNode annotation value %q not set to scheduled node %q", selectedNode, pod.Spec.NodeName)
|
||||
}
|
||||
|
||||
if !bound {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
// All pvs and pvcs that we operated on are bound
|
||||
klog.V(4).Infof("All PVCs for pod %q are bound", podName)
|
||||
return true, nil
|
||||
}
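For PVCs that are being dynamically provisioned, checkBindings verifies that the selected-node annotation is still present and still names the scheduled node before it keeps waiting. A small sketch of that consistency check follows; the annotation key is passed in as a parameter rather than hard-coded, since the real constant (annSelectedNode) lives in the persistentvolume package, and the example key below is made up.

// Sketch of the selected-node consistency check for provisioned PVCs.
package main

import "fmt"

func checkSelectedNode(annotations map[string]string, key, scheduledNode string) error {
	if annotations == nil {
		return fmt.Errorf("selectedNode annotation reset")
	}
	if got := annotations[key]; got != scheduledNode {
		return fmt.Errorf("selectedNode annotation value %q not set to scheduled node %q", got, scheduledNode)
	}
	return nil // still provisioning; keep polling until the PVC is bound
}

func main() {
	anns := map[string]string{"example.io/selected-node": "node1"}
	fmt.Println(checkSelectedNode(anns, "example.io/selected-node", "node1"))
	fmt.Println(checkSelectedNode(anns, "example.io/selected-node", "node2"))
}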
|
||||
|
||||
func (b *volumeBinder) isVolumeBound(namespace string, vol *v1.Volume) (bool, *v1.PersistentVolumeClaim, error) {
|
||||
if vol.PersistentVolumeClaim == nil {
|
||||
return true, nil, nil
|
||||
}
|
||||
|
||||
pvcName := vol.PersistentVolumeClaim.ClaimName
|
||||
return b.isPVCBound(namespace, pvcName)
|
||||
}
|
||||
|
||||
func (b *volumeBinder) isPVCBound(namespace, pvcName string) (bool, *v1.PersistentVolumeClaim, error) {
|
||||
claim := &v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: pvcName,
|
||||
Namespace: namespace,
|
||||
},
|
||||
}
|
||||
pvc, err := b.pvcCache.GetPVC(getPVCName(claim))
|
||||
pvcKey := getPVCName(claim)
|
||||
pvc, err := b.pvcCache.GetPVC(pvcKey)
|
||||
if err != nil || pvc == nil {
|
||||
return false, nil, fmt.Errorf("error getting PVC %q: %v", pvcName, err)
|
||||
return false, nil, fmt.Errorf("error getting PVC %q: %v", pvcKey, err)
|
||||
}
|
||||
|
||||
pvName := pvc.Spec.VolumeName
|
||||
if pvName != "" {
|
||||
if checkFullyBound {
|
||||
if metav1.HasAnnotation(pvc.ObjectMeta, annBindCompleted) {
|
||||
glog.V(5).Infof("PVC %q is fully bound to PV %q", getPVCName(pvc), pvName)
|
||||
return true, pvc, nil
|
||||
} else {
|
||||
glog.V(5).Infof("PVC %q is not fully bound to PV %q", getPVCName(pvc), pvName)
|
||||
return false, pvc, nil
|
||||
}
|
||||
if metav1.HasAnnotation(pvc.ObjectMeta, annBindCompleted) {
|
||||
klog.V(5).Infof("PVC %q is fully bound to PV %q", pvcKey, pvName)
|
||||
return true, pvc, nil
|
||||
} else {
|
||||
klog.V(5).Infof("PVC %q is not fully bound to PV %q", pvcKey, pvName)
|
||||
return false, pvc, nil
|
||||
}
|
||||
glog.V(5).Infof("PVC %q is bound or prebound to PV %q", getPVCName(pvc), pvName)
|
||||
return true, pvc, nil
|
||||
}
|
||||
|
||||
glog.V(5).Infof("PVC %q is not bound", getPVCName(pvc))
|
||||
klog.V(5).Infof("PVC %q is not bound", pvcKey)
|
||||
return false, pvc, nil
|
||||
}
|
||||
|
||||
// arePodVolumesBound returns true if all volumes are fully bound
|
||||
func (b *volumeBinder) arePodVolumesBound(pod *v1.Pod) bool {
|
||||
for _, vol := range pod.Spec.Volumes {
|
||||
if isBound, _, _ := b.isVolumeBound(pod.Namespace, &vol, true); !isBound {
|
||||
if isBound, _, _ := b.isVolumeBound(pod.Namespace, &vol); !isBound {
|
||||
// Pod has at least one PVC that needs binding
|
||||
return false
|
||||
}
|
||||
@ -362,7 +486,7 @@ func (b *volumeBinder) getPodVolumes(pod *v1.Pod) (boundClaims []*v1.PersistentV
|
||||
unboundClaims = []*bindingInfo{}
|
||||
|
||||
for _, vol := range pod.Spec.Volumes {
|
||||
volumeBound, pvc, err := b.isVolumeBound(pod.Namespace, &vol, false)
|
||||
volumeBound, pvc, err := b.isVolumeBound(pod.Namespace, &vol)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
@ -376,7 +500,8 @@ func (b *volumeBinder) getPodVolumes(pod *v1.Pod) (boundClaims []*v1.PersistentV
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
if delayBinding {
|
||||
// Prebound PVCs are treated as unbound immediate binding
|
||||
if delayBinding && pvc.Spec.VolumeName == "" {
|
||||
// Scheduler path
|
||||
unboundClaims = append(unboundClaims, &bindingInfo{pvc: pvc})
|
||||
} else {
|
||||
@ -398,13 +523,13 @@ func (b *volumeBinder) checkBoundClaims(claims []*v1.PersistentVolumeClaim, node
|
||||
|
||||
err = volumeutil.CheckNodeAffinity(pv, node.Labels)
|
||||
if err != nil {
|
||||
glog.V(4).Infof("PersistentVolume %q, Node %q mismatch for Pod %q: %v", pvName, node.Name, err.Error(), podName)
|
||||
klog.V(4).Infof("PersistentVolume %q, Node %q mismatch for Pod %q: %v", pvName, node.Name, podName, err)
|
||||
return false, nil
|
||||
}
|
||||
glog.V(5).Infof("PersistentVolume %q, Node %q matches for Pod %q", pvName, node.Name, podName)
|
||||
klog.V(5).Infof("PersistentVolume %q, Node %q matches for Pod %q", pvName, node.Name, podName)
|
||||
}
|
||||
|
||||
glog.V(4).Infof("All bound volumes for Pod %q match with Node %q", podName, node.Name)
|
||||
klog.V(4).Infof("All bound volumes for Pod %q match with Node %q", podName, node.Name)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
@ -428,6 +553,7 @@ func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*bindingI
|
||||
storageClassName = *storageClass
|
||||
}
|
||||
allPVs := b.pvCache.ListPVs(storageClassName)
|
||||
pvcName := getPVCName(bindingInfo.pvc)
|
||||
|
||||
// Find a matching PV
|
||||
bindingInfo.pv, err = findMatchingVolume(bindingInfo.pvc, allPVs, node, chosenPVs, true)
|
||||
@ -435,7 +561,7 @@ func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*bindingI
|
||||
return false, nil, err
|
||||
}
|
||||
if bindingInfo.pv == nil {
|
||||
glog.V(4).Infof("No matching volumes for Pod %q, PVC %q on node %q", podName, getPVCName(bindingInfo.pvc), node.Name)
|
||||
klog.V(4).Infof("No matching volumes for Pod %q, PVC %q on node %q", podName, pvcName, node.Name)
|
||||
unboundClaims = append(unboundClaims, bindingInfo.pvc)
|
||||
foundMatches = false
|
||||
continue
|
||||
@ -444,7 +570,7 @@ func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*bindingI
|
||||
// matching PV needs to be excluded so we don't select it again
|
||||
chosenPVs[bindingInfo.pv.Name] = bindingInfo.pv
|
||||
matchedClaims = append(matchedClaims, bindingInfo)
|
||||
glog.V(5).Infof("Found matching PV %q for PVC %q on node %q for pod %q", bindingInfo.pv.Name, getPVCName(bindingInfo.pvc), node.Name, podName)
|
||||
klog.V(5).Infof("Found matching PV %q for PVC %q on node %q for pod %q", bindingInfo.pv.Name, pvcName, node.Name, podName)
|
||||
}
|
||||
|
||||
// Mark cache with all the matches for each PVC for this node
|
||||
@ -453,7 +579,7 @@ func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*bindingI
|
||||
}
|
||||
|
||||
if foundMatches {
|
||||
glog.V(4).Infof("Found matching volumes for pod %q on node %q", podName, node.Name)
|
||||
klog.V(4).Infof("Found matching volumes for pod %q on node %q", podName, node.Name)
|
||||
}
|
||||
|
||||
return
|
||||
@ -467,9 +593,10 @@ func (b *volumeBinder) checkVolumeProvisions(pod *v1.Pod, claimsToProvision []*v
|
||||
provisionedClaims := []*v1.PersistentVolumeClaim{}
|
||||
|
||||
for _, claim := range claimsToProvision {
|
||||
pvcName := getPVCName(claim)
|
||||
className := v1helper.GetPersistentVolumeClaimClass(claim)
|
||||
if className == "" {
|
||||
return false, fmt.Errorf("no class for claim %q", getPVCName(claim))
|
||||
return false, fmt.Errorf("no class for claim %q", pvcName)
|
||||
}
|
||||
|
||||
class, err := b.ctrl.classLister.Get(className)
|
||||
@ -478,13 +605,13 @@ func (b *volumeBinder) checkVolumeProvisions(pod *v1.Pod, claimsToProvision []*v
|
||||
}
|
||||
provisioner := class.Provisioner
|
||||
if provisioner == "" || provisioner == notSupportedProvisioner {
|
||||
glog.V(4).Infof("storage class %q of claim %q does not support dynamic provisioning", className, getPVCName(claim))
|
||||
klog.V(4).Infof("storage class %q of claim %q does not support dynamic provisioning", className, pvcName)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Check if the node can satisfy the topology requirement in the class
|
||||
if !v1helper.MatchTopologySelectorTerms(class.AllowedTopologies, labels.Set(node.Labels)) {
|
||||
glog.V(4).Infof("Node %q cannot satisfy provisioning topology requirements of claim %q", node.Name, getPVCName(claim))
|
||||
klog.V(4).Infof("Node %q cannot satisfy provisioning topology requirements of claim %q", node.Name, pvcName)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
@ -494,7 +621,7 @@ func (b *volumeBinder) checkVolumeProvisions(pod *v1.Pod, claimsToProvision []*v
|
||||
provisionedClaims = append(provisionedClaims, claim)
|
||||
|
||||
}
|
||||
glog.V(4).Infof("Provisioning for claims of pod %q that has no matching volumes on node %q ...", podName, node.Name)
|
||||
klog.V(4).Infof("Provisioning for claims of pod %q that has no matching volumes on node %q ...", podName, node.Name)
|
||||
|
||||
// Mark cache with all the PVCs that need provisioning for this node
|
||||
b.podBindingCache.UpdateProvisionedPVCs(pod, node.Name, provisionedClaims)
|
||||
@ -522,19 +649,6 @@ type bindingInfo struct {
|
||||
pv *v1.PersistentVolume
|
||||
}
|
||||
|
||||
// Used in unit test errors
|
||||
func (b bindingInfo) String() string {
|
||||
pvcName := ""
|
||||
pvName := ""
|
||||
if b.pvc != nil {
|
||||
pvcName = getPVCName(b.pvc)
|
||||
}
|
||||
if b.pv != nil {
|
||||
pvName = b.pv.Name
|
||||
}
|
||||
return fmt.Sprintf("[PVC %q, PV %q]", pvcName, pvName)
|
||||
}
|
||||
|
||||
type byPVCSize []*bindingInfo
|
||||
|
||||
func (a byPVCSize) Len() int {
|
||||
|
@ -31,6 +31,8 @@ type PodBindingCache interface {
|
||||
UpdateBindings(pod *v1.Pod, node string, bindings []*bindingInfo)
|
||||
|
||||
// GetBindings will return the cached bindings for the given pod and node.
|
||||
// A nil return value means that the entry was not found. An empty slice
|
||||
// means that no binding operations are needed.
|
||||
GetBindings(pod *v1.Pod, node string) []*bindingInfo
|
||||
|
||||
// UpdateProvisionedPVCs will update the cache with the given provisioning decisions
|
||||
@ -38,6 +40,8 @@ type PodBindingCache interface {
|
||||
UpdateProvisionedPVCs(pod *v1.Pod, node string, provisionings []*v1.PersistentVolumeClaim)
|
||||
|
||||
// GetProvisionedPVCs will return the cached provisioning decisions for the given pod and node.
|
||||
// A nil return value means that the entry was not found. An empty slice
|
||||
// means that no provisioning operations are needed.
|
||||
GetProvisionedPVCs(pod *v1.Pod, node string) []*v1.PersistentVolumeClaim
|
||||
|
||||
// DeleteBindings will remove all cached bindings and provisionings for the given pod.
|
||||
@ -46,7 +50,8 @@ type PodBindingCache interface {
|
||||
}
|
||||
|
||||
type podBindingCache struct {
|
||||
mutex sync.Mutex
|
||||
// synchronizes bindingDecisions
|
||||
rwMutex sync.RWMutex
|
||||
|
||||
// Key = pod name
|
||||
// Value = nodeDecisions
|
||||
@ -68,16 +73,20 @@ func NewPodBindingCache() PodBindingCache {
|
||||
}
|
||||
|
||||
func (c *podBindingCache) DeleteBindings(pod *v1.Pod) {
|
||||
c.mutex.Lock()
|
||||
defer c.mutex.Unlock()
|
||||
c.rwMutex.Lock()
|
||||
defer c.rwMutex.Unlock()
|
||||
|
||||
podName := getPodName(pod)
|
||||
delete(c.bindingDecisions, podName)
|
||||
|
||||
if _, ok := c.bindingDecisions[podName]; ok {
|
||||
delete(c.bindingDecisions, podName)
|
||||
VolumeBindingRequestSchedulerBinderCache.WithLabelValues("delete").Inc()
|
||||
}
|
||||
}
|
||||
|
||||
func (c *podBindingCache) UpdateBindings(pod *v1.Pod, node string, bindings []*bindingInfo) {
|
||||
c.mutex.Lock()
|
||||
defer c.mutex.Unlock()
|
||||
c.rwMutex.Lock()
|
||||
defer c.rwMutex.Unlock()
|
||||
|
||||
podName := getPodName(pod)
|
||||
decisions, ok := c.bindingDecisions[podName]
|
||||
@ -90,6 +99,7 @@ func (c *podBindingCache) UpdateBindings(pod *v1.Pod, node string, bindings []*b
|
||||
decision = nodeDecision{
|
||||
bindings: bindings,
|
||||
}
|
||||
VolumeBindingRequestSchedulerBinderCache.WithLabelValues("add").Inc()
|
||||
} else {
|
||||
decision.bindings = bindings
|
||||
}
|
||||
@ -97,8 +107,8 @@ func (c *podBindingCache) UpdateBindings(pod *v1.Pod, node string, bindings []*b
|
||||
}
|
||||
|
||||
func (c *podBindingCache) GetBindings(pod *v1.Pod, node string) []*bindingInfo {
|
||||
c.mutex.Lock()
|
||||
defer c.mutex.Unlock()
|
||||
c.rwMutex.RLock()
|
||||
defer c.rwMutex.RUnlock()
|
||||
|
||||
podName := getPodName(pod)
|
||||
decisions, ok := c.bindingDecisions[podName]
|
||||
@ -113,8 +123,8 @@ func (c *podBindingCache) GetBindings(pod *v1.Pod, node string) []*bindingInfo {
|
||||
}
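The cache methods above switch from a plain sync.Mutex to a sync.RWMutex so that readers such as GetBindings and GetProvisionedPVCs can run concurrently while writers take the exclusive lock. A minimal sketch of that locking shape, with simplified stand-in types (not the vendored podBindingCache):

// RWMutex-guarded cache: readers share the lock, writers take it exclusively.
package main

import (
	"fmt"
	"sync"
)

type bindingCache struct {
	rwMutex   sync.RWMutex
	decisions map[string][]string // pod name -> assumed bindings
}

func (c *bindingCache) Update(pod string, bindings []string) {
	c.rwMutex.Lock()
	defer c.rwMutex.Unlock()
	c.decisions[pod] = bindings
}

func (c *bindingCache) Get(pod string) []string {
	c.rwMutex.RLock()
	defer c.rwMutex.RUnlock()
	return c.decisions[pod] // nil means "no entry", like GetBindings above
}

func main() {
	c := &bindingCache{decisions: map[string][]string{}}
	c.Update("ns/pod", []string{"pvc-a->pv-1"})
	fmt.Println(c.Get("ns/pod"), c.Get("ns/other") == nil)
}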
|
||||
|
||||
func (c *podBindingCache) UpdateProvisionedPVCs(pod *v1.Pod, node string, pvcs []*v1.PersistentVolumeClaim) {
|
||||
c.mutex.Lock()
|
||||
defer c.mutex.Unlock()
|
||||
c.rwMutex.Lock()
|
||||
defer c.rwMutex.Unlock()
|
||||
|
||||
podName := getPodName(pod)
|
||||
decisions, ok := c.bindingDecisions[podName]
|
||||
@ -134,8 +144,8 @@ func (c *podBindingCache) UpdateProvisionedPVCs(pod *v1.Pod, node string, pvcs [
|
||||
}
|
||||
|
||||
func (c *podBindingCache) GetProvisionedPVCs(pod *v1.Pod, node string) []*v1.PersistentVolumeClaim {
|
||||
c.mutex.Lock()
|
||||
defer c.mutex.Unlock()
|
||||
c.rwMutex.RLock()
|
||||
defer c.rwMutex.RUnlock()
|
||||
|
||||
podName := getPodName(pod)
|
||||
decisions, ok := c.bindingDecisions[podName]
|
||||
|
21
vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/scheduler_binder_fake.go
generated
vendored
@ -16,18 +16,15 @@ limitations under the License.
|
||||
|
||||
package persistentvolume
|
||||
|
||||
import (
|
||||
"k8s.io/api/core/v1"
|
||||
)
|
||||
import "k8s.io/api/core/v1"
|
||||
|
||||
type FakeVolumeBinderConfig struct {
|
||||
AllBound bool
|
||||
FindUnboundSatsified bool
|
||||
FindBoundSatsified bool
|
||||
FindErr error
|
||||
AssumeBindingRequired bool
|
||||
AssumeErr error
|
||||
BindErr error
|
||||
AllBound bool
|
||||
FindUnboundSatsified bool
|
||||
FindBoundSatsified bool
|
||||
FindErr error
|
||||
AssumeErr error
|
||||
BindErr error
|
||||
}
|
||||
|
||||
// NewVolumeBinder sets up all the caches needed for the scheduler to make
|
||||
@ -48,9 +45,9 @@ func (b *FakeVolumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVo
|
||||
return b.config.FindUnboundSatsified, b.config.FindBoundSatsified, b.config.FindErr
|
||||
}
|
||||
|
||||
func (b *FakeVolumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (bool, bool, error) {
|
||||
func (b *FakeVolumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (bool, error) {
|
||||
b.AssumeCalled = true
|
||||
return b.config.AllBound, b.config.AssumeBindingRequired, b.config.AssumeErr
|
||||
return b.config.AllBound, b.config.AssumeErr
|
||||
}
|
||||
|
||||
func (b *FakeVolumeBinder) BindPodVolumes(assumedPod *v1.Pod) error {
|
||||
|
640
vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/scheduler_binder_test.go
generated
vendored
@ -20,8 +20,9 @@ import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/klog"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
storagev1 "k8s.io/api/storage/v1"
|
||||
@ -29,7 +30,6 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/diff"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/client-go/informers"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
@ -38,20 +38,30 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
unboundPVC = makeTestPVC("unbound-pvc", "1G", pvcUnbound, "", "1", &waitClass)
|
||||
unboundPVC2 = makeTestPVC("unbound-pvc2", "5G", pvcUnbound, "", "1", &waitClass)
|
||||
preboundPVC = makeTestPVC("prebound-pvc", "1G", pvcPrebound, "pv-node1a", "1", &waitClass)
|
||||
boundPVC = makeTestPVC("bound-pvc", "1G", pvcBound, "pv-bound", "1", &waitClass)
|
||||
boundPVC2 = makeTestPVC("bound-pvc2", "1G", pvcBound, "pv-bound2", "1", &waitClass)
|
||||
badPVC = makeBadPVC()
|
||||
immediateUnboundPVC = makeTestPVC("immediate-unbound-pvc", "1G", pvcUnbound, "", "1", &immediateClass)
|
||||
immediateBoundPVC = makeTestPVC("immediate-bound-pvc", "1G", pvcBound, "pv-bound-immediate", "1", &immediateClass)
|
||||
provisionedPVC = makeTestPVC("provisioned-pvc", "1Gi", pvcUnbound, "", "1", &waitClass)
|
||||
provisionedPVC2 = makeTestPVC("provisioned-pvc2", "1Gi", pvcUnbound, "", "1", &waitClass)
|
||||
provisionedPVCHigherVersion = makeTestPVC("provisioned-pvc2", "1Gi", pvcUnbound, "", "2", &waitClass)
|
||||
noProvisionerPVC = makeTestPVC("no-provisioner-pvc", "1Gi", pvcUnbound, "", "1", &provisionNotSupportClass)
|
||||
topoMismatchPVC = makeTestPVC("topo-mismatch-pvc", "1Gi", pvcUnbound, "", "1", &topoMismatchClass)
|
||||
// PVCs for manual binding
|
||||
// TODO: clean up all of these
|
||||
unboundPVC = makeTestPVC("unbound-pvc", "1G", "", pvcUnbound, "", "1", &waitClass)
|
||||
unboundPVC2 = makeTestPVC("unbound-pvc2", "5G", "", pvcUnbound, "", "1", &waitClass)
|
||||
preboundPVC = makeTestPVC("prebound-pvc", "1G", "", pvcPrebound, "pv-node1a", "1", &waitClass)
|
||||
preboundPVCNode1a = makeTestPVC("unbound-pvc", "1G", "", pvcPrebound, "pv-node1a", "1", &waitClass)
|
||||
boundPVC = makeTestPVC("bound-pvc", "1G", "", pvcBound, "pv-bound", "1", &waitClass)
|
||||
boundPVC2 = makeTestPVC("bound-pvc2", "1G", "", pvcBound, "pv-bound2", "1", &waitClass)
|
||||
boundPVCNode1a = makeTestPVC("unbound-pvc", "1G", "", pvcBound, "pv-node1a", "1", &waitClass)
|
||||
badPVC = makeBadPVC()
|
||||
immediateUnboundPVC = makeTestPVC("immediate-unbound-pvc", "1G", "", pvcUnbound, "", "1", &immediateClass)
|
||||
immediateBoundPVC = makeTestPVC("immediate-bound-pvc", "1G", "", pvcBound, "pv-bound-immediate", "1", &immediateClass)
|
||||
|
||||
// PVCs for dynamic provisioning
|
||||
provisionedPVC = makeTestPVC("provisioned-pvc", "1Gi", "", pvcUnbound, "", "1", &waitClassWithProvisioner)
|
||||
provisionedPVC2 = makeTestPVC("provisioned-pvc2", "1Gi", "", pvcUnbound, "", "1", &waitClassWithProvisioner)
|
||||
provisionedPVCHigherVersion = makeTestPVC("provisioned-pvc2", "1Gi", "", pvcUnbound, "", "2", &waitClassWithProvisioner)
|
||||
provisionedPVCBound = makeTestPVC("provisioned-pvc", "1Gi", "", pvcBound, "some-pv", "1", &waitClassWithProvisioner)
|
||||
noProvisionerPVC = makeTestPVC("no-provisioner-pvc", "1Gi", "", pvcUnbound, "", "1", &waitClass)
|
||||
topoMismatchPVC = makeTestPVC("topo-mismatch-pvc", "1Gi", "", pvcUnbound, "", "1", &topoMismatchClass)
|
||||
|
||||
selectedNodePVC = makeTestPVC("provisioned-pvc", "1Gi", nodeLabelValue, pvcSelectedNode, "", "1", &waitClassWithProvisioner)
|
||||
|
||||
// PVs for manual binding
|
||||
pvNoNode = makeTestPV("pv-no-node", "", "1G", "1", nil, waitClass)
|
||||
pvNode1a = makeTestPV("pv-node1a", "node1", "5G", "1", nil, waitClass)
|
||||
pvNode1b = makeTestPV("pv-node1b", "node1", "10G", "1", nil, waitClass)
|
||||
@ -59,12 +69,13 @@ var (
|
||||
pvNode2 = makeTestPV("pv-node2", "node2", "1G", "1", nil, waitClass)
|
||||
pvPrebound = makeTestPV("pv-prebound", "node1", "1G", "1", unboundPVC, waitClass)
|
||||
pvBound = makeTestPV("pv-bound", "node1", "1G", "1", boundPVC, waitClass)
|
||||
pvNode1aBound = makeTestPV("pv-node1a", "node1", "1G", "1", unboundPVC, waitClass)
|
||||
pvNode1bBound = makeTestPV("pv-node1b", "node1", "5G", "1", unboundPVC2, waitClass)
|
||||
pvNode1bBoundHigherVersion = makeTestPV("pv-node1b", "node1", "5G", "2", unboundPVC2, waitClass)
|
||||
pvNode1aBound = makeTestPV("pv-node1a", "node1", "5G", "1", unboundPVC, waitClass)
|
||||
pvNode1bBound = makeTestPV("pv-node1b", "node1", "10G", "1", unboundPVC2, waitClass)
|
||||
pvNode1bBoundHigherVersion = makeTestPV("pv-node1b", "node1", "10G", "2", unboundPVC2, waitClass)
|
||||
pvBoundImmediate = makeTestPV("pv-bound-immediate", "node1", "1G", "1", immediateBoundPVC, immediateClass)
|
||||
pvBoundImmediateNode2 = makeTestPV("pv-bound-immediate", "node2", "1G", "1", immediateBoundPVC, immediateClass)
|
||||
|
||||
// PVC/PV bindings for manual binding
|
||||
binding1a = makeBinding(unboundPVC, pvNode1a)
|
||||
binding1b = makeBinding(unboundPVC2, pvNode1b)
|
||||
bindingNoNode = makeBinding(unboundPVC, pvNoNode)
|
||||
@ -72,11 +83,13 @@ var (
|
||||
binding1aBound = makeBinding(unboundPVC, pvNode1aBound)
|
||||
binding1bBound = makeBinding(unboundPVC2, pvNode1bBound)
|
||||
|
||||
// storage class names
|
||||
waitClass = "waitClass"
|
||||
immediateClass = "immediateClass"
|
||||
provisionNotSupportClass = "provisionNotSupportedClass"
|
||||
waitClassWithProvisioner = "waitClassWithProvisioner"
|
||||
topoMismatchClass = "topoMismatchClass"
|
||||
|
||||
// node topology
|
||||
nodeLabelKey = "nodeKey"
|
||||
nodeLabelValue = "node1"
|
||||
)
|
||||
@ -102,7 +115,8 @@ func newTestBinder(t *testing.T) *testEnv {
|
||||
client,
|
||||
pvcInformer,
|
||||
informerFactory.Core().V1().PersistentVolumes(),
|
||||
classInformer)
|
||||
classInformer,
|
||||
10*time.Second)
|
||||
|
||||
// Add storageclasses
|
||||
waitMode := storagev1.VolumeBindingWaitForFirstConsumer
|
||||
@ -110,7 +124,7 @@ func newTestBinder(t *testing.T) *testEnv {
|
||||
classes := []*storagev1.StorageClass{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: waitClass,
|
||||
Name: waitClassWithProvisioner,
|
||||
},
|
||||
VolumeBindingMode: &waitMode,
|
||||
Provisioner: "test-provisioner",
|
||||
@ -133,7 +147,7 @@ func newTestBinder(t *testing.T) *testEnv {
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: provisionNotSupportClass,
|
||||
Name: waitClass,
|
||||
},
|
||||
VolumeBindingMode: &waitMode,
|
||||
Provisioner: "kubernetes.io/no-provisioner",
|
||||
@ -247,17 +261,44 @@ func (env *testEnv) initPodCache(pod *v1.Pod, node string, bindings []*bindingIn
|
||||
func (env *testEnv) validatePodCache(t *testing.T, name, node string, pod *v1.Pod, expectedBindings []*bindingInfo, expectedProvisionings []*v1.PersistentVolumeClaim) {
|
||||
cache := env.internalBinder.podBindingCache
|
||||
bindings := cache.GetBindings(pod, node)
|
||||
if aLen, eLen := len(bindings), len(expectedBindings); aLen != eLen {
|
||||
t.Errorf("Test %q failed. expected %v bindings, got %v", name, eLen, aLen)
|
||||
} else if expectedBindings == nil && bindings != nil {
|
||||
// nil and empty are different
|
||||
t.Errorf("Test %q failed. expected nil bindings, got empty", name)
|
||||
} else if expectedBindings != nil && bindings == nil {
|
||||
// nil and empty are different
|
||||
t.Errorf("Test %q failed. expected empty bindings, got nil", name)
|
||||
} else {
|
||||
for i := 0; i < aLen; i++ {
|
||||
// Validate PV
|
||||
if !reflect.DeepEqual(expectedBindings[i].pv, bindings[i].pv) {
|
||||
t.Errorf("Test %q failed. binding.pv doesn't match [A-expected, B-got]: %s", name, diff.ObjectDiff(expectedBindings[i].pv, bindings[i].pv))
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(expectedBindings, bindings) {
|
||||
t.Errorf("Test %q failed: Expected bindings %+v, got %+v", name, expectedBindings, bindings)
|
||||
// Validate PVC
|
||||
if !reflect.DeepEqual(expectedBindings[i].pvc, bindings[i].pvc) {
|
||||
t.Errorf("Test %q failed. binding.pvc doesn't match [A-expected, B-got]: %s", name, diff.ObjectDiff(expectedBindings[i].pvc, bindings[i].pvc))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
provisionedClaims := cache.GetProvisionedPVCs(pod, node)
|
||||
|
||||
if !reflect.DeepEqual(expectedProvisionings, provisionedClaims) {
|
||||
t.Errorf("Test %q failed: Expected provisionings %+v, got %+v", name, expectedProvisionings, provisionedClaims)
|
||||
if aLen, eLen := len(provisionedClaims), len(expectedProvisionings); aLen != eLen {
|
||||
t.Errorf("Test %q failed. expected %v provisioned claims, got %v", name, eLen, aLen)
|
||||
} else if expectedProvisionings == nil && provisionedClaims != nil {
|
||||
// nil and empty are different
|
||||
t.Errorf("Test %q failed. expected nil provisionings, got empty", name)
|
||||
} else if expectedProvisionings != nil && provisionedClaims == nil {
|
||||
// nil and empty are different
|
||||
t.Errorf("Test %q failed. expected empty provisionings, got nil", name)
|
||||
} else {
|
||||
for i := 0; i < aLen; i++ {
|
||||
if !reflect.DeepEqual(expectedProvisionings[i], provisionedClaims[i]) {
|
||||
t.Errorf("Test %q failed. provisioned claims doesn't match [A-expected, B-got]: %s", name, diff.ObjectDiff(expectedProvisionings[i], provisionedClaims[i]))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
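validatePodCache above repeatedly notes that "nil and empty are different": a nil slice from the cache means no entry was found, while an empty slice means the entry exists and no operations are needed. A short sketch of why that distinction holds in Go (illustrative names, standard library only):

// Both have length zero, but only nil signals "no cache entry was found".
package main

import (
	"fmt"
	"reflect"
)

func main() {
	var notFound []string      // nil: the pod/node entry is missing
	noWorkNeeded := []string{} // empty: entry exists, nothing to bind

	fmt.Println(len(notFound), len(noWorkNeeded))          // 0 0
	fmt.Println(notFound == nil, noWorkNeeded == nil)      // true false
	fmt.Println(reflect.DeepEqual(notFound, noWorkNeeded)) // false
}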
|
||||
|
||||
func (env *testEnv) getPodBindings(t *testing.T, name, node string, pod *v1.Pod) []*bindingInfo {
|
||||
@ -266,8 +307,6 @@ func (env *testEnv) getPodBindings(t *testing.T, name, node string, pod *v1.Pod)
|
||||
}
|
||||
|
||||
func (env *testEnv) validateAssume(t *testing.T, name string, pod *v1.Pod, bindings []*bindingInfo, provisionings []*v1.PersistentVolumeClaim) {
|
||||
// TODO: Check binding cache
|
||||
|
||||
// Check pv cache
|
||||
pvCache := env.internalBinder.pvCache
|
||||
for _, b := range bindings {
|
||||
@ -383,17 +422,22 @@ const (
|
||||
pvcUnbound = iota
|
||||
pvcPrebound
|
||||
pvcBound
|
||||
pvcSelectedNode
|
||||
)
|
||||
|
||||
func makeTestPVC(name, size string, pvcBoundState int, pvName, resourceVersion string, className *string) *v1.PersistentVolumeClaim {
|
||||
func makeTestPVC(name, size, node string, pvcBoundState int, pvName, resourceVersion string, className *string) *v1.PersistentVolumeClaim {
|
||||
fs := v1.PersistentVolumeFilesystem
|
||||
pvc := &v1.PersistentVolumeClaim{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "PersistentVolumeClaim",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: "testns",
|
||||
UID: types.UID("pvc-uid"),
|
||||
ResourceVersion: resourceVersion,
|
||||
SelfLink: testapi.Default.SelfLink("pvc", name),
|
||||
Annotations: map[string]string{},
|
||||
},
|
||||
Spec: v1.PersistentVolumeClaimSpec{
|
||||
Resources: v1.ResourceRequirements{
|
||||
@ -402,10 +446,14 @@ func makeTestPVC(name, size string, pvcBoundState int, pvName, resourceVersion s
|
||||
},
|
||||
},
|
||||
StorageClassName: className,
|
||||
VolumeMode: &fs,
|
||||
},
|
||||
}
|
||||
|
||||
switch pvcBoundState {
|
||||
case pvcSelectedNode:
|
||||
metav1.SetMetaDataAnnotation(&pvc.ObjectMeta, annSelectedNode, node)
|
||||
// don't fallthrough
|
||||
case pvcBound:
|
||||
metav1.SetMetaDataAnnotation(&pvc.ObjectMeta, annBindCompleted, "yes")
|
||||
fallthrough
|
||||
@ -416,6 +464,7 @@ func makeTestPVC(name, size string, pvcBoundState int, pvName, resourceVersion s
|
||||
}
|
||||
|
||||
func makeBadPVC() *v1.PersistentVolumeClaim {
|
||||
fs := v1.PersistentVolumeFilesystem
|
||||
return &v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "bad-pvc",
|
||||
@ -431,11 +480,13 @@ func makeBadPVC() *v1.PersistentVolumeClaim {
|
||||
},
|
||||
},
|
||||
StorageClassName: &waitClass,
|
||||
VolumeMode: &fs,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func makeTestPV(name, node, capacity, version string, boundToPVC *v1.PersistentVolumeClaim, className string) *v1.PersistentVolume {
|
||||
fs := v1.PersistentVolumeFilesystem
|
||||
pv := &v1.PersistentVolume{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
@ -446,6 +497,10 @@ func makeTestPV(name, node, capacity, version string, boundToPVC *v1.PersistentV
|
||||
v1.ResourceName(v1.ResourceStorage): resource.MustParse(capacity),
|
||||
},
|
||||
StorageClassName: className,
|
||||
VolumeMode: &fs,
|
||||
},
|
||||
Status: v1.PersistentVolumeStatus{
|
||||
Phase: v1.VolumeAvailable,
|
||||
},
|
||||
}
|
||||
if node != "" {
|
||||
@ -454,9 +509,12 @@ func makeTestPV(name, node, capacity, version string, boundToPVC *v1.PersistentV
|
||||
|
||||
if boundToPVC != nil {
|
||||
pv.Spec.ClaimRef = &v1.ObjectReference{
|
||||
Name: boundToPVC.Name,
|
||||
Namespace: boundToPVC.Namespace,
|
||||
UID: boundToPVC.UID,
|
||||
Kind: boundToPVC.Kind,
|
||||
APIVersion: boundToPVC.APIVersion,
|
||||
ResourceVersion: boundToPVC.ResourceVersion,
|
||||
Name: boundToPVC.Name,
|
||||
Namespace: boundToPVC.Namespace,
|
||||
UID: boundToPVC.UID,
|
||||
}
|
||||
metav1.SetMetaDataAnnotation(&pv.ObjectMeta, annBoundByController, "yes")
|
||||
}
|
||||
@ -464,6 +522,24 @@ func makeTestPV(name, node, capacity, version string, boundToPVC *v1.PersistentV
|
||||
return pv
|
||||
}
|
||||
|
||||
func pvcSetSelectedNode(pvc *v1.PersistentVolumeClaim, node string) *v1.PersistentVolumeClaim {
|
||||
newPVC := pvc.DeepCopy()
|
||||
metav1.SetMetaDataAnnotation(&newPVC.ObjectMeta, annSelectedNode, node)
|
||||
return newPVC
|
||||
}
|
||||
|
||||
func pvcSetEmptyAnnotations(pvc *v1.PersistentVolumeClaim) *v1.PersistentVolumeClaim {
|
||||
newPVC := pvc.DeepCopy()
|
||||
newPVC.Annotations = map[string]string{}
|
||||
return newPVC
|
||||
}
|
||||
|
||||
func pvRemoveClaimUID(pv *v1.PersistentVolume) *v1.PersistentVolume {
|
||||
newPV := pv.DeepCopy()
|
||||
newPV.Spec.ClaimRef.UID = ""
|
||||
return newPV
|
||||
}
|
||||
|
||||
func makePod(pvcs []*v1.PersistentVolumeClaim) *v1.Pod {
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@ -515,7 +591,7 @@ func makeBinding(pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) *bindin
|
||||
func addProvisionAnn(pvc *v1.PersistentVolumeClaim) *v1.PersistentVolumeClaim {
|
||||
res := pvc.DeepCopy()
|
||||
// Add provision related annotations
|
||||
res.Annotations[annSelectedNode] = nodeLabelValue
|
||||
metav1.SetMetaDataAnnotation(&res.ObjectMeta, annSelectedNode, nodeLabelValue)
|
||||
|
||||
return res
|
||||
}
|
||||
@ -568,10 +644,9 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
|
||||
shouldFail: true,
|
||||
},
|
||||
"prebound-pvc": {
|
||||
podPVCs: []*v1.PersistentVolumeClaim{preboundPVC},
|
||||
pvs: []*v1.PersistentVolume{pvNode1aBound},
|
||||
expectedUnbound: true,
|
||||
expectedBound: true,
|
||||
podPVCs: []*v1.PersistentVolumeClaim{preboundPVC},
|
||||
pvs: []*v1.PersistentVolume{pvNode1aBound},
|
||||
shouldFail: true,
|
||||
},
|
||||
"unbound-pvc,pv-same-node": {
|
||||
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
|
||||
@ -621,11 +696,9 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
|
||||
expectedBound: true,
|
||||
},
|
||||
"one-prebound,one-unbound": {
|
||||
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, preboundPVC},
|
||||
pvs: []*v1.PersistentVolume{pvNode1a, pvNode1b},
|
||||
expectedBindings: []*bindingInfo{binding1a},
|
||||
expectedUnbound: true,
|
||||
expectedBound: true,
|
||||
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, preboundPVC},
|
||||
pvs: []*v1.PersistentVolume{pvNode1a, pvNode1b},
|
||||
shouldFail: true,
|
||||
},
|
||||
"immediate-bound-pvc": {
|
||||
podPVCs: []*v1.PersistentVolumeClaim{immediateBoundPVC},
|
||||
@ -660,10 +733,6 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
// Set feature gate
|
||||
utilfeature.DefaultFeatureGate.Set("VolumeScheduling=true")
|
||||
defer utilfeature.DefaultFeatureGate.Set("VolumeScheduling=false")
|
||||
|
||||
testNode := &v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "node1",
|
||||
@ -674,7 +743,7 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
|
||||
}
|
||||
|
||||
for name, scenario := range scenarios {
|
||||
glog.V(5).Infof("Running test case %q", name)
|
||||
klog.V(5).Infof("Running test case %q", name)
|
||||
|
||||
// Setup
|
||||
testEnv := newTestBinder(t)
|
||||
@ -776,10 +845,6 @@ func TestFindPodVolumesWithProvisioning(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
// Set VolumeScheduling and DynamicProvisioningScheduling feature gate
|
||||
utilfeature.DefaultFeatureGate.Set("VolumeScheduling=true,DynamicProvisioningScheduling=true")
|
||||
defer utilfeature.DefaultFeatureGate.Set("VolumeScheduling=false,DynamicProvisioningScheduling=false")
|
||||
|
||||
testNode := &v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "node1",
|
||||
@ -834,74 +899,69 @@ func TestAssumePodVolumes(t *testing.T) {
|
||||
provisionedPVCs []*v1.PersistentVolumeClaim
|
||||
|
||||
// Expected return values
|
||||
shouldFail bool
|
||||
expectedBindingRequired bool
|
||||
expectedAllBound bool
|
||||
shouldFail bool
|
||||
expectedAllBound bool
|
||||
|
||||
// if nil, use bindings
|
||||
expectedBindings []*bindingInfo
|
||||
expectedBindings []*bindingInfo
|
||||
expectedProvisionings []*v1.PersistentVolumeClaim
|
||||
}{
|
||||
"all-bound": {
|
||||
podPVCs: []*v1.PersistentVolumeClaim{boundPVC},
|
||||
pvs: []*v1.PersistentVolume{pvBound},
|
||||
expectedAllBound: true,
|
||||
},
|
||||
"prebound-pvc": {
|
||||
podPVCs: []*v1.PersistentVolumeClaim{preboundPVC},
|
||||
pvs: []*v1.PersistentVolume{pvNode1a},
|
||||
},
|
||||
"one-binding": {
|
||||
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
|
||||
bindings: []*bindingInfo{binding1a},
|
||||
pvs: []*v1.PersistentVolume{pvNode1a},
|
||||
expectedBindingRequired: true,
|
||||
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
|
||||
bindings: []*bindingInfo{binding1a},
|
||||
pvs: []*v1.PersistentVolume{pvNode1a},
|
||||
expectedBindings: []*bindingInfo{binding1aBound},
|
||||
expectedProvisionings: []*v1.PersistentVolumeClaim{},
|
||||
},
|
||||
"two-bindings": {
|
||||
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, unboundPVC2},
|
||||
bindings: []*bindingInfo{binding1a, binding1b},
|
||||
pvs: []*v1.PersistentVolume{pvNode1a, pvNode1b},
|
||||
expectedBindingRequired: true,
|
||||
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, unboundPVC2},
|
||||
bindings: []*bindingInfo{binding1a, binding1b},
|
||||
pvs: []*v1.PersistentVolume{pvNode1a, pvNode1b},
|
||||
expectedBindings: []*bindingInfo{binding1aBound, binding1bBound},
|
||||
expectedProvisionings: []*v1.PersistentVolumeClaim{},
|
||||
},
|
||||
"pv-already-bound": {
|
||||
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
|
||||
bindings: []*bindingInfo{binding1aBound},
|
||||
pvs: []*v1.PersistentVolume{pvNode1aBound},
|
||||
expectedBindingRequired: false,
|
||||
expectedBindings: []*bindingInfo{},
|
||||
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
|
||||
bindings: []*bindingInfo{binding1aBound},
|
||||
pvs: []*v1.PersistentVolume{pvNode1aBound},
|
||||
expectedBindings: []*bindingInfo{binding1aBound},
|
||||
expectedProvisionings: []*v1.PersistentVolumeClaim{},
|
||||
},
|
||||
"claimref-failed": {
|
||||
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
|
||||
bindings: []*bindingInfo{binding1a, bindingBad},
|
||||
pvs: []*v1.PersistentVolume{pvNode1a, pvNode1b},
|
||||
shouldFail: true,
|
||||
expectedBindingRequired: true,
|
||||
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
|
||||
bindings: []*bindingInfo{binding1a, bindingBad},
|
||||
pvs: []*v1.PersistentVolume{pvNode1a, pvNode1b},
|
||||
shouldFail: true,
|
||||
},
|
||||
"tmpupdate-failed": {
|
||||
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
|
||||
bindings: []*bindingInfo{binding1a, binding1b},
|
||||
pvs: []*v1.PersistentVolume{pvNode1a},
|
||||
shouldFail: true,
|
||||
expectedBindingRequired: true,
|
||||
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
|
||||
bindings: []*bindingInfo{binding1a, binding1b},
|
||||
pvs: []*v1.PersistentVolume{pvNode1a},
|
||||
shouldFail: true,
|
||||
},
|
||||
"one-binding, one-pvc-provisioned": {
|
||||
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, provisionedPVC},
|
||||
bindings: []*bindingInfo{binding1a},
|
||||
pvs: []*v1.PersistentVolume{pvNode1a},
|
||||
provisionedPVCs: []*v1.PersistentVolumeClaim{provisionedPVC},
|
||||
expectedBindingRequired: true,
|
||||
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, provisionedPVC},
|
||||
bindings: []*bindingInfo{binding1a},
|
||||
pvs: []*v1.PersistentVolume{pvNode1a},
|
||||
provisionedPVCs: []*v1.PersistentVolumeClaim{provisionedPVC},
|
||||
expectedBindings: []*bindingInfo{binding1aBound},
|
||||
expectedProvisionings: []*v1.PersistentVolumeClaim{selectedNodePVC},
|
||||
},
|
||||
"one-binding, one-provision-tmpupdate-failed": {
|
||||
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, provisionedPVCHigherVersion},
|
||||
bindings: []*bindingInfo{binding1a},
|
||||
pvs: []*v1.PersistentVolume{pvNode1a},
|
||||
provisionedPVCs: []*v1.PersistentVolumeClaim{provisionedPVC2},
|
||||
shouldFail: true,
|
||||
expectedBindingRequired: true,
|
||||
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, provisionedPVCHigherVersion},
|
||||
bindings: []*bindingInfo{binding1a},
|
||||
pvs: []*v1.PersistentVolume{pvNode1a},
|
||||
provisionedPVCs: []*v1.PersistentVolumeClaim{provisionedPVC2},
|
||||
shouldFail: true,
|
||||
},
|
||||
}
|
||||
|
||||
for name, scenario := range scenarios {
|
||||
glog.V(5).Infof("Running test case %q", name)
|
||||
klog.V(5).Infof("Running test case %q", name)
|
||||
|
||||
// Setup
|
||||
testEnv := newTestBinder(t)
|
||||
@ -911,7 +971,7 @@ func TestAssumePodVolumes(t *testing.T) {
|
||||
testEnv.initVolumes(scenario.pvs, scenario.pvs)
|
||||
|
||||
// Execute
|
||||
allBound, bindingRequired, err := testEnv.binder.AssumePodVolumes(pod, "node1")
|
||||
allBound, err := testEnv.binder.AssumePodVolumes(pod, "node1")
|
||||
|
||||
// Validate
|
||||
if !scenario.shouldFail && err != nil {
|
||||
@ -920,24 +980,25 @@ func TestAssumePodVolumes(t *testing.T) {
|
||||
if scenario.shouldFail && err == nil {
|
||||
t.Errorf("Test %q failed: returned success but expected error", name)
|
||||
}
|
||||
if scenario.expectedBindingRequired != bindingRequired {
|
||||
t.Errorf("Test %q failed: returned unexpected bindingRequired: %v", name, bindingRequired)
|
||||
}
|
||||
if scenario.expectedAllBound != allBound {
|
||||
t.Errorf("Test %q failed: returned unexpected allBound: %v", name, allBound)
|
||||
}
|
||||
if scenario.expectedBindings == nil {
|
||||
scenario.expectedBindings = scenario.bindings
|
||||
}
|
||||
if scenario.shouldFail {
|
||||
testEnv.validateFailedAssume(t, name, pod, scenario.expectedBindings, scenario.provisionedPVCs)
|
||||
} else {
|
||||
testEnv.validateAssume(t, name, pod, scenario.expectedBindings, scenario.provisionedPVCs)
|
||||
if scenario.expectedProvisionings == nil {
|
||||
scenario.expectedProvisionings = scenario.provisionedPVCs
|
||||
}
|
||||
if scenario.shouldFail {
|
||||
testEnv.validateFailedAssume(t, name, pod, scenario.expectedBindings, scenario.expectedProvisionings)
|
||||
} else {
|
||||
testEnv.validateAssume(t, name, pod, scenario.expectedBindings, scenario.expectedProvisionings)
|
||||
}
|
||||
testEnv.validatePodCache(t, name, pod.Spec.NodeName, pod, scenario.expectedBindings, scenario.expectedProvisionings)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBindPodVolumes(t *testing.T) {
|
||||
func TestBindAPIUpdate(t *testing.T) {
|
||||
scenarios := map[string]struct {
|
||||
// Inputs
|
||||
bindings []*bindingInfo
|
||||
@ -960,34 +1021,56 @@ func TestBindPodVolumes(t *testing.T) {
|
||||
// if nil, use expectedPVCs
|
||||
expectedAPIPVCs []*v1.PersistentVolumeClaim
|
||||
}{
|
||||
"all-bound": {},
|
||||
"not-fully-bound": {
|
||||
bindings: []*bindingInfo{},
|
||||
"nothing-to-bind-nil": {
|
||||
shouldFail: true,
|
||||
},
|
||||
"nothing-to-bind-bindings-nil": {
|
||||
provisionedPVCs: []*v1.PersistentVolumeClaim{},
|
||||
shouldFail: true,
|
||||
},
|
||||
"nothing-to-bind-provisionings-nil": {
|
||||
bindings: []*bindingInfo{},
|
||||
shouldFail: true,
|
||||
},
|
||||
"nothing-to-bind-empty": {
|
||||
bindings: []*bindingInfo{},
|
||||
provisionedPVCs: []*v1.PersistentVolumeClaim{},
|
||||
},
|
||||
"one-binding": {
|
||||
bindings: []*bindingInfo{binding1aBound},
|
||||
cachedPVs: []*v1.PersistentVolume{pvNode1a},
|
||||
expectedPVs: []*v1.PersistentVolume{pvNode1aBound},
|
||||
bindings: []*bindingInfo{binding1aBound},
|
||||
cachedPVs: []*v1.PersistentVolume{pvNode1a},
|
||||
expectedPVs: []*v1.PersistentVolume{pvNode1aBound},
|
||||
provisionedPVCs: []*v1.PersistentVolumeClaim{},
|
||||
},
|
||||
"two-bindings": {
|
||||
bindings: []*bindingInfo{binding1aBound, binding1bBound},
|
||||
cachedPVs: []*v1.PersistentVolume{pvNode1a, pvNode1b},
|
||||
expectedPVs: []*v1.PersistentVolume{pvNode1aBound, pvNode1bBound},
|
||||
bindings: []*bindingInfo{binding1aBound, binding1bBound},
|
||||
cachedPVs: []*v1.PersistentVolume{pvNode1a, pvNode1b},
|
||||
expectedPVs: []*v1.PersistentVolume{pvNode1aBound, pvNode1bBound},
|
||||
provisionedPVCs: []*v1.PersistentVolumeClaim{},
|
||||
},
|
||||
"api-already-updated": {
|
||||
bindings: []*bindingInfo{binding1aBound},
|
||||
cachedPVs: []*v1.PersistentVolume{pvNode1aBound},
|
||||
expectedPVs: []*v1.PersistentVolume{pvNode1aBound},
|
||||
provisionedPVCs: []*v1.PersistentVolumeClaim{},
|
||||
},
|
||||
"api-update-failed": {
|
||||
bindings: []*bindingInfo{binding1aBound, binding1bBound},
|
||||
cachedPVs: []*v1.PersistentVolume{pvNode1a, pvNode1b},
|
||||
apiPVs: []*v1.PersistentVolume{pvNode1a, pvNode1bBoundHigherVersion},
|
||||
expectedPVs: []*v1.PersistentVolume{pvNode1aBound, pvNode1b},
|
||||
expectedAPIPVs: []*v1.PersistentVolume{pvNode1aBound, pvNode1bBoundHigherVersion},
|
||||
shouldFail: true,
|
||||
bindings: []*bindingInfo{binding1aBound, binding1bBound},
|
||||
cachedPVs: []*v1.PersistentVolume{pvNode1a, pvNode1b},
|
||||
apiPVs: []*v1.PersistentVolume{pvNode1a, pvNode1bBoundHigherVersion},
|
||||
expectedPVs: []*v1.PersistentVolume{pvNode1aBound, pvNode1b},
|
||||
expectedAPIPVs: []*v1.PersistentVolume{pvNode1aBound, pvNode1bBoundHigherVersion},
|
||||
provisionedPVCs: []*v1.PersistentVolumeClaim{},
|
||||
shouldFail: true,
|
||||
},
|
||||
"one-provisioned-pvc": {
|
||||
bindings: []*bindingInfo{},
|
||||
provisionedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC)},
|
||||
cachedPVCs: []*v1.PersistentVolumeClaim{provisionedPVC},
|
||||
expectedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC)},
|
||||
},
|
||||
"provision-api-update-failed": {
|
||||
bindings: []*bindingInfo{},
|
||||
provisionedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC), addProvisionAnn(provisionedPVC2)},
|
||||
cachedPVCs: []*v1.PersistentVolumeClaim{provisionedPVC, provisionedPVC2},
|
||||
apiPVCs: []*v1.PersistentVolumeClaim{provisionedPVC, provisionedPVCHigherVersion},
|
||||
@ -995,7 +1078,7 @@ func TestBindPodVolumes(t *testing.T) {
|
||||
expectedAPIPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC), provisionedPVCHigherVersion},
|
||||
shouldFail: true,
|
||||
},
|
||||
"bingding-succeed, provision-api-update-failed": {
|
||||
"binding-succeed, provision-api-update-failed": {
|
||||
bindings: []*bindingInfo{binding1aBound},
|
||||
cachedPVs: []*v1.PersistentVolume{pvNode1a},
|
||||
expectedPVs: []*v1.PersistentVolume{pvNode1aBound},
|
||||
@ -1008,7 +1091,7 @@ func TestBindPodVolumes(t *testing.T) {
|
||||
},
|
||||
}
|
||||
for name, scenario := range scenarios {
|
||||
glog.V(5).Infof("Running test case %q", name)
|
||||
klog.V(4).Infof("Running test case %q", name)
|
||||
|
||||
// Setup
|
||||
testEnv := newTestBinder(t)
|
||||
@ -1024,7 +1107,7 @@ func TestBindPodVolumes(t *testing.T) {
|
||||
testEnv.assumeVolumes(t, name, "node1", pod, scenario.bindings, scenario.provisionedPVCs)
|
||||
|
||||
// Execute
|
||||
err := testEnv.binder.BindPodVolumes(pod)
|
||||
err := testEnv.internalBinder.bindAPIUpdate(pod.Name, scenario.bindings, scenario.provisionedPVCs)
|
||||
|
||||
// Validate
|
||||
if !scenario.shouldFail && err != nil {
|
||||
@ -1044,11 +1127,302 @@ func TestBindPodVolumes(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestFindAssumeVolumes(t *testing.T) {
|
||||
// Set feature gate
|
||||
utilfeature.DefaultFeatureGate.Set("VolumeScheduling=true")
|
||||
defer utilfeature.DefaultFeatureGate.Set("VolumeScheduling=false")
|
||||
func TestCheckBindings(t *testing.T) {
|
||||
scenarios := map[string]struct {
|
||||
// Inputs
|
||||
bindings []*bindingInfo
|
||||
cachedPVs []*v1.PersistentVolume
|
||||
|
||||
provisionedPVCs []*v1.PersistentVolumeClaim
|
||||
cachedPVCs []*v1.PersistentVolumeClaim
|
||||
|
||||
// Expected return values
|
||||
shouldFail bool
|
||||
expectedBound bool
|
||||
}{
|
||||
"nothing-to-bind-nil": {
|
||||
shouldFail: true,
|
||||
},
|
||||
"nothing-to-bind-bindings-nil": {
|
||||
provisionedPVCs: []*v1.PersistentVolumeClaim{},
|
||||
shouldFail: true,
|
||||
},
|
||||
"nothing-to-bind-provisionings-nil": {
|
||||
bindings: []*bindingInfo{},
|
||||
shouldFail: true,
|
||||
},
|
||||
"nothing-to-bind": {
|
||||
bindings: []*bindingInfo{},
|
||||
provisionedPVCs: []*v1.PersistentVolumeClaim{},
|
||||
expectedBound: true,
|
||||
},
|
||||
"binding-bound": {
|
||||
bindings: []*bindingInfo{binding1aBound},
|
||||
provisionedPVCs: []*v1.PersistentVolumeClaim{},
|
||||
cachedPVs: []*v1.PersistentVolume{pvNode1aBound},
|
||||
cachedPVCs: []*v1.PersistentVolumeClaim{boundPVCNode1a},
|
||||
expectedBound: true,
|
||||
},
|
||||
"binding-prebound": {
|
||||
bindings: []*bindingInfo{binding1aBound},
|
||||
provisionedPVCs: []*v1.PersistentVolumeClaim{},
|
||||
cachedPVs: []*v1.PersistentVolume{pvNode1aBound},
|
||||
cachedPVCs: []*v1.PersistentVolumeClaim{preboundPVCNode1a},
|
||||
},
|
||||
"binding-unbound": {
|
||||
bindings: []*bindingInfo{binding1aBound},
|
||||
provisionedPVCs: []*v1.PersistentVolumeClaim{},
|
||||
cachedPVs: []*v1.PersistentVolume{pvNode1aBound},
|
||||
cachedPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
|
||||
},
|
||||
"binding-pvc-not-exists": {
|
||||
bindings: []*bindingInfo{binding1aBound},
|
||||
provisionedPVCs: []*v1.PersistentVolumeClaim{},
|
||||
cachedPVs: []*v1.PersistentVolume{pvNode1aBound},
|
||||
shouldFail: true,
|
||||
},
|
||||
"binding-pv-not-exists": {
|
||||
bindings: []*bindingInfo{binding1aBound},
|
||||
provisionedPVCs: []*v1.PersistentVolumeClaim{},
|
||||
cachedPVCs: []*v1.PersistentVolumeClaim{boundPVCNode1a},
|
||||
shouldFail: true,
|
||||
},
|
||||
"binding-claimref-nil": {
|
||||
bindings: []*bindingInfo{binding1aBound},
|
||||
provisionedPVCs: []*v1.PersistentVolumeClaim{},
|
||||
cachedPVs: []*v1.PersistentVolume{pvNode1a},
|
||||
cachedPVCs: []*v1.PersistentVolumeClaim{boundPVCNode1a},
|
||||
shouldFail: true,
|
||||
},
|
||||
"binding-claimref-uid-empty": {
|
||||
bindings: []*bindingInfo{binding1aBound},
|
||||
provisionedPVCs: []*v1.PersistentVolumeClaim{},
|
||||
cachedPVs: []*v1.PersistentVolume{pvRemoveClaimUID(pvNode1aBound)},
|
||||
cachedPVCs: []*v1.PersistentVolumeClaim{boundPVCNode1a},
|
||||
shouldFail: true,
|
||||
},
|
||||
"binding-one-bound,one-unbound": {
|
||||
bindings: []*bindingInfo{binding1aBound, binding1bBound},
|
||||
provisionedPVCs: []*v1.PersistentVolumeClaim{},
|
||||
cachedPVs: []*v1.PersistentVolume{pvNode1aBound, pvNode1bBound},
|
||||
cachedPVCs: []*v1.PersistentVolumeClaim{boundPVCNode1a, unboundPVC2},
|
||||
},
|
||||
"provisioning-pvc-bound": {
|
||||
bindings: []*bindingInfo{},
|
||||
provisionedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC)},
|
||||
cachedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVCBound)},
|
||||
expectedBound: true,
|
||||
},
|
||||
"provisioning-pvc-unbound": {
|
||||
bindings: []*bindingInfo{},
|
||||
provisionedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC)},
|
||||
cachedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC)},
|
||||
},
|
||||
"provisioning-pvc-not-exists": {
|
||||
bindings: []*bindingInfo{},
|
||||
provisionedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC)},
|
||||
shouldFail: true,
|
||||
},
|
||||
"provisioning-pvc-annotations-nil": {
|
||||
bindings: []*bindingInfo{},
|
||||
provisionedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC)},
|
||||
cachedPVCs: []*v1.PersistentVolumeClaim{provisionedPVC},
|
||||
shouldFail: true,
|
||||
},
|
||||
"provisioning-pvc-selected-node-dropped": {
|
||||
bindings: []*bindingInfo{},
|
||||
provisionedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC)},
|
||||
cachedPVCs: []*v1.PersistentVolumeClaim{pvcSetEmptyAnnotations(provisionedPVC)},
|
||||
shouldFail: true,
|
||||
},
|
||||
"provisioning-pvc-selected-node-wrong-node": {
|
||||
bindings: []*bindingInfo{},
|
||||
provisionedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC)},
|
||||
cachedPVCs: []*v1.PersistentVolumeClaim{pvcSetSelectedNode(provisionedPVC, "wrong-node")},
|
||||
shouldFail: true,
|
||||
},
|
||||
"binding-bound-provisioning-unbound": {
|
||||
bindings: []*bindingInfo{binding1aBound},
|
||||
provisionedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC)},
|
||||
cachedPVs: []*v1.PersistentVolume{pvNode1aBound},
|
||||
cachedPVCs: []*v1.PersistentVolumeClaim{boundPVCNode1a, addProvisionAnn(provisionedPVC)},
|
||||
},
|
||||
}
|
||||
|
||||
for name, scenario := range scenarios {
|
||||
klog.V(4).Infof("Running test case %q", name)
|
||||
|
||||
// Setup
|
||||
pod := makePod(nil)
|
||||
testEnv := newTestBinder(t)
|
||||
testEnv.initVolumes(scenario.cachedPVs, nil)
|
||||
testEnv.initClaims(scenario.cachedPVCs, nil)
|
||||
|
||||
// Execute
|
||||
allBound, err := testEnv.internalBinder.checkBindings(pod, scenario.bindings, scenario.provisionedPVCs)
|
||||
|
||||
// Validate
|
||||
if !scenario.shouldFail && err != nil {
|
||||
t.Errorf("Test %q failed: returned error: %v", name, err)
|
||||
}
|
||||
if scenario.shouldFail && err == nil {
|
||||
t.Errorf("Test %q failed: returned success but expected error", name)
|
||||
}
|
||||
if scenario.expectedBound != allBound {
|
||||
t.Errorf("Test %q failed: returned bound %v", name, allBound)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestBindPodVolumes(t *testing.T) {
    scenarios := map[string]struct {
        // Inputs
        // These tests only support a single pv and pvc and static binding
        bindingsNil bool // Pass in nil bindings slice
        binding     *bindingInfo
        cachedPV    *v1.PersistentVolume
        cachedPVC   *v1.PersistentVolumeClaim
        apiPV       *v1.PersistentVolume

        // This function runs with a delay of 5 seconds
        delayFunc func(*testing.T, *testEnv, *v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim)

        // Expected return values
        shouldFail bool
    }{
        "nothing-to-bind-nil": {
            bindingsNil: true,
            shouldFail:  true,
        },
        "nothing-to-bind-empty": {},
        "already-bound": {
            binding:   binding1aBound,
            cachedPV:  pvNode1aBound,
            cachedPVC: boundPVCNode1a,
        },
        "binding-succeeds-after-time": {
            binding:   binding1aBound,
            cachedPV:  pvNode1a,
            cachedPVC: unboundPVC,
            delayFunc: func(t *testing.T, testEnv *testEnv, pod *v1.Pod, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
                // Update PVC to be fully bound to PV
                newPVC := pvc.DeepCopy()
                newPVC.ResourceVersion = "100"
                newPVC.Spec.VolumeName = pv.Name
                metav1.SetMetaDataAnnotation(&newPVC.ObjectMeta, annBindCompleted, "yes")

                // Update pvc cache, fake client doesn't invoke informers
                internalBinder, ok := testEnv.binder.(*volumeBinder)
                if !ok {
                    t.Fatalf("Failed to convert to internal binder")
                }

                pvcCache := internalBinder.pvcCache
                internalPVCCache, ok := pvcCache.(*pvcAssumeCache)
                if !ok {
                    t.Fatalf("Failed to convert to internal PVC cache")
                }
                internalPVCCache.add(newPVC)
            },
        },
        "pod-deleted-after-time": {
            binding:   binding1aBound,
            cachedPV:  pvNode1a,
            cachedPVC: unboundPVC,
            delayFunc: func(t *testing.T, testEnv *testEnv, pod *v1.Pod, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
                bindingsCache := testEnv.binder.GetBindingsCache()
                if bindingsCache == nil {
                    t.Fatalf("Failed to get bindings cache")
                }

                // Delete the pod from the cache
                bindingsCache.DeleteBindings(pod)

                // Check that it's deleted
                bindings := bindingsCache.GetBindings(pod, "node1")
                if bindings != nil {
                    t.Fatalf("Failed to delete bindings")
                }
            },
            shouldFail: true,
        },
        "binding-times-out": {
            binding:    binding1aBound,
            cachedPV:   pvNode1a,
            cachedPVC:  unboundPVC,
            shouldFail: true,
        },
        "binding-fails": {
            binding:    binding1bBound,
            cachedPV:   pvNode1b,
            apiPV:      pvNode1bBoundHigherVersion,
            cachedPVC:  unboundPVC2,
            shouldFail: true,
        },
        "check-fails": {
            binding:   binding1aBound,
            cachedPV:  pvNode1a,
            cachedPVC: unboundPVC,
            delayFunc: func(t *testing.T, testEnv *testEnv, pod *v1.Pod, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
                // Delete PVC
                // Update pvc cache, fake client doesn't invoke informers
                internalBinder, ok := testEnv.binder.(*volumeBinder)
                if !ok {
                    t.Fatalf("Failed to convert to internal binder")
                }

                pvcCache := internalBinder.pvcCache
                internalPVCCache, ok := pvcCache.(*pvcAssumeCache)
                if !ok {
                    t.Fatalf("Failed to convert to internal PVC cache")
                }
                internalPVCCache.delete(pvc)
            },
            shouldFail: true,
        },
    }

    for name, scenario := range scenarios {
        klog.V(4).Infof("Running test case %q", name)

        // Setup
        pod := makePod(nil)
        if scenario.apiPV == nil {
            scenario.apiPV = scenario.cachedPV
        }
        testEnv := newTestBinder(t)
        if !scenario.bindingsNil {
            if scenario.binding != nil {
                testEnv.initVolumes([]*v1.PersistentVolume{scenario.cachedPV}, []*v1.PersistentVolume{scenario.apiPV})
                testEnv.initClaims([]*v1.PersistentVolumeClaim{scenario.cachedPVC}, nil)
                testEnv.assumeVolumes(t, name, "node1", pod, []*bindingInfo{scenario.binding}, []*v1.PersistentVolumeClaim{})
            } else {
                testEnv.assumeVolumes(t, name, "node1", pod, []*bindingInfo{}, []*v1.PersistentVolumeClaim{})
            }
        }

        if scenario.delayFunc != nil {
            go func() {
                time.Sleep(5 * time.Second)
                klog.V(5).Infof("Running delay function")
                scenario.delayFunc(t, testEnv, pod, scenario.binding.pv, scenario.binding.pvc)
            }()
        }

        // Execute
        err := testEnv.binder.BindPodVolumes(pod)

        // Validate
        if !scenario.shouldFail && err != nil {
            t.Errorf("Test %q failed: returned error: %v", name, err)
        }
        if scenario.shouldFail && err == nil {
            t.Errorf("Test %q failed: returned success but expected error", name)
        }
    }
}

func TestFindAssumeVolumes(t *testing.T) {
    // Test case
    podPVCs := []*v1.PersistentVolumeClaim{unboundPVC}
    pvs := []*v1.PersistentVolume{pvNode2, pvNode1a, pvNode1c}
@ -1080,17 +1454,15 @@ func TestFindAssumeVolumes(t *testing.T) {
    expectedBindings := testEnv.getPodBindings(t, "before-assume", testNode.Name, pod)

    // 2. Assume matches
    allBound, bindingRequired, err := testEnv.binder.AssumePodVolumes(pod, testNode.Name)
    allBound, err := testEnv.binder.AssumePodVolumes(pod, testNode.Name)
    if err != nil {
        t.Errorf("Test failed: AssumePodVolumes returned error: %v", err)
    }
    if allBound {
        t.Errorf("Test failed: detected unbound volumes as bound")
    }
    if !bindingRequired {
        t.Errorf("Test failed: binding not required")
    }
    testEnv.validateAssume(t, "assume", pod, expectedBindings, nil)

    // After assume, claimref should be set on pv
    expectedBindings = testEnv.getPodBindings(t, "after-assume", testNode.Name, pod)

@ -1106,6 +1478,6 @@ func TestFindAssumeVolumes(t *testing.T) {
        if !unboundSatisfied {
            t.Errorf("Test failed: couldn't find PVs for all PVCs")
        }
        testEnv.validatePodCache(t, "after-assume", testNode.Name, pod, expectedBindings, nil)
        testEnv.validatePodCache(t, "after-assume", testNode.Name, pod, expectedBindings, []*v1.PersistentVolumeClaim{})
    }
}

20
vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/volume_host.go
generated
vendored
@ -25,8 +25,9 @@ import (
    "k8s.io/apimachinery/pkg/types"
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/record"
    "k8s.io/kubernetes/pkg/cloudprovider"
    "k8s.io/kubernetes/pkg/util/io"
    cloudprovider "k8s.io/cloud-provider"
    csiclientset "k8s.io/csi-api/pkg/client/clientset/versioned"
    "k8s.io/klog"
    "k8s.io/kubernetes/pkg/util/mount"
    vol "k8s.io/kubernetes/pkg/volume"
)
@ -79,10 +80,6 @@ func (ctrl *PersistentVolumeController) GetMounter(pluginName string) mount.Inte
    return nil
}

func (ctrl *PersistentVolumeController) GetWriter() io.Writer {
    return nil
}

func (ctrl *PersistentVolumeController) GetHostName() string {
    return ""
}
@ -113,6 +110,12 @@ func (ctrl *PersistentVolumeController) GetServiceAccountTokenFunc() func(_, _ s
    }
}

func (ctrl *PersistentVolumeController) DeleteServiceAccountTokenFunc() func(types.UID) {
    return func(types.UID) {
        klog.Errorf("DeleteServiceAccountToken unsupported in PersistentVolumeController")
    }
}

func (adc *PersistentVolumeController) GetExec(pluginName string) mount.Exec {
    return mount.NewOsExec()
}
@ -128,3 +131,8 @@ func (ctrl *PersistentVolumeController) GetNodeName() types.NodeName {
func (ctrl *PersistentVolumeController) GetEventRecorder() record.EventRecorder {
    return ctrl.eventRecorder
}

func (ctrl *PersistentVolumeController) GetCSIClient() csiclientset.Interface {
    // No volume plugin needs csi.storage.k8s.io client in PV controller.
    return nil
}

42
vendor/k8s.io/kubernetes/pkg/controller/volume/pvcprotection/BUILD
generated
vendored
@ -10,17 +10,17 @@ go_library(
        "//pkg/util/metrics:go_default_library",
        "//pkg/util/slice:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
        "//vendor/k8s.io/client-go/util/workqueue:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
        "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
        "//vendor/k8s.io/klog:go_default_library",
    ],
)

@ -31,17 +31,17 @@ go_test(
    deps = [
        "//pkg/controller:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//staging/src/k8s.io/client-go/informers:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
        "//staging/src/k8s.io/client-go/testing:go_default_library",
        "//vendor/github.com/davecgh/go-spew/spew:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
        "//vendor/k8s.io/client-go/testing:go_default_library",
        "//vendor/k8s.io/klog:go_default_library",
    ],
)

@ -20,7 +20,6 @@ import (
    "fmt"
    "time"

    "github.com/golang/glog"
    "k8s.io/api/core/v1"
    apierrs "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/labels"
@ -31,6 +30,7 @@ import (
    corelisters "k8s.io/client-go/listers/core/v1"
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/util/workqueue"
    "k8s.io/klog"
    "k8s.io/kubernetes/pkg/controller"
    "k8s.io/kubernetes/pkg/util/metrics"
    "k8s.io/kubernetes/pkg/util/slice"
@ -57,8 +57,8 @@ type Controller struct {
// NewPVCProtectionController returns a new instance of PVCProtectionController.
func NewPVCProtectionController(pvcInformer coreinformers.PersistentVolumeClaimInformer, podInformer coreinformers.PodInformer, cl clientset.Interface, storageObjectInUseProtectionFeatureEnabled bool) *Controller {
    e := &Controller{
        client: cl,
        queue:  workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvcprotection"),
        client:                              cl,
        queue:                               workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvcprotection"),
        storageObjectInUseProtectionEnabled: storageObjectInUseProtectionFeatureEnabled,
    }
    if cl != nil && cl.CoreV1().RESTClient().GetRateLimiter() != nil {
@ -96,8 +96,8 @@ func (c *Controller) Run(workers int, stopCh <-chan struct{}) {
    defer utilruntime.HandleCrash()
    defer c.queue.ShutDown()

    glog.Infof("Starting PVC protection controller")
    defer glog.Infof("Shutting down PVC protection controller")
    klog.Infof("Starting PVC protection controller")
    defer klog.Infof("Shutting down PVC protection controller")

    if !controller.WaitForCacheSync("PVC protection", stopCh, c.pvcListerSynced, c.podListerSynced) {
        return
@ -142,15 +142,15 @@ func (c *Controller) processNextWorkItem() bool {
}

func (c *Controller) processPVC(pvcNamespace, pvcName string) error {
    glog.V(4).Infof("Processing PVC %s/%s", pvcNamespace, pvcName)
    klog.V(4).Infof("Processing PVC %s/%s", pvcNamespace, pvcName)
    startTime := time.Now()
    defer func() {
        glog.V(4).Infof("Finished processing PVC %s/%s (%v)", pvcNamespace, pvcName, time.Since(startTime))
        klog.V(4).Infof("Finished processing PVC %s/%s (%v)", pvcNamespace, pvcName, time.Since(startTime))
    }()

    pvc, err := c.pvcLister.PersistentVolumeClaims(pvcNamespace).Get(pvcName)
    if apierrs.IsNotFound(err) {
        glog.V(4).Infof("PVC %s/%s not found, ignoring", pvcNamespace, pvcName)
        klog.V(4).Infof("PVC %s/%s not found, ignoring", pvcNamespace, pvcName)
        return nil
    }
    if err != nil {
@ -188,10 +188,10 @@ func (c *Controller) addFinalizer(pvc *v1.PersistentVolumeClaim) error {
    claimClone.ObjectMeta.Finalizers = append(claimClone.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer)
    _, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(claimClone)
    if err != nil {
        glog.V(3).Infof("Error adding protection finalizer to PVC %s/%s: %v", pvc.Namespace, pvc.Name, err)
        klog.V(3).Infof("Error adding protection finalizer to PVC %s/%s: %v", pvc.Namespace, pvc.Name, err)
        return err
    }
    glog.V(3).Infof("Added protection finalizer to PVC %s/%s", pvc.Namespace, pvc.Name)
    klog.V(3).Infof("Added protection finalizer to PVC %s/%s", pvc.Namespace, pvc.Name)
    return nil
}

@ -200,10 +200,10 @@ func (c *Controller) removeFinalizer(pvc *v1.PersistentVolumeClaim) error {
    claimClone.ObjectMeta.Finalizers = slice.RemoveString(claimClone.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer, nil)
    _, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(claimClone)
    if err != nil {
        glog.V(3).Infof("Error removing protection finalizer from PVC %s/%s: %v", pvc.Namespace, pvc.Name, err)
        klog.V(3).Infof("Error removing protection finalizer from PVC %s/%s: %v", pvc.Namespace, pvc.Name, err)
        return err
    }
    glog.V(3).Infof("Removed protection finalizer from PVC %s/%s", pvc.Namespace, pvc.Name)
    klog.V(3).Infof("Removed protection finalizer from PVC %s/%s", pvc.Namespace, pvc.Name)
    return nil
}

@ -218,12 +218,7 @@ func (c *Controller) isBeingUsed(pvc *v1.PersistentVolumeClaim) (bool, error) {
            // prevents scheduling pods with deletion timestamp, so we can be
            // pretty sure it won't be scheduled in parallel to this check.
            // Therefore this pod does not block the PVC from deletion.
            glog.V(4).Infof("Skipping unscheduled pod %s when checking PVC %s/%s", pod.Name, pvc.Namespace, pvc.Name)
            continue
        }
        if volumeutil.IsPodTerminated(pod, pod.Status) {
            // This pod is being unmounted/detached or is already
            // unmounted/detached. It does not block the PVC from deletion.
            klog.V(4).Infof("Skipping unscheduled pod %s when checking PVC %s/%s", pod.Name, pvc.Namespace, pvc.Name)
            continue
        }
        for _, volume := range pod.Spec.Volumes {
@ -231,13 +226,13 @@ func (c *Controller) isBeingUsed(pvc *v1.PersistentVolumeClaim) (bool, error) {
                continue
            }
            if volume.PersistentVolumeClaim.ClaimName == pvc.Name {
                glog.V(2).Infof("Keeping PVC %s/%s, it is used by pod %s/%s", pvc.Namespace, pvc.Name, pod.Namespace, pod.Name)
                klog.V(2).Infof("Keeping PVC %s/%s, it is used by pod %s/%s", pvc.Namespace, pvc.Name, pod.Namespace, pod.Name)
                return true, nil
            }
        }
    }

    glog.V(3).Infof("PVC %s/%s is unused", pvc.Namespace, pvc.Name)
    klog.V(3).Infof("PVC %s/%s is unused", pvc.Namespace, pvc.Name)
    return false, nil
}

@ -253,7 +248,7 @@ func (c *Controller) pvcAddedUpdated(obj interface{}) {
        utilruntime.HandleError(fmt.Errorf("Couldn't get key for Persistent Volume Claim %#v: %v", pvc, err))
        return
    }
    glog.V(4).Infof("Got event on PVC %s", key)
    klog.V(4).Infof("Got event on PVC %s", key)

    if needToAddFinalizer(pvc) || isDeletionCandidate(pvc) {
        c.queue.Add(key)
@ -281,7 +276,7 @@ func (c *Controller) podAddedDeletedUpdated(obj interface{}, deleted bool) {
        return
    }

    glog.V(4).Infof("Got event on pod %s/%s", pod.Namespace, pod.Name)
    klog.V(4).Infof("Got event on pod %s/%s", pod.Namespace, pod.Name)

    // Enqueue all PVCs that the pod uses
    for _, volume := range pod.Spec.Volumes {

@ -23,7 +23,7 @@ import (
    "time"

    "github.com/davecgh/go-spew/spew"
    "github.com/golang/glog"
    "k8s.io/klog"

    "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
@ -264,14 +264,12 @@ func TestPVCProtectionController(t *testing.T) {
            storageObjectInUseProtectionEnabled: true,
        },
        {
            name: "deleted PVC with finalizer + pods with the PVC and is finished -> finalizer is removed",
            name: "deleted PVC with finalizer + pods with the PVC finished but is not deleted -> finalizer is not removed",
            initialObjects: []runtime.Object{
                withStatus(v1.PodFailed, withPVC(defaultPVCName, pod())),
            },
            updatedPVC: deleted(withProtectionFinalizer(pvc())),
            expectedActions: []clienttesting.Action{
                clienttesting.NewUpdateAction(pvcVer, defaultNS, deleted(pvc())),
            },
            updatedPVC:      deleted(withProtectionFinalizer(pvc())),
            expectedActions: []clienttesting.Action{},
            storageObjectInUseProtectionEnabled: true,
        },
        //
@ -287,14 +285,12 @@ func TestPVCProtectionController(t *testing.T) {
            storageObjectInUseProtectionEnabled: true,
        },
        {
            name: "updated finished Pod -> finalizer is removed",
            name: "updated finished Pod -> finalizer is not removed",
            initialObjects: []runtime.Object{
                deleted(withProtectionFinalizer(pvc())),
            },
            updatedPod: withStatus(v1.PodSucceeded, withPVC(defaultPVCName, pod())),
            expectedActions: []clienttesting.Action{
                clienttesting.NewUpdateAction(pvcVer, defaultNS, deleted(pvc())),
            },
            updatedPod:      withStatus(v1.PodSucceeded, withPVC(defaultPVCName, pod())),
            expectedActions: []clienttesting.Action{},
            storageObjectInUseProtectionEnabled: true,
        },
        {
@ -378,7 +374,7 @@ func TestPVCProtectionController(t *testing.T) {
            break
        }
        if ctrl.queue.Len() > 0 {
            glog.V(5).Infof("Test %q: %d events queue, processing one", test.name, ctrl.queue.Len())
            klog.V(5).Infof("Test %q: %d events queue, processing one", test.name, ctrl.queue.Len())
            ctrl.processNextWorkItem()
        }
        if ctrl.queue.Len() > 0 {
@ -389,7 +385,7 @@ func TestPVCProtectionController(t *testing.T) {
        if currentActionCount < len(test.expectedActions) {
            // Do not log evey wait, only when the action count changes.
            if lastReportedActionCount < currentActionCount {
                glog.V(5).Infof("Test %q: got %d actions out of %d, waiting for the rest", test.name, currentActionCount, len(test.expectedActions))
                klog.V(5).Infof("Test %q: got %d actions out of %d, waiting for the rest", test.name, currentActionCount, len(test.expectedActions))
                lastReportedActionCount = currentActionCount
            }
            // The test expected more to happen, wait for the actions.

40
vendor/k8s.io/kubernetes/pkg/controller/volume/pvprotection/BUILD
generated
vendored
@ -10,16 +10,16 @@ go_library(
        "//pkg/util/metrics:go_default_library",
        "//pkg/util/slice:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
        "//vendor/k8s.io/client-go/util/workqueue:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
        "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
        "//vendor/k8s.io/klog:go_default_library",
    ],
)

@ -44,16 +44,16 @@ go_test(
    deps = [
        "//pkg/controller:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//staging/src/k8s.io/client-go/informers:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
        "//staging/src/k8s.io/client-go/testing:go_default_library",
        "//vendor/github.com/davecgh/go-spew/spew:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
        "//vendor/k8s.io/client-go/testing:go_default_library",
        "//vendor/k8s.io/klog:go_default_library",
    ],
)

26
vendor/k8s.io/kubernetes/pkg/controller/volume/pvprotection/pv_protection_controller.go
generated
vendored
@ -20,7 +20,6 @@ import (
    "fmt"
    "time"

    "github.com/golang/glog"
    "k8s.io/api/core/v1"
    apierrs "k8s.io/apimachinery/pkg/api/errors"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@ -30,6 +29,7 @@ import (
    corelisters "k8s.io/client-go/listers/core/v1"
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/util/workqueue"
    "k8s.io/klog"
    "k8s.io/kubernetes/pkg/controller"
    "k8s.io/kubernetes/pkg/util/metrics"
    "k8s.io/kubernetes/pkg/util/slice"
@ -53,8 +53,8 @@ type Controller struct {
// NewPVProtectionController returns a new *Controller.
func NewPVProtectionController(pvInformer coreinformers.PersistentVolumeInformer, cl clientset.Interface, storageObjectInUseProtectionFeatureEnabled bool) *Controller {
    e := &Controller{
        client: cl,
        queue:  workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvprotection"),
        client:                              cl,
        queue:                               workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvprotection"),
        storageObjectInUseProtectionEnabled: storageObjectInUseProtectionFeatureEnabled,
    }
    if cl != nil && cl.CoreV1().RESTClient().GetRateLimiter() != nil {
@ -78,8 +78,8 @@ func (c *Controller) Run(workers int, stopCh <-chan struct{}) {
    defer utilruntime.HandleCrash()
    defer c.queue.ShutDown()

    glog.Infof("Starting PV protection controller")
    defer glog.Infof("Shutting down PV protection controller")
    klog.Infof("Starting PV protection controller")
    defer klog.Infof("Shutting down PV protection controller")

    if !controller.WaitForCacheSync("PV protection", stopCh, c.pvListerSynced) {
        return
@ -120,15 +120,15 @@ func (c *Controller) processNextWorkItem() bool {
}

func (c *Controller) processPV(pvName string) error {
    glog.V(4).Infof("Processing PV %s", pvName)
    klog.V(4).Infof("Processing PV %s", pvName)
    startTime := time.Now()
    defer func() {
        glog.V(4).Infof("Finished processing PV %s (%v)", pvName, time.Since(startTime))
        klog.V(4).Infof("Finished processing PV %s (%v)", pvName, time.Since(startTime))
    }()

    pv, err := c.pvLister.Get(pvName)
    if apierrs.IsNotFound(err) {
        glog.V(4).Infof("PV %s not found, ignoring", pvName)
        klog.V(4).Infof("PV %s not found, ignoring", pvName)
        return nil
    }
    if err != nil {
@ -163,10 +163,10 @@ func (c *Controller) addFinalizer(pv *v1.PersistentVolume) error {
    pvClone.ObjectMeta.Finalizers = append(pvClone.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer)
    _, err := c.client.CoreV1().PersistentVolumes().Update(pvClone)
    if err != nil {
        glog.V(3).Infof("Error adding protection finalizer to PV %s: %v", pv.Name)
        klog.V(3).Infof("Error adding protection finalizer to PV %s: %v", pv.Name, err)
        return err
    }
    glog.V(3).Infof("Added protection finalizer to PV %s", pv.Name)
    klog.V(3).Infof("Added protection finalizer to PV %s", pv.Name)
    return nil
}

@ -175,10 +175,10 @@ func (c *Controller) removeFinalizer(pv *v1.PersistentVolume) error {
    pvClone.ObjectMeta.Finalizers = slice.RemoveString(pvClone.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer, nil)
    _, err := c.client.CoreV1().PersistentVolumes().Update(pvClone)
    if err != nil {
        glog.V(3).Infof("Error removing protection finalizer from PV %s: %v", pv.Name, err)
        klog.V(3).Infof("Error removing protection finalizer from PV %s: %v", pv.Name, err)
        return err
    }
    glog.V(3).Infof("Removed protection finalizer from PV %s", pv.Name)
    klog.V(3).Infof("Removed protection finalizer from PV %s", pv.Name)
    return nil
}

@ -200,7 +200,7 @@ func (c *Controller) pvAddedUpdated(obj interface{}) {
        utilruntime.HandleError(fmt.Errorf("PV informer returned non-PV object: %#v", obj))
        return
    }
    glog.V(4).Infof("Got event on PV %s", pv.Name)
    klog.V(4).Infof("Got event on PV %s", pv.Name)

    if needToAddFinalizer(pv) || isDeletionCandidate(pv) {
        c.queue.Add(pv.Name)

@ -23,7 +23,7 @@ import (
    "time"

    "github.com/davecgh/go-spew/spew"
    "github.com/golang/glog"
    "k8s.io/klog"

    "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
@ -246,7 +246,7 @@ func TestPVProtectionController(t *testing.T) {
            break
        }
        if ctrl.queue.Len() > 0 {
            glog.V(5).Infof("Test %q: %d events queue, processing one", test.name, ctrl.queue.Len())
            klog.V(5).Infof("Test %q: %d events queue, processing one", test.name, ctrl.queue.Len())
            ctrl.processNextWorkItem()
        }
        if ctrl.queue.Len() > 0 {
@ -257,7 +257,7 @@ func TestPVProtectionController(t *testing.T) {
        if currentActionCount < len(test.expectedActions) {
            // Do not log evey wait, only when the action count changes.
            if lastReportedActionCount < currentActionCount {
                glog.V(5).Infof("Test %q: got %d actions out of %d, waiting for the rest", test.name, currentActionCount, len(test.expectedActions))
                klog.V(5).Infof("Test %q: got %d actions out of %d, waiting for the rest", test.name, currentActionCount, len(test.expectedActions))
                lastReportedActionCount = currentActionCount
            }
            // The test expected more to happen, wait for the actions.