Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-14 18:53:35 +00:00

Commit: Fresh dep ensure

vendor/k8s.io/kubernetes/pkg/scheduler/internal/queue/BUILD (generated, vendored, new file, 43 lines)
@@ -0,0 +1,43 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = ["scheduling_queue.go"],
    importpath = "k8s.io/kubernetes/pkg/scheduler/internal/queue",
    visibility = ["//pkg/scheduler:__subpackages__"],
    deps = [
        "//pkg/api/v1/pod:go_default_library",
        "//pkg/scheduler/algorithm/predicates:go_default_library",
        "//pkg/scheduler/algorithm/priorities/util:go_default_library",
        "//pkg/scheduler/util:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
        "//vendor/k8s.io/klog:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["scheduling_queue_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//pkg/scheduler/util:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

vendor/k8s.io/kubernetes/pkg/scheduler/internal/queue/scheduling_queue.go (generated, vendored, new file, 769 lines)
@@ -0,0 +1,769 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// This file contains structures that implement scheduling queue types.
// Scheduling queues hold pods waiting to be scheduled. This file has two types
// of scheduling queue: 1) a FIFO, which is mostly the same as cache.FIFO, 2) a
// priority queue which has two sub-queues. One sub-queue holds pods that are
// being considered for scheduling. This is called activeQ. Another queue holds
// pods that have already been tried and are determined to be unschedulable.
// The latter is called unschedulableQ.
// FIFO is here for flag-gating purposes and allows us to use the traditional
// scheduling queue when util.PodPriorityEnabled() returns false.

package queue

import (
    "container/heap"
    "fmt"
    "reflect"
    "sync"

    "k8s.io/klog"

    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/tools/cache"
    podutil "k8s.io/kubernetes/pkg/api/v1/pod"
    "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
    priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
    "k8s.io/kubernetes/pkg/scheduler/util"
)

var (
    queueClosed = "scheduling queue is closed"
)

// SchedulingQueue is an interface for a queue to store pods waiting to be scheduled.
// The interface follows a pattern similar to cache.FIFO and cache.Heap and
// makes it easy to use those data structures as a SchedulingQueue.
type SchedulingQueue interface {
    Add(pod *v1.Pod) error
    AddIfNotPresent(pod *v1.Pod) error
    AddUnschedulableIfNotPresent(pod *v1.Pod) error
    // Pop removes the head of the queue and returns it. It blocks if the
    // queue is empty and waits until a new item is added to the queue.
    Pop() (*v1.Pod, error)
    Update(oldPod, newPod *v1.Pod) error
    Delete(pod *v1.Pod) error
    MoveAllToActiveQueue()
    AssignedPodAdded(pod *v1.Pod)
    AssignedPodUpdated(pod *v1.Pod)
    WaitingPodsForNode(nodeName string) []*v1.Pod
    WaitingPods() []*v1.Pod
    // Close closes the SchedulingQueue so that the goroutine which is
    // waiting to pop items can exit gracefully.
    Close()
    // DeleteNominatedPodIfExists deletes nominatedPod from internal cache
    DeleteNominatedPodIfExists(pod *v1.Pod)
}
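
// Illustrative sketch, not part of the vendored file: a scheduler-like
// consumer drives this interface with a blocking Pop loop and exits once
// Close releases it. The "schedule" callback is a hypothetical stand-in.
func consumeQueue(q SchedulingQueue, schedule func(*v1.Pod)) {
    for {
        pod, err := q.Pop() // blocks until an item arrives or Close() is called
        if err != nil {
            return // the queue was closed
        }
        schedule(pod)
    }
}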

// NewSchedulingQueue initializes a new scheduling queue. If pod priority is
// enabled, a priority queue is returned. If it is disabled, a FIFO is returned.
func NewSchedulingQueue() SchedulingQueue {
    if util.PodPriorityEnabled() {
        return NewPriorityQueue()
    }
    return NewFIFO()
}

// FIFO is basically a simple wrapper around cache.FIFO to make it compatible
// with the SchedulingQueue interface.
type FIFO struct {
    *cache.FIFO
}

var _ = SchedulingQueue(&FIFO{}) // Making sure that FIFO implements SchedulingQueue.

// Add adds a pod to the FIFO.
func (f *FIFO) Add(pod *v1.Pod) error {
    return f.FIFO.Add(pod)
}

// AddIfNotPresent adds a pod to the FIFO if it is absent in the FIFO.
func (f *FIFO) AddIfNotPresent(pod *v1.Pod) error {
    return f.FIFO.AddIfNotPresent(pod)
}

// AddUnschedulableIfNotPresent adds an unschedulable pod back to the queue. In
// FIFO it is added to the end of the queue.
func (f *FIFO) AddUnschedulableIfNotPresent(pod *v1.Pod) error {
    return f.FIFO.AddIfNotPresent(pod)
}

// Update updates a pod in the FIFO.
func (f *FIFO) Update(oldPod, newPod *v1.Pod) error {
    return f.FIFO.Update(newPod)
}

// Delete deletes a pod in the FIFO.
func (f *FIFO) Delete(pod *v1.Pod) error {
    return f.FIFO.Delete(pod)
}

// Pop removes the head of FIFO and returns it.
// This is just a copy/paste of cache.Pop(queue Queue) from fifo.go that the
// scheduler has always been using. There is a comment in that file saying that
// this method shouldn't be used in production code, but the scheduler has
// always been using it. This function does minimal error checking.
func (f *FIFO) Pop() (*v1.Pod, error) {
    result, err := f.FIFO.Pop(func(obj interface{}) error { return nil })
    if err == cache.FIFOClosedError {
        return nil, fmt.Errorf(queueClosed)
    }
    return result.(*v1.Pod), err
}

// WaitingPods returns all the waiting pods in the queue.
func (f *FIFO) WaitingPods() []*v1.Pod {
    result := []*v1.Pod{}
    for _, pod := range f.FIFO.List() {
        result = append(result, pod.(*v1.Pod))
    }
    return result
}

// FIFO does not need to react to events, as all pods are always in the active
// scheduling queue anyway.

// AssignedPodAdded does nothing here.
func (f *FIFO) AssignedPodAdded(pod *v1.Pod) {}

// AssignedPodUpdated does nothing here.
func (f *FIFO) AssignedPodUpdated(pod *v1.Pod) {}

// MoveAllToActiveQueue does nothing in FIFO as all pods are always in the active queue.
func (f *FIFO) MoveAllToActiveQueue() {}

// WaitingPodsForNode returns pods that are nominated to run on the given node,
// but FIFO does not support it.
func (f *FIFO) WaitingPodsForNode(nodeName string) []*v1.Pod {
    return nil
}

// Close closes the FIFO queue.
func (f *FIFO) Close() {
    f.FIFO.Close()
}

// DeleteNominatedPodIfExists does nothing in FIFO.
func (f *FIFO) DeleteNominatedPodIfExists(pod *v1.Pod) {}

// NewFIFO creates a FIFO object.
func NewFIFO() *FIFO {
    return &FIFO{FIFO: cache.NewFIFO(cache.MetaNamespaceKeyFunc)}
}
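
// Illustrative sketch, not part of the vendored file: a minimal FIFO
// round-trip. The pod literal is invented for the example.
func exampleFIFORoundTrip() (*v1.Pod, error) {
    f := NewFIFO()
    pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p", Namespace: "ns"}}
    if err := f.Add(pod); err != nil {
        return nil, err
    }
    return f.Pop() // returns the single queued pod
}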

// NominatedNodeName returns the nominated node name of a Pod.
func NominatedNodeName(pod *v1.Pod) string {
    return pod.Status.NominatedNodeName
}

// PriorityQueue implements a scheduling queue. It is an alternative to FIFO.
// The head of PriorityQueue is the highest priority pending pod. This structure
// has two sub-queues. One sub-queue holds pods that are being considered for
// scheduling. This is called activeQ and is a Heap. Another queue holds
// pods that have already been tried and are determined to be unschedulable.
// The latter is called unschedulableQ.
type PriorityQueue struct {
    lock sync.RWMutex
    cond sync.Cond

    // activeQ is a heap structure that the scheduler actively looks at to find
    // pods to schedule. The head of the heap is the highest priority pod.
    activeQ *Heap
    // unschedulableQ holds pods that have been tried and determined unschedulable.
    unschedulableQ *UnschedulablePodsMap
    // nominatedPods is a map keyed by a node name and the value is a list of
    // pods which are nominated to run on the node. These are pods which can be in
    // the activeQ or unschedulableQ.
    nominatedPods map[string][]*v1.Pod
    // receivedMoveRequest is set to true whenever we receive a request to move a
    // pod from the unschedulableQ to the activeQ, and is set to false when we pop
    // a pod from the activeQ. It indicates if we received a move request while a
    // pod was in flight (we were trying to schedule it). In such a case, we put
    // the pod back into the activeQ if it is determined unschedulable.
    receivedMoveRequest bool

    // closed indicates that the queue is closed.
    // It is mainly used to let Pop() exit its control loop while waiting for an item.
    closed bool
}

// Making sure that PriorityQueue implements SchedulingQueue.
var _ = SchedulingQueue(&PriorityQueue{})

// NewPriorityQueue creates a PriorityQueue object.
func NewPriorityQueue() *PriorityQueue {
    pq := &PriorityQueue{
        activeQ:        newHeap(cache.MetaNamespaceKeyFunc, util.HigherPriorityPod),
        unschedulableQ: newUnschedulablePodsMap(),
        nominatedPods:  map[string][]*v1.Pod{},
    }
    pq.cond.L = &pq.lock
    return pq
}
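
// Illustrative sketch, not part of the vendored file: with two pods queued,
// Pop returns the higher-priority one first, because the activeQ heap is
// ordered by util.HigherPriorityPod. Names, namespaces, and priorities are
// invented for the example.
func examplePriorityOrdering() {
    q := NewPriorityQueue()
    hi, lo := int32(1000), int32(10)
    _ = q.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "hi", Namespace: "ns", UID: "hi"}, Spec: v1.PodSpec{Priority: &hi}})
    _ = q.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "lo", Namespace: "ns", UID: "lo"}, Spec: v1.PodSpec{Priority: &lo}})
    first, _ := q.Pop()  // the pod with priority 1000
    second, _ := q.Pop() // the pod with priority 10
    _, _ = first, second
}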

// addNominatedPodIfNeeded adds a pod to nominatedPods if it has a NominatedNodeName and it does not
// already exist in the map. Adding an existing pod is not going to update the pod.
func (p *PriorityQueue) addNominatedPodIfNeeded(pod *v1.Pod) {
    nnn := NominatedNodeName(pod)
    if len(nnn) > 0 {
        for _, np := range p.nominatedPods[nnn] {
            if np.UID == pod.UID {
                klog.V(4).Infof("Pod %v/%v already exists in the nominated map!", pod.Namespace, pod.Name)
                return
            }
        }
        p.nominatedPods[nnn] = append(p.nominatedPods[nnn], pod)
    }
}

// deleteNominatedPodIfExists deletes a pod from the nominatedPods.
// NOTE: this function assumes lock has been acquired in caller.
func (p *PriorityQueue) deleteNominatedPodIfExists(pod *v1.Pod) {
    nnn := NominatedNodeName(pod)
    if len(nnn) > 0 {
        for i, np := range p.nominatedPods[nnn] {
            if np.UID == pod.UID {
                p.nominatedPods[nnn] = append(p.nominatedPods[nnn][:i], p.nominatedPods[nnn][i+1:]...)
                if len(p.nominatedPods[nnn]) == 0 {
                    delete(p.nominatedPods, nnn)
                }
                break
            }
        }
    }
}

// updateNominatedPod updates a pod in the nominatedPods.
func (p *PriorityQueue) updateNominatedPod(oldPod, newPod *v1.Pod) {
    // Even if the nominated node name of the Pod is not changed, we must delete and add it again
    // to ensure that its pointer is updated.
    p.deleteNominatedPodIfExists(oldPod)
    p.addNominatedPodIfNeeded(newPod)
}

// Add adds a pod to the active queue. It should be called only when a new pod
// is added so there is no chance the pod is already in either queue.
func (p *PriorityQueue) Add(pod *v1.Pod) error {
    p.lock.Lock()
    defer p.lock.Unlock()
    err := p.activeQ.Add(pod)
    if err != nil {
        klog.Errorf("Error adding pod %v/%v to the scheduling queue: %v", pod.Namespace, pod.Name, err)
    } else {
        if p.unschedulableQ.get(pod) != nil {
            klog.Errorf("Error: pod %v/%v is already in the unschedulable queue.", pod.Namespace, pod.Name)
            p.deleteNominatedPodIfExists(pod)
            p.unschedulableQ.delete(pod)
        }
        p.addNominatedPodIfNeeded(pod)
        p.cond.Broadcast()
    }
    return err
}

// AddIfNotPresent adds a pod to the active queue if it is not present in any of
// the two queues. If it is present in either, it doesn't do anything.
func (p *PriorityQueue) AddIfNotPresent(pod *v1.Pod) error {
    p.lock.Lock()
    defer p.lock.Unlock()
    if p.unschedulableQ.get(pod) != nil {
        return nil
    }
    if _, exists, _ := p.activeQ.Get(pod); exists {
        return nil
    }
    err := p.activeQ.Add(pod)
    if err != nil {
        klog.Errorf("Error adding pod %v/%v to the scheduling queue: %v", pod.Namespace, pod.Name, err)
    } else {
        p.addNominatedPodIfNeeded(pod)
        p.cond.Broadcast()
    }
    return err
}

func isPodUnschedulable(pod *v1.Pod) bool {
    _, cond := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
    return cond != nil && cond.Status == v1.ConditionFalse && cond.Reason == v1.PodReasonUnschedulable
}

// AddUnschedulableIfNotPresent does nothing if the pod is present in either
// queue. Otherwise it adds the pod to the unschedulable queue if
// p.receivedMoveRequest is false, and to the activeQ if p.receivedMoveRequest is true.
func (p *PriorityQueue) AddUnschedulableIfNotPresent(pod *v1.Pod) error {
    p.lock.Lock()
    defer p.lock.Unlock()
    if p.unschedulableQ.get(pod) != nil {
        return fmt.Errorf("pod is already present in unschedulableQ")
    }
    if _, exists, _ := p.activeQ.Get(pod); exists {
        return fmt.Errorf("pod is already present in the activeQ")
    }
    if !p.receivedMoveRequest && isPodUnschedulable(pod) {
        p.unschedulableQ.addOrUpdate(pod)
        p.addNominatedPodIfNeeded(pod)
        return nil
    }
    err := p.activeQ.Add(pod)
    if err == nil {
        p.addNominatedPodIfNeeded(pod)
        p.cond.Broadcast()
    }
    return err
}

// Pop removes the head of the active queue and returns it. It blocks if the
// activeQ is empty and waits until a new item is added to the queue. It also
// clears receivedMoveRequest to mark the beginning of a new scheduling cycle.
func (p *PriorityQueue) Pop() (*v1.Pod, error) {
    p.lock.Lock()
    defer p.lock.Unlock()
    for len(p.activeQ.data.queue) == 0 {
        // When the queue is empty, invocation of Pop() is blocked until a new item is enqueued.
        // When Close() is called, p.closed is set and the condition is broadcast,
        // which causes this loop to continue and return from the Pop().
        if p.closed {
            return nil, fmt.Errorf(queueClosed)
        }
        p.cond.Wait()
    }
    obj, err := p.activeQ.Pop()
    if err != nil {
        return nil, err
    }
    pod := obj.(*v1.Pod)
    p.receivedMoveRequest = false
    return pod, err
}

// isPodUpdated checks if the pod is updated in a way that may have made it
// schedulable. It drops the status of the pod and compares it with the old version.
func isPodUpdated(oldPod, newPod *v1.Pod) bool {
    strip := func(pod *v1.Pod) *v1.Pod {
        p := pod.DeepCopy()
        p.ResourceVersion = ""
        p.Generation = 0
        p.Status = v1.PodStatus{}
        return p
    }
    return !reflect.DeepEqual(strip(oldPod), strip(newPod))
}
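
// Illustrative sketch, not part of the vendored file: a status-only change
// is stripped before comparison, so it does not count as an update, while a
// spec change does. The field values are invented for the example.
func exampleIsPodUpdated() {
    old := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p", Namespace: "ns"}}
    statusOnly := old.DeepCopy()
    statusOnly.Status.Message = "retried" // dropped by strip()
    specChange := old.DeepCopy()
    specChange.Spec.NodeName = "node1" // survives strip()
    _ = isPodUpdated(old, statusOnly)  // false: only Status differs
    _ = isPodUpdated(old, specChange)  // true: Spec differs
}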

// Update updates a pod in the active queue if present. Otherwise, it removes
// the item from the unschedulable queue and adds the updated one to the active
// queue.
func (p *PriorityQueue) Update(oldPod, newPod *v1.Pod) error {
    p.lock.Lock()
    defer p.lock.Unlock()
    // If the pod is already in the active queue, just update it there.
    if _, exists, _ := p.activeQ.Get(newPod); exists {
        p.updateNominatedPod(oldPod, newPod)
        err := p.activeQ.Update(newPod)
        return err
    }
    // If the pod is in the unschedulable queue, updating it may make it schedulable.
    if usPod := p.unschedulableQ.get(newPod); usPod != nil {
        p.updateNominatedPod(oldPod, newPod)
        if isPodUpdated(oldPod, newPod) {
            p.unschedulableQ.delete(usPod)
            err := p.activeQ.Add(newPod)
            if err == nil {
                p.cond.Broadcast()
            }
            return err
        }
        p.unschedulableQ.addOrUpdate(newPod)
        return nil
    }
    // If the pod is not in either of the two queues, we put it in the active queue.
    err := p.activeQ.Add(newPod)
    if err == nil {
        p.addNominatedPodIfNeeded(newPod)
        p.cond.Broadcast()
    }
    return err
}

// Delete deletes the item from either of the two queues. It assumes the pod is
// only in one queue.
func (p *PriorityQueue) Delete(pod *v1.Pod) error {
    p.lock.Lock()
    defer p.lock.Unlock()
    p.deleteNominatedPodIfExists(pod)
    err := p.activeQ.Delete(pod)
    if err != nil { // The item was probably not found in the activeQ.
        p.unschedulableQ.delete(pod)
    }
    return nil
}

// AssignedPodAdded is called when a bound pod is added. Creation of this pod
// may make pending pods with matching affinity terms schedulable.
func (p *PriorityQueue) AssignedPodAdded(pod *v1.Pod) {
    p.lock.Lock()
    p.movePodsToActiveQueue(p.getUnschedulablePodsWithMatchingAffinityTerm(pod))
    p.lock.Unlock()
}

// AssignedPodUpdated is called when a bound pod is updated. A change of labels
// may make pending pods with matching affinity terms schedulable.
func (p *PriorityQueue) AssignedPodUpdated(pod *v1.Pod) {
    p.lock.Lock()
    p.movePodsToActiveQueue(p.getUnschedulablePodsWithMatchingAffinityTerm(pod))
    p.lock.Unlock()
}

// MoveAllToActiveQueue moves all pods from unschedulableQ to activeQ. This
// function adds all pods and then signals the condition variable to ensure that
// if Pop() is waiting for an item, it receives it after all the pods are in the
// queue and the head is the highest priority pod.
// TODO(bsalamat): We should add a back-off mechanism here so that a high priority
// pod which is unschedulable does not go to the head of the queue frequently. For
// example, in a cluster where a lot of pods are being deleted, such a high priority
// pod can deprive other pods of getting scheduled.
func (p *PriorityQueue) MoveAllToActiveQueue() {
    p.lock.Lock()
    defer p.lock.Unlock()
    for _, pod := range p.unschedulableQ.pods {
        if err := p.activeQ.Add(pod); err != nil {
            klog.Errorf("Error adding pod %v/%v to the scheduling queue: %v", pod.Namespace, pod.Name, err)
        }
    }
    p.unschedulableQ.clear()
    p.receivedMoveRequest = true
    p.cond.Broadcast()
}
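
// Illustrative sketch, not part of the vendored file, in the spirit of the
// tests further down: a pod parked in unschedulableQ becomes poppable after
// MoveAllToActiveQueue. The pod literal is invented for the example.
func exampleMoveAllFlush() {
    q := NewPriorityQueue()
    pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "parked", Namespace: "ns", UID: "parked"}}
    q.unschedulableQ.addOrUpdate(pod) // park it directly, as the tests do
    q.MoveAllToActiveQueue()          // flush; also sets receivedMoveRequest
    popped, _ := q.Pop()              // returns the parked pod without blocking
    _ = popped
}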

// NOTE: this function assumes lock has been acquired in caller
func (p *PriorityQueue) movePodsToActiveQueue(pods []*v1.Pod) {
    for _, pod := range pods {
        if err := p.activeQ.Add(pod); err == nil {
            p.unschedulableQ.delete(pod)
        } else {
            klog.Errorf("Error adding pod %v/%v to the scheduling queue: %v", pod.Namespace, pod.Name, err)
        }
    }
    p.receivedMoveRequest = true
    p.cond.Broadcast()
}

// getUnschedulablePodsWithMatchingAffinityTerm returns unschedulable pods which have
// any affinity term that matches "pod".
// NOTE: this function assumes lock has been acquired in caller.
func (p *PriorityQueue) getUnschedulablePodsWithMatchingAffinityTerm(pod *v1.Pod) []*v1.Pod {
    var podsToMove []*v1.Pod
    for _, up := range p.unschedulableQ.pods {
        affinity := up.Spec.Affinity
        if affinity != nil && affinity.PodAffinity != nil {
            terms := predicates.GetPodAffinityTerms(affinity.PodAffinity)
            for _, term := range terms {
                namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(up, &term)
                selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector)
                if err != nil {
                    klog.Errorf("Error getting label selectors for pod: %v.", up.Name)
                }
                if priorityutil.PodMatchesTermsNamespaceAndSelector(pod, namespaces, selector) {
                    podsToMove = append(podsToMove, up)
                    break
                }
            }
        }
    }
    return podsToMove
}

// WaitingPodsForNode returns pods that are nominated to run on the given node,
// but are waiting for other pods to be removed from the node before they
// can actually be scheduled.
func (p *PriorityQueue) WaitingPodsForNode(nodeName string) []*v1.Pod {
    p.lock.RLock()
    defer p.lock.RUnlock()
    if list, ok := p.nominatedPods[nodeName]; ok {
        return list
    }
    return nil
}

// WaitingPods returns all the waiting pods in the queue.
func (p *PriorityQueue) WaitingPods() []*v1.Pod {
    p.lock.Lock()
    defer p.lock.Unlock()

    result := []*v1.Pod{}
    for _, pod := range p.activeQ.List() {
        result = append(result, pod.(*v1.Pod))
    }
    for _, pod := range p.unschedulableQ.pods {
        result = append(result, pod)
    }
    return result
}

// Close closes the priority queue.
func (p *PriorityQueue) Close() {
    p.lock.Lock()
    defer p.lock.Unlock()
    p.closed = true
    p.cond.Broadcast()
}

// DeleteNominatedPodIfExists deletes the pod from the internal cache if it is a nominated pod.
func (p *PriorityQueue) DeleteNominatedPodIfExists(pod *v1.Pod) {
    p.lock.Lock()
    p.deleteNominatedPodIfExists(pod)
    p.lock.Unlock()
}

// UnschedulablePodsMap holds pods that cannot be scheduled. This data structure
// is used to implement unschedulableQ.
type UnschedulablePodsMap struct {
    // pods is a map keyed by a pod's full-name and the value is a pointer to the pod.
    pods    map[string]*v1.Pod
    keyFunc func(*v1.Pod) string
}

// addOrUpdate adds a pod to the unschedulable pods map.
func (u *UnschedulablePodsMap) addOrUpdate(pod *v1.Pod) {
    u.pods[u.keyFunc(pod)] = pod
}

// delete deletes a pod from the unschedulable pods map.
func (u *UnschedulablePodsMap) delete(pod *v1.Pod) {
    delete(u.pods, u.keyFunc(pod))
}

// get returns the pod if a pod with the same key as the key of the given "pod"
// is found in the map. It returns nil otherwise.
func (u *UnschedulablePodsMap) get(pod *v1.Pod) *v1.Pod {
    podKey := u.keyFunc(pod)
    if p, exists := u.pods[podKey]; exists {
        return p
    }
    return nil
}

// clear removes all the entries from the unschedulable pods map.
func (u *UnschedulablePodsMap) clear() {
    u.pods = make(map[string]*v1.Pod)
}

// newUnschedulablePodsMap initializes a new object of UnschedulablePodsMap.
func newUnschedulablePodsMap() *UnschedulablePodsMap {
    return &UnschedulablePodsMap{
        pods:    make(map[string]*v1.Pod),
        keyFunc: util.GetPodFullName,
    }
}

// Below is the implementation of a heap. The logic is pretty much the same
// as cache.heap; however, this heap does not perform synchronization. It leaves
// synchronization to the SchedulingQueue.

// LessFunc is a function type to compare two objects.
type LessFunc func(interface{}, interface{}) bool

// KeyFunc is a function type to get the key from an object.
type KeyFunc func(obj interface{}) (string, error)

type heapItem struct {
    obj   interface{} // The object which is stored in the heap.
    index int         // The index of the object's key in the Heap.queue.
}

type itemKeyValue struct {
    key string
    obj interface{}
}

// heapData is an internal struct that implements the standard heap interface
// and keeps the data stored in the heap.
type heapData struct {
    // items is a map from key of the objects to the objects and their index.
    // We depend on the property that items in the map are in the queue and vice versa.
    items map[string]*heapItem
    // queue implements a heap data structure and keeps the order of elements
    // according to the heap invariant. The queue keeps the keys of objects stored
    // in "items".
    queue []string

    // keyFunc is used to make the key used for queued item insertion and retrieval, and
    // should be deterministic.
    keyFunc KeyFunc
    // lessFunc is used to compare two objects in the heap.
    lessFunc LessFunc
}

var (
    _ = heap.Interface(&heapData{}) // heapData is a standard heap
)

// Less compares two objects and returns true if the first one should go
// in front of the second one in the heap.
func (h *heapData) Less(i, j int) bool {
    if i >= len(h.queue) || j >= len(h.queue) {
        return false
    }
    itemi, ok := h.items[h.queue[i]]
    if !ok {
        return false
    }
    itemj, ok := h.items[h.queue[j]]
    if !ok {
        return false
    }
    return h.lessFunc(itemi.obj, itemj.obj)
}

// Len returns the number of items in the Heap.
func (h *heapData) Len() int { return len(h.queue) }

// Swap implements swapping of two elements in the heap. This is a part of standard
// heap interface and should never be called directly.
func (h *heapData) Swap(i, j int) {
    h.queue[i], h.queue[j] = h.queue[j], h.queue[i]
    item := h.items[h.queue[i]]
    item.index = i
    item = h.items[h.queue[j]]
    item.index = j
}

// Push is supposed to be called by heap.Push only.
func (h *heapData) Push(kv interface{}) {
    keyValue := kv.(*itemKeyValue)
    n := len(h.queue)
    h.items[keyValue.key] = &heapItem{keyValue.obj, n}
    h.queue = append(h.queue, keyValue.key)
}

// Pop is supposed to be called by heap.Pop only.
func (h *heapData) Pop() interface{} {
    key := h.queue[len(h.queue)-1]
    h.queue = h.queue[0 : len(h.queue)-1]
    item, ok := h.items[key]
    if !ok {
        // This is an error
        return nil
    }
    delete(h.items, key)
    return item.obj
}

// Heap is a producer/consumer queue that implements a heap data structure.
// It can be used to implement priority queues and similar data structures.
type Heap struct {
    // data stores objects and has a queue that keeps their ordering according
    // to the heap invariant.
    data *heapData
}

// Add inserts an item, and puts it in the queue. The item is updated if it
// already exists.
func (h *Heap) Add(obj interface{}) error {
    key, err := h.data.keyFunc(obj)
    if err != nil {
        return cache.KeyError{Obj: obj, Err: err}
    }
    if _, exists := h.data.items[key]; exists {
        h.data.items[key].obj = obj
        heap.Fix(h.data, h.data.items[key].index)
    } else {
        heap.Push(h.data, &itemKeyValue{key, obj})
    }
    return nil
}

// AddIfNotPresent inserts an item, and puts it in the queue. If an item with
// the key is present in the map, no change is made to the item.
func (h *Heap) AddIfNotPresent(obj interface{}) error {
    key, err := h.data.keyFunc(obj)
    if err != nil {
        return cache.KeyError{Obj: obj, Err: err}
    }
    if _, exists := h.data.items[key]; !exists {
        heap.Push(h.data, &itemKeyValue{key, obj})
    }
    return nil
}

// Update is the same as Add in this implementation. When the item does not
// exist, it is added.
func (h *Heap) Update(obj interface{}) error {
    return h.Add(obj)
}

// Delete removes an item.
func (h *Heap) Delete(obj interface{}) error {
    key, err := h.data.keyFunc(obj)
    if err != nil {
        return cache.KeyError{Obj: obj, Err: err}
    }
    if item, ok := h.data.items[key]; ok {
        heap.Remove(h.data, item.index)
        return nil
    }
    return fmt.Errorf("object not found")
}

// Pop returns the head of the heap.
func (h *Heap) Pop() (interface{}, error) {
    obj := heap.Pop(h.data)
    if obj != nil {
        return obj, nil
    }
    return nil, fmt.Errorf("object was removed from heap data")
}

// Get returns the requested item, or sets exists=false.
func (h *Heap) Get(obj interface{}) (interface{}, bool, error) {
    key, err := h.data.keyFunc(obj)
    if err != nil {
        return nil, false, cache.KeyError{Obj: obj, Err: err}
    }
    return h.GetByKey(key)
}

// GetByKey returns the requested item, or sets exists=false.
func (h *Heap) GetByKey(key string) (interface{}, bool, error) {
    item, exists := h.data.items[key]
    if !exists {
        return nil, false, nil
    }
    return item.obj, true, nil
}

// List returns a list of all the items.
func (h *Heap) List() []interface{} {
    list := make([]interface{}, 0, len(h.data.items))
    for _, item := range h.data.items {
        list = append(list, item.obj)
    }
    return list
}

// newHeap returns a Heap which can be used to queue up items to process.
func newHeap(keyFn KeyFunc, lessFn LessFunc) *Heap {
    return &Heap{
        data: &heapData{
            items:    map[string]*heapItem{},
            queue:    []string{},
            keyFunc:  keyFn,
            lessFunc: lessFn,
        },
    }
}
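
// Illustrative sketch, not part of the vendored file: because this heap is
// keyed, adding an item under an existing key updates it and re-sorts in
// place. The "scored" item type is invented for the example.
type scored struct {
    name  string
    score int
}

func exampleKeyedHeap() {
    h := newHeap(
        func(obj interface{}) (string, error) { return obj.(*scored).name, nil },
        func(a, b interface{}) bool { return a.(*scored).score < b.(*scored).score },
    )
    _ = h.Add(&scored{name: "a", score: 3})
    _ = h.Add(&scored{name: "b", score: 1})
    _ = h.Add(&scored{name: "a", score: 0}) // same key: updates and re-sorts
    top, _ := h.Pop()                       // the "a" item with score 0
    _ = top
}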

vendor/k8s.io/kubernetes/pkg/scheduler/internal/queue/scheduling_queue_test.go (generated, vendored, new file, 514 lines)
@@ -0,0 +1,514 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package queue

import (
    "fmt"
    "reflect"
    "sync"
    "testing"

    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/kubernetes/pkg/scheduler/util"
)

var negPriority, lowPriority, midPriority, highPriority, veryHighPriority = int32(-100), int32(0), int32(100), int32(1000), int32(10000)
var mediumPriority = (lowPriority + highPriority) / 2
var highPriorityPod, highPriNominatedPod, medPriorityPod, unschedulablePod = v1.Pod{
    ObjectMeta: metav1.ObjectMeta{
        Name:      "hpp",
        Namespace: "ns1",
        UID:       "hppns1",
    },
    Spec: v1.PodSpec{
        Priority: &highPriority,
    },
},
    v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Name:      "hpp",
            Namespace: "ns1",
            UID:       "hppns1",
        },
        Spec: v1.PodSpec{
            Priority: &highPriority,
        },
        Status: v1.PodStatus{
            NominatedNodeName: "node1",
        },
    },
    v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Name:      "mpp",
            Namespace: "ns2",
            UID:       "mppns2",
            Annotations: map[string]string{
                "annot2": "val2",
            },
        },
        Spec: v1.PodSpec{
            Priority: &mediumPriority,
        },
        Status: v1.PodStatus{
            NominatedNodeName: "node1",
        },
    },
    v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Name:      "up",
            Namespace: "ns1",
            UID:       "upns1",
            Annotations: map[string]string{
                "annot2": "val2",
            },
        },
        Spec: v1.PodSpec{
            Priority: &lowPriority,
        },
        Status: v1.PodStatus{
            Conditions: []v1.PodCondition{
                {
                    Type:   v1.PodScheduled,
                    Status: v1.ConditionFalse,
                    Reason: v1.PodReasonUnschedulable,
                },
            },
            NominatedNodeName: "node1",
        },
    }

func TestPriorityQueue_Add(t *testing.T) {
    q := NewPriorityQueue()
    q.Add(&medPriorityPod)
    q.Add(&unschedulablePod)
    q.Add(&highPriorityPod)
    expectedNominatedPods := map[string][]*v1.Pod{
        "node1": {&medPriorityPod, &unschedulablePod},
    }
    if !reflect.DeepEqual(q.nominatedPods, expectedNominatedPods) {
        t.Errorf("Unexpected nominated map after adding pods. Expected: %v, got: %v", expectedNominatedPods, q.nominatedPods)
    }
    if p, err := q.Pop(); err != nil || p != &highPriorityPod {
        t.Errorf("Expected: %v after Pop, but got: %v", highPriorityPod.Name, p.Name)
    }
    if p, err := q.Pop(); err != nil || p != &medPriorityPod {
        t.Errorf("Expected: %v after Pop, but got: %v", medPriorityPod.Name, p.Name)
    }
    if p, err := q.Pop(); err != nil || p != &unschedulablePod {
        t.Errorf("Expected: %v after Pop, but got: %v", unschedulablePod.Name, p.Name)
    }
    if len(q.nominatedPods["node1"]) != 2 {
        t.Errorf("Expected medPriorityPod and unschedulablePod to still be present in nominatedPods: %v", q.nominatedPods["node1"])
    }
}

func TestPriorityQueue_AddIfNotPresent(t *testing.T) {
    q := NewPriorityQueue()
    q.unschedulableQ.addOrUpdate(&highPriNominatedPod)
    q.AddIfNotPresent(&highPriNominatedPod) // Must not add anything.
    q.AddIfNotPresent(&medPriorityPod)
    q.AddIfNotPresent(&unschedulablePod)
    expectedNominatedPods := map[string][]*v1.Pod{
        "node1": {&medPriorityPod, &unschedulablePod},
    }
    if !reflect.DeepEqual(q.nominatedPods, expectedNominatedPods) {
        t.Errorf("Unexpected nominated map after adding pods. Expected: %v, got: %v", expectedNominatedPods, q.nominatedPods)
    }
    if p, err := q.Pop(); err != nil || p != &medPriorityPod {
        t.Errorf("Expected: %v after Pop, but got: %v", medPriorityPod.Name, p.Name)
    }
    if p, err := q.Pop(); err != nil || p != &unschedulablePod {
        t.Errorf("Expected: %v after Pop, but got: %v", unschedulablePod.Name, p.Name)
    }
    if len(q.nominatedPods["node1"]) != 2 {
        t.Errorf("Expected medPriorityPod and unschedulablePod to still be present in nominatedPods: %v", q.nominatedPods["node1"])
    }
    if q.unschedulableQ.get(&highPriNominatedPod) != &highPriNominatedPod {
        t.Errorf("Pod %v was not found in the unschedulableQ.", highPriNominatedPod.Name)
    }
}

func TestPriorityQueue_AddUnschedulableIfNotPresent(t *testing.T) {
    q := NewPriorityQueue()
    q.Add(&highPriNominatedPod)
    q.AddUnschedulableIfNotPresent(&highPriNominatedPod) // Must not add anything.
    q.AddUnschedulableIfNotPresent(&medPriorityPod)      // This should go to activeQ.
    q.AddUnschedulableIfNotPresent(&unschedulablePod)
    expectedNominatedPods := map[string][]*v1.Pod{
        "node1": {&highPriNominatedPod, &medPriorityPod, &unschedulablePod},
    }
    if !reflect.DeepEqual(q.nominatedPods, expectedNominatedPods) {
        t.Errorf("Unexpected nominated map after adding pods. Expected: %v, got: %v", expectedNominatedPods, q.nominatedPods)
    }
    if p, err := q.Pop(); err != nil || p != &highPriNominatedPod {
        t.Errorf("Expected: %v after Pop, but got: %v", highPriNominatedPod.Name, p.Name)
    }
    if p, err := q.Pop(); err != nil || p != &medPriorityPod {
        t.Errorf("Expected: %v after Pop, but got: %v", medPriorityPod.Name, p.Name)
    }
    if len(q.nominatedPods) != 1 {
        t.Errorf("Expected nominatedPods to have one element: %v", q.nominatedPods)
    }
    if q.unschedulableQ.get(&unschedulablePod) != &unschedulablePod {
        t.Errorf("Pod %v was not found in the unschedulableQ.", unschedulablePod.Name)
    }
}

func TestPriorityQueue_Pop(t *testing.T) {
    q := NewPriorityQueue()
    wg := sync.WaitGroup{}
    wg.Add(1)
    go func() {
        defer wg.Done()
        if p, err := q.Pop(); err != nil || p != &medPriorityPod {
            t.Errorf("Expected: %v after Pop, but got: %v", medPriorityPod.Name, p.Name)
        }
        if len(q.nominatedPods["node1"]) != 1 {
            t.Errorf("Expected medPriorityPod to be present in nominatedPods: %v", q.nominatedPods["node1"])
        }
    }()
    q.Add(&medPriorityPod)
    wg.Wait()
}

func TestPriorityQueue_Update(t *testing.T) {
    q := NewPriorityQueue()
    q.Update(nil, &highPriorityPod)
    if _, exists, _ := q.activeQ.Get(&highPriorityPod); !exists {
        t.Errorf("Expected %v to be added to activeQ.", highPriorityPod.Name)
    }
    if len(q.nominatedPods) != 0 {
        t.Errorf("Expected nominatedPods to be empty: %v", q.nominatedPods)
    }
    // Update highPriorityPod and add a nominatedNodeName to it.
    q.Update(&highPriorityPod, &highPriNominatedPod)
    if q.activeQ.data.Len() != 1 {
        t.Error("Expected only one item in activeQ.")
    }
    if len(q.nominatedPods) != 1 {
        t.Errorf("Expected one item in nominatedPods map: %v", q.nominatedPods)
    }
    // Updating an unschedulable pod which is not in either of the two queues
    // should add the pod to activeQ.
    q.Update(&unschedulablePod, &unschedulablePod)
    if _, exists, _ := q.activeQ.Get(&unschedulablePod); !exists {
        t.Errorf("Expected %v to be added to activeQ.", unschedulablePod.Name)
    }
    // Updating a pod that is already in activeQ should not change it.
    q.Update(&unschedulablePod, &unschedulablePod)
    if len(q.unschedulableQ.pods) != 0 {
        t.Error("Expected unschedulableQ to be empty.")
    }
    if _, exists, _ := q.activeQ.Get(&unschedulablePod); !exists {
        t.Errorf("Expected: %v to be added to activeQ.", unschedulablePod.Name)
    }
    if p, err := q.Pop(); err != nil || p != &highPriNominatedPod {
        t.Errorf("Expected: %v after Pop, but got: %v", highPriorityPod.Name, p.Name)
    }
}

func TestPriorityQueue_Delete(t *testing.T) {
    q := NewPriorityQueue()
    q.Update(&highPriorityPod, &highPriNominatedPod)
    q.Add(&unschedulablePod)
    q.Delete(&highPriNominatedPod)
    if _, exists, _ := q.activeQ.Get(&unschedulablePod); !exists {
        t.Errorf("Expected %v to be in activeQ.", unschedulablePod.Name)
    }
    if _, exists, _ := q.activeQ.Get(&highPriNominatedPod); exists {
        t.Errorf("Didn't expect %v to be in activeQ.", highPriorityPod.Name)
    }
    if len(q.nominatedPods) != 1 {
        t.Errorf("Expected nominatedPods to have only 'unschedulablePod': %v", q.nominatedPods)
    }
    q.Delete(&unschedulablePod)
    if len(q.nominatedPods) != 0 {
        t.Errorf("Expected nominatedPods to be empty: %v", q.nominatedPods)
    }
}

func TestPriorityQueue_MoveAllToActiveQueue(t *testing.T) {
    q := NewPriorityQueue()
    q.Add(&medPriorityPod)
    q.unschedulableQ.addOrUpdate(&unschedulablePod)
    q.unschedulableQ.addOrUpdate(&highPriorityPod)
    q.MoveAllToActiveQueue()
    if q.activeQ.data.Len() != 3 {
        t.Error("Expected all items to be in activeQ.")
    }
}

// TestPriorityQueue_AssignedPodAdded tests AssignedPodAdded. It checks that
// when a pod with pod affinity is in unschedulableQ and another pod with a
// matching label is added, the unschedulable pod is moved to activeQ.
func TestPriorityQueue_AssignedPodAdded(t *testing.T) {
    affinityPod := unschedulablePod.DeepCopy()
    affinityPod.Name = "afp"
    affinityPod.Spec = v1.PodSpec{
        Affinity: &v1.Affinity{
            PodAffinity: &v1.PodAffinity{
                RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
                    {
                        LabelSelector: &metav1.LabelSelector{
                            MatchExpressions: []metav1.LabelSelectorRequirement{
                                {
                                    Key:      "service",
                                    Operator: metav1.LabelSelectorOpIn,
                                    Values:   []string{"securityscan", "value2"},
                                },
                            },
                        },
                        TopologyKey: "region",
                    },
                },
            },
        },
        Priority: &mediumPriority,
    }
    labelPod := v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Name:      "lbp",
            Namespace: affinityPod.Namespace,
            Labels:    map[string]string{"service": "securityscan"},
        },
        Spec: v1.PodSpec{NodeName: "machine1"},
    }

    q := NewPriorityQueue()
    q.Add(&medPriorityPod)
    // Add a couple of pods to the unschedulableQ.
    q.unschedulableQ.addOrUpdate(&unschedulablePod)
    q.unschedulableQ.addOrUpdate(affinityPod)
    // Simulate addition of an assigned pod. The pod has matching labels for
    // affinityPod. So, affinityPod should go to activeQ.
    q.AssignedPodAdded(&labelPod)
    if q.unschedulableQ.get(affinityPod) != nil {
        t.Error("affinityPod is still in the unschedulableQ.")
    }
    if _, exists, _ := q.activeQ.Get(affinityPod); !exists {
        t.Error("affinityPod is not moved to activeQ.")
    }
    // Check that the other pod is still in the unschedulableQ.
    if q.unschedulableQ.get(&unschedulablePod) == nil {
        t.Error("unschedulablePod is not in the unschedulableQ.")
    }
}

func TestPriorityQueue_WaitingPodsForNode(t *testing.T) {
    q := NewPriorityQueue()
    q.Add(&medPriorityPod)
    q.Add(&unschedulablePod)
    q.Add(&highPriorityPod)
    if p, err := q.Pop(); err != nil || p != &highPriorityPod {
        t.Errorf("Expected: %v after Pop, but got: %v", highPriorityPod.Name, p.Name)
    }
    expectedList := []*v1.Pod{&medPriorityPod, &unschedulablePod}
    if !reflect.DeepEqual(expectedList, q.WaitingPodsForNode("node1")) {
        t.Error("Unexpected list of nominated Pods for node.")
    }
    if q.WaitingPodsForNode("node2") != nil {
        t.Error("Expected list of nominated Pods for node2 to be empty.")
    }
}

func TestUnschedulablePodsMap(t *testing.T) {
    var pods = []*v1.Pod{
        {
            ObjectMeta: metav1.ObjectMeta{
                Name:      "p0",
                Namespace: "ns1",
                Annotations: map[string]string{
                    "annot1": "val1",
                },
            },
            Status: v1.PodStatus{
                NominatedNodeName: "node1",
            },
        },
        {
            ObjectMeta: metav1.ObjectMeta{
                Name:      "p1",
                Namespace: "ns1",
                Annotations: map[string]string{
                    "annot": "val",
                },
            },
        },
        {
            ObjectMeta: metav1.ObjectMeta{
                Name:      "p2",
                Namespace: "ns2",
                Annotations: map[string]string{
                    "annot2": "val2", "annot3": "val3",
                },
            },
            Status: v1.PodStatus{
                NominatedNodeName: "node3",
            },
        },
        {
            ObjectMeta: metav1.ObjectMeta{
                Name:      "p3",
                Namespace: "ns4",
            },
            Status: v1.PodStatus{
                NominatedNodeName: "node1",
            },
        },
    }
    var updatedPods = make([]*v1.Pod, len(pods))
    updatedPods[0] = pods[0].DeepCopy()
    updatedPods[1] = pods[1].DeepCopy()
    updatedPods[3] = pods[3].DeepCopy()

    tests := []struct {
        name                   string
        podsToAdd              []*v1.Pod
        expectedMapAfterAdd    map[string]*v1.Pod
        podsToUpdate           []*v1.Pod
        expectedMapAfterUpdate map[string]*v1.Pod
        podsToDelete           []*v1.Pod
        expectedMapAfterDelete map[string]*v1.Pod
    }{
        {
            name:      "create, update, delete subset of pods",
            podsToAdd: []*v1.Pod{pods[0], pods[1], pods[2], pods[3]},
            expectedMapAfterAdd: map[string]*v1.Pod{
                util.GetPodFullName(pods[0]): pods[0],
                util.GetPodFullName(pods[1]): pods[1],
                util.GetPodFullName(pods[2]): pods[2],
                util.GetPodFullName(pods[3]): pods[3],
            },
            podsToUpdate: []*v1.Pod{updatedPods[0]},
            expectedMapAfterUpdate: map[string]*v1.Pod{
                util.GetPodFullName(pods[0]): updatedPods[0],
                util.GetPodFullName(pods[1]): pods[1],
                util.GetPodFullName(pods[2]): pods[2],
                util.GetPodFullName(pods[3]): pods[3],
            },
            podsToDelete: []*v1.Pod{pods[0], pods[1]},
            expectedMapAfterDelete: map[string]*v1.Pod{
                util.GetPodFullName(pods[2]): pods[2],
                util.GetPodFullName(pods[3]): pods[3],
            },
        },
        {
            name:      "create, update, delete all",
            podsToAdd: []*v1.Pod{pods[0], pods[3]},
            expectedMapAfterAdd: map[string]*v1.Pod{
                util.GetPodFullName(pods[0]): pods[0],
                util.GetPodFullName(pods[3]): pods[3],
            },
            podsToUpdate: []*v1.Pod{updatedPods[3]},
            expectedMapAfterUpdate: map[string]*v1.Pod{
                util.GetPodFullName(pods[0]): pods[0],
                util.GetPodFullName(pods[3]): updatedPods[3],
            },
            podsToDelete:           []*v1.Pod{pods[0], pods[3]},
            expectedMapAfterDelete: map[string]*v1.Pod{},
        },
        {
            name:      "delete non-existing and existing pods",
            podsToAdd: []*v1.Pod{pods[1], pods[2]},
            expectedMapAfterAdd: map[string]*v1.Pod{
                util.GetPodFullName(pods[1]): pods[1],
                util.GetPodFullName(pods[2]): pods[2],
            },
            podsToUpdate: []*v1.Pod{updatedPods[1]},
            expectedMapAfterUpdate: map[string]*v1.Pod{
                util.GetPodFullName(pods[1]): updatedPods[1],
                util.GetPodFullName(pods[2]): pods[2],
            },
            podsToDelete: []*v1.Pod{pods[2], pods[3]},
            expectedMapAfterDelete: map[string]*v1.Pod{
                util.GetPodFullName(pods[1]): updatedPods[1],
            },
        },
    }

    for _, test := range tests {
        t.Run(test.name, func(t *testing.T) {
            upm := newUnschedulablePodsMap()
            for _, p := range test.podsToAdd {
                upm.addOrUpdate(p)
            }
            if !reflect.DeepEqual(upm.pods, test.expectedMapAfterAdd) {
                t.Errorf("Unexpected map after adding pods. Expected: %v, got: %v",
                    test.expectedMapAfterAdd, upm.pods)
            }

            if len(test.podsToUpdate) > 0 {
                for _, p := range test.podsToUpdate {
                    upm.addOrUpdate(p)
                }
                if !reflect.DeepEqual(upm.pods, test.expectedMapAfterUpdate) {
                    t.Errorf("Unexpected map after updating pods. Expected: %v, got: %v",
                        test.expectedMapAfterUpdate, upm.pods)
                }
            }
            for _, p := range test.podsToDelete {
                upm.delete(p)
            }
            if !reflect.DeepEqual(upm.pods, test.expectedMapAfterDelete) {
                t.Errorf("Unexpected map after deleting pods. Expected: %v, got: %v",
                    test.expectedMapAfterDelete, upm.pods)
            }
            upm.clear()
            if len(upm.pods) != 0 {
                t.Errorf("Expected the map to be empty, but has %v elements.", len(upm.pods))
            }
        })
    }
}

func TestSchedulingQueue_Close(t *testing.T) {
    tests := []struct {
        name        string
        q           SchedulingQueue
        expectedErr error
    }{
        {
            name:        "FIFO close",
            q:           NewFIFO(),
            expectedErr: fmt.Errorf(queueClosed),
        },
        {
            name:        "PriorityQueue close",
            q:           NewPriorityQueue(),
            expectedErr: fmt.Errorf(queueClosed),
        },
    }
    for _, test := range tests {
        t.Run(test.name, func(t *testing.T) {
            wg := sync.WaitGroup{}
            wg.Add(1)
            go func() {
                defer wg.Done()
                pod, err := test.q.Pop()
                if err.Error() != test.expectedErr.Error() {
                    t.Errorf("Expected err %q from Pop() if queue is closed, but got %q", test.expectedErr.Error(), err.Error())
                }
                if pod != nil {
                    t.Errorf("Expected pod nil from Pop() if queue is closed, but got: %v", pod)
                }
            }()
            test.q.Close()
            wg.Wait()
        })
    }
}