Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-14 02:43:36 +00:00)

Commit: vendor files
vendor/k8s.io/kubernetes/pkg/kubelet/preemption/BUILD  (generated, vendored, new file, 57 lines)
@@ -0,0 +1,57 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = ["preemption.go"],
    importpath = "k8s.io/kubernetes/pkg/kubelet/preemption",
    deps = [
        "//pkg/api/v1/resource:go_default_library",
        "//pkg/apis/core/v1/helper/qos:go_default_library",
        "//pkg/features:go_default_library",
        "//pkg/kubelet/events:go_default_library",
        "//pkg/kubelet/eviction:go_default_library",
        "//pkg/kubelet/lifecycle:go_default_library",
        "//pkg/kubelet/types:go_default_library",
        "//pkg/kubelet/util/format:go_default_library",
        "//plugin/pkg/scheduler/algorithm:go_default_library",
        "//plugin/pkg/scheduler/algorithm/predicates:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/client-go/tools/record:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)

go_test(
    name = "go_default_test",
    srcs = ["preemption_test.go"],
    importpath = "k8s.io/kubernetes/pkg/kubelet/preemption",
    library = ":go_default_library",
    deps = [
        "//pkg/apis/core:go_default_library",
        "//pkg/kubelet/types:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/client-go/tools/record:go_default_library",
    ],
)
vendor/k8s.io/kubernetes/pkg/kubelet/preemption/preemption.go  (generated, vendored, new file, 265 lines)
@@ -0,0 +1,265 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package preemption

import (
	"fmt"
	"math"

	"github.com/golang/glog"
	"k8s.io/api/core/v1"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/client-go/tools/record"
	"k8s.io/kubernetes/pkg/api/v1/resource"
	v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
	"k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/pkg/kubelet/events"
	"k8s.io/kubernetes/pkg/kubelet/eviction"
	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
	"k8s.io/kubernetes/pkg/kubelet/util/format"
	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
)

const message = "Preempted in order to admit critical pod"

// CriticalPodAdmissionHandler is an AdmissionFailureHandler that handles admission failures for critical pods.
// If the ONLY admission failures are due to insufficient resources, then CriticalPodAdmissionHandler evicts pods
// so that the critical pod can be admitted. For evictions, the CriticalPodAdmissionHandler evicts a set of pods that
// frees up the required resource requests. The set of pods is chosen to minimize impact, prioritized in the order:
// minimal impact for guaranteed pods > minimal impact for burstable pods > minimal impact for besteffort pods.
// Minimal impact is defined as: fewest pods evicted > fewest total requests of pods.
// Finding the fewest total requests of pods is done on a best-effort basis.
type CriticalPodAdmissionHandler struct {
	getPodsFunc eviction.ActivePodsFunc
	killPodFunc eviction.KillPodFunc
	recorder    record.EventRecorder
}

var _ lifecycle.AdmissionFailureHandler = &CriticalPodAdmissionHandler{}

func NewCriticalPodAdmissionHandler(getPodsFunc eviction.ActivePodsFunc, killPodFunc eviction.KillPodFunc, recorder record.EventRecorder) *CriticalPodAdmissionHandler {
	return &CriticalPodAdmissionHandler{
		getPodsFunc: getPodsFunc,
		killPodFunc: killPodFunc,
		recorder:    recorder,
	}
}

// HandleAdmissionFailure gracefully handles admission rejection and, in some cases, evicts other pods
// so that the previously rejected pod can be admitted despite its failure.
func (c *CriticalPodAdmissionHandler) HandleAdmissionFailure(pod *v1.Pod, failureReasons []algorithm.PredicateFailureReason) (bool, []algorithm.PredicateFailureReason, error) {
	if !kubetypes.IsCriticalPod(pod) || !utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalCriticalPodAnnotation) {
		return false, failureReasons, nil
	}
	// InsufficientResourceError is not a reason to reject a critical pod.
	// Instead of rejecting, we free up resources to admit it, if no other reasons for rejection exist.
	nonResourceReasons := []algorithm.PredicateFailureReason{}
	resourceReasons := []*admissionRequirement{}
	for _, reason := range failureReasons {
		if r, ok := reason.(*predicates.InsufficientResourceError); ok {
			resourceReasons = append(resourceReasons, &admissionRequirement{
				resourceName: r.ResourceName,
				quantity:     r.GetInsufficientAmount(),
			})
		} else {
			nonResourceReasons = append(nonResourceReasons, reason)
		}
	}
	if len(nonResourceReasons) > 0 {
		// Return only reasons that are not resource related, since critical pods cannot fail admission for resource reasons.
		return false, nonResourceReasons, nil
	}
	err := c.evictPodsToFreeRequests(admissionRequirementList(resourceReasons))
	// if no error is returned, preemption succeeded and the pod is safe to admit.
	return err == nil, nil, err
}
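
// A minimal usage sketch (illustrative only): the kubelet wires this handler into its pod
// admission path; the activePodsFunc, killPodFunc, and recorder names below are hypothetical
// stand-ins for that wiring (the fakes in preemption_test.go follow the same pattern).
//
//	handler := NewCriticalPodAdmissionHandler(activePodsFunc, killPodFunc, recorder)
//	admit, _, err := handler.HandleAdmissionFailure(pod, failureReasons)
//	if err == nil && admit {
//		// enough pods were evicted, so the critical pod can now be admitted
//	}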

// evictPodsToFreeRequests takes a list of insufficient resources, and attempts to free them by evicting pods
// based on requests. For example, if the only insufficient resource is 200Mb of memory, this function could
// evict a pod with request=250Mb.
func (c *CriticalPodAdmissionHandler) evictPodsToFreeRequests(insufficientResources admissionRequirementList) error {
	podsToPreempt, err := getPodsToPreempt(c.getPodsFunc(), insufficientResources)
	if err != nil {
		return fmt.Errorf("preemption: error finding a set of pods to preempt: %v", err)
	}
	glog.Infof("preemption: attempting to evict pods %v, in order to free up resources: %s", podsToPreempt, insufficientResources.toString())
	for _, pod := range podsToPreempt {
		status := v1.PodStatus{
			Phase:   v1.PodFailed,
			Message: message,
			Reason:  events.PreemptContainer,
		}
		// record that we are evicting the pod
		c.recorder.Eventf(pod, v1.EventTypeWarning, events.PreemptContainer, message)
		// this is a blocking call and should only return when the pod and its containers are killed.
		err := c.killPodFunc(pod, status, nil)
		if err != nil {
			return fmt.Errorf("preemption: failed to evict pod %s: %v", format.Pod(pod), err)
		}
		glog.Infof("preemption: pod %s evicted successfully", format.Pod(pod))
	}
	return nil
}

// getPodsToPreempt returns a list of pods that could be preempted to free requests >= requirements
func getPodsToPreempt(pods []*v1.Pod, requirements admissionRequirementList) ([]*v1.Pod, error) {
	bestEffortPods, burstablePods, guaranteedPods := sortPodsByQOS(pods)

	// make sure that pods exist to reclaim the requirements
	unableToMeetRequirements := requirements.subtract(append(append(bestEffortPods, burstablePods...), guaranteedPods...)...)
	if len(unableToMeetRequirements) > 0 {
		return nil, fmt.Errorf("no set of running pods found to reclaim resources: %v", unableToMeetRequirements.toString())
	}
	// find the guaranteed pods we would need to evict if we already evicted ALL burstable and besteffort pods.
	guaranteedToEvict, err := getPodsToPreemptByDistance(guaranteedPods, requirements.subtract(append(bestEffortPods, burstablePods...)...))
	if err != nil {
		return nil, err
	}
	// Find the burstable pods we would need to evict if we already evicted ALL besteffort pods, and the required guaranteed pods.
	burstableToEvict, err := getPodsToPreemptByDistance(burstablePods, requirements.subtract(append(bestEffortPods, guaranteedToEvict...)...))
	if err != nil {
		return nil, err
	}
	// Find the besteffort pods we would need to evict if we already evicted the required guaranteed and burstable pods.
	bestEffortToEvict, err := getPodsToPreemptByDistance(bestEffortPods, requirements.subtract(append(burstableToEvict, guaranteedToEvict...)...))
	if err != nil {
		return nil, err
	}
	return append(append(bestEffortToEvict, burstableToEvict...), guaranteedToEvict...), nil
}
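
// Worked example (mirrors the "evict guaranteed when we have to" case in preemption_test.go):
// with non-critical pods requesting 0Mi (besteffort), 100Mi and 300Mi (burstable), and 100Mi
// and 300Mi (guaranteed) of memory, and an insufficient-memory requirement of 550Mi:
// evicting every besteffort and burstable pod would free only 400Mi, so 150Mi must come from
// the guaranteed tier, where the 300Mi pod is the closest fit; with that accounted for, 250Mi
// is still needed from the burstable tier, which the 300Mi burstable pod covers; nothing is
// left for the besteffort pass, so only the two high-request pods are returned.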

// getPodsToPreemptByDistance finds the pods whose requests meet the admission requirements.
// It chooses pods that minimize the "distance" to the requirements.
// If more than one pod fulfills the remaining requirements,
// it chooses the pod with the smaller resource request.
// By repeatedly choosing the pod that fulfills as much of the requirements as possible,
// this method attempts to minimize the number of pods returned.
func getPodsToPreemptByDistance(pods []*v1.Pod, requirements admissionRequirementList) ([]*v1.Pod, error) {
	podsToEvict := []*v1.Pod{}
	// evict pods by shortest distance from remaining requirements, updating requirements every round.
	for len(requirements) > 0 {
		if len(pods) == 0 {
			return nil, fmt.Errorf("no set of running pods found to reclaim resources: %v", requirements.toString())
		}
		// all distances must be less than len(requirements), because the max distance for a single requirement is 1
		bestDistance := float64(len(requirements) + 1)
		bestPodIndex := 0
		// Find the pod with the smallest distance from requirements
		// Or, in the case of two equidistant pods, find the pod with "smaller" resource requests.
		for i, pod := range pods {
			dist := requirements.distance(pod)
			if dist < bestDistance || (bestDistance == dist && smallerResourceRequest(pod, pods[bestPodIndex])) {
				bestDistance = dist
				bestPodIndex = i
			}
		}
		// subtract the pod from requirements, and transfer the pod from input-pods to pods-to-evict
		requirements = requirements.subtract(pods[bestPodIndex])
		podsToEvict = append(podsToEvict, pods[bestPodIndex])
		pods[bestPodIndex] = pods[len(pods)-1]
		pods = pods[:len(pods)-1]
	}
	return podsToEvict, nil
}
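
// Illustrative walk-through with hypothetical numbers: given a single 300Mi memory requirement
// and candidate pods requesting 250Mi and 100Mi, the first round picks the 250Mi pod
// (distance (50/300)^2 beats (200/300)^2), leaving a 50Mi requirement; the second round picks
// the 100Mi pod, and the loop ends with both pods selected.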

type admissionRequirement struct {
	resourceName v1.ResourceName
	quantity     int64
}

type admissionRequirementList []*admissionRequirement

// distance returns the distance of the pod's requests from the admission requirements.
// The distance is measured by the fraction of each requirement satisfied by the pod,
// so that every requirement is weighted equally, regardless of absolute magnitude.
func (a admissionRequirementList) distance(pod *v1.Pod) float64 {
	dist := float64(0)
	for _, req := range a {
		remainingRequest := float64(req.quantity - resource.GetResourceRequest(pod, req.resourceName))
		if remainingRequest < 0 {
			remainingRequest = 0
		}
		dist += math.Pow(remainingRequest/float64(req.quantity), 2)
	}
	return dist
}
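
// Example (matches the "no requests, some requirements" case in preemption_test.go): for
// requirements of 100m CPU, 100Mi memory, and 1 pod, a pod with no resource requests scores
// (100/100)^2 + (100/100)^2 = 2; the pods term contributes nothing, as the expected value of 2
// in that test shows, because the pod itself counts toward the pods requirement.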

// subtract returns a new admissionRequirementList containing the requirements that would remain
// if the provided pods were preempted.
func (a admissionRequirementList) subtract(pods ...*v1.Pod) admissionRequirementList {
	newList := []*admissionRequirement{}
	for _, req := range a {
		newQuantity := req.quantity
		for _, pod := range pods {
			newQuantity -= resource.GetResourceRequest(pod, req.resourceName)
		}
		if newQuantity > 0 {
			newList = append(newList, &admissionRequirement{
				resourceName: req.resourceName,
				quantity:     newQuantity,
			})
		}
	}
	return newList
}
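
// Example (matches the "subtract lower requests than requirements" case in preemption_test.go):
// subtracting the burstable pod (100m CPU / 100Mi memory requests) from requirements of
// 200m CPU, 200Mi memory, and 1 pod leaves 100m CPU and 100Mi memory; the pods requirement
// drops to zero and, since only positive quantities are kept, is removed from the list.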

func (a admissionRequirementList) toString() string {
	s := "["
	for _, req := range a {
		s += fmt.Sprintf("(res: %v, q: %d), ", req.resourceName, req.quantity)
	}
	return s + "]"
}

// returns lists containing non-critical besteffort, burstable, and guaranteed pods
func sortPodsByQOS(pods []*v1.Pod) (bestEffort, burstable, guaranteed []*v1.Pod) {
	for _, pod := range pods {
		if !kubetypes.IsCriticalPod(pod) {
			switch v1qos.GetPodQOS(pod) {
			case v1.PodQOSBestEffort:
				bestEffort = append(bestEffort, pod)
			case v1.PodQOSBurstable:
				burstable = append(burstable, pod)
			case v1.PodQOSGuaranteed:
				guaranteed = append(guaranteed, pod)
			default:
			}
		}
	}
	return
}

// returns true if pod1 has a smaller request than pod2
func smallerResourceRequest(pod1 *v1.Pod, pod2 *v1.Pod) bool {
	priorityList := []v1.ResourceName{
		v1.ResourceNvidiaGPU,
		v1.ResourceMemory,
		v1.ResourceCPU,
	}
	for _, res := range priorityList {
		req1 := resource.GetResourceRequest(pod1, res)
		req2 := resource.GetResourceRequest(pod2, res)
		if req1 < req2 {
			return true
		} else if req1 > req2 {
			return false
		}
	}
	return true
}
vendor/k8s.io/kubernetes/pkg/kubelet/preemption/preemption_test.go  (generated, vendored, new file, 480 lines)
@@ -0,0 +1,480 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package preemption

import (
	"fmt"
	"testing"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/record"
	kubeapi "k8s.io/kubernetes/pkg/apis/core"
	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
)

const (
	critical              = "critical"
	bestEffort            = "bestEffort"
	burstable             = "burstable"
	highRequestBurstable  = "high-request-burstable"
	guaranteed            = "guaranteed"
	highRequestGuaranteed = "high-request-guaranteed"
	tinyBurstable         = "tiny"
	maxPods               = 110
)

type fakePodKiller struct {
	killedPods []*v1.Pod
}

func newFakePodKiller() *fakePodKiller {
	return &fakePodKiller{killedPods: []*v1.Pod{}}
}

func (f *fakePodKiller) clear() {
	f.killedPods = []*v1.Pod{}
}

func (f *fakePodKiller) getKilledPods() []*v1.Pod {
	return f.killedPods
}

func (f *fakePodKiller) killPodNow(pod *v1.Pod, status v1.PodStatus, gracePeriodOverride *int64) error {
	f.killedPods = append(f.killedPods, pod)
	return nil
}

type fakePodProvider struct {
	pods []*v1.Pod
}

func newFakePodProvider() *fakePodProvider {
	return &fakePodProvider{pods: []*v1.Pod{}}
}

func (f *fakePodProvider) setPods(pods []*v1.Pod) {
	f.pods = pods
}

func (f *fakePodProvider) getPods() []*v1.Pod {
	return f.pods
}

func getTestCriticalPodAdmissionHandler(podProvider *fakePodProvider, podKiller *fakePodKiller) *CriticalPodAdmissionHandler {
	return &CriticalPodAdmissionHandler{
		getPodsFunc: podProvider.getPods,
		killPodFunc: podKiller.killPodNow,
		recorder:    &record.FakeRecorder{},
	}
}

func TestEvictPodsToFreeRequests(t *testing.T) {
	type testRun struct {
		testName              string
		inputPods             []*v1.Pod
		insufficientResources admissionRequirementList
		expectErr             bool
		expectedOutput        []*v1.Pod
	}
	podProvider := newFakePodProvider()
	podKiller := newFakePodKiller()
	criticalPodAdmissionHandler := getTestCriticalPodAdmissionHandler(podProvider, podKiller)
	allPods := getTestPods()
	runs := []testRun{
		{
			testName:              "critical pods cannot be preempted",
			inputPods:             []*v1.Pod{allPods[critical]},
			insufficientResources: getAdmissionRequirementList(0, 0, 1),
			expectErr:             true,
			expectedOutput:        nil,
		},
		{
			testName:              "best effort pods are not preempted when attempting to free resources",
			inputPods:             []*v1.Pod{allPods[bestEffort]},
			insufficientResources: getAdmissionRequirementList(0, 1, 0),
			expectErr:             true,
			expectedOutput:        nil,
		},
		{
			testName: "multiple pods evicted",
			inputPods: []*v1.Pod{
				allPods[critical], allPods[bestEffort], allPods[burstable], allPods[highRequestBurstable],
				allPods[guaranteed], allPods[highRequestGuaranteed]},
			insufficientResources: getAdmissionRequirementList(0, 550, 0),
			expectErr:             false,
			expectedOutput:        []*v1.Pod{allPods[highRequestBurstable], allPods[highRequestGuaranteed]},
		},
	}
	for _, r := range runs {
		podProvider.setPods(r.inputPods)
		outErr := criticalPodAdmissionHandler.evictPodsToFreeRequests(r.insufficientResources)
		outputPods := podKiller.getKilledPods()
		if !r.expectErr && outErr != nil {
			t.Errorf("evictPodsToFreeRequests returned an unexpected error during the %s test. Err: %v", r.testName, outErr)
		} else if r.expectErr && outErr == nil {
			t.Errorf("evictPodsToFreeRequests expected an error but returned a successful output=%v during the %s test.", outputPods, r.testName)
		} else if !podListEqual(r.expectedOutput, outputPods) {
			t.Errorf("evictPodsToFreeRequests expected %v but got %v during the %s test.", r.expectedOutput, outputPods, r.testName)
		}
		podKiller.clear()
	}
}

func BenchmarkGetPodsToPreempt(t *testing.B) {
	allPods := getTestPods()
	inputPods := []*v1.Pod{}
	for i := 0; i < maxPods; i++ {
		inputPods = append(inputPods, allPods[tinyBurstable])
	}
	for n := 0; n < t.N; n++ {
		getPodsToPreempt(inputPods, admissionRequirementList([]*admissionRequirement{
			{
				resourceName: v1.ResourceCPU,
				quantity:     parseCPUToInt64("110m"),
			}}))
	}
}

func TestGetPodsToPreempt(t *testing.T) {
	type testRun struct {
		testName              string
		inputPods             []*v1.Pod
		insufficientResources admissionRequirementList
		expectErr             bool
		expectedOutput        []*v1.Pod
	}
	allPods := getTestPods()
	runs := []testRun{
		{
			testName:              "no requirements",
			inputPods:             []*v1.Pod{},
			insufficientResources: getAdmissionRequirementList(0, 0, 0),
			expectErr:             false,
			expectedOutput:        []*v1.Pod{},
		},
		{
			testName:              "no pods",
			inputPods:             []*v1.Pod{},
			insufficientResources: getAdmissionRequirementList(0, 0, 1),
			expectErr:             true,
			expectedOutput:        nil,
		},
		{
			testName:              "equal pods and resources requirements",
			inputPods:             []*v1.Pod{allPods[burstable]},
			insufficientResources: getAdmissionRequirementList(100, 100, 1),
			expectErr:             false,
			expectedOutput:        []*v1.Pod{allPods[burstable]},
		},
		{
			testName:              "higher requirements than pod requests",
			inputPods:             []*v1.Pod{allPods[burstable]},
			insufficientResources: getAdmissionRequirementList(200, 200, 2),
			expectErr:             true,
			expectedOutput:        nil,
		},
		{
			testName:              "choose between bestEffort and burstable",
			inputPods:             []*v1.Pod{allPods[burstable], allPods[bestEffort]},
			insufficientResources: getAdmissionRequirementList(0, 0, 1),
			expectErr:             false,
			expectedOutput:        []*v1.Pod{allPods[bestEffort]},
		},
		{
			testName:              "choose between burstable and guaranteed",
			inputPods:             []*v1.Pod{allPods[burstable], allPods[guaranteed]},
			insufficientResources: getAdmissionRequirementList(0, 0, 1),
			expectErr:             false,
			expectedOutput:        []*v1.Pod{allPods[burstable]},
		},
		{
			testName:              "choose lower request burstable if it meets requirements",
			inputPods:             []*v1.Pod{allPods[bestEffort], allPods[highRequestBurstable], allPods[burstable]},
			insufficientResources: getAdmissionRequirementList(100, 100, 0),
			expectErr:             false,
			expectedOutput:        []*v1.Pod{allPods[burstable]},
		},
		{
			testName:              "choose higher request burstable if lower does not meet requirements",
			inputPods:             []*v1.Pod{allPods[bestEffort], allPods[burstable], allPods[highRequestBurstable]},
			insufficientResources: getAdmissionRequirementList(150, 150, 0),
			expectErr:             false,
			expectedOutput:        []*v1.Pod{allPods[highRequestBurstable]},
		},
		{
			testName:              "multiple pods required",
			inputPods:             []*v1.Pod{allPods[bestEffort], allPods[burstable], allPods[highRequestBurstable], allPods[guaranteed], allPods[highRequestGuaranteed]},
			insufficientResources: getAdmissionRequirementList(350, 350, 0),
			expectErr:             false,
			expectedOutput:        []*v1.Pod{allPods[burstable], allPods[highRequestBurstable]},
		},
		{
			testName:              "evict guaranteed when we have to, and don't evict the extra burstable",
			inputPods:             []*v1.Pod{allPods[bestEffort], allPods[burstable], allPods[highRequestBurstable], allPods[guaranteed], allPods[highRequestGuaranteed]},
			insufficientResources: getAdmissionRequirementList(0, 550, 0),
			expectErr:             false,
			expectedOutput:        []*v1.Pod{allPods[highRequestBurstable], allPods[highRequestGuaranteed]},
		},
	}
	for _, r := range runs {
		outputPods, outErr := getPodsToPreempt(r.inputPods, r.insufficientResources)
		if !r.expectErr && outErr != nil {
			t.Errorf("getPodsToPreempt returned an unexpected error during the %s test. Err: %v", r.testName, outErr)
		} else if r.expectErr && outErr == nil {
			t.Errorf("getPodsToPreempt expected an error but returned a successful output=%v during the %s test.", outputPods, r.testName)
		} else if !podListEqual(r.expectedOutput, outputPods) {
			t.Errorf("getPodsToPreempt expected %v but got %v during the %s test.", r.expectedOutput, outputPods, r.testName)
		}
	}
}

func TestAdmissionRequirementsDistance(t *testing.T) {
	type testRun struct {
		testName       string
		requirements   admissionRequirementList
		inputPod       *v1.Pod
		expectedOutput float64
	}
	allPods := getTestPods()
	runs := []testRun{
		{
			testName:       "no requirements",
			requirements:   getAdmissionRequirementList(0, 0, 0),
			inputPod:       allPods[burstable],
			expectedOutput: 0,
		},
		{
			testName:       "no requests, some requirements",
			requirements:   getAdmissionRequirementList(100, 100, 1),
			inputPod:       allPods[bestEffort],
			expectedOutput: 2,
		},
		{
			testName:       "equal requests and requirements",
			requirements:   getAdmissionRequirementList(100, 100, 1),
			inputPod:       allPods[burstable],
			expectedOutput: 0,
		},
		{
			testName:       "higher requests than requirements",
			requirements:   getAdmissionRequirementList(50, 50, 0),
			inputPod:       allPods[burstable],
			expectedOutput: 0,
		},
	}
	for _, run := range runs {
		output := run.requirements.distance(run.inputPod)
		if output != run.expectedOutput {
			t.Errorf("expected: %f, got: %f for %s test", run.expectedOutput, output, run.testName)
		}
	}
}

func TestAdmissionRequirementsSubtract(t *testing.T) {
	type testRun struct {
		testName       string
		initial        admissionRequirementList
		inputPod       *v1.Pod
		expectedOutput admissionRequirementList
	}
	allPods := getTestPods()
	runs := []testRun{
		{
			testName:       "subtract a pod from no requirements",
			initial:        getAdmissionRequirementList(0, 0, 0),
			inputPod:       allPods[burstable],
			expectedOutput: getAdmissionRequirementList(0, 0, 0),
		},
		{
			testName:       "subtract no requests from some requirements",
			initial:        getAdmissionRequirementList(100, 100, 1),
			inputPod:       allPods[bestEffort],
			expectedOutput: getAdmissionRequirementList(100, 100, 0),
		},
		{
			testName:       "equal requests and requirements",
			initial:        getAdmissionRequirementList(100, 100, 1),
			inputPod:       allPods[burstable],
			expectedOutput: getAdmissionRequirementList(0, 0, 0),
		},
		{
			testName:       "subtract higher requests than requirements",
			initial:        getAdmissionRequirementList(50, 50, 0),
			inputPod:       allPods[burstable],
			expectedOutput: getAdmissionRequirementList(0, 0, 0),
		},
		{
			testName:       "subtract lower requests than requirements",
			initial:        getAdmissionRequirementList(200, 200, 1),
			inputPod:       allPods[burstable],
			expectedOutput: getAdmissionRequirementList(100, 100, 0),
		},
	}
	for _, run := range runs {
		output := run.initial.subtract(run.inputPod)
		if !admissionRequirementListEqual(output, run.expectedOutput) {
			t.Errorf("expected: %s, got: %s for %s test", run.expectedOutput.toString(), output.toString(), run.testName)
		}
	}
}

func getTestPods() map[string]*v1.Pod {
	allPods := map[string]*v1.Pod{
		tinyBurstable: getPodWithResources(tinyBurstable, v1.ResourceRequirements{
			Requests: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("1m"),
				v1.ResourceMemory: resource.MustParse("1Mi"),
			},
		}),
		bestEffort: getPodWithResources(bestEffort, v1.ResourceRequirements{}),
		critical: getPodWithResources(critical, v1.ResourceRequirements{
			Requests: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("100m"),
				v1.ResourceMemory: resource.MustParse("100Mi"),
			},
		}),
		burstable: getPodWithResources(burstable, v1.ResourceRequirements{
			Requests: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("100m"),
				v1.ResourceMemory: resource.MustParse("100Mi"),
			},
		}),
		guaranteed: getPodWithResources(guaranteed, v1.ResourceRequirements{
			Requests: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("100m"),
				v1.ResourceMemory: resource.MustParse("100Mi"),
			},
			Limits: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("100m"),
				v1.ResourceMemory: resource.MustParse("100Mi"),
			},
		}),
		highRequestBurstable: getPodWithResources(highRequestBurstable, v1.ResourceRequirements{
			Requests: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("300m"),
				v1.ResourceMemory: resource.MustParse("300Mi"),
			},
		}),
		highRequestGuaranteed: getPodWithResources(highRequestGuaranteed, v1.ResourceRequirements{
			Requests: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("300m"),
				v1.ResourceMemory: resource.MustParse("300Mi"),
			},
			Limits: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("300m"),
				v1.ResourceMemory: resource.MustParse("300Mi"),
			},
		}),
	}
	allPods[critical].Namespace = kubeapi.NamespaceSystem
	allPods[critical].Annotations[kubetypes.CriticalPodAnnotationKey] = ""
	return allPods
}

func getPodWithResources(name string, requests v1.ResourceRequirements) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: name,
			Annotations:  map[string]string{},
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:      fmt.Sprintf("%s-container", name),
					Resources: requests,
				},
			},
		},
	}
}

func parseCPUToInt64(res string) int64 {
	r := resource.MustParse(res)
	return (&r).MilliValue()
}

func parseNonCpuResourceToInt64(res string) int64 {
	r := resource.MustParse(res)
	return (&r).Value()
}

func getAdmissionRequirementList(cpu, memory, pods int) admissionRequirementList {
	reqs := []*admissionRequirement{}
	if cpu > 0 {
		reqs = append(reqs, &admissionRequirement{
			resourceName: v1.ResourceCPU,
			quantity:     parseCPUToInt64(fmt.Sprintf("%dm", cpu)),
		})
	}
	if memory > 0 {
		reqs = append(reqs, &admissionRequirement{
			resourceName: v1.ResourceMemory,
			quantity:     parseNonCpuResourceToInt64(fmt.Sprintf("%dMi", memory)),
		})
	}
	if pods > 0 {
		reqs = append(reqs, &admissionRequirement{
			resourceName: v1.ResourcePods,
			quantity:     int64(pods),
		})
	}
	return admissionRequirementList(reqs)
}

// admissionRequirementListEqual checks whether the two lists contain the same requirements
// (resource name and quantity), ignoring order. It is not correct if a list contains
// duplicate entries: for example, comparing [a, a, b] with [a, b, b] would return true.
func admissionRequirementListEqual(list1 admissionRequirementList, list2 admissionRequirementList) bool {
	if len(list1) != len(list2) {
		return false
	}
	for _, a := range list1 {
		contains := false
		for _, b := range list2 {
			if a.resourceName == b.resourceName && a.quantity == b.quantity {
				contains = true
			}
		}
		if !contains {
			return false
		}
	}
	return true
}

// podListEqual checks if the lists' contents contain all of the same elements.
func podListEqual(list1 []*v1.Pod, list2 []*v1.Pod) bool {
	if len(list1) != len(list2) {
		return false
	}

	m := map[*v1.Pod]int{}
	for _, val := range list1 {
		m[val] = m[val] + 1
	}
	for _, val := range list2 {
		m[val] = m[val] - 1
	}
	for _, v := range m {
		if v != 0 {
			return false
		}
	}
	return true
}