Fresh dep ensure

Mike Cronce
2018-11-26 13:23:56 -05:00
parent 93cb8a04d7
commit 407478ab9a
9016 changed files with 551394 additions and 279685 deletions

View File

@@ -1,4 +1,4 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
@@ -11,62 +11,48 @@ go_library(
"predicates.go",
"preemption.go",
"priorities.go",
"rescheduler.go",
"resource_quota.go",
"taint_based_evictions.go",
"taints.go",
"ubernetes_lite.go",
"ubernetes_lite_volumes.go",
],
importpath = "k8s.io/kubernetes/test/e2e/scheduling",
visibility = ["//visibility:public"],
deps = [
"//pkg/api/v1/pod:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/apis/extensions:go_default_library",
"//pkg/apis/scheduling:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/quota/evaluator/core:go_default_library",
"//pkg/quota/v1/evaluator/core:go_default_library",
"//pkg/scheduler/algorithm/priorities/util:go_default_library",
"//pkg/util/version:go_default_library",
"//pkg/scheduler/api:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
"//staging/src/k8s.io/api/scheduling/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/providers/gce:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/google.golang.org/api/compute/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/scheduling/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["taints_test.go"],
embed = [":go_default_library"],
deps = [
"//test/e2e/framework:go_default_library",
"//test/utils:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
],
)

View File

@@ -35,6 +35,10 @@ import (
_ "github.com/stretchr/testify/assert"
)
const (
defaultTimeout = 3 * time.Minute
)
var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
var cs clientset.Interface
var nodeList *v1.NodeList
@@ -42,7 +46,6 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
var systemPodsNo int
var ns string
f := framework.NewDefaultFramework("equivalence-cache")
ignoreLabels := framework.ImagePullerLabels
BeforeEach(func() {
cs = f.ClientSet
@@ -56,7 +59,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
// Every test case in this suite assumes that cluster add-on pods stay stable and
// cannot be run in parallel with any other test that touches Nodes or Pods.
// It is so because we need to have precise control on what's running in the cluster.
systemPods, err := framework.GetPodsInNamespace(cs, ns, ignoreLabels)
systemPods, err := framework.GetPodsInNamespace(cs, ns, map[string]string{})
Expect(err).NotTo(HaveOccurred())
systemPodsNo = 0
for _, pod := range systemPods {
@@ -65,7 +68,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
}
}
err = framework.WaitForPodsRunningReady(cs, api.NamespaceSystem, int32(systemPodsNo), int32(systemPodsNo), framework.PodReadyBeforeTimeout, ignoreLabels)
err = framework.WaitForPodsRunningReady(cs, api.NamespaceSystem, int32(systemPodsNo), int32(systemPodsNo), framework.PodReadyBeforeTimeout, map[string]string{})
Expect(err).NotTo(HaveOccurred())
for _, node := range nodeList.Items {

View File

@@ -18,6 +18,7 @@ package scheduling
import (
"fmt"
"reflect"
"time"
"k8s.io/api/core/v1"
@@ -81,6 +82,7 @@ var _ = SIGDescribe("LimitRange", func() {
By("Fetching the LimitRange to ensure it has proper values")
limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Get(limitRange.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
expected := v1.ResourceRequirements{Requests: defaultRequest, Limits: defaultLimit}
actual := v1.ResourceRequirements{Requests: limitRange.Spec.Limits[0].DefaultRequest, Limits: limitRange.Spec.Limits[0].Default}
err = equalResourceRequirement(expected, actual)
@@ -140,6 +142,13 @@ var _ = SIGDescribe("LimitRange", func() {
limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Update(limitRange)
Expect(err).NotTo(HaveOccurred())
By("Verifying LimitRange updating is effective")
Expect(wait.Poll(time.Second*2, time.Second*20, func() (bool, error) {
limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Get(limitRange.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
return reflect.DeepEqual(limitRange.Spec.Limits[0].Min, newMin), nil
})).NotTo(HaveOccurred())
By("Creating a Pod with less than former min resources")
pod = f.NewTestPod(podName, getResourceList("10m", "50Mi", "50Gi"), v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
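The poll added above guards against asserting on a stale LimitRange read. A minimal, self-contained sketch of the same wait.Poll pattern, assuming only the k8s.io/apimachinery module (the condition and timings here are placeholders, not the test's real ones):

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	start := time.Now()
	// wait.Poll retries the condition every interval until it returns
	// true, returns an error, or the timeout elapses.
	err := wait.Poll(2*time.Second, 20*time.Second, func() (bool, error) {
		// The test re-reads the LimitRange here and compares
		// Spec.Limits[0].Min against the updated value.
		return time.Since(start) > 5*time.Second, nil // false, nil means retry
	})
	fmt.Println("poll finished:", err) // nil once the condition held
}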

View File

@@ -18,7 +18,6 @@ package scheduling
import (
"os"
"strings"
"time"
"k8s.io/api/core/v1"
@@ -35,7 +34,6 @@ import (
const (
testPodNamePrefix = "nvidia-gpu-"
cosOSImage = "Container-Optimized OS from Google"
// Nvidia driver installation can take upwards of 5 minutes.
driverInstallTimeout = 10 * time.Minute
)
@@ -69,15 +67,12 @@ func makeCudaAdditionDevicePluginTestPod() *v1.Pod {
return testPod
}
func isClusterRunningCOS(f *framework.Framework) bool {
func logOSImages(f *framework.Framework) {
nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
framework.ExpectNoError(err, "getting node list")
for _, node := range nodeList.Items {
if !strings.Contains(node.Status.NodeInfo.OSImage, cosOSImage) {
return false
}
framework.Logf("Nodename: %v, OS Image: %v", node.Name, node.Status.NodeInfo.OSImage)
}
return true
}
func areGPUsAvailableOnAllSchedulableNodes(f *framework.Framework) bool {
@@ -103,7 +98,7 @@ func getGPUsAvailable(f *framework.Framework) int64 {
framework.ExpectNoError(err, "getting node list")
var gpusAvailable int64
for _, node := range nodeList.Items {
if val, ok := node.Status.Capacity[gpuResourceName]; ok {
if val, ok := node.Status.Allocatable[gpuResourceName]; ok {
gpusAvailable += (&val).Value()
}
}
@@ -111,14 +106,7 @@
}
func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *framework.ContainerResourceGatherer {
// Skip the test if the base image is not COS.
// TODO: Add support for other base images.
// CUDA apps require host mounts which is not portable across base images (yet).
framework.Logf("Checking base image")
if !isClusterRunningCOS(f) {
Skip("Nvidia GPU tests are supproted only on Container Optimized OS image currently")
}
framework.Logf("Cluster is running on COS. Proceeding with test")
logOSImages(f)
dsYamlUrlFromEnv := os.Getenv("NVIDIA_DRIVER_INSTALLER_DAEMONSET")
if dsYamlUrlFromEnv != "" {
@@ -149,7 +137,7 @@ func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *fra
var rsgather *framework.ContainerResourceGatherer
if setupResourceGatherer {
framework.Logf("Starting ResourceUsageGather for the created DaemonSet pods.")
rsgather, err = framework.NewResourceUsageGatherer(f.ClientSet, framework.ResourceGathererOptions{InKubemark: false, MasterOnly: false, ResourceDataGatheringPeriod: 2 * time.Second, ProbeDuration: 2 * time.Second, PrintVerboseLogs: true}, pods)
rsgather, err = framework.NewResourceUsageGatherer(f.ClientSet, framework.ResourceGathererOptions{InKubemark: false, Nodes: framework.AllNodes, ResourceDataGatheringPeriod: 2 * time.Second, ProbeDuration: 2 * time.Second, PrintVerboseLogs: true}, pods)
framework.ExpectNoError(err, "creating ResourceUsageGather for the daemonset pods")
go rsgather.StartGatheringData()
}
@@ -163,7 +151,7 @@ func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *fra
return rsgather
}
func testNvidiaGPUsOnCOS(f *framework.Framework) {
func testNvidiaGPUs(f *framework.Framework) {
rsgather := SetupNVIDIAGPUNode(f, true)
framework.Logf("Creating as many pods as there are Nvidia GPUs and have the pods run a CUDA app")
podList := []*v1.Pod{}
@@ -186,7 +174,7 @@ func testNvidiaGPUsOnCOS(f *framework.Framework) {
var _ = SIGDescribe("[Feature:GPUDevicePlugin]", func() {
f := framework.NewDefaultFramework("device-plugin-gpus")
It("run Nvidia GPU Device Plugin tests on Container Optimized OS only", func() {
testNvidiaGPUsOnCOS(f)
It("run Nvidia GPU Device Plugin tests", func() {
testNvidiaGPUs(f)
})
})
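The counting change above (node.Status.Capacity to node.Status.Allocatable) matters because Capacity is the machine total, while Allocatable is what the scheduler may actually place pods against after system and kubelet reservations. A hedged sketch, assuming the k8s.io/api module; the gpuResourceName parameter stands in for whatever resource key the test uses:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// schedulableGPUs counts GPUs the scheduler can hand out on one node.
func schedulableGPUs(node *v1.Node, gpuResourceName v1.ResourceName) int64 {
	if val, ok := node.Status.Allocatable[gpuResourceName]; ok {
		return val.Value()
	}
	return 0
}

func main() {
	// An empty node advertises no allocatable GPUs.
	fmt.Println(schedulableGPUs(&v1.Node{}, "nvidia.com/gpu")) // 0
}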

View File

@@ -26,8 +26,8 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/uuid"
utilversion "k8s.io/apimachinery/pkg/util/version"
clientset "k8s.io/client-go/kubernetes"
utilversion "k8s.io/kubernetes/pkg/util/version"
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
@@ -56,17 +56,16 @@ type pausePodConfig struct {
Ports []v1.ContainerPort
OwnerReferences []metav1.OwnerReference
PriorityClassName string
DeletionGracePeriodSeconds *int64
}
var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
var cs clientset.Interface
var nodeList *v1.NodeList
var systemPodsNo int
var totalPodCapacity int64
var RCName string
var ns string
f := framework.NewDefaultFramework("sched-pred")
ignoreLabels := framework.ImagePullerLabels
AfterEach(func() {
rc, err := cs.CoreV1().ReplicationControllers(ns).Get(RCName, metav1.GetOptions{})
@@ -82,30 +81,12 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
ns = f.Namespace.Name
nodeList = &v1.NodeList{}
framework.WaitForAllNodesHealthy(cs, time.Minute)
framework.AllNodesReady(cs, time.Minute)
masterNodes, nodeList = framework.GetMasterAndWorkerNodesOrDie(cs)
err := framework.CheckTestingNSDeletedExcept(cs, ns)
framework.ExpectNoError(err)
// Every test case in this suite assumes that cluster add-on pods stay stable and
// cannot be run in parallel with any other test that touches Nodes or Pods.
// It is so because we need to have precise control on what's running in the cluster.
systemPods, err := framework.GetPodsInNamespace(cs, ns, ignoreLabels)
Expect(err).NotTo(HaveOccurred())
systemPodsNo = 0
for _, pod := range systemPods {
if !masterNodes.Has(pod.Spec.NodeName) && pod.DeletionTimestamp == nil {
systemPodsNo++
}
}
err = framework.WaitForPodsRunningReady(cs, metav1.NamespaceSystem, int32(systemPodsNo), 0, framework.PodReadyBeforeTimeout, ignoreLabels)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForPodsSuccess(cs, metav1.NamespaceSystem, framework.ImagePullerLabels, framework.ImagePrePullingTimeout)
Expect(err).NotTo(HaveOccurred())
for _, node := range nodeList.Items {
framework.Logf("\nLogging pods the kubelet thinks is on node %v before test", node.Name)
framework.PrintAllKubeletPods(cs, node.Name)
@@ -239,9 +220,9 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
// 4. Create another pod with no affinity to any node that need 50% of the largest node CPU.
// 5. Make sure this additional pod is not scheduled.
/*
Testname: scheduler-resource-limits
Description: Ensure that scheduler accounts node resources correctly
and respects pods' resource requirements during scheduling.
Release : v1.9
Testname: Scheduler, resource limits
Description: Scheduling Pods MUST fail if the resource limits exceed Machine capacity.
*/
framework.ConformanceIt("validates resource limits of pods that are allowed to run ", func() {
framework.WaitForStableCluster(cs, masterNodes)
@@ -345,9 +326,9 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
// Test Nodes does not have any label, hence it should be impossible to schedule Pod with
// nonempty Selector set.
/*
Testname: scheduler-node-selector-not-matching
Description: Ensure that scheduler respects the NodeSelector field of
PodSpec during scheduling (when it does not match any node).
Release : v1.9
Testname: Scheduler, node selector not matching
Description: Create a Pod with a NodeSelector set to a value that does not match a node in the cluster. Since there are no nodes matching the criteria the Pod MUST not be scheduled.
*/
framework.ConformanceIt("validates that NodeSelector is respected if not matching ", func() {
By("Trying to schedule Pod with nonempty NodeSelector.")
@@ -368,9 +349,9 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
})
/*
Testname: scheduler-node-selector-matching
Description: Ensure that scheduler respects the NodeSelector field
of PodSpec during scheduling (when it matches).
Release : v1.9
Testname: Scheduler, node selector matching
Description: Create a label on the node {k: v}. Then create a Pod with a NodeSelector set to {k: v}. Check to see if the Pod is scheduled. When the NodeSelector matches then Pod MUST be scheduled on that node.
*/
framework.ConformanceIt("validates that NodeSelector is respected if matching ", func() {
nodeName := GetNodeThatCanRunPod(f)
@@ -651,6 +632,9 @@ func initPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
if conf.Resources != nil {
pod.Spec.Containers[0].Resources = *conf.Resources
}
if conf.DeletionGracePeriodSeconds != nil {
pod.ObjectMeta.DeletionGracePeriodSeconds = conf.DeletionGracePeriodSeconds
}
return pod
}
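The new DeletionGracePeriodSeconds knob added to pausePodConfig above is consumed by the taint-based-eviction test later in this commit, which passes a zero value so evicted pods disappear immediately. A trimmed, illustrative stand-in for the plumbing, not the framework's real types:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// pausePodConfig is a trimmed stand-in for the test framework's struct.
type pausePodConfig struct {
	Name                       string
	DeletionGracePeriodSeconds *int64
}

func initPausePod(conf pausePodConfig) *v1.Pod {
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: conf.Name}}
	// Only set the field when the caller asked for it, mirroring the diff.
	if conf.DeletionGracePeriodSeconds != nil {
		pod.ObjectMeta.DeletionGracePeriodSeconds = conf.DeletionGracePeriodSeconds
	}
	return pod
}

func main() {
	zero := int64(0)
	pod := initPausePod(pausePodConfig{Name: "demo", DeletionGracePeriodSeconds: &zero})
	fmt.Println(*pod.ObjectMeta.DeletionGracePeriodSeconds) // 0
}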

View File

@@ -34,7 +34,7 @@ import (
_ "github.com/stretchr/testify/assert"
)
var _ = SIGDescribe("SchedulerPreemption [Serial] [Feature:PodPreemption]", func() {
var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
var cs clientset.Interface
var nodeList *v1.NodeList
var ns string
@@ -175,6 +175,11 @@ var _ = SIGDescribe("SchedulerPreemption [Serial] [Feature:PodPreemption]", func
})
// Make sure that the lowest priority pod is deleted.
preemptedPod, err := cs.CoreV1().Pods(pods[0].Namespace).Get(pods[0].Name, metav1.GetOptions{})
defer func() {
// Clean-up the critical pod
err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete("critical-pod", metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
}()
podDeleted := (err != nil && errors.IsNotFound(err)) ||
(err == nil && preemptedPod.DeletionTimestamp != nil)
Expect(podDeleted).To(BeTrue())
@@ -184,9 +189,6 @@ var _ = SIGDescribe("SchedulerPreemption [Serial] [Feature:PodPreemption]", func
framework.ExpectNoError(err)
Expect(livePod.DeletionTimestamp).To(BeNil())
}
// Clean-up the critical pod
err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete("critical-pod", metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
})
// This test verifies that when a high priority pod is pending and its
@@ -315,7 +317,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial] [Feature:PodPreemption]", func
})
})
var _ = SIGDescribe("PodPriorityResolution [Serial] [Feature:PodPreemption]", func() {
var _ = SIGDescribe("PodPriorityResolution [Serial]", func() {
var cs clientset.Interface
var ns string
f := framework.NewDefaultFramework("sched-pod-priority")
@@ -341,11 +343,13 @@ var _ = SIGDescribe("PodPriorityResolution [Serial] [Feature:PodPreemption]", fu
Namespace: metav1.NamespaceSystem,
PriorityClassName: spc,
})
defer func() {
// Clean-up the pod.
err := f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(pod.Name, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
}()
Expect(pod.Spec.Priority).NotTo(BeNil())
framework.Logf("Created pod: %v", pod.Name)
// Clean-up the pod.
err := f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(pod.Name, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
}
})
})
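Both hunks above move pod cleanup into a defer registered as soon as the pod exists, so the delete still runs when a failing assertion unwinds the test body (Gomega failures panic under Ginkgo). A dependency-free sketch of why that ordering is safer:

package main

import "fmt"

func deletePod(name string) { fmt.Println("deleted", name) }

func testBody() {
	pod := "critical-pod" // stand-in for the Create call
	defer deletePod(pod)  // registered immediately: runs even on panic

	defer func() {
		// Stand-in for Ginkgo's recovery of a failed assertion.
		if r := recover(); r != nil {
			fmt.Println("assertion failed:", r)
		}
	}()
	panic("Expect(...) failed") // simulate a failing assertion mid-test
}

func main() {
	testBody() // prints the failure, then "deleted critical-pod"
}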

View File

@@ -63,7 +63,6 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
var systemPodsNo int
var ns string
f := framework.NewDefaultFramework("sched-priority")
ignoreLabels := framework.ImagePullerLabels
AfterEach(func() {
})
@@ -78,11 +77,11 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
err := framework.CheckTestingNSDeletedExcept(cs, ns)
framework.ExpectNoError(err)
err = framework.WaitForPodsRunningReady(cs, metav1.NamespaceSystem, int32(systemPodsNo), 0, framework.PodReadyBeforeTimeout, ignoreLabels)
err = framework.WaitForPodsRunningReady(cs, metav1.NamespaceSystem, int32(systemPodsNo), 0, framework.PodReadyBeforeTimeout, map[string]string{})
Expect(err).NotTo(HaveOccurred())
})
It("Pod should be schedule to node that don't match the PodAntiAffinity terms", func() {
It("Pod should be scheduled to node that don't match the PodAntiAffinity terms", func() {
By("Trying to launch a pod with a label to get a node which can launch it.")
pod := runPausePod(f, pausePodConfig{
Name: "pod-with-label-security-s1",
@@ -143,7 +142,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
Expect(labelPod.Spec.NodeName).NotTo(Equal(nodeName))
})
It("Pod should avoid to schedule to node that have avoidPod annotation", func() {
It("Pod should avoid nodes that have avoidPod annotation", func() {
nodeName := nodeList.Items[0].Name
// make the nodes have balanced cpu,mem usage
err := createBalancedPodForNodes(f, cs, ns, nodeList.Items, podRequestedResource, 0.5)
@@ -206,7 +205,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
}
})
It("Pod should perfer to scheduled to nodes pod can tolerate", func() {
It("Pod should be preferably scheduled to nodes pod can tolerate", func() {
// make the nodes have balanced cpu,mem usage ratio
err := createBalancedPodForNodes(f, cs, ns, nodeList.Items, podRequestedResource, 0.5)
framework.ExpectNoError(err)

View File

@@ -1,133 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling
import (
"fmt"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
defaultTimeout = 3 * time.Minute
)
// This test requires Rescheduler to be enabled.
var _ = SIGDescribe("Rescheduler [Serial]", func() {
f := framework.NewDefaultFramework("rescheduler")
var ns string
var totalMillicores int
BeforeEach(func() {
framework.Skipf("Rescheduler is being deprecated soon in Kubernetes 1.10")
ns = f.Namespace.Name
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
nodeCount := len(nodes.Items)
Expect(nodeCount).NotTo(BeZero())
cpu := nodes.Items[0].Status.Capacity[v1.ResourceCPU]
totalMillicores = int((&cpu).MilliValue()) * nodeCount
})
It("should ensure that critical pod is scheduled in case there is no resources available", func() {
By("reserving all available cpu")
err := reserveAllCpu(f, "reserve-all-cpu", totalMillicores)
defer framework.DeleteRCAndWaitForGC(f.ClientSet, ns, "reserve-all-cpu")
framework.ExpectNoError(err)
By("creating a new instance of Dashboard and waiting for Dashboard to be scheduled")
label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "kubernetes-dashboard"}))
listOpts := metav1.ListOptions{LabelSelector: label.String()}
deployments, err := f.ClientSet.ExtensionsV1beta1().Deployments(metav1.NamespaceSystem).List(listOpts)
framework.ExpectNoError(err)
Expect(len(deployments.Items)).Should(Equal(1))
deployment := deployments.Items[0]
replicas := uint(*(deployment.Spec.Replicas))
err = framework.ScaleDeployment(f.ClientSet, f.ScalesGetter, metav1.NamespaceSystem, deployment.Name, replicas+1, true)
defer framework.ExpectNoError(framework.ScaleDeployment(f.ClientSet, f.ScalesGetter, metav1.NamespaceSystem, deployment.Name, replicas, true))
framework.ExpectNoError(err)
})
})
func reserveAllCpu(f *framework.Framework, id string, millicores int) error {
timeout := 5 * time.Minute
replicas := millicores / 100
reserveCpu(f, id, 1, 100)
framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.ScalesGetter, f.Namespace.Name, id, uint(replicas), false))
for start := time.Now(); time.Since(start) < timeout; time.Sleep(10 * time.Second) {
pods, err := framework.GetPodsInNamespace(f.ClientSet, f.Namespace.Name, framework.ImagePullerLabels)
if err != nil {
return err
}
if len(pods) != replicas {
continue
}
allRunningOrUnschedulable := true
for _, pod := range pods {
if !podRunningOrUnschedulable(pod) {
allRunningOrUnschedulable = false
break
}
}
if allRunningOrUnschedulable {
return nil
}
}
return fmt.Errorf("Pod name %s: Gave up waiting %v for %d pods to come up", id, timeout, replicas)
}
func podRunningOrUnschedulable(pod *v1.Pod) bool {
_, cond := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
if cond != nil && cond.Status == v1.ConditionFalse && cond.Reason == "Unschedulable" {
return true
}
running, _ := testutils.PodRunningReady(pod)
return running
}
func reserveCpu(f *framework.Framework, id string, replicas, millicores int) {
By(fmt.Sprintf("Running RC which reserves %v millicores", millicores))
request := int64(millicores / replicas)
config := &testutils.RCConfig{
Client: f.ClientSet,
InternalClient: f.InternalClientset,
Name: id,
Namespace: f.Namespace.Name,
Timeout: defaultTimeout,
Image: imageutils.GetPauseImageName(),
Replicas: replicas,
CpuRequest: request,
}
framework.ExpectNoError(framework.RunRC(*config))
}

View File

@@ -22,13 +22,14 @@ import (
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
"k8s.io/api/scheduling/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/quota/evaluator/core"
"k8s.io/kubernetes/pkg/quota/v1/evaluator/core"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -364,6 +365,22 @@ var _ = SIGDescribe("ResourceQuota", func() {
})
It("should create a ResourceQuota and capture the life of a configMap.", func() {
found, unchanged := 0, 0
wait.Poll(1*time.Second, 30*time.Second, func() (bool, error) {
configmaps, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
if len(configmaps.Items) == found {
// loop until the number of configmaps has stabilized for 5 seconds
unchanged++
return unchanged > 4, nil
}
unchanged = 0
found = len(configmaps.Items)
return false, nil
})
defaultConfigMaps := fmt.Sprintf("%d", found)
hardConfigMaps := fmt.Sprintf("%d", found+1)
By("Creating a ResourceQuota")
quotaName := "test-quota"
resourceQuota := newTestResourceQuota(quotaName)
@@ -373,6 +390,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
By("Ensuring resource quota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourceQuotas] = resource.MustParse("1")
usedResources[v1.ResourceConfigMaps] = resource.MustParse(defaultConfigMaps)
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
@@ -384,7 +402,10 @@ var _ = SIGDescribe("ResourceQuota", func() {
By("Ensuring resource quota status captures configMap creation")
usedResources = v1.ResourceList{}
usedResources[v1.ResourceQuotas] = resource.MustParse("1")
usedResources[v1.ResourceConfigMaps] = resource.MustParse("1")
// we expect there to be two configmaps because each namespace will receive
// a ca.crt configmap by default.
// ref:https://github.com/kubernetes/kubernetes/pull/68812
usedResources[v1.ResourceConfigMaps] = resource.MustParse(hardConfigMaps)
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
@@ -393,7 +414,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released usage")
usedResources[v1.ResourceConfigMaps] = resource.MustParse("0")
usedResources[v1.ResourceConfigMaps] = resource.MustParse(defaultConfigMaps)
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
})
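The quota test above no longer hard-codes configMap counts: it first waits for the namespace's configmap count to stop changing (each namespace asynchronously receives a default ca.crt configmap, per the referenced PR), then expresses expectations relative to that baseline. A self-contained sketch of the stabilization loop, with a stub in place of the List call:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	found, unchanged := 0, 0
	countConfigMaps := func() int { return 2 } // stub for the List call

	// Succeed only after the observed count has been identical for
	// five consecutive 1s samples (roughly 5s of stability).
	wait.Poll(1*time.Second, 30*time.Second, func() (bool, error) {
		n := countConfigMaps()
		if n == found {
			unchanged++
			return unchanged > 4, nil
		}
		found, unchanged = n, 0
		return false, nil
	})
	fmt.Println("baseline configmaps:", found) // quota math starts from here
}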
@@ -731,8 +752,539 @@ var _ = SIGDescribe("ResourceQuota", func() {
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
})
})
var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() {
f := framework.NewDefaultFramework("scope-selectors")
It("should verify ResourceQuota with best effort scope using scope-selectors.", func() {
By("Creating a ResourceQuota with best effort scope")
resourceQuotaBestEffort, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeSelector("quota-besteffort", v1.ResourceQuotaScopeBestEffort))
Expect(err).NotTo(HaveOccurred())
By("Ensuring ResourceQuota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a ResourceQuota with not best effort scope")
resourceQuotaNotBestEffort, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeSelector("quota-not-besteffort", v1.ResourceQuotaScopeNotBestEffort))
Expect(err).NotTo(HaveOccurred())
By("Ensuring ResourceQuota status is calculated")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a best-effort pod")
pod := newTestPodForQuota(f, podName, v1.ResourceList{}, v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with best effort scope captures the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with not best effort ignored the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a not best-effort pod")
requests := v1.ResourceList{}
requests[v1.ResourceCPU] = resource.MustParse("500m")
requests[v1.ResourceMemory] = resource.MustParse("200Mi")
limits := v1.ResourceList{}
limits[v1.ResourceCPU] = resource.MustParse("1")
limits[v1.ResourceMemory] = resource.MustParse("400Mi")
pod = newTestPodForQuota(f, "burstable-pod", requests, limits)
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with not best effort scope captures the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with best effort scope ignored the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
})
It("should verify ResourceQuota with terminating scopes through scope selectors.", func() {
By("Creating a ResourceQuota with terminating scope")
quotaTerminatingName := "quota-terminating"
resourceQuotaTerminating, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeSelector(quotaTerminatingName, v1.ResourceQuotaScopeTerminating))
Expect(err).NotTo(HaveOccurred())
By("Ensuring ResourceQuota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a ResourceQuota with not terminating scope")
quotaNotTerminatingName := "quota-not-terminating"
resourceQuotaNotTerminating, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeSelector(quotaNotTerminatingName, v1.ResourceQuotaScopeNotTerminating))
Expect(err).NotTo(HaveOccurred())
By("Ensuring ResourceQuota status is calculated")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a long running pod")
podName := "test-pod"
requests := v1.ResourceList{}
requests[v1.ResourceCPU] = resource.MustParse("500m")
requests[v1.ResourceMemory] = resource.MustParse("200Mi")
limits := v1.ResourceList{}
limits[v1.ResourceCPU] = resource.MustParse("1")
limits[v1.ResourceMemory] = resource.MustParse("400Mi")
pod := newTestPodForQuota(f, podName, requests, limits)
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with not terminating scope captures the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("1")
usedResources[v1.ResourceRequestsCPU] = requests[v1.ResourceCPU]
usedResources[v1.ResourceRequestsMemory] = requests[v1.ResourceMemory]
usedResources[v1.ResourceLimitsCPU] = limits[v1.ResourceCPU]
usedResources[v1.ResourceLimitsMemory] = limits[v1.ResourceMemory]
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with terminating scope ignored the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0")
usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(podName, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0")
usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a terminating pod")
podName = "terminating-pod"
pod = newTestPodForQuota(f, podName, requests, limits)
activeDeadlineSeconds := int64(3600)
pod.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with terminating scope captures the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("1")
usedResources[v1.ResourceRequestsCPU] = requests[v1.ResourceCPU]
usedResources[v1.ResourceRequestsMemory] = requests[v1.ResourceMemory]
usedResources[v1.ResourceLimitsCPU] = limits[v1.ResourceCPU]
usedResources[v1.ResourceLimitsMemory] = limits[v1.ResourceMemory]
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with not terminating scope ignored the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0")
usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(podName, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0")
usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
})
})
var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
f := framework.NewDefaultFramework("resourcequota-priorityclass")
It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with same priority class.", func() {
_, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&v1beta1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass1"}, Value: int32(1000)})
Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true))
hard := v1.ResourceList{}
hard[v1.ResourcePods] = resource.MustParse("1")
By("Creating a ResourceQuota with priority class scope")
resourceQuotaPriorityClass, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpIn, []string{"pclass1"}))
Expect(err).NotTo(HaveOccurred())
By("Ensuring ResourceQuota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a pod with priority class")
podName := "testpod-pclass1"
pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass1")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with priority class scope captures the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
})
It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with same priority class.", func() {
_, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&v1beta1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass2"}, Value: int32(1000)})
Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true))
hard := v1.ResourceList{}
hard[v1.ResourcePods] = resource.MustParse("1")
By("Creating a ResourceQuota with priority class scope")
resourceQuotaPriorityClass, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpIn, []string{"pclass2"}))
Expect(err).NotTo(HaveOccurred())
By("Ensuring ResourceQuota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating first pod with priority class should pass")
podName := "testpod-pclass2-1"
pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass2")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with priority class scope captures the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating 2nd pod with priority class should fail")
podName2 := "testpod-pclass2-2"
pod2 := newTestPodForQuotaWithPriority(f, podName2, v1.ResourceList{}, v1.ResourceList{}, "pclass2")
pod2, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod2)
Expect(err).To(HaveOccurred())
By("Deleting first pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
})
It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with different priority class.", func() {
_, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&v1beta1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass3"}, Value: int32(1000)})
Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true))
hard := v1.ResourceList{}
hard[v1.ResourcePods] = resource.MustParse("1")
By("Creating a ResourceQuota with priority class scope")
resourceQuotaPriorityClass, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpIn, []string{"pclass4"}))
Expect(err).NotTo(HaveOccurred())
By("Ensuring ResourceQuota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a pod with priority class with pclass3")
podName := "testpod-pclass3-1"
pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass3")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with priority class scope remains same")
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a 2nd pod with priority class pclass3")
podName2 := "testpod-pclass2-2"
pod2 := newTestPodForQuotaWithPriority(f, podName2, v1.ResourceList{}, v1.ResourceList{}, "pclass3")
pod2, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod2)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with priority class scope remains same")
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Deleting both pods")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod2.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
})
It("should verify ResourceQuota's multiple priority class scope (quota set to pod count: 2) against 2 pods with same priority classes.", func() {
_, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&v1beta1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass5"}, Value: int32(1000)})
Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true))
_, err = f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&v1beta1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass6"}, Value: int32(1000)})
Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true))
hard := v1.ResourceList{}
hard[v1.ResourcePods] = resource.MustParse("2")
By("Creating a ResourceQuota with priority class scope")
resourceQuotaPriorityClass, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpIn, []string{"pclass5", "pclass6"}))
Expect(err).NotTo(HaveOccurred())
By("Ensuring ResourceQuota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a pod with priority class pclass5")
podName := "testpod-pclass5"
pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass5")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with priority class is updated with the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating 2nd pod with priority class pclass6")
podName2 := "testpod-pclass6"
pod2 := newTestPodForQuotaWithPriority(f, podName2, v1.ResourceList{}, v1.ResourceList{}, "pclass6")
pod2, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod2)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with priority class scope is updated with the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("2")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Deleting both pods")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod2.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
})
It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpNotIn).", func() {
_, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&v1beta1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass7"}, Value: int32(1000)})
Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true))
hard := v1.ResourceList{}
hard[v1.ResourcePods] = resource.MustParse("1")
By("Creating a ResourceQuota with priority class scope")
resourceQuotaPriorityClass, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpNotIn, []string{"pclass7"}))
Expect(err).NotTo(HaveOccurred())
By("Ensuring ResourceQuota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a pod with priority class pclass7")
podName := "testpod-pclass7"
pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass7")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with priority class is not used")
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
})
It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpExists).", func() {
_, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&v1beta1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass8"}, Value: int32(1000)})
Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true))
hard := v1.ResourceList{}
hard[v1.ResourcePods] = resource.MustParse("1")
By("Creating a ResourceQuota with priority class scope")
resourceQuotaPriorityClass, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpExists, []string{}))
Expect(err).NotTo(HaveOccurred())
By("Ensuring ResourceQuota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a pod with priority class pclass8")
podName := "testpod-pclass8"
pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass8")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with priority class is updated with the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
})
It("should verify ResourceQuota's priority class scope (cpu, memory quota set) against a pod with same priority class.", func() {
_, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&v1beta1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass9"}, Value: int32(1000)})
Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true))
hard := v1.ResourceList{}
hard[v1.ResourcePods] = resource.MustParse("1")
hard[v1.ResourceRequestsCPU] = resource.MustParse("1")
hard[v1.ResourceRequestsMemory] = resource.MustParse("1Gi")
hard[v1.ResourceLimitsCPU] = resource.MustParse("3")
hard[v1.ResourceLimitsMemory] = resource.MustParse("3Gi")
By("Creating a ResourceQuota with priority class scope")
resourceQuotaPriorityClass, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpIn, []string{"pclass9"}))
Expect(err).NotTo(HaveOccurred())
By("Ensuring ResourceQuota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourcePods] = resource.MustParse("0")
usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0Gi")
usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0Gi")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a pod with priority class")
podName := "testpod-pclass9"
request := v1.ResourceList{}
request[v1.ResourceCPU] = resource.MustParse("1")
request[v1.ResourceMemory] = resource.MustParse("1Gi")
limit := v1.ResourceList{}
limit[v1.ResourceCPU] = resource.MustParse("2")
limit[v1.ResourceMemory] = resource.MustParse("2Gi")
pod := newTestPodForQuotaWithPriority(f, podName, request, limit, "pclass9")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with priority class scope captures the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("1")
usedResources[v1.ResourceRequestsCPU] = resource.MustParse("1")
usedResources[v1.ResourceRequestsMemory] = resource.MustParse("1Gi")
usedResources[v1.ResourceLimitsCPU] = resource.MustParse("2")
usedResources[v1.ResourceLimitsMemory] = resource.MustParse("2Gi")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage")
usedResources[v1.ResourcePods] = resource.MustParse("0")
usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0Gi")
usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0Gi")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
})
})
// newTestResourceQuotaWithScopeSelector returns a quota that enforces default constraints for testing with scopeSelectors
func newTestResourceQuotaWithScopeSelector(name string, scope v1.ResourceQuotaScope) *v1.ResourceQuota {
hard := v1.ResourceList{}
hard[v1.ResourcePods] = resource.MustParse("5")
switch scope {
case v1.ResourceQuotaScopeTerminating, v1.ResourceQuotaScopeNotTerminating:
hard[v1.ResourceRequestsCPU] = resource.MustParse("1")
hard[v1.ResourceRequestsMemory] = resource.MustParse("500Mi")
hard[v1.ResourceLimitsCPU] = resource.MustParse("2")
hard[v1.ResourceLimitsMemory] = resource.MustParse("1Gi")
}
return &v1.ResourceQuota{
ObjectMeta: metav1.ObjectMeta{Name: name},
Spec: v1.ResourceQuotaSpec{Hard: hard,
ScopeSelector: &v1.ScopeSelector{
MatchExpressions: []v1.ScopedResourceSelectorRequirement{
{
ScopeName: scope,
Operator: v1.ScopeSelectorOpExists},
},
},
},
}
}
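For context, a hedged sketch of what the helper above produces for the BestEffort scope and how a caller might inspect it; assumes only the k8s.io/api and k8s.io/apimachinery modules:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Equivalent to newTestResourceQuotaWithScopeSelector("quota-besteffort",
	// v1.ResourceQuotaScopeBestEffort): only BestEffort pods are counted.
	quota := &v1.ResourceQuota{
		ObjectMeta: metav1.ObjectMeta{Name: "quota-besteffort"},
		Spec: v1.ResourceQuotaSpec{
			Hard: v1.ResourceList{v1.ResourcePods: resource.MustParse("5")},
			ScopeSelector: &v1.ScopeSelector{
				MatchExpressions: []v1.ScopedResourceSelectorRequirement{{
					ScopeName: v1.ResourceQuotaScopeBestEffort,
					Operator:  v1.ScopeSelectorOpExists,
				}},
			},
		},
	}
	fmt.Println(quota.Name, quota.Spec.ScopeSelector.MatchExpressions[0].ScopeName)
}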
// newTestResourceQuotaWithScope returns a quota that enforces default constraints for testing with scopes
func newTestResourceQuotaWithScope(name string, scope v1.ResourceQuotaScope) *v1.ResourceQuota {
hard := v1.ResourceList{}
@@ -750,6 +1302,25 @@ func newTestResourceQuotaWithScope(name string, scope v1.ResourceQuotaScope) *v1
}
}
// newTestResourceQuotaWithScopeForPriorityClass returns a quota
// that enforces default constraints for testing with ResourceQuotaScopePriorityClass scope
func newTestResourceQuotaWithScopeForPriorityClass(name string, hard v1.ResourceList, op v1.ScopeSelectorOperator, values []string) *v1.ResourceQuota {
return &v1.ResourceQuota{
ObjectMeta: metav1.ObjectMeta{Name: name},
Spec: v1.ResourceQuotaSpec{Hard: hard,
ScopeSelector: &v1.ScopeSelector{
MatchExpressions: []v1.ScopedResourceSelectorRequirement{
{
ScopeName: v1.ResourceQuotaScopePriorityClass,
Operator: op,
Values: values,
},
},
},
},
}
}
// newTestResourceQuotaForEphemeralStorage returns a quota that enforces default constraints for testing feature LocalStorageCapacityIsolation
func newTestResourceQuotaForEphemeralStorage(name string) *v1.ResourceQuota {
hard := v1.ResourceList{}
@@ -810,6 +1381,28 @@ func newTestPodForQuota(f *framework.Framework, name string, requests v1.Resourc
}
}
// newTestPodForQuotaWithPriority returns a pod that has the specified requests, limits and priority class
func newTestPodForQuotaWithPriority(f *framework.Framework, name string, requests v1.ResourceList, limits v1.ResourceList, pclass string) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "pause",
Image: imageutils.GetPauseImageName(),
Resources: v1.ResourceRequirements{
Requests: requests,
Limits: limits,
},
},
},
PriorityClassName: pclass,
},
}
}
// newTestPersistentVolumeClaimForQuota returns a simple persistent volume claim
func newTestPersistentVolumeClaimForQuota(name string) *v1.PersistentVolumeClaim {
return &v1.PersistentVolumeClaim{

View File

@@ -0,0 +1,193 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling
import (
"errors"
"fmt"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
clientset "k8s.io/client-go/kubernetes"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
)
func newUnreachableNoExecuteTaint() *v1.Taint {
return &v1.Taint{
Key: schedulerapi.TaintNodeUnreachable,
Effect: v1.TaintEffectNoExecute,
}
}
func getTolerationSeconds(tolerations []v1.Toleration) (int64, error) {
for _, t := range tolerations {
if t.Key == schedulerapi.TaintNodeUnreachable && t.Effect == v1.TaintEffectNoExecute && t.Operator == v1.TolerationOpExists {
// guard against a nil TolerationSeconds, which denotes "tolerate forever"
if t.TolerationSeconds != nil {
return *t.TolerationSeconds, nil
}
return 0, errors.New("toleration has no tolerationSeconds set")
}
}
return 0, errors.New("cannot find toleration")
}
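// Editor's note: an illustrative sketch, not part of this commit. This is the
// toleration shape that getTolerationSeconds matches; 300 mirrors the default
// that the DefaultTolerationSeconds admission plugin is expected to inject
// into pods that declare no toleration of their own.
func exampleUnreachableToleration() v1.Toleration {
seconds := int64(300)
return v1.Toleration{
Key: schedulerapi.TaintNodeUnreachable, // node.kubernetes.io/unreachable
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoExecute,
TolerationSeconds: &seconds,
}
}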
var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() {
f := framework.NewDefaultFramework("sched-taint-based-evictions")
var cs clientset.Interface
var ns string
BeforeEach(func() {
cs = f.ClientSet
ns = f.Namespace.Name
// skip if TaintBasedEvictions is not enabled
// TODO(Huang-Wei): remove this when TaintBasedEvictions is GAed
framework.SkipUnlessTaintBasedEvictionsEnabled()
// this test must be run on a cluster that has more than 1 node,
// otherwise the node lifecycle manager enters fully-disrupted mode
framework.SkipUnlessNodeCountIsAtLeast(2)
})
// This test verifies that when a node becomes unreachable:
// 1. node lifecycle manager generates a status change: [NodeReady=true, status=ConditionUnknown]
// 2. the node.kubernetes.io/unreachable=:NoExecute taint is applied to it
// 3. pods without a matching toleration get a toleration with tolerationSeconds=300 added
// 4. pods with a toleration but without tolerationSeconds won't be modified, and won't be evicted
// 5. pods with a toleration and with tolerationSeconds won't be modified, and will be evicted after tolerationSeconds
// When the network issue recovers, it's expected that:
// 6. node lifecycle manager generates a status change: [NodeReady=true, status=ConditionTrue]
// 7. the node.kubernetes.io/unreachable=:NoExecute taint is taken off the node
It("Checks that the node becomes unreachable", func() {
// find an available node
nodeName := GetNodeThatCanRunPod(f)
By("Finding an available node " + nodeName)
// pod0 is a pod with the unreachable=:NoExecute toleration and tolerationSeconds=0s
// pod1 is a pod with the unreachable=:NoExecute toleration and tolerationSeconds=200s
// pod2 is a pod without any toleration
base := "taint-based-eviction"
tolerationSeconds := []int64{0, 200}
numPods := len(tolerationSeconds) + 1
By(fmt.Sprintf("Preparing %v pods", numPods))
pods := make([]*v1.Pod, numPods)
zero := int64(0)
// build pod0, pod1
for i := 0; i < numPods-1; i++ {
pods[i] = createPausePod(f, pausePodConfig{
Name: fmt.Sprintf("%v-%v", base, i),
NodeName: nodeName,
Tolerations: []v1.Toleration{
{
Key: schedulerapi.TaintNodeUnreachable,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoExecute,
TolerationSeconds: &tolerationSeconds[i],
},
},
DeletionGracePeriodSeconds: &zero,
})
}
// build pod2
pods[numPods-1] = createPausePod(f, pausePodConfig{
Name: fmt.Sprintf("%v-%v", base, numPods-1),
NodeName: nodeName,
})
By("Verifying all pods are running properly")
for _, pod := range pods {
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(cs, pod))
}
// get the node API object
nodeSelector := fields.OneTermEqualSelector("metadata.name", nodeName)
nodeList, err := cs.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: nodeSelector.String()})
if err != nil || len(nodeList.Items) != 1 {
framework.Failf("expected no err, got %v; expected len(nodes) = 1, got %v", err, len(nodeList.Items))
}
node := nodeList.Items[0]
By(fmt.Sprintf("Blocking traffic from node %s to the master", nodeName))
host, err := framework.GetNodeExternalIP(&node)
// TODO(Huang-Wei): make this case work for local provider
// if err != nil {
// host, err = framework.GetNodeInternalIP(&node)
// }
framework.ExpectNoError(err)
masterAddresses := framework.GetAllMasterAddresses(cs)
taint := newUnreachableNoExecuteTaint()
defer func() {
By(fmt.Sprintf("Unblocking traffic from node %s to the master", node.Name))
for _, masterAddress := range masterAddresses {
framework.UnblockNetwork(host, masterAddress)
}
if CurrentGinkgoTestDescription().Failed {
framework.Failf("Current e2e test has failed, so return from here.")
return
}
By(fmt.Sprintf("Expecting to see node %q becomes Ready", nodeName))
framework.WaitForNodeToBeReady(cs, nodeName, time.Minute*1)
By("Expecting to see unreachable=:NoExecute taint is taken off")
err := framework.WaitForNodeHasTaintOrNot(cs, nodeName, taint, false, time.Second*30)
framework.ExpectNoError(err)
}()
for _, masterAddress := range masterAddresses {
framework.BlockNetwork(host, masterAddress)
}
By(fmt.Sprintf("Expecting to see node %q becomes NotReady", nodeName))
if !framework.WaitForNodeToBeNotReady(cs, nodeName, time.Minute*3) {
framework.Failf("node %q doesn't turn to NotReady after 3 minutes", nodeName)
}
By("Expecting to see unreachable=:NoExecute taint is applied")
err = framework.WaitForNodeHasTaintOrNot(cs, nodeName, taint, true, time.Second*30)
framework.ExpectNoError(err)
By("Expecting pod0 to be evicted immediately")
err = framework.WaitForPodCondition(cs, ns, pods[0].Name, "pod0 terminating", time.Second*15, func(pod *v1.Pod) (bool, error) {
// as node is unreachable, pod0 is expected to be in Terminating status
// rather than getting deleted
if pod.DeletionTimestamp != nil {
return true, nil
}
return false, nil
})
framework.ExpectNoError(err)
By("Expecting pod2 to be updated with a toleration with tolerationSeconds=300")
err = framework.WaitForPodCondition(cs, ns, pods[2].Name, "pod2 updated with tolerationSeconds=300", time.Second*15, func(pod *v1.Pod) (bool, error) {
if seconds, err := getTolerationSeconds(pod.Spec.Tolerations); err == nil {
return seconds == 300, nil
}
return false, nil
})
framework.ExpectNoError(err)
By("Expecting pod1 to be unchanged")
livePod1, err := cs.CoreV1().Pods(pods[1].Namespace).Get(pods[1].Name, metav1.GetOptions{})
framework.ExpectNoError(err)
seconds, err := getTolerationSeconds(livePod1.Spec.Tolerations)
framework.ExpectNoError(err)
if seconds != 200 {
framework.Failf("expect tolerationSeconds of pod1 is 200, but got %v", seconds)
}
})
})


@ -20,7 +20,7 @@ import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
@ -47,21 +47,21 @@ func getTestTaint() v1.Taint {
// Creates a default pod for this test, with an argument saying whether the Pod
// should have a toleration for the Taints used in this test.
func createPodForTaintsTest(hasToleration bool, tolerationSeconds int, podName, ns string) *v1.Pod {
func createPodForTaintsTest(hasToleration bool, tolerationSeconds int, podName, podLabel, ns string) *v1.Pod {
grace := int64(1)
if !hasToleration {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Namespace: ns,
Labels: map[string]string{"name": podName},
Name: podName,
Namespace: ns,
Labels: map[string]string{"group": podLabel},
DeletionGracePeriodSeconds: &grace,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "pause",
Image: "kubernetes/pause",
Image: "k8s.gcr.io/pause:3.1",
},
},
},
@ -70,9 +70,9 @@ func createPodForTaintsTest(hasToleration bool, tolerationSeconds int, podName,
if tolerationSeconds <= 0 {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Namespace: ns,
Labels: map[string]string{"name": podName},
Name: podName,
Namespace: ns,
Labels: map[string]string{"group": podLabel},
DeletionGracePeriodSeconds: &grace,
// default - tolerate forever
},
@ -80,7 +80,7 @@ func createPodForTaintsTest(hasToleration bool, tolerationSeconds int, podName,
Containers: []v1.Container{
{
Name: "pause",
Image: "kubernetes/pause",
Image: "k8s.gcr.io/pause:3.1",
},
},
Tolerations: []v1.Toleration{{Key: "kubernetes.io/e2e-evict-taint-key", Value: "evictTaintVal", Effect: v1.TaintEffectNoExecute}},
@ -90,16 +90,16 @@ func createPodForTaintsTest(hasToleration bool, tolerationSeconds int, podName,
ts := int64(tolerationSeconds)
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Namespace: ns,
Labels: map[string]string{"name": podName},
Name: podName,
Namespace: ns,
Labels: map[string]string{"group": podLabel},
DeletionGracePeriodSeconds: &grace,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "pause",
Image: "kubernetes/pause",
Image: "k8s.gcr.io/pause:3.1",
},
},
// default - tolerate forever
@ -112,23 +112,29 @@ func createPodForTaintsTest(hasToleration bool, tolerationSeconds int, podName,
// Creates and starts a controller (informer) that watches updates on pods in the given namespace
// matching the given label. It sends the name of each deleted pod on the observedDeletions channel.
func createTestController(cs clientset.Interface, observedDeletions chan struct{}, stopCh chan struct{}, podName, ns string) {
func createTestController(cs clientset.Interface, observedDeletions chan string, stopCh chan struct{}, podLabel, ns string) {
_, controller := cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = fields.SelectorFromSet(fields.Set{"metadata.name": podName}).String()
options.LabelSelector = labels.SelectorFromSet(labels.Set{"group": podLabel}).String()
obj, err := cs.CoreV1().Pods(ns).List(options)
return runtime.Object(obj), err
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.FieldSelector = fields.SelectorFromSet(fields.Set{"metadata.name": podName}).String()
options.LabelSelector = labels.SelectorFromSet(labels.Set{"group": podLabel}).String()
return cs.CoreV1().Pods(ns).Watch(options)
},
},
&v1.Pod{},
0,
cache.ResourceEventHandlerFuncs{
DeleteFunc: func(oldObj interface{}) { observedDeletions <- struct{}{} },
DeleteFunc: func(oldObj interface{}) {
if delPod, ok := oldObj.(*v1.Pod); ok {
observedDeletions <- delPod.Name
} else {
observedDeletions <- ""
}
},
},
)
framework.Logf("Starting informer...")
@ -145,10 +151,10 @@ const (
// - lack of eviction of tolerating pods from a tainted node,
// - delayed eviction of short-tolerating pod from a tainted node,
// - lack of eviction of short-tolerating pod after taint removal.
var _ = SIGDescribe("NoExecuteTaintManager [Serial]", func() {
var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
var cs clientset.Interface
var ns string
f := framework.NewDefaultFramework("taint-control")
f := framework.NewDefaultFramework("taint-single-pod")
BeforeEach(func() {
cs = f.ClientSet
@ -165,12 +171,12 @@ var _ = SIGDescribe("NoExecuteTaintManager [Serial]", func() {
// 3. See if pod will get evicted
It("evicts pods from tainted nodes", func() {
podName := "taint-eviction-1"
pod := createPodForTaintsTest(false, 0, podName, ns)
observedDeletions := make(chan struct{}, 100)
pod := createPodForTaintsTest(false, 0, podName, podName, ns)
observedDeletions := make(chan string, 100)
stopCh := make(chan struct{})
createTestController(cs, observedDeletions, stopCh, podName, ns)
By("Staring pod...")
By("Starting pod...")
nodeName, err := testutils.RunPodAndGetNodeName(cs, pod, 2*time.Minute)
framework.ExpectNoError(err)
framework.Logf("Pod is running on %v. Tainting Node", nodeName)
@ -197,12 +203,12 @@ var _ = SIGDescribe("NoExecuteTaintManager [Serial]", func() {
// 3. See if pod won't get evicted
It("doesn't evict pod with tolerations from tainted nodes", func() {
podName := "taint-eviction-2"
pod := createPodForTaintsTest(true, 0, podName, ns)
observedDeletions := make(chan struct{}, 100)
pod := createPodForTaintsTest(true, 0, podName, podName, ns)
observedDeletions := make(chan string, 100)
stopCh := make(chan struct{})
createTestController(cs, observedDeletions, stopCh, podName, ns)
By("Staring pod...")
By("Starting pod...")
nodeName, err := testutils.RunPodAndGetNodeName(cs, pod, 2*time.Minute)
framework.ExpectNoError(err)
framework.Logf("Pod is running on %v. Tainting Node", nodeName)
@ -230,12 +236,12 @@ var _ = SIGDescribe("NoExecuteTaintManager [Serial]", func() {
// 4. See if pod will get evicted after toleration time runs out
It("eventually evict pod with finite tolerations from tainted nodes", func() {
podName := "taint-eviction-3"
pod := createPodForTaintsTest(true, KubeletPodDeletionDelaySeconds+2*AdditionalWaitPerDeleteSeconds, podName, ns)
observedDeletions := make(chan struct{}, 100)
pod := createPodForTaintsTest(true, KubeletPodDeletionDelaySeconds+2*AdditionalWaitPerDeleteSeconds, podName, podName, ns)
observedDeletions := make(chan string, 100)
stopCh := make(chan struct{})
createTestController(cs, observedDeletions, stopCh, podName, ns)
By("Staring pod...")
By("Starting pod...")
nodeName, err := testutils.RunPodAndGetNodeName(cs, pod, 2*time.Minute)
framework.ExpectNoError(err)
framework.Logf("Pod is running on %v. Tainting Node", nodeName)
@ -274,12 +280,12 @@ var _ = SIGDescribe("NoExecuteTaintManager [Serial]", func() {
// 5. See if Pod won't be evicted.
It("removing taint cancels eviction", func() {
podName := "taint-eviction-4"
pod := createPodForTaintsTest(true, 2*AdditionalWaitPerDeleteSeconds, podName, ns)
observedDeletions := make(chan struct{}, 100)
pod := createPodForTaintsTest(true, 2*AdditionalWaitPerDeleteSeconds, podName, podName, ns)
observedDeletions := make(chan string, 100)
stopCh := make(chan struct{})
createTestController(cs, observedDeletions, stopCh, podName, ns)
By("Staring pod...")
By("Starting pod...")
nodeName, err := testutils.RunPodAndGetNodeName(cs, pod, 2*time.Minute)
framework.ExpectNoError(err)
framework.Logf("Pod is running on %v. Tainting Node", nodeName)
@ -318,3 +324,119 @@ var _ = SIGDescribe("NoExecuteTaintManager [Serial]", func() {
}
})
})
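// Editor's note: an illustrative sketch, not part of this commit. The body of
// getTestTaint is elided above; based on the tolerations these pods declare,
// the taint it returns presumably looks like this hypothetical equivalent:
func exampleTestTaint() v1.Taint {
return v1.Taint{
Key: "kubernetes.io/e2e-evict-taint-key",
Value: "evictTaintVal",
Effect: v1.TaintEffectNoExecute,
}
}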
var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
var cs clientset.Interface
var ns string
f := framework.NewDefaultFramework("taint-multiple-pods")
BeforeEach(func() {
cs = f.ClientSet
ns = f.Namespace.Name
framework.WaitForAllNodesHealthy(cs, time.Minute)
err := framework.CheckTestingNSDeletedExcept(cs, ns)
framework.ExpectNoError(err)
})
// 1. Run two pods; one with toleration, one without toleration
// 2. Taint the nodes running those pods with a no-execute taint
// 3. See if the pod without toleration gets evicted, while the pod with toleration is kept
It("only evicts pods without tolerations from tainted nodes", func() {
podGroup := "taint-eviction-a"
observedDeletions := make(chan string, 100)
stopCh := make(chan struct{})
createTestController(cs, observedDeletions, stopCh, podGroup, ns)
pod1 := createPodForTaintsTest(false, 0, podGroup+"1", podGroup, ns)
pod2 := createPodForTaintsTest(true, 0, podGroup+"2", podGroup, ns)
By("Starting pods...")
nodeName1, err := testutils.RunPodAndGetNodeName(cs, pod1, 2*time.Minute)
framework.ExpectNoError(err)
framework.Logf("Pod1 is running on %v. Tainting Node", nodeName1)
nodeName2, err := testutils.RunPodAndGetNodeName(cs, pod2, 2*time.Minute)
framework.ExpectNoError(err)
framework.Logf("Pod2 is running on %v. Tainting Node", nodeName2)
By("Trying to apply a taint on the Nodes")
testTaint := getTestTaint()
framework.AddOrUpdateTaintOnNode(cs, nodeName1, testTaint)
framework.ExpectNodeHasTaint(cs, nodeName1, &testTaint)
defer framework.RemoveTaintOffNode(cs, nodeName1, testTaint)
if nodeName2 != nodeName1 {
framework.AddOrUpdateTaintOnNode(cs, nodeName2, testTaint)
framework.ExpectNodeHasTaint(cs, nodeName2, &testTaint)
defer framework.RemoveTaintOffNode(cs, nodeName2, testTaint)
}
// Wait a bit
By("Waiting for Pod1 to be deleted")
timeoutChannel := time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+AdditionalWaitPerDeleteSeconds) * time.Second).C
var evicted int
for {
select {
case <-timeoutChannel:
if evicted == 0 {
framework.Failf("Failed to evict Pod1.")
} else if evicted == 2 {
framework.Failf("Pod1 is evicted. But unexpected Pod2 also get evicted.")
}
return
case podName := <-observedDeletions:
evicted++
if podName == podGroup+"1" {
framework.Logf("Noticed Pod %q gets evicted.", podName)
} else if podName == podGroup+"2" {
framework.Failf("Unexepected Pod %q gets evicted.", podName)
return
}
}
}
})
// 1. Run two pods both with toleration; one with tolerationSeconds=5, the other with 25
// 2. Taint the nodes running those pods with a no-execute taint
// 3. See if both pods get evicted in between [5, 25] seconds
It("evicts pods with minTolerationSeconds", func() {
podGroup := "taint-eviction-b"
observedDeletions := make(chan string, 100)
stopCh := make(chan struct{})
createTestController(cs, observedDeletions, stopCh, podGroup, ns)
pod1 := createPodForTaintsTest(true, AdditionalWaitPerDeleteSeconds, podGroup+"1", podGroup, ns)
pod2 := createPodForTaintsTest(true, 5*AdditionalWaitPerDeleteSeconds, podGroup+"2", podGroup, ns)
By("Starting pods...")
nodeName, err := testutils.RunPodAndGetNodeName(cs, pod1, 2*time.Minute)
framework.ExpectNoError(err)
framework.Logf("Pod1 is running on %v. Tainting Node", nodeName)
// ensure pod2 lands on the same node as pod1
pod2.Spec.NodeSelector = map[string]string{"kubernetes.io/hostname": nodeName}
_, err = testutils.RunPodAndGetNodeName(cs, pod2, 2*time.Minute)
framework.ExpectNoError(err)
framework.Logf("Pod2 is running on %v. Tainting Node", nodeName)
By("Trying to apply a taint on the Node")
testTaint := getTestTaint()
framework.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
framework.ExpectNodeHasTaint(cs, nodeName, &testTaint)
defer framework.RemoveTaintOffNode(cs, nodeName, testTaint)
// Wait a bit
By("Waiting for Pod1 and Pod2 to be deleted")
timeoutChannel := time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+3*AdditionalWaitPerDeleteSeconds) * time.Second).C
var evicted int
for evicted != 2 {
select {
case <-timeoutChannel:
framework.Failf("Failed to evict all Pods. %d pod(s) is not evicted.", 2-evicted)
return
case podName := <-observedDeletions:
framework.Logf("Noticed Pod %q gets evicted.", podName)
evicted++
}
}
})
})


@ -26,7 +26,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
@ -110,9 +109,9 @@ func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string)
Expect(err).NotTo(HaveOccurred())
// Now make sure they're spread across zones
zoneNames, err := getZoneNames(f.ClientSet)
zoneNames, err := framework.GetClusterZones(f.ClientSet)
Expect(err).NotTo(HaveOccurred())
Expect(checkZoneSpreading(f.ClientSet, pods, zoneNames)).To(Equal(true))
Expect(checkZoneSpreading(f.ClientSet, pods, zoneNames.List())).To(Equal(true))
}
// Find the name of the zone in which a Node is running
@ -126,25 +125,9 @@ func getZoneNameForNode(node v1.Node) (string, error) {
node.Name, kubeletapis.LabelZoneFailureDomain)
}
// TODO (verult) Merge with framework.GetClusterZones()
// Find the names of all zones in which we have nodes in this cluster.
func getZoneNames(c clientset.Interface) ([]string, error) {
zoneNames := sets.NewString()
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
return nil, err
}
for _, node := range nodes.Items {
zoneName, err := getZoneNameForNode(node)
Expect(err).NotTo(HaveOccurred())
zoneNames.Insert(zoneName)
}
return zoneNames.List(), nil
}
// Return the number of zones in which we have nodes in this cluster.
func getZoneCount(c clientset.Interface) (int, error) {
zoneNames, err := getZoneNames(c)
zoneNames, err := framework.GetClusterZones(c)
if err != nil {
return -1, err
}
@ -239,7 +222,7 @@ func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) {
Expect(err).NotTo(HaveOccurred())
// Now make sure they're spread across zones
zoneNames, err := getZoneNames(f.ClientSet)
zoneNames, err := framework.GetClusterZones(f.ClientSet)
Expect(err).NotTo(HaveOccurred())
Expect(checkZoneSpreading(f.ClientSet, pods, zoneNames)).To(Equal(true))
Expect(checkZoneSpreading(f.ClientSet, pods, zoneNames.List())).To(Equal(true))
}
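// Editor's note: an illustrative sketch, not part of this commit. It captures
// the shape of this refactor: framework.GetClusterZones returns a sets.String,
// so call sites that need a []string convert it with .List().
func exampleZoneNames(c clientset.Interface) ([]string, error) {
zones, err := framework.GetClusterZones(c) // set of zone names for all nodes
if err != nil {
return nil, err
}
return zones.List(), nil // sorted slice form of the set
}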


@ -30,6 +30,7 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/providers/gce"
)
var _ = SIGDescribe("Multi-AZ Cluster Volumes [sig-storage]", func() {
@ -59,7 +60,7 @@ var _ = SIGDescribe("Multi-AZ Cluster Volumes [sig-storage]", func() {
// OnlyAllowNodeZones tests that GetAllCurrentZones returns only zones with Nodes
func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) {
gceCloud, err := framework.GetGCECloud()
gceCloud, err := gce.GetGCECloud()
Expect(err).NotTo(HaveOccurred())
// Get all the zones that the nodes are in
@ -185,9 +186,9 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string)
c := f.ClientSet
ns := f.Namespace.Name
zones, err := getZoneNames(c)
zones, err := framework.GetClusterZones(c)
Expect(err).NotTo(HaveOccurred())
zonelist := zones.List()
By("Creating static PVs across zones")
configs := make([]*staticPVTestConfig, podCount)
for i := range configs {
@ -208,7 +209,7 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string)
}()
for i, config := range configs {
zone := zones[i%len(zones)]
zone := zonelist[i%len(zones)]
config.pvSource, err = framework.CreatePVSource(zone)
Expect(err).NotTo(HaveOccurred())