mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-14 02:43:36 +00:00

vendor update for CSI 0.3.0
vendor/k8s.io/kubernetes/test/integration/scheduler/BUILD (21 changes; generated, vendored)

@@ -23,8 +23,8 @@ go_test(
tags = ["integration"],
deps = [
"//cmd/kube-scheduler/app:go_default_library",
"//cmd/kube-scheduler/app/config:go_default_library",
"//pkg/api/legacyscheme:go_default_library",
"//pkg/api/testapi:go_default_library",
"//pkg/apis/componentconfig:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/client/informers/informers_generated/internalversion:go_default_library",

@@ -36,13 +36,13 @@ go_test(
"//pkg/scheduler/algorithm:go_default_library",
"//pkg/scheduler/algorithmprovider:go_default_library",
"//pkg/scheduler/api:go_default_library",
"//pkg/scheduler/cache:go_default_library",
"//pkg/scheduler/factory:go_default_library",
"//pkg/scheduler/schedulercache:go_default_library",
"//plugin/pkg/admission/podtolerationrestriction:go_default_library",
"//plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/integration/framework:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",

@@ -51,8 +51,11 @@ go_test(
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",

@@ -86,17 +89,29 @@ go_library(
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/api/v1/pod:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/disruption:go_default_library",
"//pkg/features:go_default_library",
"//pkg/scheduler:go_default_library",
"//pkg/scheduler/algorithmprovider:go_default_library",
"//pkg/scheduler/api:go_default_library",
"//pkg/scheduler/factory:go_default_library",
"//test/integration/framework:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/admission:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
vendor/k8s.io/kubernetes/test/integration/scheduler/extender_test.go (63 changes; generated, vendored)

@@ -31,19 +31,10 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/scheduler"
_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
"k8s.io/kubernetes/pkg/scheduler/factory"
e2e "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/integration/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
)
const (

@@ -130,7 +121,7 @@ func (e *Extender) filterUsingNodeCache(args *schedulerapi.ExtenderArgs) (*sched
for _, nodeName := range *args.NodeNames {
fits := true
for _, predicate := range e.predicates {
fit, err := predicate(&args.Pod,
fit, err := predicate(args.Pod,
&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}})
if err != nil {
return &schedulerapi.ExtenderFilterResult{

@@ -169,7 +160,7 @@ func (e *Extender) Filter(args *schedulerapi.ExtenderArgs) (*schedulerapi.Extend
for _, node := range args.Nodes.Items {
fits := true
for _, predicate := range e.predicates {
fit, err := predicate(&args.Pod, &node)
fit, err := predicate(args.Pod, &node)
if err != nil {
return &schedulerapi.ExtenderFilterResult{
Nodes: &v1.NodeList{},

@@ -217,7 +208,7 @@ func (e *Extender) Prioritize(args *schedulerapi.ExtenderArgs) (*schedulerapi.Ho
continue
}
priorityFunc := prioritizer.function
prioritizedList, err := priorityFunc(&args.Pod, nodes)
prioritizedList, err := priorityFunc(args.Pod, nodes)
if err != nil {
return &schedulerapi.HostPriorityList{}, err
}

@@ -288,13 +279,8 @@ func machine_3_Prioritizer(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostP
}
func TestSchedulerExtender(t *testing.T) {
_, s, closeFn := framework.RunAMaster(nil)
defer closeFn()
ns := framework.CreateTestingNamespace("scheduler-extender", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
context := initTestMaster(t, "scheduler-extender", nil)
clientSet := context.clientSet
extender1 := &Extender{
name: "extender1",

@@ -361,39 +347,12 @@ func TestSchedulerExtender(t *testing.T) {
},
},
}
policy.APIVersion = testapi.Groups[v1.GroupName].GroupVersion().String()
policy.APIVersion = "v1"
informerFactory := informers.NewSharedInformerFactory(clientSet, 0)
schedulerConfigFactory := factory.NewConfigFactory(
v1.DefaultSchedulerName,
clientSet,
informerFactory.Core().V1().Nodes(),
informerFactory.Core().V1().Pods(),
informerFactory.Core().V1().PersistentVolumes(),
informerFactory.Core().V1().PersistentVolumeClaims(),
informerFactory.Core().V1().ReplicationControllers(),
informerFactory.Extensions().V1beta1().ReplicaSets(),
informerFactory.Apps().V1beta1().StatefulSets(),
informerFactory.Core().V1().Services(),
informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
informerFactory.Storage().V1().StorageClasses(),
v1.DefaultHardPodAffinitySymmetricWeight,
enableEquivalenceCache,
)
schedulerConfig, err := schedulerConfigFactory.CreateFromConfig(policy)
if err != nil {
t.Fatalf("Couldn't create scheduler config: %v", err)
}
eventBroadcaster := record.NewBroadcaster()
schedulerConfig.Recorder = eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: v1.DefaultSchedulerName})
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(clientSet.CoreV1().RESTClient()).Events("")})
scheduler, _ := scheduler.NewFromConfigurator(&scheduler.FakeConfigurator{Config: schedulerConfig}, nil...)
informerFactory.Start(schedulerConfig.StopEverything)
scheduler.Run()
context = initTestScheduler(t, context, nil, false, &policy)
defer cleanupTest(t, context)
defer close(schedulerConfig.StopEverything)
DoTestPodScheduling(ns, t, clientSet)
DoTestPodScheduling(context.ns, t, clientSet)
}
func DoTestPodScheduling(ns *v1.Namespace, t *testing.T, cs clientset.Interface) {

@@ -430,7 +389,7 @@ func DoTestPodScheduling(ns *v1.Namespace, t *testing.T, cs clientset.Interface)
Containers: []v1.Container{
{
Name: "container",
Image: e2e.GetPauseImageName(cs),
Image: imageutils.GetPauseImageName(),
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
extendedResourceName: *resource.NewQuantity(1, resource.DecimalSI),
vendor/k8s.io/kubernetes/test/integration/scheduler/predicates_test.go (112 changes; generated, vendored)

@@ -24,8 +24,8 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/integration/framework"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
)
// This file tests the scheduler predicates functionality.

@@ -75,7 +75,7 @@ func TestInterPodAffinity(t *testing.T) {
Labels: podLabel2,
},
Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
Affinity: &v1.Affinity{
PodAffinity: &v1.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{

@@ -108,7 +108,7 @@ func TestInterPodAffinity(t *testing.T) {
Labels: podLabel2,
},
Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
Affinity: &v1.Affinity{
PodAffinity: &v1.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{

@@ -140,7 +140,7 @@ func TestInterPodAffinity(t *testing.T) {
Labels: podLabel2,
},
Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
Affinity: &v1.Affinity{
PodAffinity: &v1.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{

@@ -167,7 +167,7 @@ func TestInterPodAffinity(t *testing.T) {
Labels: podLabel,
},
Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
NodeName: nodes[0].Name,
},
},

@@ -183,7 +183,7 @@ func TestInterPodAffinity(t *testing.T) {
Labels: podLabel2,
},
Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
Affinity: &v1.Affinity{
PodAffinity: &v1.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{

@@ -205,7 +205,7 @@ func TestInterPodAffinity(t *testing.T) {
},
},
pods: []*v1.Pod{{Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
NodeName: nodes[0].Name},
ObjectMeta: metav1.ObjectMeta{
Name: "fakename2",

@@ -221,7 +221,7 @@ func TestInterPodAffinity(t *testing.T) {
Labels: podLabel2,
},
Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
Affinity: &v1.Affinity{
PodAffinity: &v1.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{

@@ -244,7 +244,7 @@ func TestInterPodAffinity(t *testing.T) {
},
},
pods: []*v1.Pod{{Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
NodeName: nodes[0].Name},
ObjectMeta: metav1.ObjectMeta{
Name: "fakename2",

@@ -260,7 +260,7 @@ func TestInterPodAffinity(t *testing.T) {
Labels: podLabel,
},
Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
Affinity: &v1.Affinity{
PodAffinity: &v1.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{

@@ -282,7 +282,7 @@ func TestInterPodAffinity(t *testing.T) {
},
},
pods: []*v1.Pod{{Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
NodeName: nodes[0].Name}, ObjectMeta: metav1.ObjectMeta{
Name: "fakename2",
Labels: podLabel}}},

@@ -297,7 +297,7 @@ func TestInterPodAffinity(t *testing.T) {
Labels: podLabel2,
},
Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
Affinity: &v1.Affinity{
PodAffinity: &v1.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{

@@ -336,7 +336,7 @@ func TestInterPodAffinity(t *testing.T) {
},
},
pods: []*v1.Pod{{Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
NodeName: nodes[0].Name}, ObjectMeta: metav1.ObjectMeta{
Name: "fakename2",
Labels: podLabel}}},

@@ -351,7 +351,7 @@ func TestInterPodAffinity(t *testing.T) {
Name: "fakename",
},
Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
Affinity: &v1.Affinity{
PodAffinity: &v1.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{

@@ -390,7 +390,7 @@ func TestInterPodAffinity(t *testing.T) {
},
},
pods: []*v1.Pod{{Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
NodeName: nodes[0].Name}, ObjectMeta: metav1.ObjectMeta{
Name: "fakename2",
Labels: podLabel}}},

@@ -405,7 +405,7 @@ func TestInterPodAffinity(t *testing.T) {
Labels: podLabel2,
},
Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
Affinity: &v1.Affinity{
PodAffinity: &v1.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{

@@ -443,7 +443,7 @@ func TestInterPodAffinity(t *testing.T) {
},
},
pods: []*v1.Pod{{Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
NodeName: nodes[0].Name}, ObjectMeta: metav1.ObjectMeta{
Name: "fakename2",
Labels: podLabel}}},

@@ -458,7 +458,7 @@ func TestInterPodAffinity(t *testing.T) {
Labels: podLabel2,
},
Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
Affinity: &v1.Affinity{
PodAffinity: &v1.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{

@@ -498,7 +498,7 @@ func TestInterPodAffinity(t *testing.T) {
pods: []*v1.Pod{
{
Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
NodeName: nodes[0].Name,
Affinity: &v1.Affinity{
PodAntiAffinity: &v1.PodAntiAffinity{

@@ -535,7 +535,7 @@ func TestInterPodAffinity(t *testing.T) {
Labels: podLabel2,
},
Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
Affinity: &v1.Affinity{
PodAffinity: &v1.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{

@@ -573,7 +573,7 @@ func TestInterPodAffinity(t *testing.T) {
},
},
pods: []*v1.Pod{{Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
NodeName: nodes[0].Name}, ObjectMeta: metav1.ObjectMeta{
Name: "fakename2",
Labels: podLabel}}},

@@ -588,7 +588,7 @@ func TestInterPodAffinity(t *testing.T) {
Labels: podLabel,
},
Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
Affinity: &v1.Affinity{
PodAffinity: &v1.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{

@@ -629,7 +629,7 @@ func TestInterPodAffinity(t *testing.T) {
{
Spec: v1.PodSpec{
NodeName: nodes[0].Name,
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
Affinity: &v1.Affinity{
PodAntiAffinity: &v1.PodAntiAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{

@@ -665,7 +665,7 @@ func TestInterPodAffinity(t *testing.T) {
Labels: podLabel,
},
Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
Affinity: &v1.Affinity{
PodAffinity: &v1.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{

@@ -687,7 +687,7 @@ func TestInterPodAffinity(t *testing.T) {
},
},
pods: []*v1.Pod{{Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{
Name: "fakename2",
Labels: podLabel}}},

@@ -701,12 +701,12 @@ func TestInterPodAffinity(t *testing.T) {
Name: "fakename",
Labels: podLabel,
},
Spec: v1.PodSpec{Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}}},
Spec: v1.PodSpec{Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}}},
},
pods: []*v1.Pod{
{
Spec: v1.PodSpec{NodeName: nodes[0].Name,
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
Affinity: &v1.Affinity{
PodAntiAffinity: &v1.PodAntiAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{

@@ -741,12 +741,12 @@ func TestInterPodAffinity(t *testing.T) {
Name: "fake-name",
Labels: podLabel,
},
Spec: v1.PodSpec{Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}}},
Spec: v1.PodSpec{Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}}},
},
pods: []*v1.Pod{
{
Spec: v1.PodSpec{NodeName: nodes[0].Name,
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
Affinity: &v1.Affinity{
PodAntiAffinity: &v1.PodAntiAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{

@@ -779,7 +779,7 @@ func TestInterPodAffinity(t *testing.T) {
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "fake-name2"},
Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
NodeSelector: map[string]string{"region": "r1"},
Affinity: &v1.Affinity{
PodAntiAffinity: &v1.PodAntiAffinity{

@@ -803,7 +803,7 @@ func TestInterPodAffinity(t *testing.T) {
},
pods: []*v1.Pod{
{Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
NodeName: nodes[0].Name}, ObjectMeta: metav1.ObjectMeta{Name: "fakename", Labels: map[string]string{"foo": "abc"}}},
},
fits: false,

@@ -870,3 +870,53 @@ func TestInterPodAffinity(t *testing.T) {
}
}
}
// TestNodePIDPressure verifies that scheduler's CheckNodePIDPressurePredicate predicate
// functions works correctly.
func TestNodePIDPressure(t *testing.T) {
context := initTest(t, "node-pid-pressure")
defer cleanupTest(t, context)
// Add a node.
node, err := createNode(context.clientSet, "testnode", nil)
if err != nil {
t.Fatalf("Cannot create node: %v", err)
}
cs := context.clientSet
// Adds PID pressure condition to the node.
node.Status.Conditions = []v1.NodeCondition{
{
Type: v1.NodePIDPressure,
Status: v1.ConditionTrue,
},
}
// Update node condition.
err = updateNodeStatus(context.clientSet, node)
if err != nil {
t.Fatalf("Cannot update node: %v", err)
}
// Creats test pod.
testPod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "pidpressure-fake-name"},
Spec: v1.PodSpec{
Containers: []v1.Container{
{Name: "container", Image: imageutils.GetPauseImageName()},
},
},
}
testPod, err = cs.CoreV1().Pods(context.ns.Name).Create(testPod)
if err != nil {
t.Fatalf("Test Failed: error: %v, while creating pod", err)
}
err = waitForPodUnschedulable(cs, testPod)
if err != nil {
t.Errorf("Test Failed: error, %v, while waiting for scheduled", err)
}
cleanupPods(cs, t, []*v1.Pod{testPod})
}
vendor/k8s.io/kubernetes/test/integration/scheduler/preemption_test.go (170 changes; generated, vendored)

@@ -27,7 +27,7 @@ import (
policy "k8s.io/api/policy/v1beta1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"

@@ -41,8 +41,8 @@ import (
var lowPriority, mediumPriority, highPriority = int32(100), int32(200), int32(300)
func waitForNominatedNodeName(cs clientset.Interface, pod *v1.Pod) error {
if err := wait.Poll(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
func waitForNominatedNodeNameWithTimeout(cs clientset.Interface, pod *v1.Pod, timeout time.Duration) error {
if err := wait.Poll(100*time.Millisecond, timeout, func() (bool, error) {
pod, err := cs.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
if err != nil {
return false, err

@@ -57,6 +57,10 @@ func waitForNominatedNodeName(cs clientset.Interface, pod *v1.Pod) error {
return nil
}
func waitForNominatedNodeName(cs clientset.Interface, pod *v1.Pod) error {
return waitForNominatedNodeNameWithTimeout(cs, pod, wait.ForeverTestTimeout)
}
// TestPreemption tests a few preemption scenarios.
func TestPreemption(t *testing.T) {
// Enable PodPriority feature gate.

@@ -285,6 +289,88 @@ func TestPreemption(t *testing.T) {
}
}
// TestDisablePreemption tests disable pod preemption of scheduler works as expected.
func TestDisablePreemption(t *testing.T) {
// Enable PodPriority feature gate.
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority))
// Initialize scheduler, and disable preemption.
context := initTestDisablePreemption(t, "disable-preemption")
defer cleanupTest(t, context)
cs := context.clientSet
tests := []struct {
description string
existingPods []*v1.Pod
pod *v1.Pod
}{
{
description: "pod preemption will not happen",
existingPods: []*v1.Pod{
initPausePod(context.clientSet, &pausePodConfig{
Name: "victim-pod",
Namespace: context.ns.Name,
Priority: &lowPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(400, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(200, resource.BinarySI)},
},
}),
},
pod: initPausePod(cs, &pausePodConfig{
Name: "preemptor-pod",
Namespace: context.ns.Name,
Priority: &highPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(300, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(200, resource.BinarySI)},
},
}),
},
}
// Create a node with some resources and a label.
nodeRes := &v1.ResourceList{
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(500, resource.BinarySI),
}
_, err := createNode(context.clientSet, "node1", nodeRes)
if err != nil {
t.Fatalf("Error creating nodes: %v", err)
}
for _, test := range tests {
pods := make([]*v1.Pod, len(test.existingPods))
// Create and run existingPods.
for i, p := range test.existingPods {
pods[i], err = runPausePod(cs, p)
if err != nil {
t.Fatalf("Test [%v]: Error running pause pod: %v", test.description, err)
}
}
// Create the "pod".
preemptor, err := createPausePod(cs, test.pod)
if err != nil {
t.Errorf("Error while creating high priority pod: %v", err)
}
// Ensure preemptor should keep unschedulable.
if err := waitForPodUnschedulable(cs, preemptor); err != nil {
t.Errorf("Test [%v]: Preemptor %v should not become scheduled",
test.description, preemptor.Name)
}
// Ensure preemptor should not be nominated.
if err := waitForNominatedNodeNameWithTimeout(cs, preemptor, 5*time.Second); err == nil {
t.Errorf("Test [%v]: Preemptor %v should not be nominated",
test.description, preemptor.Name)
}
// Cleanup
pods = append(pods, preemptor)
cleanupPods(cs, t, pods)
}
}
func mkPriorityPodWithGrace(tc *TestContext, name string, priority int32, grace int64) *v1.Pod {
defaultPodRes := &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),

@@ -419,7 +505,11 @@ func TestNominatedNodeCleanUp(t *testing.T) {
// Initialize scheduler.
context := initTest(t, "preemption")
defer cleanupTest(t, context)
cs := context.clientSet
defer cleanupPodsInNamespace(cs, t, context.ns.Name)
// Create a node with some resources and a label.
nodeRes := &v1.ResourceList{
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),

@@ -496,7 +586,7 @@ func TestNominatedNodeCleanUp(t *testing.T) {
}
}
func mkMinAvailablePDB(name, namespace string, minAvailable int, matchLabels map[string]string) *policy.PodDisruptionBudget {
func mkMinAvailablePDB(name, namespace string, uid types.UID, minAvailable int, matchLabels map[string]string) *policy.PodDisruptionBudget {
intMinAvailable := intstr.FromInt(minAvailable)
return &policy.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{

@@ -510,6 +600,18 @@ func mkMinAvailablePDB(name, namespace string, minAvailable int, matchLabels map
}
}
func addPodConditionReady(pod *v1.Pod) {
pod.Status = v1.PodStatus{
Phase: v1.PodRunning,
Conditions: []v1.PodCondition{
{
Type: v1.PodReady,
Status: v1.ConditionTrue,
},
},
}
}
// TestPDBInPreemption tests PodDisruptionBudget support in preemption.
func TestPDBInPreemption(t *testing.T) {
// Enable PodPriority feature gate.

@@ -519,6 +621,8 @@ func TestPDBInPreemption(t *testing.T) {
defer cleanupTest(t, context)
cs := context.clientSet
initDisruptionController(context)
defaultPodRes := &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(100, resource.BinarySI)},

@@ -538,6 +642,7 @@ func TestPDBInPreemption(t *testing.T) {
description string
nodes []*nodeConfig
pdbs []*policy.PodDisruptionBudget
pdbPodNum []int32
existingPods []*v1.Pod
pod *v1.Pod
preemptedPodIndexes map[int]struct{}

@@ -546,8 +651,9 @@ func TestPDBInPreemption(t *testing.T) {
description: "A non-PDB violating pod is preempted despite its higher priority",
nodes: []*nodeConfig{{name: "node-1", res: defaultNodeRes}},
pdbs: []*policy.PodDisruptionBudget{
mkMinAvailablePDB("pdb-1", context.ns.Name, 2, map[string]string{"foo": "bar"}),
mkMinAvailablePDB("pdb-1", context.ns.Name, types.UID("pdb-1-uid"), 2, map[string]string{"foo": "bar"}),
},
pdbPodNum: []int32{2},
existingPods: []*v1.Pod{
initPausePod(context.clientSet, &pausePodConfig{
Name: "low-pod1",

@@ -588,8 +694,9 @@ func TestPDBInPreemption(t *testing.T) {
{name: "node-2", res: defaultNodeRes},
},
pdbs: []*policy.PodDisruptionBudget{
mkMinAvailablePDB("pdb-1", context.ns.Name, 2, map[string]string{"foo": "bar"}),
mkMinAvailablePDB("pdb-1", context.ns.Name, types.UID("pdb-1-uid"), 2, map[string]string{"foo": "bar"}),
},
pdbPodNum: []int32{1},
existingPods: []*v1.Pod{
initPausePod(context.clientSet, &pausePodConfig{
Name: "low-pod1",

@@ -626,9 +733,10 @@ func TestPDBInPreemption(t *testing.T) {
{name: "node-3", res: defaultNodeRes},
},
pdbs: []*policy.PodDisruptionBudget{
mkMinAvailablePDB("pdb-1", context.ns.Name, 2, map[string]string{"foo1": "bar"}),
mkMinAvailablePDB("pdb-2", context.ns.Name, 2, map[string]string{"foo2": "bar"}),
mkMinAvailablePDB("pdb-1", context.ns.Name, types.UID("pdb-1-uid"), 2, map[string]string{"foo1": "bar"}),
mkMinAvailablePDB("pdb-2", context.ns.Name, types.UID("pdb-2-uid"), 2, map[string]string{"foo2": "bar"}),
},
pdbPodNum: []int32{1, 5},
existingPods: []*v1.Pod{
initPausePod(context.clientSet, &pausePodConfig{
Name: "low-pod1",

@@ -692,38 +800,22 @@ func TestPDBInPreemption(t *testing.T) {
Priority: &highPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(200, resource.BinarySI)},
v1.ResourceMemory: *resource.NewQuantity(400, resource.BinarySI)},
},
}),
preemptedPodIndexes: map[int]struct{}{0: {}, 1: {}},
// The third node is chosen because PDB is not violated for node 3 and the victims have lower priority than node-2.
preemptedPodIndexes: map[int]struct{}{4: {}, 5: {}, 6: {}},
},
}
for _, test := range tests {
t.Logf("================ Running test: %v\n", test.description)
for _, nodeConf := range test.nodes {
_, err := createNode(cs, nodeConf.name, nodeConf.res)
if err != nil {
t.Fatalf("Error creating node %v: %v", nodeConf.name, err)
}
}
// Create PDBs.
for _, pdb := range test.pdbs {
_, err := context.clientSet.PolicyV1beta1().PodDisruptionBudgets(context.ns.Name).Create(pdb)
if err != nil {
t.Fatalf("Failed to create PDB: %v", err)
}
}
// Wait for PDBs to show up in the scheduler's cache.
if err := wait.Poll(time.Second, 15*time.Second, func() (bool, error) {
cachedPDBs, err := context.scheduler.Config().SchedulerCache.ListPDBs(labels.Everything())
if err != nil {
t.Errorf("Error while polling for PDB: %v", err)
return false, err
}
return len(cachedPDBs) == len(test.pdbs), err
}); err != nil {
t.Fatalf("Not all PDBs were added to the cache: %v", err)
}
pods := make([]*v1.Pod, len(test.existingPods))
var err error

@@ -732,7 +824,29 @@ func TestPDBInPreemption(t *testing.T) {
if pods[i], err = runPausePod(cs, p); err != nil {
t.Fatalf("Test [%v]: Error running pause pod: %v", test.description, err)
}
// Add pod condition ready so that PDB is updated.
addPodConditionReady(p)
if _, err := context.clientSet.CoreV1().Pods(context.ns.Name).UpdateStatus(p); err != nil {
t.Fatal(err)
}
}
// Wait for Pods to be stable in scheduler cache.
if err := waitCachedPodsStable(context, test.existingPods); err != nil {
t.Fatalf("Not all pods are stable in the cache: %v", err)
}
// Create PDBs.
for _, pdb := range test.pdbs {
_, err := context.clientSet.PolicyV1beta1().PodDisruptionBudgets(context.ns.Name).Create(pdb)
if err != nil {
t.Fatalf("Failed to create PDB: %v", err)
}
}
// Wait for PDBs to show up in the scheduler's cache and become stable.
if err := waitCachedPDBsStable(context, test.pdbs, test.pdbPodNum); err != nil {
t.Fatalf("Not all pdbs are stable in the cache: %v", err)
}
// Create the "pod".
preemptor, err := createPausePod(cs, test.pod)
if err != nil {
vendor/k8s.io/kubernetes/test/integration/scheduler/scheduler_test.go (180 changes; generated, vendored)

@@ -29,6 +29,8 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/diff"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/sets"

@@ -41,20 +43,18 @@ import (
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
schedulerapp "k8s.io/kubernetes/cmd/kube-scheduler/app"
schedulerappconfig "k8s.io/kubernetes/cmd/kube-scheduler/app/config"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/apis/componentconfig"
"k8s.io/kubernetes/pkg/scheduler"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
"k8s.io/kubernetes/pkg/scheduler/factory"
"k8s.io/kubernetes/pkg/scheduler/schedulercache"
"k8s.io/kubernetes/test/integration/framework"
)
const enableEquivalenceCache = true
type nodeMutationFunc func(t *testing.T, n *v1.Node, nodeLister corelisters.NodeLister, c clientset.Interface)
type nodeStateManager struct {

@@ -88,7 +88,7 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
ns := framework.CreateTestingNamespace("configmap", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
defer clientSet.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})
informerFactory := informers.NewSharedInformerFactory(clientSet, 0)

@@ -135,6 +135,7 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
"CheckNodeCondition", // mandatory predicate
"CheckNodeDiskPressure",
"CheckNodeMemoryPressure",
"CheckNodePIDPressure",
"CheckVolumeBinding",
"GeneralPredicates",
"MatchInterPodAffinity",

@@ -175,32 +176,34 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
Data: map[string]string{componentconfig.SchedulerPolicyConfigMapKey: test.policy},
}
policyConfigMap.APIVersion = testapi.Groups[v1.GroupName].GroupVersion().String()
policyConfigMap.APIVersion = "v1"
clientSet.CoreV1().ConfigMaps(metav1.NamespaceSystem).Create(&policyConfigMap)
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(clientSet.CoreV1().RESTClient()).Events("")})
eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientSet.CoreV1().Events("")})
ss := &schedulerapp.SchedulerServer{
SchedulerName: v1.DefaultSchedulerName,
AlgorithmSource: componentconfig.SchedulerAlgorithmSource{
Policy: &componentconfig.SchedulerPolicySource{
ConfigMap: &componentconfig.SchedulerPolicyConfigMapSource{
Namespace: policyConfigMap.Namespace,
Name: policyConfigMap.Name,
ss := &schedulerappconfig.Config{
ComponentConfig: componentconfig.KubeSchedulerConfiguration{
HardPodAffinitySymmetricWeight: v1.DefaultHardPodAffinitySymmetricWeight,
SchedulerName: v1.DefaultSchedulerName,
AlgorithmSource: componentconfig.SchedulerAlgorithmSource{
Policy: &componentconfig.SchedulerPolicySource{
ConfigMap: &componentconfig.SchedulerPolicyConfigMapSource{
Namespace: policyConfigMap.Namespace,
Name: policyConfigMap.Name,
},
},
},
},
HardPodAffinitySymmetricWeight: v1.DefaultHardPodAffinitySymmetricWeight,
Client: clientSet,
InformerFactory: informerFactory,
PodInformer: factory.NewPodInformer(clientSet, 0, v1.DefaultSchedulerName),
PodInformer: factory.NewPodInformer(clientSet, 0),
EventClient: clientSet.CoreV1(),
Recorder: eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: v1.DefaultSchedulerName}),
Broadcaster: eventBroadcaster,
}
config, err := ss.SchedulerConfig()
config, err := schedulerapp.NewSchedulerConfig(ss.Complete())
if err != nil {
t.Fatalf("couldn't make scheduler config: %v", err)
}

@@ -232,34 +235,36 @@ func TestSchedulerCreationFromNonExistentConfigMap(t *testing.T) {
ns := framework.CreateTestingNamespace("configmap", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
defer clientSet.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})
informerFactory := informers.NewSharedInformerFactory(clientSet, 0)
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(clientSet.CoreV1().RESTClient()).Events("")})
eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientSet.CoreV1().Events("")})
ss := &schedulerapp.SchedulerServer{
SchedulerName: v1.DefaultSchedulerName,
AlgorithmSource: componentconfig.SchedulerAlgorithmSource{
Policy: &componentconfig.SchedulerPolicySource{
ConfigMap: &componentconfig.SchedulerPolicyConfigMapSource{
Namespace: "non-existent-config",
Name: "non-existent-config",
ss := &schedulerappconfig.Config{
ComponentConfig: componentconfig.KubeSchedulerConfiguration{
SchedulerName: v1.DefaultSchedulerName,
AlgorithmSource: componentconfig.SchedulerAlgorithmSource{
Policy: &componentconfig.SchedulerPolicySource{
ConfigMap: &componentconfig.SchedulerPolicyConfigMapSource{
Namespace: "non-existent-config",
Name: "non-existent-config",
},
},
},
HardPodAffinitySymmetricWeight: v1.DefaultHardPodAffinitySymmetricWeight,
},
HardPodAffinitySymmetricWeight: v1.DefaultHardPodAffinitySymmetricWeight,
Client: clientSet,
InformerFactory: informerFactory,
PodInformer: factory.NewPodInformer(clientSet, 0, v1.DefaultSchedulerName),
PodInformer: factory.NewPodInformer(clientSet, 0),
EventClient: clientSet.CoreV1(),
Recorder: eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: v1.DefaultSchedulerName}),
Broadcaster: eventBroadcaster,
}
_, err := ss.SchedulerConfig()
_, err := schedulerapp.NewSchedulerConfig(ss.Complete())
if err == nil {
t.Fatalf("Creation of scheduler didn't fail while the policy ConfigMap didn't exist.")
}

@@ -513,33 +518,18 @@ func TestMultiScheduler(t *testing.T) {
}
// 5. create and start a scheduler with name "foo-scheduler"
clientSet2 := clientset.NewForConfigOrDie(&restclient.Config{Host: context.httpServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
clientSet2 := clientset.NewForConfigOrDie(&restclient.Config{Host: context.httpServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
informerFactory2 := informers.NewSharedInformerFactory(context.clientSet, 0)
podInformer2 := factory.NewPodInformer(context.clientSet, 0, fooScheduler)
podInformer2 := factory.NewPodInformer(context.clientSet, 0)
schedulerConfigFactory2 := factory.NewConfigFactory(
fooScheduler,
clientSet2,
informerFactory2.Core().V1().Nodes(),
podInformer2,
informerFactory2.Core().V1().PersistentVolumes(),
informerFactory2.Core().V1().PersistentVolumeClaims(),
informerFactory2.Core().V1().ReplicationControllers(),
informerFactory2.Extensions().V1beta1().ReplicaSets(),
informerFactory2.Apps().V1beta1().StatefulSets(),
informerFactory2.Core().V1().Services(),
informerFactory2.Policy().V1beta1().PodDisruptionBudgets(),
informerFactory2.Storage().V1().StorageClasses(),
v1.DefaultHardPodAffinitySymmetricWeight,
enableEquivalenceCache,
)
schedulerConfigFactory2 := createConfiguratorWithPodInformer(fooScheduler, clientSet2, podInformer2, informerFactory2)
schedulerConfig2, err := schedulerConfigFactory2.Create()
if err != nil {
t.Errorf("Couldn't create scheduler config: %v", err)
}
eventBroadcaster2 := record.NewBroadcaster()
schedulerConfig2.Recorder = eventBroadcaster2.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: fooScheduler})
eventBroadcaster2.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(clientSet2.CoreV1().RESTClient()).Events("")})
eventBroadcaster2.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientSet2.CoreV1().Events("")})
go podInformer2.Informer().Run(schedulerConfig2.StopEverything)
informerFactory2.Start(schedulerConfig2.StopEverything)

@@ -686,6 +676,7 @@ func TestPDBCache(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Namespace: context.ns.Name,
Name: "test-pdb",
UID: types.UID("test-pdb-uid"),
Labels: map[string]string{"tkey1": "tval1", "tkey2": "tval2"},
},
Spec: policy.PodDisruptionBudgetSpec{

@@ -762,3 +753,96 @@ func TestPDBCache(t *testing.T) {
t.Errorf("No PDB was deleted from the cache: %v", err)
}
}
// TestSchedulerInformers tests that scheduler receives informer events and updates its cache when
// pods are scheduled by other schedulers.
func TestSchedulerInformers(t *testing.T) {
// Initialize scheduler.
context := initTest(t, "scheduler-informer")
defer cleanupTest(t, context)
cs := context.clientSet
defaultPodRes := &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(200, resource.BinarySI)},
}
defaultNodeRes := &v1.ResourceList{
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(500, resource.BinarySI),
}
type nodeConfig struct {
name string
res *v1.ResourceList
}
tests := []struct {
description string
nodes []*nodeConfig
existingPods []*v1.Pod
pod *v1.Pod
preemptedPodIndexes map[int]struct{}
}{
{
description: "Pod cannot be scheduled when node is occupied by pods scheduled by other schedulers",
nodes: []*nodeConfig{{name: "node-1", res: defaultNodeRes}},
existingPods: []*v1.Pod{
initPausePod(context.clientSet, &pausePodConfig{
Name: "pod1",
Namespace: context.ns.Name,
Resources: defaultPodRes,
Labels: map[string]string{"foo": "bar"},
NodeName: "node-1",
SchedulerName: "foo-scheduler",
}),
initPausePod(context.clientSet, &pausePodConfig{
Name: "pod2",
Namespace: context.ns.Name,
Resources: defaultPodRes,
Labels: map[string]string{"foo": "bar"},
NodeName: "node-1",
SchedulerName: "bar-scheduler",
}),
},
pod: initPausePod(cs, &pausePodConfig{
Name: "unschedulable-pod",
Namespace: context.ns.Name,
Resources: defaultPodRes,
}),
preemptedPodIndexes: map[int]struct{}{2: {}},
},
}
for _, test := range tests {
for _, nodeConf := range test.nodes {
_, err := createNode(cs, nodeConf.name, nodeConf.res)
if err != nil {
t.Fatalf("Error creating node %v: %v", nodeConf.name, err)
}
}
pods := make([]*v1.Pod, len(test.existingPods))
var err error
// Create and run existingPods.
for i, p := range test.existingPods {
if pods[i], err = runPausePod(cs, p); err != nil {
t.Fatalf("Test [%v]: Error running pause pod: %v", test.description, err)
}
}
// Create the new "pod".
unschedulable, err := createPausePod(cs, test.pod)
if err != nil {
t.Errorf("Error while creating new pod: %v", err)
}
if err := waitForPodUnschedulable(cs, unschedulable); err != nil {
t.Errorf("Pod %v got scheduled: %v", unschedulable.Name, err)
}
// Cleanup
pods = append(pods, unschedulable)
cleanupPods(cs, t, pods)
cs.PolicyV1beta1().PodDisruptionBudgets(context.ns.Name).DeleteCollection(nil, metav1.ListOptions{})
cs.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})
}
}
vendor/k8s.io/kubernetes/test/integration/scheduler/taint_test.go (142 changes; generated, vendored)

@@ -19,8 +19,6 @@ package scheduler
// This file tests the Taint feature.
import (
"net/http"
"net/http/httptest"
"reflect"
"testing"
"time"

@@ -28,24 +26,18 @@ import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
internalinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
"k8s.io/kubernetes/pkg/controller/nodelifecycle"
kubeadmission "k8s.io/kubernetes/pkg/kubeapiserver/admission"
"k8s.io/kubernetes/pkg/scheduler"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
"k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
"k8s.io/kubernetes/pkg/scheduler/factory"
"k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction"
pluginapi "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction"
"k8s.io/kubernetes/test/integration/framework"
)
// TestTaintNodeByCondition verifies:

@@ -53,37 +45,41 @@ import (
// 2. NodeController taints nodes by node condition
// 3. Scheduler allows pod to tolerate node condition taints, e.g. network unavailable
func TestTaintNodeByCondition(t *testing.T) {
h := &framework.MasterHolder{Initialized: make(chan struct{})}
s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
<-h.Initialized
h.M.GenericAPIServer.Handler.ServeHTTP(w, req)
}))
enabled := utilfeature.DefaultFeatureGate.Enabled("TaintNodesByCondition")
defer func() {
if !enabled {
utilfeature.DefaultFeatureGate.Set("TaintNodesByCondition=False")
}
}()
// Enable TaintNodeByCondition
utilfeature.DefaultFeatureGate.Set("TaintNodesByCondition=True")
// Build clientset and informers for controllers.
internalClientset := internalclientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
internalInformers := internalinformers.NewSharedInformerFactory(internalClientset, time.Second)
clientset := clientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
informers := informers.NewSharedInformerFactory(clientset, time.Second)
// Build PodToleration Admission.
admission := podtolerationrestriction.NewPodTolerationsPlugin(&pluginapi.Configuration{})
context := initTestMaster(t, "default", admission)
// Build clientset and informers for controllers.
internalClientset := internalclientset.NewForConfigOrDie(&restclient.Config{
QPS: -1,
Host: context.httpServer.URL,
ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
internalInformers := internalinformers.NewSharedInformerFactory(internalClientset, time.Second)
kubeadmission.WantsInternalKubeClientSet(admission).SetInternalKubeClientSet(internalClientset)
kubeadmission.WantsInternalKubeInformerFactory(admission).SetInternalKubeInformerFactory(internalInformers)
// Start master with admission.
masterConfig := framework.NewIntegrationTestMasterConfig()
masterConfig.GenericConfig.AdmissionControl = admission
_, _, closeFn := framework.RunAMasterUsingServer(masterConfig, s, h)
defer closeFn()
nsName := "default"
controllerCh := make(chan struct{})
defer close(controllerCh)
// Apply feature gates to enable TaintNodesByCondition
algorithmprovider.ApplyFeatureGates()
context = initTestScheduler(t, context, controllerCh, false, nil)
clientset := context.clientSet
informers := context.informerFactory
nsName := context.ns.Name
// Start NodeLifecycleController for taint.
nc, err := nodelifecycle.NewNodeLifecycleController(
informers.Core().V1().Pods(),

@@ -109,42 +105,8 @@ func TestTaintNodeByCondition(t *testing.T) {
}
go nc.Run(controllerCh)
// Apply feature gates to enable TaintNodesByCondition
algorithmprovider.ApplyFeatureGates()
// Start scheduler
configurator := factory.NewConfigFactory(
v1.DefaultSchedulerName,
clientset,
informers.Core().V1().Nodes(),
informers.Core().V1().Pods(),
informers.Core().V1().PersistentVolumes(),
informers.Core().V1().PersistentVolumeClaims(),
informers.Core().V1().ReplicationControllers(),
informers.Extensions().V1beta1().ReplicaSets(),
informers.Apps().V1beta1().StatefulSets(),
informers.Core().V1().Services(),
informers.Policy().V1beta1().PodDisruptionBudgets(),
informers.Storage().V1().StorageClasses(),
v1.DefaultHardPodAffinitySymmetricWeight,
true, // Enable EqualCache by default.
)
sched, err := scheduler.NewFromConfigurator(configurator, func(cfg *scheduler.Config) {
cfg.StopEverything = controllerCh
cfg.Recorder = &record.FakeRecorder{}
})
if err != nil {
t.Errorf("Failed to create scheduler: %v.", err)
return
}
go sched.Run()
// Waiting for all controller sync.
informers.Start(controllerCh)
internalInformers.Start(controllerCh)
informers.WaitForCacheSync(controllerCh)
internalInformers.WaitForCacheSync(controllerCh)
// -------------------------------------------

@@ -244,6 +206,9 @@ func TestTaintNodeByCondition(t *testing.T) {
nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
UpdateFunc: func(old, cur interface{}) {
curNode := cur.(*v1.Node)
if curNode.Name != "node-1" {
return
}
for _, taint := range curNode.Spec.Taints {
if taint.Key == algorithm.TaintNodeNetworkUnavailable &&
taint.Effect == v1.TaintEffectNoSchedule {

@@ -294,4 +259,55 @@ func TestTaintNodeByCondition(t *testing.T) {
t.Errorf("Case 4: Failed to schedule network daemon pod in 60s.")
}
}
// Case 5: Taint node by unschedulable condition
unschedulableNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node-2",
},
Spec: v1.NodeSpec{
Unschedulable: true,
},
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4000m"),
v1.ResourceMemory: resource.MustParse("16Gi"),
v1.ResourcePods: resource.MustParse("110"),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4000m"),
v1.ResourceMemory: resource.MustParse("16Gi"),
v1.ResourcePods: resource.MustParse("110"),
},
},
}
nodeInformerCh2 := make(chan bool)
nodeInformer2 := informers.Core().V1().Nodes().Informer()
nodeInformer2.AddEventHandler(cache.ResourceEventHandlerFuncs{
UpdateFunc: func(old, cur interface{}) {
curNode := cur.(*v1.Node)
if curNode.Name != "node-2" {
return
}
for _, taint := range curNode.Spec.Taints {
if taint.Key == algorithm.TaintNodeUnschedulable &&
taint.Effect == v1.TaintEffectNoSchedule {
nodeInformerCh2 <- true
break
}
}
},
})
if _, err := clientset.CoreV1().Nodes().Create(unschedulableNode); err != nil {
t.Errorf("Case 5: Failed to create node: %v", err)
} else {
select {
case <-time.After(60 * time.Second):
|
||||
t.Errorf("Case 5: Failed to taint node after 60s.")
|
||||
case <-nodeInformerCh2:
|
||||
}
|
||||
}
|
||||
}
|
||||
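Both taint checks in the test above follow the same pattern: register an update handler on the node informer, signal a channel when the expected taint shows up, and race that channel against a timeout. Below is a minimal sketch of that pattern factored into a helper; waitForNodeTaint and its parameters are illustrative names, not part of the vendored file.

package scheduler

import (
	"fmt"
	"time"

	"k8s.io/api/core/v1"
	coreinformers "k8s.io/client-go/informers/core/v1"
	"k8s.io/client-go/tools/cache"
)

// waitForNodeTaint is a hypothetical helper: it signals once the named node
// carries the given taint key with effect NoSchedule, or errors after timeout.
func waitForNodeTaint(nodes coreinformers.NodeInformer, nodeName, taintKey string, timeout time.Duration) error {
	seen := make(chan struct{}, 1)
	nodes.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		UpdateFunc: func(old, cur interface{}) {
			node, ok := cur.(*v1.Node)
			if !ok || node.Name != nodeName {
				return
			}
			for _, taint := range node.Spec.Taints {
				if taint.Key == taintKey && taint.Effect == v1.TaintEffectNoSchedule {
					// Non-blocking send so repeated updates do not hang the handler.
					select {
					case seen <- struct{}{}:
					default:
					}
					return
				}
			}
		},
	})
	select {
	case <-seen:
		return nil
	case <-time.After(timeout):
		return fmt.Errorf("node %s did not get taint %s within %v", nodeName, taintKey, timeout)
	}
}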
321
vendor/k8s.io/kubernetes/test/integration/scheduler/util.go
generated
vendored
@@ -18,16 +18,25 @@ package scheduler

import (
"fmt"
"net/http"
"net/http/httptest"
"testing"
"time"

"k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/admission"
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
"k8s.io/client-go/informers"
coreinformers "k8s.io/client-go/informers/core/v1"
clientset "k8s.io/client-go/kubernetes"
clientv1core "k8s.io/client-go/kubernetes/typed/core/v1"
corelisters "k8s.io/client-go/listers/core/v1"
@@ -35,12 +44,15 @@ import (
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/api/legacyscheme"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/disruption"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler"
_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
"k8s.io/kubernetes/pkg/scheduler/factory"
"k8s.io/kubernetes/test/integration/framework"

"net/http/httptest"
imageutils "k8s.io/kubernetes/test/utils/image"
)

type TestContext struct {
@@ -54,50 +66,194 @@ type TestContext struct {
scheduler *scheduler.Scheduler
}

// initTest initializes a test environment and creates a scheduler with default
// configuration.
func initTest(t *testing.T, nsPrefix string) *TestContext {
var context TestContext
masterConfig := framework.NewIntegrationTestMasterConfig()
_, context.httpServer, context.closeFn = framework.RunAMaster(masterConfig)

context.ns = framework.CreateTestingNamespace(nsPrefix+string(uuid.NewUUID()), context.httpServer, t)

context.clientSet = clientset.NewForConfigOrDie(&restclient.Config{Host: context.httpServer.URL})
context.informerFactory = informers.NewSharedInformerFactory(context.clientSet, 0)
podInformer := factory.NewPodInformer(context.clientSet, 12*time.Hour, v1.DefaultSchedulerName)
context.schedulerConfigFactory = factory.NewConfigFactory(
v1.DefaultSchedulerName,
context.clientSet,
context.informerFactory.Core().V1().Nodes(),
// createConfiguratorWithPodInformer creates a configurator for scheduler.
func createConfiguratorWithPodInformer(
schedulerName string,
clientSet clientset.Interface,
podInformer coreinformers.PodInformer,
informerFactory informers.SharedInformerFactory,
) scheduler.Configurator {
return factory.NewConfigFactory(
schedulerName,
clientSet,
informerFactory.Core().V1().Nodes(),
podInformer,
context.informerFactory.Core().V1().PersistentVolumes(),
context.informerFactory.Core().V1().PersistentVolumeClaims(),
context.informerFactory.Core().V1().ReplicationControllers(),
context.informerFactory.Extensions().V1beta1().ReplicaSets(),
context.informerFactory.Apps().V1beta1().StatefulSets(),
context.informerFactory.Core().V1().Services(),
context.informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
context.informerFactory.Storage().V1().StorageClasses(),
informerFactory.Core().V1().PersistentVolumes(),
informerFactory.Core().V1().PersistentVolumeClaims(),
informerFactory.Core().V1().ReplicationControllers(),
informerFactory.Extensions().V1beta1().ReplicaSets(),
informerFactory.Apps().V1beta1().StatefulSets(),
informerFactory.Core().V1().Services(),
informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
informerFactory.Storage().V1().StorageClasses(),
v1.DefaultHardPodAffinitySymmetricWeight,
true,
utilfeature.DefaultFeatureGate.Enabled(features.EnableEquivalenceClassCache),
false,
)
}

// initTestMasterAndScheduler initializes a test environment and creates a master with default
// configuration.
func initTestMaster(t *testing.T, nsPrefix string, admission admission.Interface) *TestContext {
var context TestContext

// 1. Create master
h := &framework.MasterHolder{Initialized: make(chan struct{})}
s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
<-h.Initialized
h.M.GenericAPIServer.Handler.ServeHTTP(w, req)
}))

masterConfig := framework.NewIntegrationTestMasterConfig()

if admission != nil {
masterConfig.GenericConfig.AdmissionControl = admission
}

_, context.httpServer, context.closeFn = framework.RunAMasterUsingServer(masterConfig, s, h)

if nsPrefix != "default" {
context.ns = framework.CreateTestingNamespace(nsPrefix+string(uuid.NewUUID()), s, t)
} else {
context.ns = framework.CreateTestingNamespace("default", s, t)
}

// 2. Create kubeclient
context.clientSet = clientset.NewForConfigOrDie(
&restclient.Config{
QPS: -1, Host: s.URL,
ContentConfig: restclient.ContentConfig{
GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"},
},
},
)
return &context
}

// initTestScheduler initializes a test environment and creates a scheduler with default
// configuration.
func initTestScheduler(
t *testing.T,
context *TestContext,
controllerCh chan struct{},
setPodInformer bool,
policy *schedulerapi.Policy,
) *TestContext {
// Pod preemption is enabled by default scheduler configuration, but preemption only happens when PodPriority
// feature gate is enabled at the same time.
return initTestSchedulerWithOptions(t, context, controllerCh, setPodInformer, policy, false)
}

// initTestSchedulerWithOptions initializes a test environment and creates a scheduler with default
// configuration and other options.
func initTestSchedulerWithOptions(
t *testing.T,
context *TestContext,
controllerCh chan struct{},
setPodInformer bool,
policy *schedulerapi.Policy,
disablePreemption bool,
) *TestContext {
// Enable EnableEquivalenceClassCache for all integration tests.
defer utilfeaturetesting.SetFeatureGateDuringTest(
t,
utilfeature.DefaultFeatureGate,
features.EnableEquivalenceClassCache, true)()

// 1. Create scheduler
context.informerFactory = informers.NewSharedInformerFactory(context.clientSet, time.Second)

var podInformer coreinformers.PodInformer

// create independent pod informer if required
if setPodInformer {
podInformer = factory.NewPodInformer(context.clientSet, 12*time.Hour)
} else {
podInformer = context.informerFactory.Core().V1().Pods()
}

context.schedulerConfigFactory = createConfiguratorWithPodInformer(
v1.DefaultSchedulerName, context.clientSet, podInformer, context.informerFactory)

var err error
context.schedulerConfig, err = context.schedulerConfigFactory.Create()

if policy != nil {
context.schedulerConfig, err = context.schedulerConfigFactory.CreateFromConfig(*policy)
} else {
context.schedulerConfig, err = context.schedulerConfigFactory.Create()
}

if err != nil {
t.Fatalf("Couldn't create scheduler config: %v", err)
}

// set controllerCh if provided.
if controllerCh != nil {
context.schedulerConfig.StopEverything = controllerCh
}

// set DisablePreemption option
context.schedulerConfig.DisablePreemption = disablePreemption

// set setPodInformer if provided.
if setPodInformer {
go podInformer.Informer().Run(context.schedulerConfig.StopEverything)
controller.WaitForCacheSync("scheduler", context.schedulerConfig.StopEverything, podInformer.Informer().HasSynced)
}

eventBroadcaster := record.NewBroadcaster()
context.schedulerConfig.Recorder = eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: v1.DefaultSchedulerName})
eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(context.clientSet.CoreV1().RESTClient()).Events("")})
go podInformer.Informer().Run(context.schedulerConfig.StopEverything)
context.schedulerConfig.Recorder = eventBroadcaster.NewRecorder(
legacyscheme.Scheme,
v1.EventSource{Component: v1.DefaultSchedulerName},
)
eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{
Interface: context.clientSet.CoreV1().Events(""),
})

context.informerFactory.Start(context.schedulerConfig.StopEverything)
context.scheduler, err = scheduler.NewFromConfigurator(&scheduler.FakeConfigurator{Config: context.schedulerConfig}, nil...)
context.informerFactory.WaitForCacheSync(context.schedulerConfig.StopEverything)

context.scheduler, err = scheduler.NewFromConfigurator(&scheduler.FakeConfigurator{
Config: context.schedulerConfig},
nil...)
if err != nil {
t.Fatalf("Couldn't create scheduler: %v", err)
}
context.scheduler.Run()
return &context
return context
}

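The helpers above are meant to be composed: initTestMaster brings up the API server and namespace, initTestScheduler / initTestSchedulerWithOptions attach a scheduler to the resulting TestContext, and cleanupTest (shown further below) tears everything down. A sketch of that composition, assuming the helpers as defined in this file, might look like the following; the test name and namespace prefix are illustrative only.

package scheduler

import "testing"

// TestSchedulerHelpersSketch is an illustrative example of wiring the helpers
// in this file together; it is not part of the vendored test suite.
func TestSchedulerHelpersSketch(t *testing.T) {
	// Start an API server with no extra admission plugin and create a test namespace.
	context := initTestMaster(t, "helper-sketch", nil)

	// Start a scheduler against it. A nil stop channel and nil policy take the
	// defaults, setPodInformer=true gives the scheduler its own pod informer,
	// and the final argument leaves preemption enabled.
	context = initTestSchedulerWithOptions(t, context, nil, true, nil, false)
	defer cleanupTest(t, context)

	// ... create nodes and pods with the helpers below and assert on scheduling ...
}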
// initDisruptionController initializes and runs a Disruption Controller to properly
// update PodDisruptionBudget objects.
func initDisruptionController(context *TestContext) *disruption.DisruptionController {
informers := informers.NewSharedInformerFactory(context.clientSet, 12*time.Hour)

dc := disruption.NewDisruptionController(
informers.Core().V1().Pods(),
informers.Policy().V1beta1().PodDisruptionBudgets(),
informers.Core().V1().ReplicationControllers(),
informers.Extensions().V1beta1().ReplicaSets(),
informers.Extensions().V1beta1().Deployments(),
informers.Apps().V1beta1().StatefulSets(),
context.clientSet)

informers.Start(context.schedulerConfig.StopEverything)
informers.WaitForCacheSync(context.schedulerConfig.StopEverything)
go dc.Run(context.schedulerConfig.StopEverything)
return dc
}

// initTest initializes a test environment and creates master and scheduler with default
// configuration.
func initTest(t *testing.T, nsPrefix string) *TestContext {
return initTestScheduler(t, initTestMaster(t, nsPrefix, nil), nil, true, nil)
}

// initTestDisablePreemption initializes a test environment and creates master and scheduler with default
// configuration but with pod preemption disabled.
func initTestDisablePreemption(t *testing.T, nsPrefix string) *TestContext {
return initTestSchedulerWithOptions(
t, initTestMaster(t, nsPrefix, nil), nil, true, nil, true)
}

// cleanupTest deletes the scheduler and the test namespace. It should be called
@@ -113,7 +269,8 @@ func cleanupTest(t *testing.T, context *TestContext) {

// waitForReflection waits till the passFunc confirms that the object it expects
// to see is in the store. Used to observe reflected events.
func waitForReflection(t *testing.T, nodeLister corelisters.NodeLister, key string, passFunc func(n interface{}) bool) error {
func waitForReflection(t *testing.T, nodeLister corelisters.NodeLister, key string,
passFunc func(n interface{}) bool) error {
nodes := []*v1.Node{}
err := wait.Poll(time.Millisecond*100, wait.ForeverTestTimeout, func() (bool, error) {
n, err := nodeLister.Get(key)
@@ -185,6 +342,12 @@ func createNode(cs clientset.Interface, name string, res *v1.ResourceList) (*v1.
return cs.CoreV1().Nodes().Create(n)
}

// updateNodeStatus updates the status of node.
func updateNodeStatus(cs clientset.Interface, node *v1.Node) error {
_, err := cs.CoreV1().Nodes().UpdateStatus(node)
return err
}

// createNodes creates `numNodes` nodes. The created node names will be in the
// form of "`prefix`-X" where X is an ordinal.
func createNodes(cs clientset.Interface, prefix string, res *v1.ResourceList, numNodes int) ([]*v1.Node, error) {
@@ -228,7 +391,7 @@ func initPausePod(cs clientset.Interface, conf *pausePodConfig) *v1.Pod {
Containers: []v1.Container{
{
Name: conf.Name,
Image: framework.GetPauseImageName(cs),
Image: imageutils.GetPauseImageName(),
},
},
Tolerations: conf.Tolerations,
@@ -252,7 +415,8 @@ func createPausePod(cs clientset.Interface, p *v1.Pod) (*v1.Pod, error) {
// createPausePodWithResource creates a pod with "Pause" image and the given
// resources and returns its pointer and error status. The resource list can be
// nil.
func createPausePodWithResource(cs clientset.Interface, podName string, nsName string, res *v1.ResourceList) (*v1.Pod, error) {
func createPausePodWithResource(cs clientset.Interface, podName string,
nsName string, res *v1.ResourceList) (*v1.Pod, error) {
var conf pausePodConfig
if res == nil {
conf = pausePodConfig{
@@ -346,7 +510,8 @@ func podUnschedulable(c clientset.Interface, podNamespace, podName string) wait.
return false, nil
}
_, cond := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
return cond != nil && cond.Status == v1.ConditionFalse && cond.Reason == v1.PodReasonUnschedulable, nil
return cond != nil && cond.Status == v1.ConditionFalse &&
cond.Reason == v1.PodReasonUnschedulable, nil
}
}

@@ -374,6 +539,59 @@ func waitForPodUnschedulable(cs clientset.Interface, pod *v1.Pod) error {
return waitForPodUnschedulableWithTimeout(cs, pod, 30*time.Second)
}

// waitCachedPDBsStable waits for PDBs in scheduler cache to have "CurrentHealthy" status equal to
// the expected values.
func waitCachedPDBsStable(context *TestContext, pdbs []*policy.PodDisruptionBudget, pdbPodNum []int32) error {
return wait.Poll(time.Second, 60*time.Second, func() (bool, error) {
cachedPDBs, err := context.scheduler.Config().SchedulerCache.ListPDBs(labels.Everything())
if err != nil {
return false, err
}
if len(cachedPDBs) != len(pdbs) {
return false, nil
}
for i, pdb := range pdbs {
found := false
for _, cpdb := range cachedPDBs {
if pdb.Name == cpdb.Name && pdb.Namespace == cpdb.Namespace {
found = true
if cpdb.Status.CurrentHealthy != pdbPodNum[i] {
return false, nil
}
}
}
if !found {
return false, nil
}
}
return true, nil
})
}

// waitCachedPodsStable waits until scheduler cache has the given pods.
func waitCachedPodsStable(context *TestContext, pods []*v1.Pod) error {
return wait.Poll(time.Second, 30*time.Second, func() (bool, error) {
cachedPods, err := context.scheduler.Config().SchedulerCache.List(labels.Everything())
if err != nil {
return false, err
}
if len(pods) != len(cachedPods) {
return false, nil
}
for _, p := range pods {
actualPod, err1 := context.clientSet.CoreV1().Pods(p.Namespace).Get(p.Name, metav1.GetOptions{})
if err1 != nil {
return false, err1
}
cachedPod, err2 := context.scheduler.Config().SchedulerCache.GetPod(actualPod)
if err2 != nil || cachedPod == nil {
return false, err2
}
}
return true, nil
})
}

// deletePod deletes the given pod in the given namespace.
func deletePod(cs clientset.Interface, podName string, nsName string) error {
return cs.CoreV1().Pods(nsName).Delete(podName, metav1.NewDeleteOptions(0))
@@ -388,8 +606,35 @@ func cleanupPods(cs clientset.Interface, t *testing.T, pods []*v1.Pod) {
}
}
for _, p := range pods {
if err := wait.Poll(time.Second, wait.ForeverTestTimeout, podDeleted(cs, p.Namespace, p.Name)); err != nil {
if err := wait.Poll(time.Second, wait.ForeverTestTimeout,
podDeleted(cs, p.Namespace, p.Name)); err != nil {
t.Errorf("error while waiting for pod %v/%v to get deleted: %v", p.Namespace, p.Name, err)
}
}
}

// noPodsInNamespace returns true if there are no pods in the given namespace.
func noPodsInNamespace(c clientset.Interface, podNamespace string) wait.ConditionFunc {
return func() (bool, error) {
pods, err := c.CoreV1().Pods(podNamespace).List(metav1.ListOptions{})
if err != nil {
return false, err
}

return len(pods.Items) == 0, nil
}
}

// cleanupPodsInNamespace deletes the pods in the given namespace and waits for them to
// be actually deleted.
func cleanupPodsInNamespace(cs clientset.Interface, t *testing.T, ns string) {
if err := cs.CoreV1().Pods(ns).DeleteCollection(nil, metav1.ListOptions{}); err != nil {
t.Errorf("error while listing pod in namespace %v: %v", ns, err)
return
}

if err := wait.Poll(time.Second, wait.ForeverTestTimeout,
noPodsInNamespace(cs, ns)); err != nil {
t.Errorf("error while waiting for pods in namespace %v: %v", ns, err)
}
}
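The cleanup helpers above all lean on wait.ConditionFunc values (podDeleted, noPodsInNamespace) driven by wait.Poll. A hypothetical condition in the same style, not part of the vendored file, would look like this:

package scheduler

import (
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// podRunning is an illustrative condition: it reports true once the named pod
// reaches the Running phase.
func podRunning(cs clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
	return func() (bool, error) {
		pod, err := cs.CoreV1().Pods(podNamespace).Get(podName, metav1.GetOptions{})
		if err != nil {
			// Keep polling on errors; a caller that wants to abort early can return err instead.
			return false, nil
		}
		return pod.Status.Phase == v1.PodRunning, nil
	}
}

A caller would plug it into wait.Poll(time.Second, wait.ForeverTestTimeout, podRunning(cs, ns, name)), exactly as cleanupPods does with podDeleted.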
245
vendor/k8s.io/kubernetes/test/integration/scheduler/volume_binding_test.go
generated
vendored
@@ -20,8 +20,6 @@ package scheduler

import (
"fmt"
"net/http"
"net/http/httptest"
"strconv"
"strings"
"testing"
@@ -33,19 +31,11 @@ import (
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/controller/volume/persistentvolume"
"k8s.io/kubernetes/pkg/scheduler"
"k8s.io/kubernetes/pkg/scheduler/factory"
"k8s.io/kubernetes/test/integration/framework"
)

type testConfig struct {
@@ -75,94 +65,107 @@ const (
nodeAffinityLabelKey = "kubernetes.io/hostname"
)

type testPV struct {
name string
scMode storagev1.VolumeBindingMode
preboundPVC string
node string
}

type testPVC struct {
name string
scMode storagev1.VolumeBindingMode
preboundPV string
}

func TestVolumeBinding(t *testing.T) {
config := setupCluster(t, "volume-scheduling", 2)
defer config.teardown()

cases := map[string]struct {
pod *v1.Pod
pvs []*v1.PersistentVolume
pvcs []*v1.PersistentVolumeClaim
pvs []*testPV
pvcs []*testPVC
// Create these, but they should not be bound in the end
unboundPvcs []*v1.PersistentVolumeClaim
unboundPvs []*v1.PersistentVolume
unboundPvcs []*testPVC
unboundPvs []*testPV
shouldFail bool
}{
"immediate can bind": {
pod: makePod("pod-i-canbind", config.ns, []string{"pvc-i-canbind"}),
pvs: []*v1.PersistentVolume{makePV(t, "pv-i-canbind", classImmediate, "", "", node1)},
pvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-i-canbind", config.ns, &classImmediate, "")},
pvs: []*testPV{{"pv-i-canbind", modeImmediate, "", node1}},
pvcs: []*testPVC{{"pvc-i-canbind", modeImmediate, ""}},
},
"immediate cannot bind": {
pod: makePod("pod-i-cannotbind", config.ns, []string{"pvc-i-cannotbind"}),
unboundPvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-i-cannotbind", config.ns, &classImmediate, "")},
unboundPvcs: []*testPVC{{"pvc-i-cannotbind", modeImmediate, ""}},
shouldFail: true,
},
"immediate pvc prebound": {
pod: makePod("pod-i-pvc-prebound", config.ns, []string{"pvc-i-prebound"}),
pvs: []*v1.PersistentVolume{makePV(t, "pv-i-pvc-prebound", classImmediate, "", "", node1)},
pvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-i-prebound", config.ns, &classImmediate, "pv-i-pvc-prebound")},
pvs: []*testPV{{"pv-i-pvc-prebound", modeImmediate, "", node1}},
pvcs: []*testPVC{{"pvc-i-prebound", modeImmediate, "pv-i-pvc-prebound"}},
},
"immediate pv prebound": {
pod: makePod("pod-i-pv-prebound", config.ns, []string{"pvc-i-pv-prebound"}),
pvs: []*v1.PersistentVolume{makePV(t, "pv-i-prebound", classImmediate, "pvc-i-pv-prebound", config.ns, node1)},
pvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-i-pv-prebound", config.ns, &classImmediate, "")},
pvs: []*testPV{{"pv-i-prebound", modeImmediate, "pvc-i-pv-prebound", node1}},
pvcs: []*testPVC{{"pvc-i-pv-prebound", modeImmediate, ""}},
},
"wait can bind": {
pod: makePod("pod-w-canbind", config.ns, []string{"pvc-w-canbind"}),
pvs: []*v1.PersistentVolume{makePV(t, "pv-w-canbind", classWait, "", "", node1)},
pvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-w-canbind", config.ns, &classWait, "")},
pvs: []*testPV{{"pv-w-canbind", modeWait, "", node1}},
pvcs: []*testPVC{{"pvc-w-canbind", modeWait, ""}},
},
"wait cannot bind": {
pod: makePod("pod-w-cannotbind", config.ns, []string{"pvc-w-cannotbind"}),
unboundPvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-w-cannotbind", config.ns, &classWait, "")},
unboundPvcs: []*testPVC{{"pvc-w-cannotbind", modeWait, ""}},
shouldFail: true,
},
"wait pvc prebound": {
pod: makePod("pod-w-pvc-prebound", config.ns, []string{"pvc-w-prebound"}),
pvs: []*v1.PersistentVolume{makePV(t, "pv-w-pvc-prebound", classWait, "", "", node1)},
pvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-w-prebound", config.ns, &classWait, "pv-w-pvc-prebound")},
pvs: []*testPV{{"pv-w-pvc-prebound", modeWait, "", node1}},
pvcs: []*testPVC{{"pvc-w-prebound", modeWait, "pv-w-pvc-prebound"}},
},
"wait pv prebound": {
pod: makePod("pod-w-pv-prebound", config.ns, []string{"pvc-w-pv-prebound"}),
pvs: []*v1.PersistentVolume{makePV(t, "pv-w-prebound", classWait, "pvc-w-pv-prebound", config.ns, node1)},
pvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-w-pv-prebound", config.ns, &classWait, "")},
pvs: []*testPV{{"pv-w-prebound", modeWait, "pvc-w-pv-prebound", node1}},
pvcs: []*testPVC{{"pvc-w-pv-prebound", modeWait, ""}},
},
"wait can bind two": {
pod: makePod("pod-w-canbind-2", config.ns, []string{"pvc-w-canbind-2", "pvc-w-canbind-3"}),
pvs: []*v1.PersistentVolume{
makePV(t, "pv-w-canbind-2", classWait, "", "", node2),
makePV(t, "pv-w-canbind-3", classWait, "", "", node2),
pvs: []*testPV{
{"pv-w-canbind-2", modeWait, "", node2},
{"pv-w-canbind-3", modeWait, "", node2},
},
pvcs: []*v1.PersistentVolumeClaim{
makePVC("pvc-w-canbind-2", config.ns, &classWait, ""),
makePVC("pvc-w-canbind-3", config.ns, &classWait, ""),
pvcs: []*testPVC{
{"pvc-w-canbind-2", modeWait, ""},
{"pvc-w-canbind-3", modeWait, ""},
},
unboundPvs: []*v1.PersistentVolume{
makePV(t, "pv-w-canbind-5", classWait, "", "", node1),
unboundPvs: []*testPV{
{"pv-w-canbind-5", modeWait, "", node1},
},
},
"wait cannot bind two": {
pod: makePod("pod-w-cannotbind-2", config.ns, []string{"pvc-w-cannotbind-1", "pvc-w-cannotbind-2"}),
unboundPvcs: []*v1.PersistentVolumeClaim{
makePVC("pvc-w-cannotbind-1", config.ns, &classWait, ""),
makePVC("pvc-w-cannotbind-2", config.ns, &classWait, ""),
unboundPvcs: []*testPVC{
{"pvc-w-cannotbind-1", modeWait, ""},
{"pvc-w-cannotbind-2", modeWait, ""},
},
unboundPvs: []*v1.PersistentVolume{
makePV(t, "pv-w-cannotbind-1", classWait, "", "", node2),
makePV(t, "pv-w-cannotbind-2", classWait, "", "", node1),
unboundPvs: []*testPV{
{"pv-w-cannotbind-1", modeWait, "", node2},
{"pv-w-cannotbind-2", modeWait, "", node1},
},
shouldFail: true,
},
"mix immediate and wait": {
pod: makePod("pod-mix-bound", config.ns, []string{"pvc-w-canbind-4", "pvc-i-canbind-2"}),
pvs: []*v1.PersistentVolume{
makePV(t, "pv-w-canbind-4", classWait, "", "", node1),
makePV(t, "pv-i-canbind-2", classImmediate, "", "", node1),
pvs: []*testPV{
{"pv-w-canbind-4", modeWait, "", node1},
{"pv-i-canbind-2", modeImmediate, "", node1},
},
pvcs: []*v1.PersistentVolumeClaim{
makePVC("pvc-w-canbind-4", config.ns, &classWait, ""),
makePVC("pvc-i-canbind-2", config.ns, &classImmediate, ""),
pvcs: []*testPVC{
{"pvc-w-canbind-4", modeWait, ""},
{"pvc-i-canbind-2", modeImmediate, ""},
},
},
}
@@ -170,26 +173,41 @@ func TestVolumeBinding(t *testing.T) {
for name, test := range cases {
glog.Infof("Running test %v", name)

// Create two StorageClasses
suffix := rand.String(4)
classes := map[storagev1.VolumeBindingMode]*storagev1.StorageClass{}
classes[modeImmediate] = makeStorageClass(fmt.Sprintf("immediate-%v", suffix), &modeImmediate)
classes[modeWait] = makeStorageClass(fmt.Sprintf("wait-%v", suffix), &modeWait)
for _, sc := range classes {
if _, err := config.client.StorageV1().StorageClasses().Create(sc); err != nil {
t.Fatalf("Failed to create StorageClass %q: %v", sc.Name, err)
}
}

// Create PVs
for _, pv := range test.pvs {
for _, pvConfig := range test.pvs {
pv := makePV(pvConfig.name, classes[pvConfig.scMode].Name, pvConfig.preboundPVC, config.ns, pvConfig.node)
if _, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil {
t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err)
}
}

for _, pv := range test.unboundPvs {
for _, pvConfig := range test.unboundPvs {
pv := makePV(pvConfig.name, classes[pvConfig.scMode].Name, pvConfig.preboundPVC, config.ns, pvConfig.node)
if _, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil {
t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err)
}
}

// Create PVCs
for _, pvc := range test.pvcs {
for _, pvcConfig := range test.pvcs {
pvc := makePVC(pvcConfig.name, config.ns, &classes[pvcConfig.scMode].Name, pvcConfig.preboundPV)
if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(pvc); err != nil {
t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err)
}
}
for _, pvc := range test.unboundPvcs {
for _, pvcConfig := range test.unboundPvcs {
pvc := makePVC(pvcConfig.name, config.ns, &classes[pvcConfig.scMode].Name, pvcConfig.preboundPV)
if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(pvc); err != nil {
t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err)
}
@@ -211,23 +229,20 @@ func TestVolumeBinding(t *testing.T) {

// Validate PVC/PV binding
for _, pvc := range test.pvcs {
validatePVCPhase(t, config.client, pvc, v1.ClaimBound)
validatePVCPhase(t, config.client, pvc.name, config.ns, v1.ClaimBound)
}
for _, pvc := range test.unboundPvcs {
validatePVCPhase(t, config.client, pvc, v1.ClaimPending)
validatePVCPhase(t, config.client, pvc.name, config.ns, v1.ClaimPending)
}
for _, pv := range test.pvs {
validatePVPhase(t, config.client, pv, v1.VolumeBound)
validatePVPhase(t, config.client, pv.name, v1.VolumeBound)
}
for _, pv := range test.unboundPvs {
validatePVPhase(t, config.client, pv, v1.VolumeAvailable)
validatePVPhase(t, config.client, pv.name, v1.VolumeAvailable)
}

// TODO: validate events on Pods and PVCs

config.client.CoreV1().Pods(config.ns).DeleteCollection(deleteOption, metav1.ListOptions{})
config.client.CoreV1().PersistentVolumeClaims(config.ns).DeleteCollection(deleteOption, metav1.ListOptions{})
config.client.CoreV1().PersistentVolumes().DeleteCollection(deleteOption, metav1.ListOptions{})
// Force delete objects, but they still may not be immediately removed
deleteTestObjects(config.client, config.ns, deleteOption)
}
}

@@ -240,7 +255,7 @@ func TestVolumeBindingStress(t *testing.T) {
pvs := []*v1.PersistentVolume{}
pvcs := []*v1.PersistentVolumeClaim{}
for i := 0; i < podLimit*volsPerPod; i++ {
pv := makePV(t, fmt.Sprintf("pv-stress-%v", i), classWait, "", "", node1)
pv := makePV(fmt.Sprintf("pv-stress-%v", i), classWait, "", "", node1)
pvc := makePVC(fmt.Sprintf("pvc-stress-%v", i), config.ns, &classWait, "")

if pv, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil {
@@ -271,27 +286,27 @@ func TestVolumeBindingStress(t *testing.T) {

// Validate Pods scheduled
for _, pod := range pods {
if err := waitForPodToSchedule(config.client, pod); err != nil {
// Use increased timeout for stress test because there is a higher chance of
// PV sync error
if err := waitForPodToScheduleWithTimeout(config.client, pod, 60*time.Second); err != nil {
t.Errorf("Failed to schedule Pod %q: %v", pod.Name, err)
}
}

// Validate PVC/PV binding
for _, pvc := range pvcs {
validatePVCPhase(t, config.client, pvc, v1.ClaimBound)
validatePVCPhase(t, config.client, pvc.Name, config.ns, v1.ClaimBound)
}
for _, pv := range pvs {
validatePVPhase(t, config.client, pv, v1.VolumeBound)
validatePVPhase(t, config.client, pv.Name, v1.VolumeBound)
}

// TODO: validate events on Pods and PVCs
}

func TestPVAffinityConflict(t *testing.T) {
config := setupCluster(t, "volume-scheduling", 3)
defer config.teardown()

pv := makePV(t, "local-pv", classImmediate, "", "", node1)
pv := makePV("local-pv", classImmediate, "", "", node1)
pvc := makePVC("local-pvc", config.ns, &classImmediate, "")

// Create PV
@@ -347,27 +362,17 @@ func TestPVAffinityConflict(t *testing.T) {
}

func setupCluster(t *testing.T, nsName string, numberOfNodes int) *testConfig {
h := &framework.MasterHolder{Initialized: make(chan struct{})}
s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
<-h.Initialized
h.M.GenericAPIServer.Handler.ServeHTTP(w, req)
}))

// Enable feature gates
utilfeature.DefaultFeatureGate.Set("VolumeScheduling=true,PersistentLocalVolumes=true")

// Build clientset and informers for controllers.
clientset := clientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
informers := informers.NewSharedInformerFactory(clientset, time.Second)

// Start master
masterConfig := framework.NewIntegrationTestMasterConfig()

_, _, closeFn := framework.RunAMasterUsingServer(masterConfig, s, h)
ns := framework.CreateTestingNamespace(nsName, s, t).Name

controllerCh := make(chan struct{})

context := initTestScheduler(t, initTestMaster(t, nsName, nil), controllerCh, false, nil)

clientset := context.clientSet
ns := context.ns.Name
informers := context.informerFactory

// Start PV controller for volume binding.
params := persistentvolume.ControllerParameters{
KubeClient: clientset,
@@ -379,7 +384,7 @@ func setupCluster(t *testing.T, nsName string, numberOfNodes int) *testConfig {
ClaimInformer: informers.Core().V1().PersistentVolumeClaims(),
ClassInformer: informers.Storage().V1().StorageClasses(),
PodInformer: informers.Core().V1().Pods(),
EventRecorder: nil, // TODO: add one so we can test PV events
NodeInformer: informers.Core().V1().Nodes(),
EnableDynamicProvisioning: true,
}
ctrl, err := persistentvolume.NewController(params)
@@ -388,40 +393,6 @@ func setupCluster(t *testing.T, nsName string, numberOfNodes int) *testConfig {
}
go ctrl.Run(controllerCh)

// Start scheduler
configurator := factory.NewConfigFactory(
v1.DefaultSchedulerName,
clientset,
informers.Core().V1().Nodes(),
informers.Core().V1().Pods(),
informers.Core().V1().PersistentVolumes(),
informers.Core().V1().PersistentVolumeClaims(),
informers.Core().V1().ReplicationControllers(),
informers.Extensions().V1beta1().ReplicaSets(),
informers.Apps().V1beta1().StatefulSets(),
informers.Core().V1().Services(),
informers.Policy().V1beta1().PodDisruptionBudgets(),
informers.Storage().V1().StorageClasses(),
v1.DefaultHardPodAffinitySymmetricWeight,
true, // Enable EqualCache by default.
)

eventBroadcaster := record.NewBroadcaster()
sched, err := scheduler.NewFromConfigurator(configurator, func(cfg *scheduler.Config) {
cfg.StopEverything = controllerCh
cfg.Recorder = eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: v1.DefaultSchedulerName})
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(clientset.CoreV1().RESTClient()).Events("")})
})
if err != nil {
t.Fatalf("Failed to create scheduler: %v.", err)
}

go sched.Run()

// Waiting for all controller sync.
informers.Start(controllerCh)
informers.WaitForCacheSync(controllerCh)

// Create shared objects
// Create nodes
for i := 0; i < numberOfNodes; i++ {
@@ -466,18 +437,20 @@ func setupCluster(t *testing.T, nsName string, numberOfNodes int) *testConfig {
ns: ns,
stop: controllerCh,
teardown: func() {
clientset.CoreV1().Pods(ns).DeleteCollection(nil, metav1.ListOptions{})
clientset.CoreV1().PersistentVolumeClaims(ns).DeleteCollection(nil, metav1.ListOptions{})
clientset.CoreV1().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{})
clientset.StorageV1().StorageClasses().DeleteCollection(nil, metav1.ListOptions{})
clientset.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})
close(controllerCh)
closeFn()
deleteTestObjects(clientset, ns, nil)
cleanupTest(t, context)
utilfeature.DefaultFeatureGate.Set("VolumeScheduling=false,LocalPersistentVolumes=false")
},
}
}

func deleteTestObjects(client clientset.Interface, ns string, option *metav1.DeleteOptions) {
client.CoreV1().Pods(ns).DeleteCollection(option, metav1.ListOptions{})
client.CoreV1().PersistentVolumeClaims(ns).DeleteCollection(option, metav1.ListOptions{})
client.CoreV1().PersistentVolumes().DeleteCollection(option, metav1.ListOptions{})
client.StorageV1().StorageClasses().DeleteCollection(option, metav1.ListOptions{})
}

func makeStorageClass(name string, mode *storagev1.VolumeBindingMode) *storagev1.StorageClass {
return &storagev1.StorageClass{
ObjectMeta: metav1.ObjectMeta{
@@ -488,7 +461,7 @@ func makeStorageClass(name string, mode *storagev1.VolumeBindingMode) *storagev1
}
}

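makeStorageClass above builds one StorageClass per binding mode used by the table-driven cases. A sketch of the kind of object it is expected to return for the delayed-binding ("wait") mode is shown below; it assumes the storagev1.VolumeBindingWaitForFirstConsumer constant behind modeWait, and the provisioner value is illustrative rather than taken from the vendored helper.

package scheduler

import (
	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// makeWaitStorageClass is an illustrative sketch, not the vendored makeStorageClass.
func makeWaitStorageClass(name string) *storagev1.StorageClass {
	mode := storagev1.VolumeBindingWaitForFirstConsumer
	return &storagev1.StorageClass{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
		},
		// "no-provisioner" is the usual choice for pre-created local PVs in these tests.
		Provisioner:       "kubernetes.io/no-provisioner",
		VolumeBindingMode: &mode,
	}
}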
func makePV(t *testing.T, name, scName, pvcName, ns, node string) *v1.PersistentVolume {
func makePV(name, scName, pvcName, ns, node string) *v1.PersistentVolume {
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: name,
@@ -585,25 +558,25 @@ func makePod(name, ns string, pvcs []string) *v1.Pod {
}
}

func validatePVCPhase(t *testing.T, client clientset.Interface, pvc *v1.PersistentVolumeClaim, phase v1.PersistentVolumeClaimPhase) {
claim, err := client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
func validatePVCPhase(t *testing.T, client clientset.Interface, pvcName string, ns string, phase v1.PersistentVolumeClaimPhase) {
claim, err := client.CoreV1().PersistentVolumeClaims(ns).Get(pvcName, metav1.GetOptions{})
if err != nil {
t.Errorf("Failed to get PVC %v/%v: %v", pvc.Namespace, pvc.Name, err)
t.Errorf("Failed to get PVC %v/%v: %v", ns, pvcName, err)
}

if claim.Status.Phase != phase {
t.Errorf("PVC %v/%v phase not %v, got %v", pvc.Namespace, pvc.Name, phase, claim.Status.Phase)
t.Errorf("PVC %v/%v phase not %v, got %v", ns, pvcName, phase, claim.Status.Phase)
}
}

func validatePVPhase(t *testing.T, client clientset.Interface, pv *v1.PersistentVolume, phase v1.PersistentVolumePhase) {
pv, err := client.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
func validatePVPhase(t *testing.T, client clientset.Interface, pvName string, phase v1.PersistentVolumePhase) {
pv, err := client.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
if err != nil {
t.Errorf("Failed to get PV %v: %v", pv.Name, err)
t.Errorf("Failed to get PV %v: %v", pvName, err)
}

if pv.Status.Phase != phase {
t.Errorf("PV %v phase not %v, got %v", pv.Name, phase, pv.Status.Phase)
t.Errorf("PV %v phase not %v, got %v", pvName, phase, pv.Status.Phase)
}
}
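The preboundPVC and preboundPV fields in testPV and testPVC map onto the usual Kubernetes pre-binding fields: a PV names its claim through Spec.ClaimRef, and a PVC names its volume through Spec.VolumeName. A minimal illustration, not the vendored makePV/makePVC code, is:

package scheduler

import "k8s.io/api/core/v1"

// prebind links an existing PV and PVC to each other the way the test's
// "prebound" cases expect; field values are illustrative.
func prebind(pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
	// The PV claims it is reserved for this specific claim.
	pv.Spec.ClaimRef = &v1.ObjectReference{
		Namespace: pvc.Namespace,
		Name:      pvc.Name,
	}
	// The PVC asks for this specific volume by name.
	pvc.Spec.VolumeName = pv.Name
}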