Fresh dep ensure

Mike Cronce
2018-11-26 13:23:56 -05:00
parent 93cb8a04d7
commit 407478ab9a
9016 changed files with 551394 additions and 279685 deletions


@ -25,47 +25,46 @@ go_test(
"//cmd/kube-scheduler/app:go_default_library",
"//cmd/kube-scheduler/app/config:go_default_library",
"//pkg/api/legacyscheme:go_default_library",
"//pkg/apis/componentconfig:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/client/informers/informers_generated/internalversion:go_default_library",
"//pkg/controller/nodelifecycle:go_default_library",
"//pkg/controller/volume/persistentvolume:go_default_library",
"//pkg/controller/volume/persistentvolume/options:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubeapiserver/admission:go_default_library",
"//pkg/scheduler:go_default_library",
"//pkg/scheduler/algorithm:go_default_library",
"//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/algorithmprovider:go_default_library",
"//pkg/scheduler/api:go_default_library",
"//pkg/scheduler/apis/config:go_default_library",
"//pkg/scheduler/cache:go_default_library",
"//pkg/scheduler/factory:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//plugin/pkg/admission/podtolerationrestriction:go_default_library",
"//plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//test/integration/framework:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/api/storage/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
@ -96,26 +95,27 @@ go_library(
"//pkg/scheduler/algorithmprovider:go_default_library",
"//pkg/scheduler/api:go_default_library",
"//pkg/scheduler/factory:go_default_library",
"//pkg/util/taints:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/admission:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//test/integration/framework:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/admission:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
],
)


@ -349,7 +349,7 @@ func TestSchedulerExtender(t *testing.T) {
}
policy.APIVersion = "v1"
context = initTestScheduler(t, context, nil, false, &policy)
context = initTestScheduler(t, context, false, &policy)
defer cleanupTest(t, context)
DoTestPodScheduling(context.ns, t, clientSet)


@ -36,7 +36,7 @@ import (
_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
testutils "k8s.io/kubernetes/test/utils"
"github.com/golang/glog"
"k8s.io/klog"
)
var lowPriority, mediumPriority, highPriority = int32(100), int32(200), int32(300)
@ -52,7 +52,7 @@ func waitForNominatedNodeNameWithTimeout(cs clientset.Interface, pod *v1.Pod, ti
}
return false, err
}); err != nil {
return fmt.Errorf("Pod %v annotation did not get set: %v", pod.Name, err)
return fmt.Errorf("Pod %v/%v annotation did not get set: %v", pod.Namespace, pod.Name, err)
}
return nil
}
@ -268,7 +268,7 @@ func TestPreemption(t *testing.T) {
for i, p := range pods {
if _, found := test.preemptedPodIndexes[i]; found {
if err = wait.Poll(time.Second, wait.ForeverTestTimeout, podIsGettingEvicted(cs, p.Namespace, p.Name)); err != nil {
t.Errorf("Test [%v]: Pod %v is not getting evicted.", test.description, p.Name)
t.Errorf("Test [%v]: Pod %v/%v is not getting evicted.", test.description, p.Namespace, p.Name)
}
} else {
if p.DeletionTimestamp != nil {
@ -450,7 +450,7 @@ func TestPreemptionStarvation(t *testing.T) {
// make sure that runningPods are all scheduled.
for _, p := range runningPods {
if err := waitForPodToSchedule(cs, p); err != nil {
t.Fatalf("Pod %v didn't get scheduled: %v", p.Name, err)
t.Fatalf("Pod %v/%v didn't get scheduled: %v", p.Namespace, p.Name, err)
}
}
// Create pending pods.
@ -464,7 +464,7 @@ func TestPreemptionStarvation(t *testing.T) {
for _, p := range pendingPods {
if err := wait.Poll(100*time.Millisecond, wait.ForeverTestTimeout,
podUnschedulable(cs, p.Namespace, p.Name)); err != nil {
t.Errorf("Pod %v didn't get marked unschedulable: %v", p.Name, err)
t.Errorf("Pod %v/%v didn't get marked unschedulable: %v", p.Namespace, p.Name, err)
}
}
// Create the preemptor.
@ -474,14 +474,14 @@ func TestPreemptionStarvation(t *testing.T) {
}
// Check that the preemptor pod gets the annotation for nominated node name.
if err := waitForNominatedNodeName(cs, preemptor); err != nil {
t.Errorf("Test [%v]: NominatedNodeName annotation was not set for pod %v: %v", test.description, preemptor.Name, err)
t.Errorf("Test [%v]: NominatedNodeName annotation was not set for pod %v/%v: %v", test.description, preemptor.Namespace, preemptor.Name, err)
}
// Make sure that preemptor is scheduled after preemptions.
if err := waitForPodToScheduleWithTimeout(cs, preemptor, 60*time.Second); err != nil {
t.Errorf("Preemptor pod %v didn't get scheduled: %v", preemptor.Name, err)
}
// Cleanup
glog.Info("Cleaning up all pods...")
klog.Info("Cleaning up all pods...")
allPods := pendingPods
allPods = append(allPods, runningPods...)
allPods = append(allPods, preemptor)
@ -532,7 +532,7 @@ func TestNominatedNodeCleanUp(t *testing.T) {
// make sure that the pods are all scheduled.
for _, p := range lowPriPods {
if err := waitForPodToSchedule(cs, p); err != nil {
t.Fatalf("Pod %v didn't get scheduled: %v", p.Name, err)
t.Fatalf("Pod %v/%v didn't get scheduled: %v", p.Namespace, p.Name, err)
}
}
// Step 2. Create a medium priority pod.
@ -551,7 +551,7 @@ func TestNominatedNodeCleanUp(t *testing.T) {
}
// Step 3. Check that nominated node name of the medium priority pod is set.
if err := waitForNominatedNodeName(cs, medPriPod); err != nil {
t.Errorf("NominatedNodeName annotation was not set for pod %v: %v", medPriPod.Name, err)
t.Errorf("NominatedNodeName annotation was not set for pod %v/%v: %v", medPriPod.Namespace, medPriPod.Name, err)
}
// Step 4. Create a high priority pod.
podConf = initPausePod(cs, &pausePodConfig{
@ -569,7 +569,7 @@ func TestNominatedNodeCleanUp(t *testing.T) {
}
// Step 5. Check that nominated node name of the high priority pod is set.
if err := waitForNominatedNodeName(cs, highPriPod); err != nil {
t.Errorf("NominatedNodeName annotation was not set for pod %v: %v", medPriPod.Name, err)
t.Errorf("NominatedNodeName annotation was not set for pod %v/%v: %v", medPriPod.Namespace, medPriPod.Name, err)
}
// And the nominated node name of the medium priority pod is cleared.
if err := wait.Poll(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
@ -842,8 +842,8 @@ func TestPDBInPreemption(t *testing.T) {
t.Fatalf("Failed to create PDB: %v", err)
}
}
// Wait for PDBs to show up in the scheduler's cache and become stable.
if err := waitCachedPDBsStable(context, test.pdbs, test.pdbPodNum); err != nil {
// Wait for PDBs to become stable.
if err := waitForPDBsStable(context, test.pdbs, test.pdbPodNum); err != nil {
t.Fatalf("Not all pdbs are stable in the cache: %v", err)
}
@ -856,18 +856,18 @@ func TestPDBInPreemption(t *testing.T) {
for i, p := range pods {
if _, found := test.preemptedPodIndexes[i]; found {
if err = wait.Poll(time.Second, wait.ForeverTestTimeout, podIsGettingEvicted(cs, p.Namespace, p.Name)); err != nil {
t.Errorf("Test [%v]: Pod %v is not getting evicted.", test.description, p.Name)
t.Errorf("Test [%v]: Pod %v/%v is not getting evicted.", test.description, p.Namespace, p.Name)
}
} else {
if p.DeletionTimestamp != nil {
t.Errorf("Test [%v]: Didn't expect pod %v to get preempted.", test.description, p.Name)
t.Errorf("Test [%v]: Didn't expect pod %v/%v to get preempted.", test.description, p.Namespace, p.Name)
}
}
}
// Also check that the preemptor pod gets the annotation for nominated node name.
if len(test.preemptedPodIndexes) > 0 {
if err := waitForNominatedNodeName(cs, preemptor); err != nil {
t.Errorf("Test [%v]: NominatedNodeName annotation was not set for pod %v: %v", test.description, preemptor.Name, err)
t.Errorf("Test [%v]: NominatedNodeName annotation was not set for pod %v/%v: %v", test.description, preemptor.Namespace, preemptor.Name, err)
}
}
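The hunks above migrate logging from github.com/golang/glog to k8s.io/klog. A minimal sketch of that swap, assuming only the klog package (klog keeps glog's call surface, so call sites stay the same; names below are illustrative):

package example

import "k8s.io/klog" // previously: "github.com/golang/glog"

// logCleanup mirrors the call-site change above: the same Info-style calls,
// now served by klog instead of glog.
func logCleanup() {
	klog.Info("Cleaning up all pods...")
	klog.Infof("deleted %d pods", 3)
}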


@ -22,6 +22,7 @@ import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
testutils "k8s.io/kubernetes/test/utils"
"strings"
)
// This file tests the scheduler priority functions.
@ -172,3 +173,64 @@ func TestPodAffinity(t *testing.T) {
}
t.Errorf("Pod %v got scheduled on an unexpected node: %v.", podName, pod.Spec.NodeName)
}
// TestImageLocality verifies that the scheduler's image locality priority function
// works correctly, i.e., the pod gets scheduled to the node where its container images are ready.
func TestImageLocality(t *testing.T) {
context := initTest(t, "image-locality")
defer cleanupTest(t, context)
// Add a few nodes.
_, err := createNodes(context.clientSet, "testnode", nil, 10)
if err != nil {
t.Fatalf("cannot create nodes: %v", err)
}
// We use a fake large image as the pod's test image, which has a relatively large size.
image := v1.ContainerImage{
Names: []string{
"fake-large-image:v1",
},
SizeBytes: 3000 * 1024 * 1024,
}
// Create a node with the large image
nodeWithLargeImage, err := createNodeWithImages(context.clientSet, "testnode-large-image", nil, []v1.ContainerImage{image})
if err != nil {
t.Fatalf("cannot create node with a large image: %v", err)
}
// Create a pod with containers each having the specified image.
podName := "pod-using-large-image"
pod, err := runPodWithContainers(context.clientSet, initPodWithContainers(context.clientSet, &podWithContainersConfig{
Name: podName,
Namespace: context.ns.Name,
Containers: makeContainersWithImages(image.Names),
}))
if err != nil {
t.Fatalf("error running pod with images: %v", err)
}
if pod.Spec.NodeName != nodeWithLargeImage.Name {
t.Errorf("pod %v got scheduled on an unexpected node: %v. Expected node: %v.", podName, pod.Spec.NodeName, nodeWithLargeImage.Name)
} else {
t.Logf("pod %v got successfully scheduled on node %v.", podName, pod.Spec.NodeName)
}
}
// makeContainersWithImages returns a list of v1.Container objects, one for each given image. Duplicates of an image are ignored,
// i.e., each image is used only once.
func makeContainersWithImages(images []string) []v1.Container {
var containers []v1.Container
usedImages := make(map[string]struct{})
for _, image := range images {
if _, ok := usedImages[image]; !ok {
containers = append(containers, v1.Container{
Name: strings.Replace(image, ":", "-", -1) + "-container",
Image: image,
})
usedImages[image] = struct{}{}
}
}
return containers
}
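For reference, a minimal sketch of the node object TestImageLocality relies on: the scheduler's image-locality priority favors nodes whose status already lists the pod's (large) images. The helper name is illustrative; the field values mirror what createNodeWithImages builds in the test above.

package example

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// buildNodeWithLargeImage (name illustrative) returns a Node whose status
// advertises one pre-pulled, large image, the shape the image-locality
// priority scores against.
func buildNodeWithLargeImage() *v1.Node {
	return &v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: "testnode-large-image"},
		Spec:       v1.NodeSpec{Unschedulable: false},
		Status: v1.NodeStatus{
			Images: []v1.ContainerImage{
				{Names: []string{"fake-large-image:v1"}, SizeBytes: 3000 * 1024 * 1024},
			},
		},
	}
}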


@ -20,19 +20,13 @@ package scheduler
import (
"fmt"
"reflect"
"testing"
"time"
"k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1beta1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/diff"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
@ -45,11 +39,11 @@ import (
schedulerapp "k8s.io/kubernetes/cmd/kube-scheduler/app"
schedulerappconfig "k8s.io/kubernetes/cmd/kube-scheduler/app/config"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/apis/componentconfig"
"k8s.io/kubernetes/pkg/scheduler"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
"k8s.io/kubernetes/pkg/scheduler/factory"
"k8s.io/kubernetes/test/integration/framework"
@ -140,6 +134,7 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
"GeneralPredicates",
"MatchInterPodAffinity",
"MaxAzureDiskVolumeCount",
"MaxCSIVolumeCountPred",
"MaxEBSVolumeCount",
"MaxGCEPDVolumeCount",
"NoDiskConflict",
@ -154,6 +149,7 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
"NodePreferAvoidPodsPriority",
"SelectorSpreadPriority",
"TaintTolerationPriority",
"ImageLocalityPriority",
),
},
{
@ -173,7 +169,7 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
configPolicyName := fmt.Sprintf("scheduler-custom-policy-config-%d", i)
policyConfigMap := v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: configPolicyName},
Data: map[string]string{componentconfig.SchedulerPolicyConfigMapKey: test.policy},
Data: map[string]string{kubeschedulerconfig.SchedulerPolicyConfigMapKey: test.policy},
}
policyConfigMap.APIVersion = "v1"
@ -182,18 +178,20 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientSet.CoreV1().Events("")})
defaultBindTimeout := int64(30)
ss := &schedulerappconfig.Config{
ComponentConfig: componentconfig.KubeSchedulerConfiguration{
ComponentConfig: kubeschedulerconfig.KubeSchedulerConfiguration{
HardPodAffinitySymmetricWeight: v1.DefaultHardPodAffinitySymmetricWeight,
SchedulerName: v1.DefaultSchedulerName,
AlgorithmSource: componentconfig.SchedulerAlgorithmSource{
Policy: &componentconfig.SchedulerPolicySource{
ConfigMap: &componentconfig.SchedulerPolicyConfigMapSource{
AlgorithmSource: kubeschedulerconfig.SchedulerAlgorithmSource{
Policy: &kubeschedulerconfig.SchedulerPolicySource{
ConfigMap: &kubeschedulerconfig.SchedulerPolicyConfigMapSource{
Namespace: policyConfigMap.Namespace,
Name: policyConfigMap.Name,
},
},
},
BindTimeoutSeconds: &defaultBindTimeout,
},
Client: clientSet,
InformerFactory: informerFactory,
@ -243,18 +241,20 @@ func TestSchedulerCreationFromNonExistentConfigMap(t *testing.T) {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientSet.CoreV1().Events("")})
defaultBindTimeout := int64(30)
ss := &schedulerappconfig.Config{
ComponentConfig: componentconfig.KubeSchedulerConfiguration{
ComponentConfig: kubeschedulerconfig.KubeSchedulerConfiguration{
SchedulerName: v1.DefaultSchedulerName,
AlgorithmSource: componentconfig.SchedulerAlgorithmSource{
Policy: &componentconfig.SchedulerPolicySource{
ConfigMap: &componentconfig.SchedulerPolicyConfigMapSource{
AlgorithmSource: kubeschedulerconfig.SchedulerAlgorithmSource{
Policy: &kubeschedulerconfig.SchedulerPolicySource{
ConfigMap: &kubeschedulerconfig.SchedulerPolicyConfigMapSource{
Namespace: "non-existent-config",
Name: "non-existent-config",
},
},
},
HardPodAffinitySymmetricWeight: v1.DefaultHardPodAffinitySymmetricWeight,
BindTimeoutSeconds: &defaultBindTimeout,
},
Client: clientSet,
InformerFactory: informerFactory,
@ -522,7 +522,10 @@ func TestMultiScheduler(t *testing.T) {
informerFactory2 := informers.NewSharedInformerFactory(context.clientSet, 0)
podInformer2 := factory.NewPodInformer(context.clientSet, 0)
schedulerConfigFactory2 := createConfiguratorWithPodInformer(fooScheduler, clientSet2, podInformer2, informerFactory2)
stopCh := make(chan struct{})
defer close(stopCh)
schedulerConfigFactory2 := createConfiguratorWithPodInformer(fooScheduler, clientSet2, podInformer2, informerFactory2, stopCh)
schedulerConfig2, err := schedulerConfigFactory2.Create()
if err != nil {
t.Errorf("Couldn't create scheduler config: %v", err)
@ -530,12 +533,11 @@ func TestMultiScheduler(t *testing.T) {
eventBroadcaster2 := record.NewBroadcaster()
schedulerConfig2.Recorder = eventBroadcaster2.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: fooScheduler})
eventBroadcaster2.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientSet2.CoreV1().Events("")})
go podInformer2.Informer().Run(schedulerConfig2.StopEverything)
informerFactory2.Start(schedulerConfig2.StopEverything)
go podInformer2.Informer().Run(stopCh)
informerFactory2.Start(stopCh)
sched2, _ := scheduler.NewFromConfigurator(&scheduler.FakeConfigurator{Config: schedulerConfig2}, nil...)
sched2.Run()
defer close(schedulerConfig2.StopEverything)
// 6. **check point-2**:
// - testPodWithAnnotationFitsFoo should be scheduled
@ -665,95 +667,6 @@ func TestAllocatable(t *testing.T) {
}
}
// TestPDBCache verifies that scheduler cache works as expected when handling
// PodDisruptionBudget.
func TestPDBCache(t *testing.T) {
context := initTest(t, "pdbcache")
defer cleanupTest(t, context)
intstrMin := intstr.FromInt(4)
pdb := &policy.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{
Namespace: context.ns.Name,
Name: "test-pdb",
UID: types.UID("test-pdb-uid"),
Labels: map[string]string{"tkey1": "tval1", "tkey2": "tval2"},
},
Spec: policy.PodDisruptionBudgetSpec{
MinAvailable: &intstrMin,
Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"tkey": "tvalue"}},
},
}
createdPDB, err := context.clientSet.PolicyV1beta1().PodDisruptionBudgets(context.ns.Name).Create(pdb)
if err != nil {
t.Errorf("Failed to create PDB: %v", err)
}
// Wait for PDB to show up in the scheduler's cache.
if err = wait.Poll(time.Second, 15*time.Second, func() (bool, error) {
cachedPDBs, err := context.scheduler.Config().SchedulerCache.ListPDBs(labels.Everything())
if err != nil {
t.Errorf("Error while polling for PDB: %v", err)
return false, err
}
return len(cachedPDBs) > 0, err
}); err != nil {
t.Fatalf("No PDB was added to the cache: %v", err)
}
// Read PDB from the cache and compare it.
cachedPDBs, err := context.scheduler.Config().SchedulerCache.ListPDBs(labels.Everything())
if len(cachedPDBs) != 1 {
t.Fatalf("Expected to have 1 pdb in cache, but found %d.", len(cachedPDBs))
}
if !reflect.DeepEqual(createdPDB, cachedPDBs[0]) {
t.Errorf("Got different PDB than expected.\nDifference detected on:\n%s", diff.ObjectReflectDiff(createdPDB, cachedPDBs[0]))
}
// Update PDB and change its labels.
pdbCopy := *cachedPDBs[0]
pdbCopy.Labels = map[string]string{}
updatedPDB, err := context.clientSet.PolicyV1beta1().PodDisruptionBudgets(context.ns.Name).Update(&pdbCopy)
if err != nil {
t.Errorf("Failed to update PDB: %v", err)
}
// Wait for PDB to be updated in the scheduler's cache.
if err = wait.Poll(time.Second, 15*time.Second, func() (bool, error) {
cachedPDBs, err := context.scheduler.Config().SchedulerCache.ListPDBs(labels.Everything())
if err != nil {
t.Errorf("Error while polling for PDB: %v", err)
return false, err
}
return len(cachedPDBs[0].Labels) == 0, err
}); err != nil {
t.Fatalf("No PDB was updated in the cache: %v", err)
}
// Read PDB from the cache and compare it.
cachedPDBs, err = context.scheduler.Config().SchedulerCache.ListPDBs(labels.Everything())
if len(cachedPDBs) != 1 {
t.Errorf("Expected to have 1 pdb in cache, but found %d.", len(cachedPDBs))
}
if !reflect.DeepEqual(updatedPDB, cachedPDBs[0]) {
t.Errorf("Got different PDB than expected.\nDifference detected on:\n%s", diff.ObjectReflectDiff(updatedPDB, cachedPDBs[0]))
}
// Delete PDB.
err = context.clientSet.PolicyV1beta1().PodDisruptionBudgets(context.ns.Name).Delete(pdb.Name, &metav1.DeleteOptions{})
if err != nil {
t.Errorf("Failed to delete PDB: %v", err)
}
// Wait for PDB to be deleted from the scheduler's cache.
if err = wait.Poll(time.Second, 15*time.Second, func() (bool, error) {
cachedPDBs, err := context.scheduler.Config().SchedulerCache.ListPDBs(labels.Everything())
if err != nil {
t.Errorf("Error while polling for PDB: %v", err)
return false, err
}
return len(cachedPDBs) == 0, err
}); err != nil {
t.Errorf("No PDB was deleted from the cache: %v", err)
}
}
// TestSchedulerInformers tests that scheduler receives informer events and updates its cache when
// pods are scheduled by other schedulers.
func TestSchedulerInformers(t *testing.T) {
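The TestMultiScheduler and config-factory hunks above (and the util.go changes later in this commit) replace schedulerConfig.StopEverything with a stop channel owned by the test. A minimal sketch of that ownership pattern, standard library only, names illustrative:

package example

// runWithStop sketches the stop-channel ownership the tests adopt: the caller
// creates the channel, hands it to everything that needs a shutdown signal
// (pod informer, informer factory, controllers), and closes it once on cleanup.
func runWithStop(start func(stopCh <-chan struct{})) {
	stopCh := make(chan struct{})
	defer close(stopCh) // single owner closes; all receivers observe shutdown
	start(stopCh)
}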


@ -19,7 +19,7 @@ package scheduler
// This file tests the Taint feature.
import (
"reflect"
"fmt"
"testing"
"time"
@ -28,22 +28,38 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
internalinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
"k8s.io/kubernetes/pkg/controller/nodelifecycle"
kubeadmission "k8s.io/kubernetes/pkg/kubeapiserver/admission"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
"k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
"k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction"
pluginapi "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction"
)
// TestTaintNodeByCondition verifies:
// 1. MemoryPressure Toleration is added to non-BestEffort Pod by PodTolerationRestriction
// 2. NodeController taints nodes by node condition
// 3. Scheduler allows pod to tolerate node condition taints, e.g. network unavailable
func newPod(nsName, name string, req, limit v1.ResourceList) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: nsName,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "busybox",
Image: "busybox",
Resources: v1.ResourceRequirements{
Requests: req,
Limits: limit,
},
},
},
},
}
}
// TestTaintNodeByCondition tests related cases for TaintNodeByCondition feature.
func TestTaintNodeByCondition(t *testing.T) {
enabled := utilfeature.DefaultFeatureGate.Enabled("TaintNodesByCondition")
defer func() {
@ -60,34 +76,32 @@ func TestTaintNodeByCondition(t *testing.T) {
context := initTestMaster(t, "default", admission)
// Build clientset and informers for controllers.
internalClientset := internalclientset.NewForConfigOrDie(&restclient.Config{
externalClientset := kubernetes.NewForConfigOrDie(&restclient.Config{
QPS: -1,
Host: context.httpServer.URL,
ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
internalInformers := internalinformers.NewSharedInformerFactory(internalClientset, time.Second)
externalInformers := informers.NewSharedInformerFactory(externalClientset, time.Second)
kubeadmission.WantsInternalKubeClientSet(admission).SetInternalKubeClientSet(internalClientset)
kubeadmission.WantsInternalKubeInformerFactory(admission).SetInternalKubeInformerFactory(internalInformers)
controllerCh := make(chan struct{})
defer close(controllerCh)
admission.SetExternalKubeClientSet(externalClientset)
admission.SetExternalKubeInformerFactory(externalInformers)
// Apply feature gates to enable TaintNodesByCondition
algorithmprovider.ApplyFeatureGates()
context = initTestScheduler(t, context, controllerCh, false, nil)
clientset := context.clientSet
context = initTestScheduler(t, context, false, nil)
cs := context.clientSet
informers := context.informerFactory
nsName := context.ns.Name
// Start NodeLifecycleController for taint.
nc, err := nodelifecycle.NewNodeLifecycleController(
informers.Coordination().V1beta1().Leases(),
informers.Core().V1().Pods(),
informers.Core().V1().Nodes(),
informers.Extensions().V1beta1().DaemonSets(),
nil, // CloudProvider
clientset,
time.Second, // Node monitor grace period
cs,
time.Hour, // Node monitor grace period
time.Second, // Node startup grace period
time.Second, // Node monitor period
time.Second, // Pod eviction timeout
@ -103,92 +117,380 @@ func TestTaintNodeByCondition(t *testing.T) {
t.Errorf("Failed to create node controller: %v", err)
return
}
go nc.Run(controllerCh)
go nc.Run(context.stopCh)
// Waiting for all controller sync.
internalInformers.Start(controllerCh)
internalInformers.WaitForCacheSync(controllerCh)
externalInformers.Start(context.stopCh)
externalInformers.WaitForCacheSync(context.stopCh)
informers.Start(context.stopCh)
informers.WaitForCacheSync(context.stopCh)
// -------------------------------------------
// Test TaintNodeByCondition feature.
// -------------------------------------------
memoryPressureToleration := v1.Toleration{
Key: algorithm.TaintNodeMemoryPressure,
nodeRes := v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4000m"),
v1.ResourceMemory: resource.MustParse("16Gi"),
v1.ResourcePods: resource.MustParse("110"),
}
podRes := v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
v1.ResourceMemory: resource.MustParse("100Mi"),
}
notReadyToleration := v1.Toleration{
Key: schedulerapi.TaintNodeNotReady,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
}
// Case 1: Add MememoryPressure Toleration for non-BestEffort pod.
burstablePod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "burstable-pod",
Namespace: nsName,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
unreachableToleration := v1.Toleration{
Key: schedulerapi.TaintNodeUnreachable,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
}
unschedulableToleration := v1.Toleration{
Key: schedulerapi.TaintNodeUnschedulable,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
}
outOfDiskToleration := v1.Toleration{
Key: schedulerapi.TaintNodeOutOfDisk,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
}
memoryPressureToleration := v1.Toleration{
Key: schedulerapi.TaintNodeMemoryPressure,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
}
diskPressureToleration := v1.Toleration{
Key: schedulerapi.TaintNodeDiskPressure,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
}
networkUnavailableToleration := v1.Toleration{
Key: schedulerapi.TaintNodeNetworkUnavailable,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
}
pidPressureToleration := v1.Toleration{
Key: schedulerapi.TaintNodePIDPressure,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
}
bestEffortPod := newPod(nsName, "besteffort-pod", nil, nil)
burstablePod := newPod(nsName, "burstable-pod", podRes, nil)
guaranteePod := newPod(nsName, "guarantee-pod", podRes, podRes)
type podCase struct {
pod *v1.Pod
tolerations []v1.Toleration
fits bool
}
// switch to table-driven testing
tests := []struct {
name string
existingTaints []v1.Taint
nodeConditions []v1.NodeCondition
unschedulable bool
expectedTaints []v1.Taint
pods []podCase
}{
{
name: "not-ready node",
nodeConditions: []v1.NodeCondition{
{
Name: "busybox",
Image: "busybox",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
},
Type: v1.NodeReady,
Status: v1.ConditionFalse,
},
},
expectedTaints: []v1.Taint{
{
Key: schedulerapi.TaintNodeNotReady,
Effect: v1.TaintEffectNoSchedule,
},
},
pods: []podCase{
{
pod: bestEffortPod,
fits: false,
},
{
pod: burstablePod,
fits: false,
},
{
pod: guaranteePod,
fits: false,
},
{
pod: bestEffortPod,
tolerations: []v1.Toleration{notReadyToleration},
fits: true,
},
},
},
{
name: "unreachable node",
existingTaints: []v1.Taint{
{
Key: schedulerapi.TaintNodeUnreachable,
Effect: v1.TaintEffectNoSchedule,
},
},
nodeConditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionUnknown, // node status is "Unknown"
},
},
expectedTaints: []v1.Taint{
{
Key: schedulerapi.TaintNodeUnreachable,
Effect: v1.TaintEffectNoSchedule,
},
},
pods: []podCase{
{
pod: bestEffortPod,
fits: false,
},
{
pod: burstablePod,
fits: false,
},
{
pod: guaranteePod,
fits: false,
},
{
pod: bestEffortPod,
tolerations: []v1.Toleration{unreachableToleration},
fits: true,
},
},
},
{
name: "unschedulable node",
unschedulable: true, // node.spec.unschedulable = true
nodeConditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
},
},
expectedTaints: []v1.Taint{
{
Key: schedulerapi.TaintNodeUnschedulable,
Effect: v1.TaintEffectNoSchedule,
},
},
pods: []podCase{
{
pod: bestEffortPod,
fits: false,
},
{
pod: burstablePod,
fits: false,
},
{
pod: guaranteePod,
fits: false,
},
{
pod: bestEffortPod,
tolerations: []v1.Toleration{unschedulableToleration},
fits: true,
},
},
},
{
name: "out of disk node",
nodeConditions: []v1.NodeCondition{
{
Type: v1.NodeOutOfDisk,
Status: v1.ConditionTrue,
},
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
},
},
expectedTaints: []v1.Taint{
{
Key: schedulerapi.TaintNodeOutOfDisk,
Effect: v1.TaintEffectNoSchedule,
},
},
// In OutOfDisk condition, only pods with toleration can be scheduled.
pods: []podCase{
{
pod: bestEffortPod,
fits: false,
},
{
pod: burstablePod,
fits: false,
},
{
pod: guaranteePod,
fits: false,
},
{
pod: bestEffortPod,
tolerations: []v1.Toleration{outOfDiskToleration},
fits: true,
},
{
pod: bestEffortPod,
tolerations: []v1.Toleration{diskPressureToleration},
fits: false,
},
},
},
{
name: "memory pressure node",
nodeConditions: []v1.NodeCondition{
{
Type: v1.NodeMemoryPressure,
Status: v1.ConditionTrue,
},
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
},
},
expectedTaints: []v1.Taint{
{
Key: schedulerapi.TaintNodeMemoryPressure,
Effect: v1.TaintEffectNoSchedule,
},
},
// In MemoryPressure condition, both Burstable and Guaranteed pods are scheduled;
// BestEffort pods with toleration are also scheduled.
pods: []podCase{
{
pod: bestEffortPod,
fits: false,
},
{
pod: bestEffortPod,
tolerations: []v1.Toleration{memoryPressureToleration},
fits: true,
},
{
pod: bestEffortPod,
tolerations: []v1.Toleration{diskPressureToleration},
fits: false,
},
{
pod: burstablePod,
fits: true,
},
{
pod: guaranteePod,
fits: true,
},
},
},
{
name: "disk pressure node",
nodeConditions: []v1.NodeCondition{
{
Type: v1.NodeDiskPressure,
Status: v1.ConditionTrue,
},
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
},
},
expectedTaints: []v1.Taint{
{
Key: schedulerapi.TaintNodeDiskPressure,
Effect: v1.TaintEffectNoSchedule,
},
},
// In DiskPressure condition, only pods with toleration can be scheduled.
pods: []podCase{
{
pod: bestEffortPod,
fits: false,
},
{
pod: burstablePod,
fits: false,
},
{
pod: guaranteePod,
fits: false,
},
{
pod: bestEffortPod,
tolerations: []v1.Toleration{diskPressureToleration},
fits: true,
},
{
pod: bestEffortPod,
tolerations: []v1.Toleration{memoryPressureToleration},
fits: false,
},
},
},
{
name: "network unavailable and node is ready",
nodeConditions: []v1.NodeCondition{
{
Type: v1.NodeNetworkUnavailable,
Status: v1.ConditionTrue,
},
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
},
},
expectedTaints: []v1.Taint{
{
Key: schedulerapi.TaintNodeNetworkUnavailable,
Effect: v1.TaintEffectNoSchedule,
},
},
pods: []podCase{
{
pod: bestEffortPod,
fits: false,
},
{
pod: burstablePod,
fits: false,
},
{
pod: guaranteePod,
fits: false,
},
{
pod: burstablePod,
tolerations: []v1.Toleration{
networkUnavailableToleration,
},
fits: true,
},
},
},
}
burstablePodInServ, err := clientset.CoreV1().Pods(nsName).Create(burstablePod)
if err != nil {
t.Errorf("Case 1: Failed to create pod: %v", err)
} else if !reflect.DeepEqual(burstablePodInServ.Spec.Tolerations, []v1.Toleration{memoryPressureToleration}) {
t.Errorf("Case 1: Unexpected toleration of non-BestEffort pod, expected: %+v, got: %v",
[]v1.Toleration{memoryPressureToleration},
burstablePodInServ.Spec.Tolerations)
}
// Case 2: No MemoryPressure Toleration for BestEffort pod.
besteffortPod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "best-effort-pod",
Namespace: nsName,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "busybox",
Image: "busybox",
},
},
},
}
besteffortPodInServ, err := clientset.CoreV1().Pods(nsName).Create(besteffortPod)
if err != nil {
t.Errorf("Case 2: Failed to create pod: %v", err)
} else if len(besteffortPodInServ.Spec.Tolerations) != 0 {
t.Errorf("Case 2: Unexpected toleration # of BestEffort pod, expected: 0, got: %v",
len(besteffortPodInServ.Spec.Tolerations))
}
// Case 3: Taint Node by NetworkUnavailable condition.
networkUnavailableNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node-1",
},
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4000m"),
v1.ResourceMemory: resource.MustParse("16Gi"),
v1.ResourcePods: resource.MustParse("110"),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4000m"),
v1.ResourceMemory: resource.MustParse("16Gi"),
v1.ResourcePods: resource.MustParse("110"),
},
Conditions: []v1.NodeCondition{
{
name: "network unavailable and node is not ready",
nodeConditions: []v1.NodeCondition{
{
Type: v1.NodeNetworkUnavailable,
Status: v1.ConditionTrue,
@ -198,116 +500,175 @@ func TestTaintNodeByCondition(t *testing.T) {
Status: v1.ConditionFalse,
},
},
},
}
nodeInformerCh := make(chan bool)
nodeInformer := informers.Core().V1().Nodes().Informer()
nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
UpdateFunc: func(old, cur interface{}) {
curNode := cur.(*v1.Node)
if curNode.Name != "node-1" {
return
}
for _, taint := range curNode.Spec.Taints {
if taint.Key == algorithm.TaintNodeNetworkUnavailable &&
taint.Effect == v1.TaintEffectNoSchedule {
nodeInformerCh <- true
break
}
}
},
})
if _, err := clientset.CoreV1().Nodes().Create(networkUnavailableNode); err != nil {
t.Errorf("Case 3: Failed to create node: %v", err)
} else {
select {
case <-time.After(60 * time.Second):
t.Errorf("Case 3: Failed to taint node after 60s.")
case <-nodeInformerCh:
}
}
// Case 4: Schedule Pod with NetworkUnavailable toleration.
networkDaemonPod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "network-daemon-pod",
Namespace: nsName,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
expectedTaints: []v1.Taint{
{
Name: "busybox",
Image: "busybox",
Key: schedulerapi.TaintNodeNetworkUnavailable,
Effect: v1.TaintEffectNoSchedule,
},
{
Key: schedulerapi.TaintNodeNotReady,
Effect: v1.TaintEffectNoSchedule,
},
},
Tolerations: []v1.Toleration{
pods: []podCase{
{
Key: algorithm.TaintNodeNetworkUnavailable,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
pod: bestEffortPod,
fits: false,
},
{
pod: burstablePod,
fits: false,
},
{
pod: guaranteePod,
fits: false,
},
{
pod: burstablePod,
tolerations: []v1.Toleration{
networkUnavailableToleration,
},
fits: false,
},
{
pod: burstablePod,
tolerations: []v1.Toleration{
networkUnavailableToleration,
notReadyToleration,
},
fits: true,
},
},
},
{
name: "pid pressure node",
nodeConditions: []v1.NodeCondition{
{
Type: v1.NodePIDPressure,
Status: v1.ConditionTrue,
},
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
},
},
expectedTaints: []v1.Taint{
{
Key: schedulerapi.TaintNodePIDPressure,
Effect: v1.TaintEffectNoSchedule,
},
},
pods: []podCase{
{
pod: bestEffortPod,
fits: false,
},
{
pod: burstablePod,
fits: false,
},
{
pod: guaranteePod,
fits: false,
},
{
pod: bestEffortPod,
tolerations: []v1.Toleration{pidPressureToleration},
fits: true,
},
},
},
{
name: "multi taints on node",
nodeConditions: []v1.NodeCondition{
{
Type: v1.NodePIDPressure,
Status: v1.ConditionTrue,
},
{
Type: v1.NodeMemoryPressure,
Status: v1.ConditionTrue,
},
{
Type: v1.NodeDiskPressure,
Status: v1.ConditionTrue,
},
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
},
},
expectedTaints: []v1.Taint{
{
Key: schedulerapi.TaintNodeDiskPressure,
Effect: v1.TaintEffectNoSchedule,
},
{
Key: schedulerapi.TaintNodeMemoryPressure,
Effect: v1.TaintEffectNoSchedule,
},
{
Key: schedulerapi.TaintNodePIDPressure,
Effect: v1.TaintEffectNoSchedule,
},
},
},
}
if _, err := clientset.CoreV1().Pods(nsName).Create(networkDaemonPod); err != nil {
t.Errorf("Case 4: Failed to create pod for network daemon: %v", err)
} else {
if err := waitForPodToScheduleWithTimeout(clientset, networkDaemonPod, time.Second*60); err != nil {
t.Errorf("Case 4: Failed to schedule network daemon pod in 60s.")
}
}
// Case 5: Taint node by unschedulable condition
unschedulableNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node-2",
},
Spec: v1.NodeSpec{
Unschedulable: true,
},
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4000m"),
v1.ResourceMemory: resource.MustParse("16Gi"),
v1.ResourcePods: resource.MustParse("110"),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4000m"),
v1.ResourceMemory: resource.MustParse("16Gi"),
v1.ResourcePods: resource.MustParse("110"),
},
},
}
nodeInformerCh2 := make(chan bool)
nodeInformer2 := informers.Core().V1().Nodes().Informer()
nodeInformer2.AddEventHandler(cache.ResourceEventHandlerFuncs{
UpdateFunc: func(old, cur interface{}) {
curNode := cur.(*v1.Node)
if curNode.Name != "node-2" {
return
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
node := &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node-1",
},
Spec: v1.NodeSpec{
Unschedulable: test.unschedulable,
Taints: test.existingTaints,
},
Status: v1.NodeStatus{
Capacity: nodeRes,
Allocatable: nodeRes,
Conditions: test.nodeConditions,
},
}
for _, taint := range curNode.Spec.Taints {
if taint.Key == algorithm.TaintNodeUnschedulable &&
taint.Effect == v1.TaintEffectNoSchedule {
nodeInformerCh2 <- true
break
if _, err := cs.CoreV1().Nodes().Create(node); err != nil {
t.Errorf("Failed to create node, err: %v", err)
}
if err := waitForNodeTaints(cs, node, test.expectedTaints); err != nil {
t.Errorf("Failed to taint node <%s>, err: %v", node.Name, err)
}
var pods []*v1.Pod
for i, p := range test.pods {
pod := p.pod.DeepCopy()
pod.Name = fmt.Sprintf("%s-%d", pod.Name, i)
pod.Spec.Tolerations = p.tolerations
createdPod, err := cs.CoreV1().Pods(pod.Namespace).Create(pod)
if err != nil {
t.Fatalf("Failed to create pod %s/%s, error: %v",
pod.Namespace, pod.Name, err)
}
pods = append(pods, createdPod)
if p.fits {
if err := waitForPodToSchedule(cs, createdPod); err != nil {
t.Errorf("Failed to schedule pod %s/%s on the node, err: %v",
pod.Namespace, pod.Name, err)
}
} else {
if err := waitForPodUnschedulable(cs, createdPod); err != nil {
t.Errorf("Unschedulable pod %s/%s gets scheduled on the node, err: %v",
pod.Namespace, pod.Name, err)
}
}
}
},
})
if _, err := clientset.CoreV1().Nodes().Create(unschedulableNode); err != nil {
t.Errorf("Case 5: Failed to create node: %v", err)
} else {
select {
case <-time.After(60 * time.Second):
t.Errorf("Case 5: Failed to taint node after 60s.")
case <-nodeInformerCh2:
}
cleanupPods(cs, t, pods)
cleanupNodes(cs, t)
waitForSchedulerCacheCleanup(context.scheduler, t)
})
}
}
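The rewrite above folds the old case-by-case taint checks into one table-driven test. A minimal, generic sketch of that pattern (illustrative, unrelated to the scheduler types):

package example

import "testing"

// TestTableDriven shows the shape the refactor adopts: named cases with
// inputs and expectations, each executed as its own subtest via t.Run.
func TestTableDriven(t *testing.T) {
	tests := []struct {
		name string
		in   int
		want int
	}{
		{name: "zero", in: 0, want: 0},
		{name: "double two", in: 2, want: 4},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			if got := tc.in * 2; got != tc.want {
				t.Errorf("got %d, want %d", got, tc.want)
			}
		})
	}
}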


@ -51,6 +51,7 @@ import (
_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
"k8s.io/kubernetes/pkg/scheduler/factory"
taintutils "k8s.io/kubernetes/pkg/util/taints"
"k8s.io/kubernetes/test/integration/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@ -61,9 +62,10 @@ type TestContext struct {
ns *v1.Namespace
clientSet *clientset.Clientset
informerFactory informers.SharedInformerFactory
schedulerConfigFactory scheduler.Configurator
schedulerConfig *scheduler.Config
schedulerConfigFactory factory.Configurator
schedulerConfig *factory.Config
scheduler *scheduler.Scheduler
stopCh chan struct{}
}
// createConfiguratorWithPodInformer creates a configurator for scheduler.
@ -72,30 +74,36 @@ func createConfiguratorWithPodInformer(
clientSet clientset.Interface,
podInformer coreinformers.PodInformer,
informerFactory informers.SharedInformerFactory,
) scheduler.Configurator {
return factory.NewConfigFactory(
schedulerName,
clientSet,
informerFactory.Core().V1().Nodes(),
podInformer,
informerFactory.Core().V1().PersistentVolumes(),
informerFactory.Core().V1().PersistentVolumeClaims(),
informerFactory.Core().V1().ReplicationControllers(),
informerFactory.Extensions().V1beta1().ReplicaSets(),
informerFactory.Apps().V1beta1().StatefulSets(),
informerFactory.Core().V1().Services(),
informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
informerFactory.Storage().V1().StorageClasses(),
v1.DefaultHardPodAffinitySymmetricWeight,
utilfeature.DefaultFeatureGate.Enabled(features.EnableEquivalenceClassCache),
false,
)
stopCh <-chan struct{},
) factory.Configurator {
return factory.NewConfigFactory(&factory.ConfigFactoryArgs{
SchedulerName: schedulerName,
Client: clientSet,
NodeInformer: informerFactory.Core().V1().Nodes(),
PodInformer: podInformer,
PvInformer: informerFactory.Core().V1().PersistentVolumes(),
PvcInformer: informerFactory.Core().V1().PersistentVolumeClaims(),
ReplicationControllerInformer: informerFactory.Core().V1().ReplicationControllers(),
ReplicaSetInformer: informerFactory.Apps().V1().ReplicaSets(),
StatefulSetInformer: informerFactory.Apps().V1().StatefulSets(),
ServiceInformer: informerFactory.Core().V1().Services(),
PdbInformer: informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
StorageClassInformer: informerFactory.Storage().V1().StorageClasses(),
HardPodAffinitySymmetricWeight: v1.DefaultHardPodAffinitySymmetricWeight,
EnableEquivalenceClassCache: utilfeature.DefaultFeatureGate.Enabled(features.EnableEquivalenceClassCache),
DisablePreemption: false,
PercentageOfNodesToScore: schedulerapi.DefaultPercentageOfNodesToScore,
BindTimeoutSeconds: 600,
StopCh: stopCh,
})
}
// initTestMaster initializes a test environment and creates a master with default
// configuration.
func initTestMaster(t *testing.T, nsPrefix string, admission admission.Interface) *TestContext {
var context TestContext
context := TestContext{
stopCh: make(chan struct{}),
}
// 1. Create master
h := &framework.MasterHolder{Initialized: make(chan struct{})}
@ -135,13 +143,12 @@ func initTestMaster(t *testing.T, nsPrefix string, admission admission.Interface
func initTestScheduler(
t *testing.T,
context *TestContext,
controllerCh chan struct{},
setPodInformer bool,
policy *schedulerapi.Policy,
) *TestContext {
// Pod preemption is enabled by default scheduler configuration, but preemption only happens when PodPriority
// feature gate is enabled at the same time.
return initTestSchedulerWithOptions(t, context, controllerCh, setPodInformer, policy, false)
return initTestSchedulerWithOptions(t, context, setPodInformer, policy, false, true, time.Second)
}
// initTestSchedulerWithOptions initializes a test environment and creates a scheduler with default
@ -149,19 +156,21 @@ func initTestScheduler(
func initTestSchedulerWithOptions(
t *testing.T,
context *TestContext,
controllerCh chan struct{},
setPodInformer bool,
policy *schedulerapi.Policy,
disablePreemption bool,
disableEquivalenceCache bool,
resyncPeriod time.Duration,
) *TestContext {
// Enable EnableEquivalenceClassCache for all integration tests.
defer utilfeaturetesting.SetFeatureGateDuringTest(
t,
utilfeature.DefaultFeatureGate,
features.EnableEquivalenceClassCache, true)()
if !disableEquivalenceCache {
defer utilfeaturetesting.SetFeatureGateDuringTest(
t,
utilfeature.DefaultFeatureGate,
features.EnableEquivalenceClassCache, true)()
}
// 1. Create scheduler
context.informerFactory = informers.NewSharedInformerFactory(context.clientSet, time.Second)
context.informerFactory = informers.NewSharedInformerFactory(context.clientSet, resyncPeriod)
var podInformer coreinformers.PodInformer
@ -173,7 +182,7 @@ func initTestSchedulerWithOptions(
}
context.schedulerConfigFactory = createConfiguratorWithPodInformer(
v1.DefaultSchedulerName, context.clientSet, podInformer, context.informerFactory)
v1.DefaultSchedulerName, context.clientSet, podInformer, context.informerFactory, context.stopCh)
var err error
@ -187,11 +196,6 @@ func initTestSchedulerWithOptions(
t.Fatalf("Couldn't create scheduler config: %v", err)
}
// set controllerCh if provided.
if controllerCh != nil {
context.schedulerConfig.StopEverything = controllerCh
}
// set DisablePreemption option
context.schedulerConfig.DisablePreemption = disablePreemption
@ -246,21 +250,21 @@ func initDisruptionController(context *TestContext) *disruption.DisruptionContro
// initTest initializes a test environment and creates master and scheduler with default
// configuration.
func initTest(t *testing.T, nsPrefix string) *TestContext {
return initTestScheduler(t, initTestMaster(t, nsPrefix, nil), nil, true, nil)
return initTestScheduler(t, initTestMaster(t, nsPrefix, nil), true, nil)
}
// initTestDisablePreemption initializes a test environment and creates master and scheduler with default
// configuration but with pod preemption disabled.
func initTestDisablePreemption(t *testing.T, nsPrefix string) *TestContext {
return initTestSchedulerWithOptions(
t, initTestMaster(t, nsPrefix, nil), nil, true, nil, true)
t, initTestMaster(t, nsPrefix, nil), true, nil, true, true, time.Second)
}
// cleanupTest deletes the scheduler and the test namespace. It should be called
// at the end of a test.
func cleanupTest(t *testing.T, context *TestContext) {
// Kill the scheduler.
close(context.schedulerConfig.StopEverything)
close(context.stopCh)
// Cleanup nodes.
context.clientSet.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})
framework.DeleteTestingNamespace(context.ns, context.httpServer, t)
@ -322,24 +326,35 @@ func waitForNodeLabels(cs clientset.Interface, nodeName string, labels map[strin
return wait.Poll(time.Millisecond*100, wait.ForeverTestTimeout, nodeHasLabels(cs, nodeName, labels))
}
// createNode creates a node with the given resource list and
// returns a pointer and error status. If 'res' is nil, a predefined amount of
// initNode returns a node with the given resource list and images. If 'res' is nil, a predefined amount of
// resource will be used.
func createNode(cs clientset.Interface, name string, res *v1.ResourceList) (*v1.Node, error) {
func initNode(name string, res *v1.ResourceList, images []v1.ContainerImage) *v1.Node {
// if resource is nil, we use a default amount of resources for the node.
if res == nil {
res = &v1.ResourceList{
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
}
}
n := &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: name},
Spec: v1.NodeSpec{Unschedulable: false},
Status: v1.NodeStatus{
Capacity: *res,
Images: images,
},
}
return cs.CoreV1().Nodes().Create(n)
return n
}
// createNode creates a node with the given resource list.
func createNode(cs clientset.Interface, name string, res *v1.ResourceList) (*v1.Node, error) {
return cs.CoreV1().Nodes().Create(initNode(name, res, nil))
}
// createNodeWithImages creates a node with the given resource list and images.
func createNodeWithImages(cs clientset.Interface, name string, res *v1.ResourceList, images []v1.ContainerImage) (*v1.Node, error) {
return cs.CoreV1().Nodes().Create(initNode(name, res, images))
}
// updateNodeStatus updates the status of node.
@ -363,6 +378,44 @@ func createNodes(cs clientset.Interface, prefix string, res *v1.ResourceList, nu
return nodes[:], nil
}
// nodeTainted returns a condition function that returns true if the given node contains
// the taints.
func nodeTainted(cs clientset.Interface, nodeName string, taints []v1.Taint) wait.ConditionFunc {
return func() (bool, error) {
node, err := cs.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
if err != nil {
return false, err
}
if len(taints) != len(node.Spec.Taints) {
return false, nil
}
for _, taint := range taints {
if !taintutils.TaintExists(node.Spec.Taints, &taint) {
return false, nil
}
}
return true, nil
}
}
// waitForNodeTaints waits for a node to have the target taints and returns
// an error if it does not have taints within the given timeout.
func waitForNodeTaints(cs clientset.Interface, node *v1.Node, taints []v1.Taint) error {
return wait.Poll(100*time.Millisecond, 30*time.Second, nodeTainted(cs, node.Name, taints))
}
// cleanupNodes deletes all nodes.
func cleanupNodes(cs clientset.Interface, t *testing.T) {
err := cs.CoreV1().Nodes().DeleteCollection(
metav1.NewDeleteOptions(0), metav1.ListOptions{})
if err != nil {
t.Errorf("error while deleting all nodes: %v", err)
}
}
type pausePodConfig struct {
Name string
Namespace string
@ -442,6 +495,43 @@ func runPausePod(cs clientset.Interface, pod *v1.Pod) (*v1.Pod, error) {
if err != nil {
return nil, fmt.Errorf("Error creating pause pod: %v", err)
}
if err = waitForPodToSchedule(cs, pod); err != nil {
return pod, fmt.Errorf("Pod %v/%v didn't schedule successfully. Error: %v", pod.Namespace, pod.Name, err)
}
if pod, err = cs.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}); err != nil {
return pod, fmt.Errorf("Error getting pod %v/%v info: %v", pod.Namespace, pod.Name, err)
}
return pod, nil
}
type podWithContainersConfig struct {
Name string
Namespace string
Containers []v1.Container
}
// initPodWithContainers initializes a pod API object from the given config. This is used primarily for generating
// pods with containers each having a specific image.
func initPodWithContainers(cs clientset.Interface, conf *podWithContainersConfig) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: conf.Name,
Namespace: conf.Namespace,
},
Spec: v1.PodSpec{
Containers: conf.Containers,
},
}
return pod
}
// runPodWithContainers creates a pod with given config and containers and waits
// until it is scheduled. It returns its pointer and error status.
func runPodWithContainers(cs clientset.Interface, pod *v1.Pod) (*v1.Pod, error) {
pod, err := cs.CoreV1().Pods(pod.Namespace).Create(pod)
if err != nil {
return nil, fmt.Errorf("Error creating pod-with-containers: %v", err)
}
if err = waitForPodToSchedule(cs, pod); err != nil {
return pod, fmt.Errorf("Pod %v didn't schedule successfully. Error: %v", pod.Name, err)
}
@ -539,20 +629,20 @@ func waitForPodUnschedulable(cs clientset.Interface, pod *v1.Pod) error {
return waitForPodUnschedulableWithTimeout(cs, pod, 30*time.Second)
}
// waitCachedPDBsStable waits for PDBs in scheduler cache to have "CurrentHealthy" status equal to
// waitForPDBsStable waits for PDBs to have "CurrentHealthy" status equal to
// the expected values.
func waitCachedPDBsStable(context *TestContext, pdbs []*policy.PodDisruptionBudget, pdbPodNum []int32) error {
func waitForPDBsStable(context *TestContext, pdbs []*policy.PodDisruptionBudget, pdbPodNum []int32) error {
return wait.Poll(time.Second, 60*time.Second, func() (bool, error) {
cachedPDBs, err := context.scheduler.Config().SchedulerCache.ListPDBs(labels.Everything())
pdbList, err := context.clientSet.PolicyV1beta1().PodDisruptionBudgets(context.ns.Name).List(metav1.ListOptions{})
if err != nil {
return false, err
}
if len(cachedPDBs) != len(pdbs) {
if len(pdbList.Items) != len(pdbs) {
return false, nil
}
for i, pdb := range pdbs {
found := false
for _, cpdb := range cachedPDBs {
for _, cpdb := range pdbList.Items {
if pdb.Name == cpdb.Name && pdb.Namespace == cpdb.Namespace {
found = true
if cpdb.Status.CurrentHealthy != pdbPodNum[i] {
@ -638,3 +728,15 @@ func cleanupPodsInNamespace(cs clientset.Interface, t *testing.T, ns string) {
t.Errorf("error while waiting for pods in namespace %v: %v", ns, err)
}
}
func waitForSchedulerCacheCleanup(sched *scheduler.Scheduler, t *testing.T) {
schedulerCacheIsEmpty := func() (bool, error) {
snapshot := sched.Cache().Snapshot()
return len(snapshot.Nodes) == 0 && len(snapshot.AssumedPods) == 0, nil
}
if err := wait.Poll(time.Second, wait.ForeverTestTimeout, schedulerCacheIsEmpty); err != nil {
t.Errorf("Failed to wait for scheduler cache cleanup: %v", err)
}
}
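The helpers above (nodeTainted, waitForNodeTaints, waitForPDBsStable, waitForSchedulerCacheCleanup) all follow the same wait.Poll idiom. A minimal sketch, assuming only the k8s.io/apimachinery wait package; the helper name is illustrative:

package example

import (
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// pollUntil runs check every 100ms until it returns true, returns an error,
// or the 30-second timeout elapses, mirroring the helpers above.
func pollUntil(check wait.ConditionFunc) error {
	return wait.Poll(100*time.Millisecond, 30*time.Second, check)
}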

File diff suppressed because it is too large