vendor updates
vendor/k8s.io/kubernetes/test/integration/scheduler/BUILD (29 changes, generated, vendored)

@@ -19,31 +19,27 @@ go_test(
"taint_test.go",
"volume_binding_test.go",
],
importpath = "k8s.io/kubernetes/test/integration/scheduler",
library = ":go_default_library",
embed = [":go_default_library"],
tags = ["integration"],
deps = [
"//cmd/kube-scheduler/app:go_default_library",
"//pkg/api/legacyscheme:go_default_library",
"//pkg/api/testapi:go_default_library",
"//pkg/apis/componentconfig:go_default_library",
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/client/informers/informers_generated/internalversion:go_default_library",
"//pkg/controller/node:go_default_library",
"//pkg/controller/node/ipam:go_default_library",
"//pkg/controller/nodelifecycle:go_default_library",
"//pkg/controller/volume/persistentvolume:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubeapiserver/admission:go_default_library",
"//plugin/cmd/kube-scheduler/app:go_default_library",
"//pkg/scheduler:go_default_library",
"//pkg/scheduler/algorithm:go_default_library",
"//pkg/scheduler/algorithmprovider:go_default_library",
"//pkg/scheduler/api:go_default_library",
"//pkg/scheduler/factory:go_default_library",
"//pkg/scheduler/schedulercache:go_default_library",
"//plugin/pkg/admission/podtolerationrestriction:go_default_library",
"//plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction:go_default_library",
"//plugin/pkg/scheduler:go_default_library",
"//plugin/pkg/scheduler/algorithm:go_default_library",
"//plugin/pkg/scheduler/algorithmprovider:go_default_library",
"//plugin/pkg/scheduler/api:go_default_library",
"//plugin/pkg/scheduler/core:go_default_library",
"//plugin/pkg/scheduler/factory:go_default_library",
"//plugin/pkg/scheduler/schedulercache:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/integration/framework:go_default_library",
"//test/utils:go_default_library",
@@ -57,6 +53,7 @@ go_test(
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
@@ -89,9 +86,9 @@ go_library(
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/api/v1/pod:go_default_library",
"//plugin/pkg/scheduler:go_default_library",
"//plugin/pkg/scheduler/algorithmprovider:go_default_library",
"//plugin/pkg/scheduler/factory:go_default_library",
"//pkg/scheduler:go_default_library",
"//pkg/scheduler/algorithmprovider:go_default_library",
"//pkg/scheduler/factory:go_default_library",
"//test/integration/framework:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
vendor/k8s.io/kubernetes/test/integration/scheduler/extender_test.go (39 changes, generated, vendored)

@@ -38,18 +38,19 @@ import (
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/plugin/pkg/scheduler"
_ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider"
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
"k8s.io/kubernetes/plugin/pkg/scheduler/factory"
"k8s.io/kubernetes/pkg/scheduler"
_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
"k8s.io/kubernetes/pkg/scheduler/factory"
e2e "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/integration/framework"
)

const (
filter = "filter"
prioritize = "prioritize"
bind = "bind"
filter = "filter"
prioritize = "prioritize"
bind = "bind"
extendedResourceName = "foo.com/bar"
)

type fitPredicate func(pod *v1.Pod, node *v1.Node) (bool, error)
@@ -334,7 +335,7 @@ func TestSchedulerExtender(t *testing.T) {
FilterVerb: filter,
PrioritizeVerb: prioritize,
Weight: 3,
EnableHttps: false,
EnableHTTPS: false,
},
{
URLPrefix: es2.URL,
@@ -342,14 +343,20 @@ func TestSchedulerExtender(t *testing.T) {
PrioritizeVerb: prioritize,
BindVerb: bind,
Weight: 4,
EnableHttps: false,
EnableHTTPS: false,
ManagedResources: []schedulerapi.ExtenderManagedResource{
{
Name: extendedResourceName,
IgnoredByScheduler: true,
},
},
},
{
URLPrefix: es3.URL,
FilterVerb: filter,
PrioritizeVerb: prioritize,
Weight: 10,
EnableHttps: false,
EnableHTTPS: false,
NodeCacheCapable: true,
},
},
@@ -420,7 +427,17 @@ func DoTestPodScheduling(ns *v1.Namespace, t *testing.T, cs clientset.Interface)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "extender-test-pod"},
Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: e2e.GetPauseImageName(cs)}},
Containers: []v1.Container{
{
Name: "container",
Image: e2e.GetPauseImageName(cs),
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
extendedResourceName: *resource.NewQuantity(1, resource.DecimalSI),
},
},
},
},
},
}
vendor/k8s.io/kubernetes/test/integration/scheduler/preemption_test.go (21 changes, generated, vendored)

@@ -33,8 +33,7 @@ import (
utilfeature "k8s.io/apiserver/pkg/util/feature"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/features"
_ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider"
"k8s.io/kubernetes/plugin/pkg/scheduler/core"
_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
testutils "k8s.io/kubernetes/test/utils"

"github.com/golang/glog"
@@ -42,14 +41,13 @@ import (

var lowPriority, mediumPriority, highPriority = int32(100), int32(200), int32(300)

func waitForNominatedNodeAnnotation(cs clientset.Interface, pod *v1.Pod) error {
func waitForNominatedNodeName(cs clientset.Interface, pod *v1.Pod) error {
if err := wait.Poll(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
pod, err := cs.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
annot, found := pod.Annotations[core.NominatedNodeAnnotationKey]
if found && len(annot) > 0 {
if len(pod.Status.NominatedNodeName) > 0 {
return true, nil
}
return false, err
@@ -276,7 +274,7 @@ func TestPreemption(t *testing.T) {
}
// Also check that the preemptor pod gets the annotation for nominated node name.
if len(test.preemptedPodIndexes) > 0 {
if err := waitForNominatedNodeAnnotation(cs, preemptor); err != nil {
if err := waitForNominatedNodeName(cs, preemptor); err != nil {
t.Errorf("Test [%v]: NominatedNodeName annotation was not set for pod %v: %v", test.description, preemptor.Name, err)
}
}
@@ -389,7 +387,7 @@ func TestPreemptionStarvation(t *testing.T) {
t.Errorf("Error while creating the preempting pod: %v", err)
}
// Check that the preemptor pod gets the annotation for nominated node name.
if err := waitForNominatedNodeAnnotation(cs, preemptor); err != nil {
if err := waitForNominatedNodeName(cs, preemptor); err != nil {
t.Errorf("Test [%v]: NominatedNodeName annotation was not set for pod %v: %v", test.description, preemptor.Name, err)
}
// Make sure that preemptor is scheduled after preemptions.
@@ -462,7 +460,7 @@ func TestNominatedNodeCleanUp(t *testing.T) {
t.Errorf("Error while creating the medium priority pod: %v", err)
}
// Step 3. Check that nominated node name of the medium priority pod is set.
if err := waitForNominatedNodeAnnotation(cs, medPriPod); err != nil {
if err := waitForNominatedNodeName(cs, medPriPod); err != nil {
t.Errorf("NominatedNodeName annotation was not set for pod %v: %v", medPriPod.Name, err)
}
// Step 4. Create a high priority pod.
@@ -480,7 +478,7 @@ func TestNominatedNodeCleanUp(t *testing.T) {
t.Errorf("Error while creating the high priority pod: %v", err)
}
// Step 5. Check that nominated node name of the high priority pod is set.
if err := waitForNominatedNodeAnnotation(cs, highPriPod); err != nil {
if err := waitForNominatedNodeName(cs, highPriPod); err != nil {
t.Errorf("NominatedNodeName annotation was not set for pod %v: %v", medPriPod.Name, err)
}
// And the nominated node name of the medium priority pod is cleared.
@@ -489,8 +487,7 @@ func TestNominatedNodeCleanUp(t *testing.T) {
if err != nil {
t.Errorf("Error getting the medium priority pod info: %v", err)
}
n, found := pod.Annotations[core.NominatedNodeAnnotationKey]
if !found || len(n) == 0 {
if len(pod.Status.NominatedNodeName) == 0 {
return true, nil
}
return false, err
@@ -755,7 +752,7 @@ func TestPDBInPreemption(t *testing.T) {
}
// Also check that the preemptor pod gets the annotation for nominated node name.
if len(test.preemptedPodIndexes) > 0 {
if err := waitForNominatedNodeAnnotation(cs, preemptor); err != nil {
if err := waitForNominatedNodeName(cs, preemptor); err != nil {
t.Errorf("Test [%v]: NominatedNodeName annotation was not set for pod %v: %v", test.description, preemptor.Name, err)
}
}
vendor/k8s.io/kubernetes/test/integration/scheduler/scheduler_test.go (182 changes, generated, vendored)

@@ -31,6 +31,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/diff"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
@@ -39,16 +40,16 @@ import (
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
schedulerapp "k8s.io/kubernetes/cmd/kube-scheduler/app"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/apis/componentconfig"
schedulerapp "k8s.io/kubernetes/plugin/cmd/kube-scheduler/app"
"k8s.io/kubernetes/plugin/pkg/scheduler"
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
_ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider"
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
"k8s.io/kubernetes/plugin/pkg/scheduler/factory"
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
"k8s.io/kubernetes/pkg/scheduler"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
"k8s.io/kubernetes/pkg/scheduler/factory"
"k8s.io/kubernetes/pkg/scheduler/schedulercache"
"k8s.io/kubernetes/test/integration/framework"
)

@@ -97,69 +98,128 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
factory.RegisterPriorityFunction("PriorityOne", PriorityOne, 1)
factory.RegisterPriorityFunction("PriorityTwo", PriorityTwo, 1)

// Add a ConfigMap object.
configPolicyName := "scheduler-custom-policy-config"
policyConfigMap := v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: configPolicyName},
Data: map[string]string{
componentconfig.SchedulerPolicyConfigMapKey: `{
"kind" : "Policy",
"apiVersion" : "v1",
"predicates" : [
{"name" : "PredicateOne"},
{"name" : "PredicateTwo"}
],
"priorities" : [
{"name" : "PriorityOne", "weight" : 1},
{"name" : "PriorityTwo", "weight" : 5}
]
for i, test := range []struct {
policy string
expectedPredicates sets.String
expectedPrioritizers sets.String
}{
{
policy: `{
"kind" : "Policy",
"apiVersion" : "v1",
"predicates" : [
{"name" : "PredicateOne"},
{"name" : "PredicateTwo"}
],
"priorities" : [
{"name" : "PriorityOne", "weight" : 1},
{"name" : "PriorityTwo", "weight" : 5}
]
}`,
expectedPredicates: sets.NewString(
"CheckNodeCondition", // mandatory predicate
"PredicateOne",
"PredicateTwo",
),
expectedPrioritizers: sets.NewString(
"PriorityOne",
"PriorityTwo",
),
},
}
{
policy: `{
"kind" : "Policy",
"apiVersion" : "v1"
}`,
expectedPredicates: sets.NewString(
"CheckNodeCondition", // mandatory predicate
"CheckNodeDiskPressure",
"CheckNodeMemoryPressure",
"CheckVolumeBinding",
"GeneralPredicates",
"MatchInterPodAffinity",
"MaxAzureDiskVolumeCount",
"MaxEBSVolumeCount",
"MaxGCEPDVolumeCount",
"NoDiskConflict",
"NoVolumeZoneConflict",
"PodToleratesNodeTaints",
),
expectedPrioritizers: sets.NewString(
"BalancedResourceAllocation",
"InterPodAffinityPriority",
"LeastRequestedPriority",
"NodeAffinityPriority",
"NodePreferAvoidPodsPriority",
"SelectorSpreadPriority",
"TaintTolerationPriority",
),
},
{
policy: `{
"kind" : "Policy",
"apiVersion" : "v1",
"predicates" : [],
"priorities" : []
}`,
expectedPredicates: sets.NewString(
"CheckNodeCondition", // mandatory predicate
),
expectedPrioritizers: sets.NewString(),
},
} {
// Add a ConfigMap object.
configPolicyName := fmt.Sprintf("scheduler-custom-policy-config-%d", i)
policyConfigMap := v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: configPolicyName},
Data: map[string]string{componentconfig.SchedulerPolicyConfigMapKey: test.policy},
}

policyConfigMap.APIVersion = testapi.Groups[v1.GroupName].GroupVersion().String()
clientSet.CoreV1().ConfigMaps(metav1.NamespaceSystem).Create(&policyConfigMap)
policyConfigMap.APIVersion = testapi.Groups[v1.GroupName].GroupVersion().String()
clientSet.CoreV1().ConfigMaps(metav1.NamespaceSystem).Create(&policyConfigMap)

eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(clientSet.CoreV1().RESTClient()).Events("")})
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(clientSet.CoreV1().RESTClient()).Events("")})

ss := &schedulerapp.SchedulerServer{
SchedulerName: v1.DefaultSchedulerName,
AlgorithmSource: componentconfig.SchedulerAlgorithmSource{
Policy: &componentconfig.SchedulerPolicySource{
ConfigMap: &componentconfig.SchedulerPolicyConfigMapSource{
Namespace: policyConfigMap.Namespace,
Name: policyConfigMap.Name,
ss := &schedulerapp.SchedulerServer{
SchedulerName: v1.DefaultSchedulerName,
AlgorithmSource: componentconfig.SchedulerAlgorithmSource{
Policy: &componentconfig.SchedulerPolicySource{
ConfigMap: &componentconfig.SchedulerPolicyConfigMapSource{
Namespace: policyConfigMap.Namespace,
Name: policyConfigMap.Name,
},
},
},
},
HardPodAffinitySymmetricWeight: v1.DefaultHardPodAffinitySymmetricWeight,
Client: clientSet,
InformerFactory: informerFactory,
PodInformer: factory.NewPodInformer(clientSet, 0, v1.DefaultSchedulerName),
EventClient: clientSet.CoreV1(),
Recorder: eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: v1.DefaultSchedulerName}),
Broadcaster: eventBroadcaster,
}
HardPodAffinitySymmetricWeight: v1.DefaultHardPodAffinitySymmetricWeight,
Client: clientSet,
InformerFactory: informerFactory,
PodInformer: factory.NewPodInformer(clientSet, 0, v1.DefaultSchedulerName),
EventClient: clientSet.CoreV1(),
Recorder: eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: v1.DefaultSchedulerName}),
Broadcaster: eventBroadcaster,
}

config, err := ss.SchedulerConfig()
if err != nil {
t.Fatalf("couldn't make scheduler config: %v", err)
}
config, err := ss.SchedulerConfig()
if err != nil {
t.Fatalf("couldn't make scheduler config: %v", err)
}

// Verify that the config is applied correctly.
schedPredicates := config.Algorithm.Predicates()
schedPrioritizers := config.Algorithm.Prioritizers()
// Includes one mandatory predicates.
if len(schedPredicates) != 3 || len(schedPrioritizers) != 2 {
t.Errorf("Unexpected number of predicates or priority functions. Number of predicates: %v, number of prioritizers: %v", len(schedPredicates), len(schedPrioritizers))
}
// Check a predicate and a priority function.
if schedPredicates["PredicateTwo"] == nil {
t.Errorf("Expected to have a PodFitsHostPorts predicate.")
}
if schedPrioritizers[1].Function == nil || schedPrioritizers[1].Weight != 5 {
t.Errorf("Unexpected prioritizer: func: %v, weight: %v", schedPrioritizers[1].Function, schedPrioritizers[1].Weight)
// Verify that the config is applied correctly.
schedPredicates := sets.NewString()
for k := range config.Algorithm.Predicates() {
schedPredicates.Insert(k)
}
schedPrioritizers := sets.NewString()
for _, p := range config.Algorithm.Prioritizers() {
schedPrioritizers.Insert(p.Name)
}
if !schedPredicates.Equal(test.expectedPredicates) {
t.Errorf("Expected predicates %v, got %v", test.expectedPredicates, schedPredicates)
}
if !schedPrioritizers.Equal(test.expectedPrioritizers) {
t.Errorf("Expected priority functions %v, got %v", test.expectedPrioritizers, schedPrioritizers)
}
}
}
vendor/k8s.io/kubernetes/test/integration/scheduler/taint_test.go (34 changes, generated, vendored)

@@ -37,22 +37,21 @@ import (
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
internalinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
"k8s.io/kubernetes/pkg/controller/node"
"k8s.io/kubernetes/pkg/controller/node/ipam"
"k8s.io/kubernetes/pkg/controller/nodelifecycle"
kubeadmission "k8s.io/kubernetes/pkg/kubeapiserver/admission"
"k8s.io/kubernetes/pkg/scheduler"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
"k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
"k8s.io/kubernetes/pkg/scheduler/factory"
"k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction"
pluginapi "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction"
"k8s.io/kubernetes/plugin/pkg/scheduler"
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider"
"k8s.io/kubernetes/plugin/pkg/scheduler/factory"
"k8s.io/kubernetes/test/integration/framework"
)

// TestTaintNodeByCondition verifies:
// 1. MemoryPressure Toleration is added to non-BestEffort Pod by PodTolerationRestriction
// 2. NodeController taints nodes by node condition
// 3. Scheduler allows pod to tolerate node condition taints, e.g. network unavailabe
// 3. Scheduler allows pod to tolerate node condition taints, e.g. network unavailable
func TestTaintNodeByCondition(t *testing.T) {
h := &framework.MasterHolder{Initialized: make(chan struct{})}
s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
@@ -85,29 +84,24 @@ func TestTaintNodeByCondition(t *testing.T) {
controllerCh := make(chan struct{})
defer close(controllerCh)

// Start NodeController for taint.
nc, err := node.NewNodeController(
// Start NodeLifecycleController for taint.
nc, err := nodelifecycle.NewNodeLifecycleController(
informers.Core().V1().Pods(),
informers.Core().V1().Nodes(),
informers.Extensions().V1beta1().DaemonSets(),
nil, // CloudProvider
clientset,
time.Second, // Node monitor grace period
time.Second, // Node startup grace period
time.Second, // Node monitor period
time.Second, // Pod eviction timeout
100, // Eviction limiter QPS
100, // Secondary eviction limiter QPS
100, // Large cluster threshold
100, // Unhealthy zone threshold
time.Second, // Node monitor grace period
time.Second, // Node startup grace period
time.Second, // Node monitor period
nil, // Cluster CIDR
nil, // Service CIDR
0, // Node CIDR mask size
false, // Allocate node CIDRs
ipam.RangeAllocatorType, // Allocator type
true, // Run taint manger
true, // Enabled taint based eviction
true, // Enabled TaintNodeByCondition feature
true, // Run taint manager
true, // Use taint based evictions
true, // Enabled TaintNodeByCondition feature
)
if err != nil {
t.Errorf("Failed to create node controller: %v", err)
vendor/k8s.io/kubernetes/test/integration/scheduler/util.go (30 changes, generated, vendored)

@@ -35,9 +35,9 @@ import (
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/api/legacyscheme"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/plugin/pkg/scheduler"
_ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider"
"k8s.io/kubernetes/plugin/pkg/scheduler/factory"
"k8s.io/kubernetes/pkg/scheduler"
_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
"k8s.io/kubernetes/pkg/scheduler/factory"
"k8s.io/kubernetes/test/integration/framework"

"net/http/httptest"
@@ -362,6 +362,18 @@ func waitForPodToSchedule(cs clientset.Interface, pod *v1.Pod) error {
return waitForPodToScheduleWithTimeout(cs, pod, 30*time.Second)
}

// waitForPodUnscheduleWithTimeout waits for a pod to fail scheduling and returns
// an error if it does not become unschedulable within the given timeout.
func waitForPodUnschedulableWithTimeout(cs clientset.Interface, pod *v1.Pod, timeout time.Duration) error {
return wait.Poll(100*time.Millisecond, timeout, podUnschedulable(cs, pod.Namespace, pod.Name))
}

// waitForPodUnschedule waits for a pod to fail scheduling and returns
// an error if it does not become unschedulable within the timeout duration (30 seconds).
func waitForPodUnschedulable(cs clientset.Interface, pod *v1.Pod) error {
return waitForPodUnschedulableWithTimeout(cs, pod, 30*time.Second)
}

// deletePod deletes the given pod in the given namespace.
func deletePod(cs clientset.Interface, podName string, nsName string) error {
return cs.CoreV1().Pods(nsName).Delete(podName, metav1.NewDeleteOptions(0))
@@ -381,15 +393,3 @@ func cleanupPods(cs clientset.Interface, t *testing.T, pods []*v1.Pod) {
}
}
}

// printAllPods prints a list of all the pods and their node names. This is used
// for debugging.
func printAllPods(t *testing.T, cs clientset.Interface, nsName string) {
podList, err := cs.CoreV1().Pods(nsName).List(metav1.ListOptions{})
if err != nil {
t.Logf("Error getting pods: %v", err)
}
for _, pod := range podList.Items {
t.Logf("Pod:\n\tName:%v\n\tNamespace:%v\n\tNode Name:%v\n", pod.Name, pod.Namespace, pod.Spec.NodeName)
}
}
vendor/k8s.io/kubernetes/test/integration/scheduler/volume_binding_test.go (297 changes, generated, vendored)

@@ -22,6 +22,8 @@ import (
"fmt"
"net/http"
"net/http/httptest"
"strconv"
"strings"
"testing"
"time"

@@ -31,16 +33,18 @@ import (
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/controller/volume/persistentvolume"
"k8s.io/kubernetes/plugin/pkg/scheduler"
"k8s.io/kubernetes/plugin/pkg/scheduler/factory"
"k8s.io/kubernetes/pkg/scheduler"
"k8s.io/kubernetes/pkg/scheduler/factory"
"k8s.io/kubernetes/test/integration/framework"
)

@@ -64,78 +68,103 @@ var (
)

const (
labelKey = "test-label"
labelValue = "test-value"
nodeName = "node1"
podLimit = 100
volsPerPod = 5
node1 = "node-1"
node2 = "node-2"
podLimit = 100
volsPerPod = 5
nodeAffinityLabelKey = "kubernetes.io/hostname"
)

func TestVolumeBinding(t *testing.T) {
config := setup(t, "volume-scheduling")
config := setupCluster(t, "volume-scheduling", 2)
defer config.teardown()

cases := map[string]struct {
pod *v1.Pod
pvs []*v1.PersistentVolume
pvcs []*v1.PersistentVolumeClaim
// Create these, but they should not be bound in the end
unboundPvcs []*v1.PersistentVolumeClaim
unboundPvs []*v1.PersistentVolume
shouldFail bool
}{
"immediate can bind": {
pod: makePod("pod-i-canbind", config.ns, []string{"pvc-i-canbind"}),
pvs: []*v1.PersistentVolume{makePV(t, "pv-i-canbind", classImmediate, "", "")},
pvs: []*v1.PersistentVolume{makePV(t, "pv-i-canbind", classImmediate, "", "", node1)},
pvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-i-canbind", config.ns, &classImmediate, "")},
},
"immediate cannot bind": {
pod: makePod("pod-i-cannotbind", config.ns, []string{"pvc-i-cannotbind"}),
unboundPvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-i-cannotbind", config.ns, &classImmediate, "")},
shouldFail: true,
},
"immediate pvc prebound": {
pod: makePod("pod-i-pvc-prebound", config.ns, []string{"pvc-i-prebound"}),
pvs: []*v1.PersistentVolume{makePV(t, "pv-i-pvc-prebound", classImmediate, "", "")},
pvs: []*v1.PersistentVolume{makePV(t, "pv-i-pvc-prebound", classImmediate, "", "", node1)},
pvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-i-prebound", config.ns, &classImmediate, "pv-i-pvc-prebound")},
},
"immediate pv prebound": {
pod: makePod("pod-i-pv-prebound", config.ns, []string{"pvc-i-pv-prebound"}),
pvs: []*v1.PersistentVolume{makePV(t, "pv-i-prebound", classImmediate, "pvc-i-pv-prebound", config.ns)},
pvs: []*v1.PersistentVolume{makePV(t, "pv-i-prebound", classImmediate, "pvc-i-pv-prebound", config.ns, node1)},
pvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-i-pv-prebound", config.ns, &classImmediate, "")},
},
"wait can bind": {
pod: makePod("pod-w-canbind", config.ns, []string{"pvc-w-canbind"}),
pvs: []*v1.PersistentVolume{makePV(t, "pv-w-canbind", classWait, "", "")},
pvs: []*v1.PersistentVolume{makePV(t, "pv-w-canbind", classWait, "", "", node1)},
pvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-w-canbind", config.ns, &classWait, "")},
},
"wait cannot bind": {
pod: makePod("pod-w-cannotbind", config.ns, []string{"pvc-w-cannotbind"}),
unboundPvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-w-cannotbind", config.ns, &classWait, "")},
shouldFail: true,
},
"wait pvc prebound": {
pod: makePod("pod-w-pvc-prebound", config.ns, []string{"pvc-w-prebound"}),
pvs: []*v1.PersistentVolume{makePV(t, "pv-w-pvc-prebound", classWait, "", "")},
pvs: []*v1.PersistentVolume{makePV(t, "pv-w-pvc-prebound", classWait, "", "", node1)},
pvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-w-prebound", config.ns, &classWait, "pv-w-pvc-prebound")},
},
"wait pv prebound": {
pod: makePod("pod-w-pv-prebound", config.ns, []string{"pvc-w-pv-prebound"}),
pvs: []*v1.PersistentVolume{makePV(t, "pv-w-prebound", classWait, "pvc-w-pv-prebound", config.ns)},
pvs: []*v1.PersistentVolume{makePV(t, "pv-w-prebound", classWait, "pvc-w-pv-prebound", config.ns, node1)},
pvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-w-pv-prebound", config.ns, &classWait, "")},
},
"wait can bind two": {
pod: makePod("pod-w-canbind-2", config.ns, []string{"pvc-w-canbind-2", "pvc-w-canbind-3"}),
pvs: []*v1.PersistentVolume{
makePV(t, "pv-w-canbind-2", classWait, "", ""),
makePV(t, "pv-w-canbind-3", classWait, "", ""),
makePV(t, "pv-w-canbind-2", classWait, "", "", node2),
makePV(t, "pv-w-canbind-3", classWait, "", "", node2),
},
pvcs: []*v1.PersistentVolumeClaim{
makePVC("pvc-w-canbind-2", config.ns, &classWait, ""),
makePVC("pvc-w-canbind-3", config.ns, &classWait, ""),
},
unboundPvs: []*v1.PersistentVolume{
makePV(t, "pv-w-canbind-5", classWait, "", "", node1),
},
},
"wait cannot bind two": {
pod: makePod("pod-w-cannotbind-2", config.ns, []string{"pvc-w-cannotbind-1", "pvc-w-cannotbind-2"}),
unboundPvcs: []*v1.PersistentVolumeClaim{
makePVC("pvc-w-cannotbind-1", config.ns, &classWait, ""),
makePVC("pvc-w-cannotbind-2", config.ns, &classWait, ""),
},
unboundPvs: []*v1.PersistentVolume{
makePV(t, "pv-w-cannotbind-1", classWait, "", "", node2),
makePV(t, "pv-w-cannotbind-2", classWait, "", "", node1),
},
shouldFail: true,
},
"mix immediate and wait": {
pod: makePod("pod-mix-bound", config.ns, []string{"pvc-w-canbind-4", "pvc-i-canbind-2"}),
pvs: []*v1.PersistentVolume{
makePV(t, "pv-w-canbind-4", classWait, "", ""),
makePV(t, "pv-i-canbind-2", classImmediate, "", ""),
makePV(t, "pv-w-canbind-4", classWait, "", "", node1),
makePV(t, "pv-i-canbind-2", classImmediate, "", "", node1),
},
pvcs: []*v1.PersistentVolumeClaim{
makePVC("pvc-w-canbind-4", config.ns, &classWait, ""),
makePVC("pvc-i-canbind-2", config.ns, &classImmediate, ""),
},
},
// TODO:
// immediate mode - PVC cannot bound
// wait mode - PVC cannot bind
// wait mode - 2 PVCs, 1 cannot bind
}

for name, test := range cases {
@@ -148,28 +177,51 @@ func TestVolumeBinding(t *testing.T) {
}
}

for _, pv := range test.unboundPvs {
if _, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil {
t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err)
}
}

// Create PVCs
for _, pvc := range test.pvcs {
if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(pvc); err != nil {
t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err)
}
}
for _, pvc := range test.unboundPvcs {
if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(pvc); err != nil {
t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err)
}
}

// Create Pod
if _, err := config.client.CoreV1().Pods(config.ns).Create(test.pod); err != nil {
t.Fatalf("Failed to create Pod %q: %v", test.pod.Name, err)
}
if err := waitForPodToSchedule(config.client, test.pod); err != nil {
t.Errorf("Failed to schedule Pod %q: %v", test.pod.Name, err)
if test.shouldFail {
if err := waitForPodUnschedulable(config.client, test.pod); err != nil {
t.Errorf("Pod %q was not unschedulable: %v", test.pod.Name, err)
}
} else {
if err := waitForPodToSchedule(config.client, test.pod); err != nil {
t.Errorf("Failed to schedule Pod %q: %v", test.pod.Name, err)
}
}

// Validate PVC/PV binding
for _, pvc := range test.pvcs {
validatePVCPhase(t, config.client, pvc, v1.ClaimBound)
}
for _, pvc := range test.unboundPvcs {
validatePVCPhase(t, config.client, pvc, v1.ClaimPending)
}
for _, pv := range test.pvs {
validatePVPhase(t, config.client, pv, v1.VolumeBound)
}
for _, pv := range test.unboundPvs {
validatePVPhase(t, config.client, pv, v1.VolumeAvailable)
}

// TODO: validate events on Pods and PVCs

@@ -181,14 +233,14 @@ func TestVolumeBinding(t *testing.T) {

// TestVolumeBindingStress creates <podLimit> pods, each with <volsPerPod> unbound PVCs.
func TestVolumeBindingStress(t *testing.T) {
config := setup(t, "volume-binding-stress")
config := setupCluster(t, "volume-binding-stress", 1)
defer config.teardown()

// Create enough PVs and PVCs for all the pods
pvs := []*v1.PersistentVolume{}
pvcs := []*v1.PersistentVolumeClaim{}
for i := 0; i < podLimit*volsPerPod; i++ {
pv := makePV(t, fmt.Sprintf("pv-stress-%v", i), classWait, "", "")
pv := makePV(t, fmt.Sprintf("pv-stress-%v", i), classWait, "", "", node1)
pvc := makePVC(fmt.Sprintf("pvc-stress-%v", i), config.ns, &classWait, "")

if pv, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil {
@@ -235,7 +287,66 @@ func TestVolumeBindingStress(t *testing.T) {
// TODO: validate events on Pods and PVCs
}

func setup(t *testing.T, nsName string) *testConfig {
func TestPVAffinityConflict(t *testing.T) {
config := setupCluster(t, "volume-scheduling", 3)
defer config.teardown()

pv := makePV(t, "local-pv", classImmediate, "", "", node1)
pvc := makePVC("local-pvc", config.ns, &classImmediate, "")

// Create PV
if _, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil {
t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err)
}

// Create PVC
if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(pvc); err != nil {
t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err)
}

// Wait for PVC bound
if err := waitForPVCBound(config.client, pvc); err != nil {
t.Fatalf("PVC %q failed to bind: %v", pvc.Name, err)
}

nodeMarkers := []interface{}{
markNodeAffinity,
markNodeSelector,
}
for i := 0; i < len(nodeMarkers); i++ {
podName := "local-pod-" + strconv.Itoa(i+1)
pod := makePod(podName, config.ns, []string{"local-pvc"})
nodeMarkers[i].(func(*v1.Pod, string))(pod, "node-2")
// Create Pod
if _, err := config.client.CoreV1().Pods(config.ns).Create(pod); err != nil {
t.Fatalf("Failed to create Pod %q: %v", pod.Name, err)
}
// Give time to shceduler to attempt to schedule pod
if err := waitForPodUnschedulable(config.client, pod); err != nil {
t.Errorf("Failed as Pod %s was not unschedulable: %v", pod.Name, err)
}
// Check pod conditions
p, err := config.client.CoreV1().Pods(config.ns).Get(podName, metav1.GetOptions{})
if err != nil {
t.Fatalf("Failed to access Pod %s status: %v", podName, err)
}
if strings.Compare(string(p.Status.Phase), "Pending") != 0 {
t.Fatalf("Failed as Pod %s was in: %s state and not in expected: Pending state", podName, p.Status.Phase)
}
if strings.Compare(p.Status.Conditions[0].Reason, "Unschedulable") != 0 {
t.Fatalf("Failed as Pod %s reason was: %s but expected: Unschedulable", podName, p.Status.Conditions[0].Reason)
}
if !strings.Contains(p.Status.Conditions[0].Message, "node(s) didn't match node selector") || !strings.Contains(p.Status.Conditions[0].Message, "node(s) had volume node affinity conflict") {
t.Fatalf("Failed as Pod's %s failure message does not contain expected message: node(s) didn't match node selector, node(s) had volume node affinity conflict. Got message %q", podName, p.Status.Conditions[0].Message)
}
// Deleting test pod
if err := config.client.CoreV1().Pods(config.ns).Delete(podName, &metav1.DeleteOptions{}); err != nil {
t.Fatalf("Failed to delete Pod %s: %v", podName, err)
}
}
}

func setupCluster(t *testing.T, nsName string, numberOfNodes int) *testConfig {
h := &framework.MasterHolder{Initialized: make(chan struct{})}
s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
<-h.Initialized
@@ -251,6 +362,7 @@ func setup(t *testing.T, nsName string) *testConfig {

// Start master
masterConfig := framework.NewIntegrationTestMasterConfig()

_, _, closeFn := framework.RunAMasterUsingServer(masterConfig, s, h)
ns := framework.CreateTestingNamespace(nsName, s, t).Name

@@ -266,6 +378,7 @@ func setup(t *testing.T, nsName string) *testConfig {
VolumeInformer: informers.Core().V1().PersistentVolumes(),
ClaimInformer: informers.Core().V1().PersistentVolumeClaims(),
ClassInformer: informers.Storage().V1().StorageClasses(),
PodInformer: informers.Core().V1().Pods(),
EventRecorder: nil, // TODO: add one so we can test PV events
EnableDynamicProvisioning: true,
}
@@ -293,13 +406,16 @@ func setup(t *testing.T, nsName string) *testConfig {
true, // Enable EqualCache by default.
)

eventBroadcaster := record.NewBroadcaster()
sched, err := scheduler.NewFromConfigurator(configurator, func(cfg *scheduler.Config) {
cfg.StopEverything = controllerCh
cfg.Recorder = &record.FakeRecorder{}
cfg.Recorder = eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: v1.DefaultSchedulerName})
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(clientset.CoreV1().RESTClient()).Events("")})
})
if err != nil {
t.Fatalf("Failed to create scheduler: %v.", err)
}

go sched.Run()

// Waiting for all controller sync.
@@ -307,35 +423,37 @@ func setup(t *testing.T, nsName string) *testConfig {
informers.WaitForCacheSync(controllerCh)

// Create shared objects
// Create node
testNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: nodeName,
Labels: map[string]string{labelKey: labelValue},
},
Spec: v1.NodeSpec{Unschedulable: false},
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourcePods: *resource.NewQuantity(podLimit, resource.DecimalSI),
// Create nodes
for i := 0; i < numberOfNodes; i++ {
testNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("node-%d", i+1),
Labels: map[string]string{nodeAffinityLabelKey: fmt.Sprintf("node-%d", i+1)},
},
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
Reason: fmt.Sprintf("schedulable condition"),
LastHeartbeatTime: metav1.Time{Time: time.Now()},
Spec: v1.NodeSpec{Unschedulable: false},
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourcePods: *resource.NewQuantity(podLimit, resource.DecimalSI),
},
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
Reason: fmt.Sprintf("schedulable condition"),
LastHeartbeatTime: metav1.Time{Time: time.Now()},
},
},
},
},
}
if _, err := clientset.CoreV1().Nodes().Create(testNode); err != nil {
t.Fatalf("Failed to create Node %q: %v", testNode.Name, err)
}
if _, err := clientset.CoreV1().Nodes().Create(testNode); err != nil {
t.Fatalf("Failed to create Node %q: %v", testNode.Name, err)
}
}

// Create SCs
scs := []*storagev1.StorageClass{
makeStorageClass(classWait, &modeWait),
makeStorageClass(classImmediate, &modeImmediate),
makeStorageClass(classWait, &modeWait),
}
for _, sc := range scs {
if _, err := clientset.StorageV1().StorageClasses().Create(sc); err != nil {
@@ -370,7 +488,7 @@ func makeStorageClass(name string, mode *storagev1.VolumeBindingMode) *storagev1
}
}

func makePV(t *testing.T, name, scName, pvcName, ns string) *v1.PersistentVolume {
func makePV(t *testing.T, name, scName, pvcName, ns, node string) *v1.PersistentVolume {
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: name,
@@ -389,6 +507,21 @@ func makePV(t *testing.T, name, scName, pvcName, ns string) *v1.PersistentVolume
Path: "/test-path",
},
},
NodeAffinity: &v1.VolumeNodeAffinity{
Required: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: nodeAffinityLabelKey,
Operator: v1.NodeSelectorOpIn,
Values: []string{node},
},
},
},
},
},
},
},
}

@@ -396,25 +529,6 @@ func makePV(t *testing.T, name, scName, pvcName, ns string) *v1.PersistentVolume
pv.Spec.ClaimRef = &v1.ObjectReference{Name: pvcName, Namespace: ns}
}

testNodeAffinity := &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: labelKey,
Operator: v1.NodeSelectorOpIn,
Values: []string{labelValue},
},
},
},
},
},
}
err := helper.StorageNodeAffinityToAlphaAnnotation(pv.Annotations, testNodeAffinity)
if err != nil {
t.Fatalf("Setting storage node affinity failed: %v", err)
}
return pv
}

@@ -461,7 +575,7 @@ func makePod(name, ns string, pvcs []string) *v1.Pod {
Containers: []v1.Container{
{
Name: "write-pod",
Image: "gcr.io/google_containers/busybox:1.24",
Image: "k8s.gcr.io/busybox:1.24",
Command: []string{"/bin/sh"},
Args: []string{"-c", "while true; do sleep 1; done"},
},
@@ -492,3 +606,44 @@ func validatePVPhase(t *testing.T, client clientset.Interface, pv *v1.Persistent
t.Errorf("PV %v phase not %v, got %v", pv.Name, phase, pv.Status.Phase)
}
}

func waitForPVCBound(client clientset.Interface, pvc *v1.PersistentVolumeClaim) error {
return wait.Poll(time.Second, 30*time.Second, func() (bool, error) {
claim, err := client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
if claim.Status.Phase == v1.ClaimBound {
return true, nil
}
return false, nil
})
}

func markNodeAffinity(pod *v1.Pod, node string) {
affinity := &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: nodeAffinityLabelKey,
Operator: v1.NodeSelectorOpIn,
Values: []string{node},
},
},
},
},
},
},
}
pod.Spec.Affinity = affinity
}

func markNodeSelector(pod *v1.Pod, node string) {
ns := map[string]string{
nodeAffinityLabelKey: node,
}
pod.Spec.NodeSelector = ns
}