Fresh dep ensure
vendor/k8s.io/kubernetes/pkg/scheduler/factory/BUILD (generated, vendored): 131 lines changed
@@ -1,128 +1,88 @@
 package(default_visibility = ["//visibility:public"])

-load(
-    "@io_bazel_rules_go//go:def.bzl",
-    "go_library",
-    "go_test",
-)
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

 go_library(
     name = "go_default_library",
     srcs = [
-        "cache_comparer.go",
         "factory.go",
         "plugins.go",
-    ] + select({
-        "@io_bazel_rules_go//go/platform:android": [
-            "signal.go",
-        ],
-        "@io_bazel_rules_go//go/platform:darwin": [
-            "signal.go",
-        ],
-        "@io_bazel_rules_go//go/platform:dragonfly": [
-            "signal.go",
-        ],
-        "@io_bazel_rules_go//go/platform:freebsd": [
-            "signal.go",
-        ],
-        "@io_bazel_rules_go//go/platform:linux": [
-            "signal.go",
-        ],
-        "@io_bazel_rules_go//go/platform:nacl": [
-            "signal.go",
-        ],
-        "@io_bazel_rules_go//go/platform:netbsd": [
-            "signal.go",
-        ],
-        "@io_bazel_rules_go//go/platform:openbsd": [
-            "signal.go",
-        ],
-        "@io_bazel_rules_go//go/platform:plan9": [
-            "signal.go",
-        ],
-        "@io_bazel_rules_go//go/platform:solaris": [
-            "signal.go",
-        ],
-        "@io_bazel_rules_go//go/platform:windows": [
-            "signal_windows.go",
-        ],
-        "//conditions:default": [],
-    }),
+        "signal.go",
+        "signal_windows.go",
+    ],
     importpath = "k8s.io/kubernetes/pkg/scheduler/factory",
     visibility = ["//visibility:public"],
     deps = [
         "//pkg/api/v1/pod:go_default_library",
         "//pkg/apis/core/helper:go_default_library",
         "//pkg/features:go_default_library",
         "//pkg/kubelet/apis:go_default_library",
-        "//pkg/scheduler:go_default_library",
         "//pkg/scheduler/algorithm:go_default_library",
         "//pkg/scheduler/algorithm/predicates:go_default_library",
         "//pkg/scheduler/algorithm/priorities:go_default_library",
         "//pkg/scheduler/api:go_default_library",
         "//pkg/scheduler/api/validation:go_default_library",
         "//pkg/scheduler/cache:go_default_library",
         "//pkg/scheduler/core:go_default_library",
         "//pkg/scheduler/core/equivalence:go_default_library",
+        "//pkg/scheduler/internal/cache:go_default_library",
+        "//pkg/scheduler/internal/cache/debugger:go_default_library",
+        "//pkg/scheduler/internal/queue:go_default_library",
         "//pkg/scheduler/util:go_default_library",
         "//pkg/scheduler/volumebinder:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/api/policy/v1beta1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
-        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
-        "//vendor/k8s.io/client-go/informers/apps/v1beta1:go_default_library",
-        "//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
-        "//vendor/k8s.io/client-go/informers/extensions/v1beta1:go_default_library",
-        "//vendor/k8s.io/client-go/informers/policy/v1beta1:go_default_library",
-        "//vendor/k8s.io/client-go/informers/storage/v1:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
-        "//vendor/k8s.io/client-go/listers/apps/v1beta1:go_default_library",
-        "//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
-        "//vendor/k8s.io/client-go/listers/extensions/v1beta1:go_default_library",
-        "//vendor/k8s.io/client-go/listers/policy/v1beta1:go_default_library",
-        "//vendor/k8s.io/client-go/listers/storage/v1:go_default_library",
-        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/api/storage/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+        "//staging/src/k8s.io/client-go/informers/apps/v1:go_default_library",
+        "//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
+        "//staging/src/k8s.io/client-go/informers/policy/v1beta1:go_default_library",
+        "//staging/src/k8s.io/client-go/informers/storage/v1:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
+        "//staging/src/k8s.io/client-go/listers/apps/v1:go_default_library",
+        "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
+        "//staging/src/k8s.io/client-go/listers/policy/v1beta1:go_default_library",
+        "//staging/src/k8s.io/client-go/listers/storage/v1:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/record:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )

 go_test(
     name = "go_default_test",
     srcs = [
-        "cache_comparer_test.go",
         "factory_test.go",
         "plugins_test.go",
     ],
     embed = [":go_default_library"],
     deps = [
         "//pkg/api/testing:go_default_library",
-        "//pkg/scheduler:go_default_library",
         "//pkg/scheduler/algorithm:go_default_library",
         "//pkg/scheduler/algorithm/priorities:go_default_library",
         "//pkg/scheduler/api:go_default_library",
         "//pkg/scheduler/api/latest:go_default_library",
-        "//pkg/scheduler/cache:go_default_library",
-        "//pkg/scheduler/core:go_default_library",
+        "//pkg/scheduler/internal/cache/fake:go_default_library",
+        "//pkg/scheduler/internal/queue:go_default_library",
         "//pkg/scheduler/testing:go_default_library",
         "//pkg/scheduler/util:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
+        "//staging/src/k8s.io/client-go/informers:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake:go_default_library",
+        "//staging/src/k8s.io/client-go/testing:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
         "//vendor/github.com/stretchr/testify/assert:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/api/policy/v1beta1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
-        "//vendor/k8s.io/client-go/informers:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
-        "//vendor/k8s.io/client-go/rest:go_default_library",
-        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
-        "//vendor/k8s.io/client-go/util/testing:go_default_library",
     ],
 )

@@ -137,4 +97,5 @@ filegroup(
     name = "all-srcs",
     srcs = [":package-srcs"],
     tags = ["automanaged"],
+    visibility = ["//visibility:public"],
 )
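The srcs change drops the per-platform select() table: the Go build rules now honor the source files' own build constraints, so signal.go and signal_windows.go can be listed unconditionally. As a rough sketch of the constraint mechanism this relies on (a hypothetical file, not part of this commit):

// +build !windows

// Hypothetical sketch: this build tag (or, for the Windows variant, a
// _windows filename suffix) tells the Go toolchain which platforms
// compile the file, so the BUILD rule no longer needs a select() on GOOS.
package factory

import (
	"os"
	"syscall"
)

// shutdownSignals is an assumed stand-in for what signal.go provides;
// the real file is not shown in this diff.
var shutdownSignals = []os.Signal{syscall.SIGINT, syscall.SIGTERM}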
vendor/k8s.io/kubernetes/pkg/scheduler/factory/cache_comparer.go (generated, vendored): 161 lines deleted
@@ -1,161 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package factory
-
-import (
-    "sort"
-    "strings"
-
-    "github.com/golang/glog"
-    "k8s.io/api/core/v1"
-    policy "k8s.io/api/policy/v1beta1"
-    "k8s.io/apimachinery/pkg/labels"
-    corelisters "k8s.io/client-go/listers/core/v1"
-    v1beta1 "k8s.io/client-go/listers/policy/v1beta1"
-    schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
-    "k8s.io/kubernetes/pkg/scheduler/core"
-)
-
-type cacheComparer struct {
-    nodeLister corelisters.NodeLister
-    podLister  corelisters.PodLister
-    pdbLister  v1beta1.PodDisruptionBudgetLister
-    cache      schedulercache.Cache
-    podQueue   core.SchedulingQueue
-
-    compareStrategy
-}
-
-func (c *cacheComparer) Compare() error {
-    glog.V(3).Info("cache comparer started")
-    defer glog.V(3).Info("cache comparer finished")
-
-    nodes, err := c.nodeLister.List(labels.Everything())
-    if err != nil {
-        return err
-    }
-
-    pods, err := c.podLister.List(labels.Everything())
-    if err != nil {
-        return err
-    }
-
-    pdbs, err := c.pdbLister.List(labels.Everything())
-    if err != nil {
-        return err
-    }
-
-    snapshot := c.cache.Snapshot()
-
-    waitingPods := c.podQueue.WaitingPods()
-
-    if missed, redundant := c.CompareNodes(nodes, snapshot.Nodes); len(missed)+len(redundant) != 0 {
-        glog.Warningf("cache mismatch: missed nodes: %s; redundant nodes: %s", missed, redundant)
-    }
-
-    if missed, redundant := c.ComparePods(pods, waitingPods, snapshot.Nodes); len(missed)+len(redundant) != 0 {
-        glog.Warningf("cache mismatch: missed pods: %s; redundant pods: %s", missed, redundant)
-    }
-
-    if missed, redundant := c.ComparePdbs(pdbs, snapshot.Pdbs); len(missed)+len(redundant) != 0 {
-        glog.Warningf("cache mismatch: missed pdbs: %s; redundant pdbs: %s", missed, redundant)
-    }
-
-    return nil
-}
-
-type compareStrategy struct {
-}
-
-func (c compareStrategy) CompareNodes(nodes []*v1.Node, nodeinfos map[string]*schedulercache.NodeInfo) (missed, redundant []string) {
-    actual := []string{}
-    for _, node := range nodes {
-        actual = append(actual, node.Name)
-    }
-
-    cached := []string{}
-    for nodeName := range nodeinfos {
-        cached = append(cached, nodeName)
-    }
-
-    return compareStrings(actual, cached)
-}
-
-func (c compareStrategy) ComparePods(pods, waitingPods []*v1.Pod, nodeinfos map[string]*schedulercache.NodeInfo) (missed, redundant []string) {
-    actual := []string{}
-    for _, pod := range pods {
-        actual = append(actual, string(pod.UID))
-    }
-
-    cached := []string{}
-    for _, nodeinfo := range nodeinfos {
-        for _, pod := range nodeinfo.Pods() {
-            cached = append(cached, string(pod.UID))
-        }
-    }
-    for _, pod := range waitingPods {
-        cached = append(cached, string(pod.UID))
-    }
-
-    return compareStrings(actual, cached)
-}
-
-func (c compareStrategy) ComparePdbs(pdbs []*policy.PodDisruptionBudget, pdbCache map[string]*policy.PodDisruptionBudget) (missed, redundant []string) {
-    actual := []string{}
-    for _, pdb := range pdbs {
-        actual = append(actual, string(pdb.UID))
-    }
-
-    cached := []string{}
-    for pdbUID := range pdbCache {
-        cached = append(cached, pdbUID)
-    }
-
-    return compareStrings(actual, cached)
-}
-
-func compareStrings(actual, cached []string) (missed, redundant []string) {
-    missed, redundant = []string{}, []string{}
-
-    sort.Strings(actual)
-    sort.Strings(cached)
-
-    compare := func(i, j int) int {
-        if i == len(actual) {
-            return 1
-        } else if j == len(cached) {
-            return -1
-        }
-        return strings.Compare(actual[i], cached[j])
-    }
-
-    for i, j := 0, 0; i < len(actual) || j < len(cached); {
-        switch compare(i, j) {
-        case 0:
-            i++
-            j++
-        case -1:
-            missed = append(missed, actual[i])
-            i++
-        case 1:
-            redundant = append(redundant, cached[j])
-            j++
-        }
-    }
-
-    return
-}
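The heart of the deleted comparer is compareStrings: sort both name/UID lists, then walk them with two cursors so a single pass classifies every entry as matched, missed (known to the apiserver but absent from the scheduler cache), or redundant (cached but no longer real). A standalone sketch of the same logic, runnable on its own:

package main

import (
	"fmt"
	"sort"
	"strings"
)

// compareStrings reproduces the deleted helper: after sorting, the two
// cursors advance in lockstep; a string present only in actual was
// missed by the cache, one present only in cached is redundant.
func compareStrings(actual, cached []string) (missed, redundant []string) {
	missed, redundant = []string{}, []string{}
	sort.Strings(actual)
	sort.Strings(cached)
	compare := func(i, j int) int {
		if i == len(actual) {
			return 1
		} else if j == len(cached) {
			return -1
		}
		return strings.Compare(actual[i], cached[j])
	}
	for i, j := 0, 0; i < len(actual) || j < len(cached); {
		switch compare(i, j) {
		case 0:
			i++
			j++
		case -1:
			missed = append(missed, actual[i])
			i++
		case 1:
			redundant = append(redundant, cached[j])
			j++
		}
	}
	return
}

func main() {
	m, r := compareStrings([]string{"foo", "bar", "foobar"}, []string{"bar", "foo", "baz"})
	fmt.Println(m, r) // prints: [foobar] [baz]
}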
vendor/k8s.io/kubernetes/pkg/scheduler/factory/cache_comparer_test.go (generated, vendored): 228 lines deleted
@@ -1,228 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package factory
-
-import (
-    "reflect"
-    "testing"
-
-    "k8s.io/api/core/v1"
-    policy "k8s.io/api/policy/v1beta1"
-    "k8s.io/apimachinery/pkg/types"
-    schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
-)
-
-func TestCompareNodes(t *testing.T) {
-    compare := compareStrategy{}
-
-    tests := []struct {
-        actual    []string
-        cached    []string
-        missing   []string
-        redundant []string
-    }{
-        {
-            actual:    []string{"foo", "bar"},
-            cached:    []string{"bar", "foo", "foobar"},
-            missing:   []string{},
-            redundant: []string{"foobar"},
-        },
-        {
-            actual:    []string{"foo", "bar", "foobar"},
-            cached:    []string{"bar", "foo"},
-            missing:   []string{"foobar"},
-            redundant: []string{},
-        },
-        {
-            actual:    []string{"foo", "bar", "foobar"},
-            cached:    []string{"bar", "foobar", "foo"},
-            missing:   []string{},
-            redundant: []string{},
-        },
-    }
-
-    for _, test := range tests {
-        nodes := []*v1.Node{}
-        for _, nodeName := range test.actual {
-            node := &v1.Node{}
-            node.Name = nodeName
-            nodes = append(nodes, node)
-        }
-
-        nodeInfo := make(map[string]*schedulercache.NodeInfo)
-        for _, nodeName := range test.cached {
-            nodeInfo[nodeName] = &schedulercache.NodeInfo{}
-        }
-
-        m, r := compare.CompareNodes(nodes, nodeInfo)
-
-        if !reflect.DeepEqual(m, test.missing) {
-            t.Errorf("missing expected to be %s; got %s", test.missing, m)
-        }
-
-        if !reflect.DeepEqual(r, test.redundant) {
-            t.Errorf("redundant expected to be %s; got %s", test.redundant, r)
-        }
-    }
-}
-
-func TestComparePods(t *testing.T) {
-    compare := compareStrategy{}
-
-    tests := []struct {
-        actual    []string
-        cached    []string
-        queued    []string
-        missing   []string
-        redundant []string
-    }{
-        {
-            actual:    []string{"foo", "bar"},
-            cached:    []string{"bar", "foo", "foobar"},
-            queued:    []string{},
-            missing:   []string{},
-            redundant: []string{"foobar"},
-        },
-        {
-            actual:    []string{"foo", "bar"},
-            cached:    []string{"foo", "foobar"},
-            queued:    []string{"bar"},
-            missing:   []string{},
-            redundant: []string{"foobar"},
-        },
-        {
-            actual:    []string{"foo", "bar", "foobar"},
-            cached:    []string{"bar", "foo"},
-            queued:    []string{},
-            missing:   []string{"foobar"},
-            redundant: []string{},
-        },
-        {
-            actual:    []string{"foo", "bar", "foobar"},
-            cached:    []string{"foo"},
-            queued:    []string{"bar"},
-            missing:   []string{"foobar"},
-            redundant: []string{},
-        },
-        {
-            actual:    []string{"foo", "bar", "foobar"},
-            cached:    []string{"bar", "foobar", "foo"},
-            queued:    []string{},
-            missing:   []string{},
-            redundant: []string{},
-        },
-        {
-            actual:    []string{"foo", "bar", "foobar"},
-            cached:    []string{"foobar", "foo"},
-            queued:    []string{"bar"},
-            missing:   []string{},
-            redundant: []string{},
-        },
-    }
-
-    for _, test := range tests {
-        pods := []*v1.Pod{}
-        for _, uid := range test.actual {
-            pod := &v1.Pod{}
-            pod.UID = types.UID(uid)
-            pods = append(pods, pod)
-        }
-
-        queuedPods := []*v1.Pod{}
-        for _, uid := range test.queued {
-            pod := &v1.Pod{}
-            pod.UID = types.UID(uid)
-            queuedPods = append(queuedPods, pod)
-        }
-
-        nodeInfo := make(map[string]*schedulercache.NodeInfo)
-        for _, uid := range test.cached {
-            pod := &v1.Pod{}
-            pod.UID = types.UID(uid)
-            pod.Namespace = "ns"
-            pod.Name = uid
-
-            nodeInfo[uid] = schedulercache.NewNodeInfo(pod)
-        }
-
-        m, r := compare.ComparePods(pods, queuedPods, nodeInfo)
-
-        if !reflect.DeepEqual(m, test.missing) {
-            t.Errorf("missing expected to be %s; got %s", test.missing, m)
-        }
-
-        if !reflect.DeepEqual(r, test.redundant) {
-            t.Errorf("redundant expected to be %s; got %s", test.redundant, r)
-        }
-    }
-}
-
-func TestComparePdbs(t *testing.T) {
-    compare := compareStrategy{}
-
-    tests := []struct {
-        actual    []string
-        cached    []string
-        missing   []string
-        redundant []string
-    }{
-        {
-            actual:    []string{"foo", "bar"},
-            cached:    []string{"bar", "foo", "foobar"},
-            missing:   []string{},
-            redundant: []string{"foobar"},
-        },
-        {
-            actual:    []string{"foo", "bar", "foobar"},
-            cached:    []string{"bar", "foo"},
-            missing:   []string{"foobar"},
-            redundant: []string{},
-        },
-        {
-            actual:    []string{"foo", "bar", "foobar"},
-            cached:    []string{"bar", "foobar", "foo"},
-            missing:   []string{},
-            redundant: []string{},
-        },
-    }
-
-    for _, test := range tests {
-        pdbs := []*policy.PodDisruptionBudget{}
-        for _, uid := range test.actual {
-            pdb := &policy.PodDisruptionBudget{}
-            pdb.UID = types.UID(uid)
-            pdbs = append(pdbs, pdb)
-        }
-
-        cache := make(map[string]*policy.PodDisruptionBudget)
-        for _, uid := range test.cached {
-            pdb := &policy.PodDisruptionBudget{}
-            pdb.UID = types.UID(uid)
-            cache[uid] = pdb
-        }
-
-        m, r := compare.ComparePdbs(pdbs, cache)
-
-        if !reflect.DeepEqual(m, test.missing) {
-            t.Errorf("missing expected to be %s; got %s", test.missing, m)
-        }
-
-        if !reflect.DeepEqual(r, test.redundant) {
-            t.Errorf("redundant expected to be %s; got %s", test.redundant, r)
-        }
-    }
-}
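One detail these (now deleted) tests rely on: compareStrings returns empty, non-nil slices, and reflect.DeepEqual treats a nil slice and an empty slice as different values, which is why every expected field is written as []string{} rather than left nil. A minimal demonstration:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	var nilSlice []string
	empty := []string{}
	// DeepEqual distinguishes nil from empty even though both have length 0.
	fmt.Println(reflect.DeepEqual(nilSlice, empty))     // false
	fmt.Println(reflect.DeepEqual(empty, []string{}))   // true
}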
vendor/k8s.io/kubernetes/pkg/scheduler/factory/factory.go (generated, vendored): 634 lines changed
File diff suppressed because it is too large
vendor/k8s.io/kubernetes/pkg/scheduler/factory/factory_test.go (generated, vendored): 366 lines changed
@@ -19,8 +19,6 @@ package factory
 import (
     "errors"
     "fmt"
-    "net/http"
-    "net/http/httptest"
     "reflect"
     "testing"
     "time"
@@ -28,20 +26,20 @@ import (
     "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime"
-    "k8s.io/apimachinery/pkg/runtime/schema"
     "k8s.io/apimachinery/pkg/util/sets"
     "k8s.io/client-go/informers"
     clientset "k8s.io/client-go/kubernetes"
-    restclient "k8s.io/client-go/rest"
+    "k8s.io/client-go/kubernetes/fake"
+    fakeV1 "k8s.io/client-go/kubernetes/typed/core/v1/fake"
+    clienttesting "k8s.io/client-go/testing"
     "k8s.io/client-go/tools/cache"
-    utiltesting "k8s.io/client-go/util/testing"
     apitesting "k8s.io/kubernetes/pkg/api/testing"
-    "k8s.io/kubernetes/pkg/scheduler"
     "k8s.io/kubernetes/pkg/scheduler/algorithm"
     schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
     latestschedulerapi "k8s.io/kubernetes/pkg/scheduler/api/latest"
-    schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
-    "k8s.io/kubernetes/pkg/scheduler/core"
+    fakecache "k8s.io/kubernetes/pkg/scheduler/internal/cache/fake"
+    internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
     schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing"
     "k8s.io/kubernetes/pkg/scheduler/util"
 )
@@ -49,18 +47,14 @@
 const (
     enableEquivalenceCache = true
     disablePodPreemption   = false
+    bindTimeoutSeconds     = 600
 )

 func TestCreate(t *testing.T) {
-    handler := utiltesting.FakeHandler{
-        StatusCode:   500,
-        ResponseBody: "",
-        T:            t,
-    }
-    server := httptest.NewServer(&handler)
-    defer server.Close()
-    client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
-    factory := newConfigFactory(client, v1.DefaultHardPodAffinitySymmetricWeight)
+    client := fake.NewSimpleClientset()
+    stopCh := make(chan struct{})
+    defer close(stopCh)
+    factory := newConfigFactory(client, v1.DefaultHardPodAffinitySymmetricWeight, stopCh)
     factory.Create()
 }
@@ -70,15 +64,10 @@ func TestCreateFromConfig(t *testing.T) {
     var configData []byte
     var policy schedulerapi.Policy

-    handler := utiltesting.FakeHandler{
-        StatusCode:   500,
-        ResponseBody: "",
-        T:            t,
-    }
-    server := httptest.NewServer(&handler)
-    defer server.Close()
-    client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
-    factory := newConfigFactory(client, v1.DefaultHardPodAffinitySymmetricWeight)
+    client := fake.NewSimpleClientset()
+    stopCh := make(chan struct{})
+    defer close(stopCh)
+    factory := newConfigFactory(client, v1.DefaultHardPodAffinitySymmetricWeight, stopCh)

     // Pre-register some predicate and priority functions
     RegisterFitPredicate("PredicateOne", PredicateOne)
@@ -115,15 +104,10 @@ func TestCreateFromConfigWithHardPodAffinitySymmetricWeight(t *testing.T) {
     var configData []byte
     var policy schedulerapi.Policy

-    handler := utiltesting.FakeHandler{
-        StatusCode:   500,
-        ResponseBody: "",
-        T:            t,
-    }
-    server := httptest.NewServer(&handler)
-    defer server.Close()
-    client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
-    factory := newConfigFactory(client, v1.DefaultHardPodAffinitySymmetricWeight)
+    client := fake.NewSimpleClientset()
+    stopCh := make(chan struct{})
+    defer close(stopCh)
+    factory := newConfigFactory(client, v1.DefaultHardPodAffinitySymmetricWeight, stopCh)

     // Pre-register some predicate and priority functions
     RegisterFitPredicate("PredicateOne", PredicateOne)
@@ -161,15 +145,10 @@ func TestCreateFromEmptyConfig(t *testing.T) {
     var configData []byte
     var policy schedulerapi.Policy

-    handler := utiltesting.FakeHandler{
-        StatusCode:   500,
-        ResponseBody: "",
-        T:            t,
-    }
-    server := httptest.NewServer(&handler)
-    defer server.Close()
-    client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
-    factory := newConfigFactory(client, v1.DefaultHardPodAffinitySymmetricWeight)
+    client := fake.NewSimpleClientset()
+    stopCh := make(chan struct{})
+    defer close(stopCh)
+    factory := newConfigFactory(client, v1.DefaultHardPodAffinitySymmetricWeight, stopCh)

     configData = []byte(`{}`)
     if err := runtime.DecodeInto(latestschedulerapi.Codec, configData, &policy); err != nil {
@@ -183,15 +162,10 @@
 // predicate/priority.
 // The predicate/priority from DefaultProvider will be used.
 func TestCreateFromConfigWithUnspecifiedPredicatesOrPriorities(t *testing.T) {
-    handler := utiltesting.FakeHandler{
-        StatusCode:   500,
-        ResponseBody: "",
-        T:            t,
-    }
-    server := httptest.NewServer(&handler)
-    defer server.Close()
-    client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
-    factory := newConfigFactory(client, v1.DefaultHardPodAffinitySymmetricWeight)
+    client := fake.NewSimpleClientset()
+    stopCh := make(chan struct{})
+    defer close(stopCh)
+    factory := newConfigFactory(client, v1.DefaultHardPodAffinitySymmetricWeight, stopCh)

     RegisterFitPredicate("PredicateOne", PredicateOne)
     RegisterPriorityFunction("PriorityOne", PriorityOne, 1)
@@ -223,15 +197,10 @@
 // predicate/priority.
 // Empty predicate/priority sets will be used.
 func TestCreateFromConfigWithEmptyPredicatesOrPriorities(t *testing.T) {
-    handler := utiltesting.FakeHandler{
-        StatusCode:   500,
-        ResponseBody: "",
-        T:            t,
-    }
-    server := httptest.NewServer(&handler)
-    defer server.Close()
-    client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
-    factory := newConfigFactory(client, v1.DefaultHardPodAffinitySymmetricWeight)
+    client := fake.NewSimpleClientset()
+    stopCh := make(chan struct{})
+    defer close(stopCh)
+    factory := newConfigFactory(client, v1.DefaultHardPodAffinitySymmetricWeight, stopCh)

     RegisterFitPredicate("PredicateOne", PredicateOne)
     RegisterPriorityFunction("PriorityOne", PriorityOne, 1)
@@ -282,24 +251,16 @@ func TestDefaultErrorFunc(t *testing.T) {
         ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "bar"},
         Spec:       apitesting.V1DeepEqualSafePodSpec(),
     }
-    handler := utiltesting.FakeHandler{
-        StatusCode:   200,
-        ResponseBody: runtime.EncodeOrDie(schedulertesting.Test.Codec(), testPod),
-        T:            t,
-    }
-    mux := http.NewServeMux()
-
-    // FakeHandler mustn't be sent requests other than the one you want to test.
-    mux.Handle(schedulertesting.Test.ResourcePath(string(v1.ResourcePods), "bar", "foo"), &handler)
-    server := httptest.NewServer(mux)
-    defer server.Close()
-    client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
-    factory := newConfigFactory(client, v1.DefaultHardPodAffinitySymmetricWeight)
-    queue := &core.FIFO{FIFO: cache.NewFIFO(cache.MetaNamespaceKeyFunc)}
+    client := fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testPod}})
+    stopCh := make(chan struct{})
+    defer close(stopCh)
+    factory := newConfigFactory(client, v1.DefaultHardPodAffinitySymmetricWeight, stopCh)
+    queue := &internalqueue.FIFO{FIFO: cache.NewFIFO(cache.MetaNamespaceKeyFunc)}
     podBackoff := util.CreatePodBackoff(1*time.Millisecond, 1*time.Second)
     errFunc := factory.MakeDefaultErrorFunc(podBackoff, queue)

     errFunc(testPod, nil)

     for {
         // This is a terrible way to do this but I plan on replacing this
         // whole error handling system in the future. The test will time
@@ -309,7 +270,27 @@
         if !exists {
             continue
         }
-        handler.ValidateRequest(t, schedulertesting.Test.ResourcePath(string(v1.ResourcePods), "bar", "foo"), "GET", nil)
+        requestReceived := false
+        actions := client.Actions()
+        for _, a := range actions {
+            if a.GetVerb() == "get" {
+                getAction, ok := a.(clienttesting.GetAction)
+                if !ok {
+                    t.Errorf("Can't cast action object to GetAction interface")
+                    break
+                }
+                name := getAction.GetName()
+                ns := a.GetNamespace()
+                if name != "foo" || ns != "bar" {
+                    t.Errorf("Expected name %s namespace %s, got %s %s",
+                        "foo", "bar", name, ns)
+                }
+                requestReceived = true
+            }
+        }
+        if !requestReceived {
+            t.Errorf("Get pod request not received")
+        }
         if e, a := testPod, got; !reflect.DeepEqual(e, a) {
             t.Errorf("Expected %v, got %v", e, a)
         }
@@ -331,64 +312,81 @@ func TestNodeEnumerator(t *testing.T) {
         t.Fatalf("expected %v, got %v", e, a)
     }
     for i := range testList.Items {
-        gotObj := me.Get(i)
-        if e, a := testList.Items[i].Name, gotObj.(*v1.Node).Name; e != a {
-            t.Errorf("Expected %v, got %v", e, a)
-        }
-        if e, a := &testList.Items[i], gotObj; !reflect.DeepEqual(e, a) {
-            t.Errorf("Expected %#v, got %v#", e, a)
-        }
+        t.Run(fmt.Sprintf("node enumerator/%v", i), func(t *testing.T) {
+            gotObj := me.Get(i)
+            if e, a := testList.Items[i].Name, gotObj.(*v1.Node).Name; e != a {
+                t.Errorf("Expected %v, got %v", e, a)
+            }
+            if e, a := &testList.Items[i], gotObj; !reflect.DeepEqual(e, a) {
+                t.Errorf("Expected %#v, got %v#", e, a)
+            }
+        })
     }
 }

 func TestBind(t *testing.T) {
     table := []struct {
+        name    string
         binding *v1.Binding
     }{
-        {binding: &v1.Binding{
-            ObjectMeta: metav1.ObjectMeta{
-                Namespace: metav1.NamespaceDefault,
-                Name:      "foo",
+        {
+            name: "binding can bind and validate request",
+            binding: &v1.Binding{
+                ObjectMeta: metav1.ObjectMeta{
+                    Namespace: metav1.NamespaceDefault,
+                    Name:      "foo",
+                },
+                Target: v1.ObjectReference{
+                    Name: "foohost.kubernetes.mydomain.com",
+                },
             },
-            Target: v1.ObjectReference{
-                Name: "foohost.kubernetes.mydomain.com",
-            },
-        }},
+        },
     }

-    for _, item := range table {
-        handler := utiltesting.FakeHandler{
-            StatusCode:   200,
-            ResponseBody: "",
-            T:            t,
-        }
-        server := httptest.NewServer(&handler)
-        defer server.Close()
-        client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
-        b := binder{client}
+    for _, test := range table {
+        t.Run(test.name, func(t *testing.T) {
+            testBind(test.binding, t)
+        })
+    }
+}

-        if err := b.Bind(item.binding); err != nil {
-            t.Errorf("Unexpected error: %v", err)
-            continue
-        }
-        expectedBody := runtime.EncodeOrDie(schedulertesting.Test.Codec(), item.binding)
-        handler.ValidateRequest(t,
-            schedulertesting.Test.SubResourcePath(string(v1.ResourcePods), metav1.NamespaceDefault, "foo", "binding"),
-            "POST", &expectedBody)
+func testBind(binding *v1.Binding, t *testing.T) {
+    testPod := &v1.Pod{
+        ObjectMeta: metav1.ObjectMeta{Name: binding.GetName(), Namespace: metav1.NamespaceDefault},
+        Spec:       apitesting.V1DeepEqualSafePodSpec(),
+    }
+    client := fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testPod}})
+
+    b := binder{client}
+
+    if err := b.Bind(binding); err != nil {
+        t.Errorf("Unexpected error: %v", err)
+        return
+    }
+
+    pod := client.CoreV1().Pods(metav1.NamespaceDefault).(*fakeV1.FakePods)
+
+    bind, err := pod.GetBinding(binding.GetName())
+    if err != nil {
+        t.Fatalf("Unexpected error: %v", err)
+        return
+    }
+
+    expectedBody := runtime.EncodeOrDie(schedulertesting.Test.Codec(), binding)
+    bind.APIVersion = ""
+    bind.Kind = ""
+    body := runtime.EncodeOrDie(schedulertesting.Test.Codec(), bind)
+    if expectedBody != body {
+        t.Errorf("Expected body %s, Got %s", expectedBody, body)
+    }
 }

 func TestInvalidHardPodAffinitySymmetricWeight(t *testing.T) {
-    handler := utiltesting.FakeHandler{
-        StatusCode:   500,
-        ResponseBody: "",
-        T:            t,
-    }
-    server := httptest.NewServer(&handler)
-    defer server.Close()
-    client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
+    client := fake.NewSimpleClientset()
     // factory of "default-scheduler"
-    factory := newConfigFactory(client, -1)
+    stopCh := make(chan struct{})
+    factory := newConfigFactory(client, -1, stopCh)
+    defer close(stopCh)
     _, err := factory.Create()
     if err == nil {
         t.Errorf("expected err: invalid hardPodAffinitySymmetricWeight, got nothing")
@@ -396,48 +394,49 @@ func TestInvalidHardPodAffinitySymmetricWeight(t *testing.T) {
 }

 func TestInvalidFactoryArgs(t *testing.T) {
-    handler := utiltesting.FakeHandler{
-        StatusCode:   500,
-        ResponseBody: "",
-        T:            t,
-    }
-    server := httptest.NewServer(&handler)
-    defer server.Close()
-    client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
+    client := fake.NewSimpleClientset()

     testCases := []struct {
+        name                           string
         hardPodAffinitySymmetricWeight int32
         expectErr                      string
     }{
         {
+            name:                           "symmetric weight below range",
            hardPodAffinitySymmetricWeight: -1,
             expectErr:                      "invalid hardPodAffinitySymmetricWeight: -1, must be in the range 0-100",
         },
         {
+            name:                           "symmetric weight above range",
            hardPodAffinitySymmetricWeight: 101,
             expectErr:                      "invalid hardPodAffinitySymmetricWeight: 101, must be in the range 0-100",
         },
     }

     for _, test := range testCases {
-        factory := newConfigFactory(client, test.hardPodAffinitySymmetricWeight)
-        _, err := factory.Create()
-        if err == nil {
-            t.Errorf("expected err: %s, got nothing", test.expectErr)
-        }
+        t.Run(test.name, func(t *testing.T) {
+            stopCh := make(chan struct{})
+            factory := newConfigFactory(client, test.hardPodAffinitySymmetricWeight, stopCh)
+            defer close(stopCh)
+            _, err := factory.Create()
+            if err == nil {
+                t.Errorf("expected err: %s, got nothing", test.expectErr)
+            }
+        })
     }
 }

 func TestSkipPodUpdate(t *testing.T) {
-    for _, test := range []struct {
+    table := []struct {
         pod              *v1.Pod
         isAssumedPodFunc func(*v1.Pod) bool
         getPodFunc       func(*v1.Pod) *v1.Pod
         expected         bool
+        name             string
     }{
-        // Non-assumed pod should not be skipped.
         {
+            name: "Non-assumed pod",
             pod: &v1.Pod{
                 ObjectMeta: metav1.ObjectMeta{
                     Name: "pod-0",
@@ -453,9 +452,8 @@
             },
             expected: false,
         },
-        // Pod update (with changes on ResourceVersion, Spec.NodeName and/or
-        // Annotations) for an already assumed pod should be skipped.
         {
+            name: "with changes on ResourceVersion, Spec.NodeName and/or Annotations",
             pod: &v1.Pod{
                 ObjectMeta: metav1.ObjectMeta{
                     Name: "pod-0",
@@ -483,9 +481,8 @@
             },
             expected: true,
         },
-        // Pod update (with changes on Labels) for an already assumed pod
-        // should not be skipped.
         {
+            name: "with changes on Labels",
             pod: &v1.Pod{
                 ObjectMeta: metav1.ObjectMeta{
                     Name: "pod-0",
@@ -505,23 +502,26 @@
             },
             expected: false,
         },
-    } {
-        c := &configFactory{
-            schedulerCache: &schedulertesting.FakeCache{
-                IsAssumedPodFunc: test.isAssumedPodFunc,
-                GetPodFunc:       test.getPodFunc,
-            },
-        }
-        got := c.skipPodUpdate(test.pod)
-        if got != test.expected {
-            t.Errorf("skipPodUpdate() = %t, expected = %t", got, test.expected)
-        }
     }
+    for _, test := range table {
+        t.Run(test.name, func(t *testing.T) {
+            c := &configFactory{
+                schedulerCache: &fakecache.Cache{
+                    IsAssumedPodFunc: test.isAssumedPodFunc,
+                    GetPodFunc:       test.getPodFunc,
+                },
+            }
+            got := c.skipPodUpdate(test.pod)
+            if got != test.expected {
+                t.Errorf("skipPodUpdate() = %t, expected = %t", got, test.expected)
+            }
+        })
+    }
 }

-func newConfigFactory(client *clientset.Clientset, hardPodAffinitySymmetricWeight int32) scheduler.Configurator {
+func newConfigFactory(client clientset.Interface, hardPodAffinitySymmetricWeight int32, stopCh <-chan struct{}) Configurator {
     informerFactory := informers.NewSharedInformerFactory(client, 0)
-    return NewConfigFactory(
+    return NewConfigFactory(&ConfigFactoryArgs{
         v1.DefaultSchedulerName,
         client,
         informerFactory.Core().V1().Nodes(),
@@ -529,15 +529,18 @@ func newConfigFactory(client *clientset.Clientset, hardPodAffinitySymmetricWeigh
         informerFactory.Core().V1().PersistentVolumes(),
         informerFactory.Core().V1().PersistentVolumeClaims(),
         informerFactory.Core().V1().ReplicationControllers(),
-        informerFactory.Extensions().V1beta1().ReplicaSets(),
-        informerFactory.Apps().V1beta1().StatefulSets(),
+        informerFactory.Apps().V1().ReplicaSets(),
+        informerFactory.Apps().V1().StatefulSets(),
         informerFactory.Core().V1().Services(),
         informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
         informerFactory.Storage().V1().StorageClasses(),
         hardPodAffinitySymmetricWeight,
         enableEquivalenceCache,
         disablePodPreemption,
-    )
+        schedulerapi.DefaultPercentageOfNodesToScore,
+        bindTimeoutSeconds,
+        stopCh,
+    })
 }

 type fakeExtender struct {
@@ -546,6 +549,10 @@ type fakeExtender struct {
     ignorable bool
 }

+func (f *fakeExtender) Name() string {
+    return "fakeExtender"
+}
+
 func (f *fakeExtender) IsIgnorable() bool {
     return f.ignorable
 }
@@ -593,24 +600,22 @@ func (f *fakeExtender) IsInterested(pod *v1.Pod) bool {
 }

 func TestGetBinderFunc(t *testing.T) {
-    for _, test := range []struct {
-        podName   string
-        extenders []algorithm.SchedulerExtender
+    table := []struct {
+        podName            string
+        extenders          []algorithm.SchedulerExtender
         expectedBinderType string
+        name               string
     }{
-        // Expect to return the default binder because the extender is not a
-        // binder, even though it's interested in the pod.
         {
+            name:    "the extender is not a binder",
             podName: "pod0",
             extenders: []algorithm.SchedulerExtender{
                 &fakeExtender{isBinder: false, interestedPodName: "pod0"},
             },
             expectedBinderType: "*factory.binder",
         },
-        // Expect to return the fake binder because one of the extenders is a
-        // binder and it's interested in the pod.
         {
+            name:    "one of the extenders is a binder and interested in pod",
             podName: "pod0",
             extenders: []algorithm.SchedulerExtender{
                 &fakeExtender{isBinder: false, interestedPodName: "pod0"},
@@ -618,9 +623,8 @@
             },
             expectedBinderType: "*factory.fakeExtender",
         },
-        // Expect to return the default binder because one of the extenders is
-        // a binder but the binder is not interested in the pod.
         {
+            name:    "one of the extenders is a binder, but not interested in pod",
             podName: "pod1",
             extenders: []algorithm.SchedulerExtender{
                 &fakeExtender{isBinder: false, interestedPodName: "pod1"},
@@ -628,20 +632,28 @@
             },
             expectedBinderType: "*factory.binder",
         },
-    } {
-        pod := &v1.Pod{
-            ObjectMeta: metav1.ObjectMeta{
-                Name: test.podName,
-            },
-        }
+    }

-        f := &configFactory{}
-        binderFunc := f.getBinderFunc(test.extenders)
-        binder := binderFunc(pod)
-
-        binderType := fmt.Sprintf("%s", reflect.TypeOf(binder))
-        if binderType != test.expectedBinderType {
-            t.Errorf("Expected binder %q but got %q", test.expectedBinderType, binderType)
-        }
+    for _, test := range table {
+        t.Run(test.name, func(t *testing.T) {
+            testGetBinderFunc(test.expectedBinderType, test.podName, test.extenders, t)
+        })
     }
 }
+
+func testGetBinderFunc(expectedBinderType, podName string, extenders []algorithm.SchedulerExtender, t *testing.T) {
+    pod := &v1.Pod{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: podName,
+        },
+    }
+
+    f := &configFactory{}
+    binderFunc := f.getBinderFunc(extenders)
+    binder := binderFunc(pod)
+
+    binderType := fmt.Sprintf("%s", reflect.TypeOf(binder))
+    if binderType != expectedBinderType {
+        t.Errorf("Expected binder %q but got %q", expectedBinderType, binderType)
+    }
+}
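The rewritten tests above replace the httptest server plus FakeHandler with client-go's fake clientset, which serves requests from an in-memory object tracker and records every API call as an action the test can inspect afterwards, as TestDefaultErrorFunc now does. A minimal sketch of the pattern (a hypothetical test, using the two-argument Get signature of the client-go vintage vendored here):

package factory

import (
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

// Hypothetical test illustrating the fake-clientset pattern used above.
func TestFakeClientRecordsActions(t *testing.T) {
	client := fake.NewSimpleClientset() // empty in-memory tracker

	// The lookup fails (nothing was seeded), but it is still recorded.
	if _, err := client.CoreV1().Pods("bar").Get("foo", metav1.GetOptions{}); err == nil {
		t.Fatal("expected a not-found error from the empty fake tracker")
	}

	actions := client.Actions()
	if len(actions) != 1 || actions[0].GetVerb() != "get" {
		t.Fatalf("expected exactly one recorded get action, got %v", actions)
	}
}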
vendor/k8s.io/kubernetes/pkg/scheduler/factory/plugins.go (generated, vendored): 30 lines changed
@@ -30,7 +30,7 @@ import (
     schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
     "k8s.io/kubernetes/pkg/scheduler/volumebinder"

-    "github.com/golang/glog"
+    "k8s.io/klog"
 )

 // PluginFactoryArgs are passed to all plugin factory functions.
@@ -41,6 +41,7 @@ type PluginFactoryArgs struct {
     ReplicaSetLister  algorithm.ReplicaSetLister
     StatefulSetLister algorithm.StatefulSetLister
     NodeLister        algorithm.NodeLister
+    PDBLister         algorithm.PDBLister
     NodeInfo          predicates.NodeInfo
     PVInfo            predicates.PersistentVolumeInfo
     PVCInfo           predicates.PersistentVolumeClaimInfo
@@ -166,6 +167,17 @@ func InsertPredicateKeyToAlgorithmProviderMap(key string) {
     return
 }

+// InsertPriorityKeyToAlgorithmProviderMap inserts a priority function to all algorithmProviders which are in algorithmProviderMap.
+func InsertPriorityKeyToAlgorithmProviderMap(key string) {
+    schedulerFactoryMutex.Lock()
+    defer schedulerFactoryMutex.Unlock()
+
+    for _, provider := range algorithmProviderMap {
+        provider.PriorityFunctionKeys.Insert(key)
+    }
+    return
+}
+
 // RegisterMandatoryFitPredicate registers a fit predicate with the algorithm registry, the predicate is used by
 // kubelet, DaemonSet; it is always included in configuration. Returns the name with which the predicate was
 // registered.
@@ -221,12 +233,12 @@ func RegisterCustomFitPredicate(policy schedulerapi.PredicatePolicy) string {
         }
     } else if predicateFactory, ok = fitPredicateMap[policy.Name]; ok {
         // checking to see if a pre-defined predicate is requested
-        glog.V(2).Infof("Predicate type %s already registered, reusing.", policy.Name)
+        klog.V(2).Infof("Predicate type %s already registered, reusing.", policy.Name)
         return policy.Name
     }

     if predicateFactory == nil {
-        glog.Fatalf("Invalid configuration: Predicate type not found for %s", policy.Name)
+        klog.Fatalf("Invalid configuration: Predicate type not found for %s", policy.Name)
     }

     return RegisterFitPredicateFactory(policy.Name, predicateFactory)
@@ -333,7 +345,7 @@ func RegisterCustomPriorityFunction(policy schedulerapi.PriorityPolicy) string {
         }
     }
     } else if existingPcf, ok := priorityFunctionMap[policy.Name]; ok {
-        glog.V(2).Infof("Priority type %s already registered, reusing.", policy.Name)
+        klog.V(2).Infof("Priority type %s already registered, reusing.", policy.Name)
         // set/update the weight based on the policy
         pcf = &PriorityConfigFactory{
             Function: existingPcf.Function,
@@ -343,7 +355,7 @@
     }

     if pcf == nil {
-        glog.Fatalf("Invalid configuration: Priority type not found for %s", policy.Name)
+        klog.Fatalf("Invalid configuration: Priority type not found for %s", policy.Name)
     }

     return RegisterPriorityConfigFactory(policy.Name, *pcf)
@@ -357,7 +369,7 @@ func buildScoringFunctionShapeFromRequestedToCapacityRatioArguments(arguments *s
     }
     shape, err := priorities.NewFunctionShape(points)
     if err != nil {
-        glog.Fatalf("invalid RequestedToCapacityRatioPriority arguments: %s", err.Error())
+        klog.Fatalf("invalid RequestedToCapacityRatioPriority arguments: %s", err.Error())
     }
     return shape
 }
@@ -488,7 +500,7 @@ var validName = regexp.MustCompile("^[a-zA-Z0-9]([-a-zA-Z0-9]*[a-zA-Z0-9])$")

 func validateAlgorithmNameOrDie(name string) {
     if !validName.MatchString(name) {
-        glog.Fatalf("Algorithm name %v does not match the name validation regexp \"%v\".", name, validName)
+        klog.Fatalf("Algorithm name %v does not match the name validation regexp \"%v\".", name, validName)
     }
 }
@@ -502,7 +514,7 @@ func validatePredicateOrDie(predicate schedulerapi.PredicatePolicy) {
             numArgs++
         }
         if numArgs != 1 {
-            glog.Fatalf("Exactly 1 predicate argument is required, numArgs: %v, Predicate: %s", numArgs, predicate.Name)
+            klog.Fatalf("Exactly 1 predicate argument is required, numArgs: %v, Predicate: %s", numArgs, predicate.Name)
         }
     }
 }
@@ -520,7 +532,7 @@ func validatePriorityOrDie(priority schedulerapi.PriorityPolicy) {
             numArgs++
         }
         if numArgs != 1 {
-            glog.Fatalf("Exactly 1 priority argument is required, numArgs: %v, Priority: %s", numArgs, priority.Name)
+            klog.Fatalf("Exactly 1 priority argument is required, numArgs: %v, Priority: %s", numArgs, priority.Name)
         }
     }
 }
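The glog to klog switch in this file is almost purely mechanical: klog is the Kubernetes fork of glog with the same logging surface (V(n).Infof, Warningf, Fatalf), so only the import path and the flag wiring change. A hedged sketch of klog usage under that assumption:

package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	// Unlike glog, klog does not register its flags on the global flag
	// set automatically; InitFlags wires them up explicitly.
	klog.InitFlags(nil)
	flag.Set("v", "2")
	flag.Parse()

	// Same call surface as glog.
	klog.V(2).Infof("Predicate type %s already registered, reusing.", "PodFitsResources")
	klog.Flush()
}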
vendor/k8s.io/kubernetes/pkg/scheduler/factory/plugins_test.go (generated, vendored): 36 lines changed
@@ -36,14 +36,18 @@ func TestAlgorithmNameValidation(t *testing.T) {
         "Some,Alg:orithm",
     }
     for _, name := range algorithmNamesShouldValidate {
-        if !validName.MatchString(name) {
-            t.Errorf("%v should be a valid algorithm name but is not valid.", name)
-        }
+        t.Run(name, func(t *testing.T) {
+            if !validName.MatchString(name) {
+                t.Errorf("should be a valid algorithm name but is not valid.")
+            }
+        })
     }
     for _, name := range algorithmNamesShouldNotValidate {
-        if validName.MatchString(name) {
-            t.Errorf("%v should be an invalid algorithm name but is valid.", name)
-        }
+        t.Run(name, func(t *testing.T) {
+            if validName.MatchString(name) {
+                t.Errorf("should be an invalid algorithm name but is valid.")
+            }
+        })
     }
 }
@@ -70,16 +74,18 @@ func TestValidatePriorityConfigOverFlow(t *testing.T) {
         },
     }
     for _, test := range tests {
-        err := validateSelectedConfigs(test.configs)
-        if test.expected {
-            if err == nil {
-                t.Errorf("Expected Overflow for %s", test.description)
+        t.Run(test.description, func(t *testing.T) {
+            err := validateSelectedConfigs(test.configs)
+            if test.expected {
+                if err == nil {
+                    t.Errorf("Expected Overflow")
+                }
+            } else {
+                if err != nil {
+                    t.Errorf("Did not expect an overflow")
+                }
             }
-        } else {
-            if err != nil {
-                t.Errorf("Did not expect an overflow for %s", test.description)
-            }
-        }
+        })
     }
 }
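A pattern running through all of the test updates in this commit is wrapping each table case in t.Run, which names every case, isolates its failures, and lets go test -run select individual subtests. A minimal self-contained sketch:

package factory

import "testing"

// Hypothetical test illustrating the table-plus-subtest shape adopted above.
func TestSubtests(t *testing.T) {
	table := []struct {
		name string
		in   int
		want int
	}{
		{name: "zero", in: 0, want: 0},
		{name: "double", in: 2, want: 4},
	}
	for _, test := range table {
		t.Run(test.name, func(t *testing.T) {
			if got := test.in * 2; got != test.want {
				t.Errorf("got %d, want %d", got, test.want)
			}
		})
	}
}

Each case then reports as TestSubtests/zero and so on, and a single case can be rerun with go test -run 'TestSubtests/double'.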