Fresh dep ensure
vendor/k8s.io/kubernetes/pkg/scheduler/BUILD (generated, vendored): 107 lines changed
@@ -1,36 +1,4 @@
package(default_visibility = ["//visibility:public"])

load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)

go_test(
name = "go_default_test",
srcs = ["scheduler_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/controller/volume/persistentvolume:go_default_library",
"//pkg/scheduler/algorithm:go_default_library",
"//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/cache:go_default_library",
"//pkg/scheduler/core:go_default_library",
"//pkg/scheduler/testing:go_default_library",
"//pkg/scheduler/volumebinder:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
],
)
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
name = "go_default_library",
@@ -39,25 +7,74 @@ go_library(
"testutil.go",
],
importpath = "k8s.io/kubernetes/pkg/scheduler",
visibility = ["//visibility:public"],
deps = [
"//pkg/features:go_default_library",
"//pkg/scheduler/algorithm:go_default_library",
"//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/api:go_default_library",
"//pkg/scheduler/cache:go_default_library",
"//pkg/scheduler/api/latest:go_default_library",
"//pkg/scheduler/apis/config:go_default_library",
"//pkg/scheduler/core:go_default_library",
"//pkg/scheduler/factory:go_default_library",
"//pkg/scheduler/internal/cache:go_default_library",
"//pkg/scheduler/internal/queue:go_default_library",
"//pkg/scheduler/metrics:go_default_library",
"//pkg/scheduler/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/client-go/informers/apps/v1:go_default_library",
"//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/informers/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/client-go/informers/storage/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

go_test(
name = "go_default_test",
srcs = [
"main_test.go",
"scheduler_test.go",
],
embed = [":go_default_library"],
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/controller/volume/persistentvolume:go_default_library",
"//pkg/features:go_default_library",
"//pkg/scheduler/algorithm:go_default_library",
"//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/api:go_default_library",
"//pkg/scheduler/apis/config:go_default_library",
"//pkg/scheduler/cache:go_default_library",
"//pkg/scheduler/core:go_default_library",
"//pkg/scheduler/factory:go_default_library",
"//pkg/scheduler/internal/cache:go_default_library",
"//pkg/scheduler/internal/cache/fake:go_default_library",
"//pkg/scheduler/volumebinder:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
],
)

@@ -75,13 +92,17 @@ filegroup(
"//pkg/scheduler/algorithm:all-srcs",
"//pkg/scheduler/algorithmprovider:all-srcs",
"//pkg/scheduler/api:all-srcs",
"//pkg/scheduler/apis/config:all-srcs",
"//pkg/scheduler/cache:all-srcs",
"//pkg/scheduler/core:all-srcs",
"//pkg/scheduler/factory:all-srcs",
"//pkg/scheduler/internal/cache:all-srcs",
"//pkg/scheduler/internal/queue:all-srcs",
"//pkg/scheduler/metrics:all-srcs",
"//pkg/scheduler/testing:all-srcs",
"//pkg/scheduler/util:all-srcs",
"//pkg/scheduler/volumebinder:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/BUILD (generated, vendored): 20 lines changed
@@ -12,31 +12,27 @@ go_library(
"doc.go",
"scheduler_interface.go",
"types.go",
"well_known_labels.go",
],
importpath = "k8s.io/kubernetes/pkg/scheduler/algorithm",
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/scheduler/api:go_default_library",
"//pkg/scheduler/cache:go_default_library",
"//vendor/k8s.io/api/apps/v1beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//pkg/scheduler/internal/cache:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
],
)

go_test(
name = "go_default_test",
srcs = [
"scheduler_interface_test.go",
"types_test.go",
],
srcs = ["types_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/scheduler/cache:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
],
)
vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/BUILD (generated, vendored): 47 lines changed
@@ -9,6 +9,7 @@ load(
go_library(
name = "go_default_library",
srcs = [
"csi_volume_predicate.go",
"error.go",
"metadata.go",
"predicates.go",
@@ -23,29 +24,32 @@ go_library(
"//pkg/kubelet/apis:go_default_library",
"//pkg/scheduler/algorithm:go_default_library",
"//pkg/scheduler/algorithm/priorities/util:go_default_library",
"//pkg/scheduler/api:go_default_library",
"//pkg/scheduler/cache:go_default_library",
"//pkg/scheduler/util:go_default_library",
"//pkg/scheduler/volumebinder:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/storage/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/storage/v1:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/listers/storage/v1:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

go_test(
name = "go_default_test",
srcs = [
"csi_volume_predicate_test.go",
"main_test.go",
"max_attachable_volume_predicate_test.go",
"metadata_test.go",
"predicates_test.go",
@@ -57,17 +61,18 @@ go_test(
"//pkg/features:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/scheduler/algorithm:go_default_library",
"//pkg/scheduler/api:go_default_library",
"//pkg/scheduler/cache:go_default_library",
"//pkg/scheduler/testing:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/storage/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
],
)
vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/csi_volume_predicate.go (new file, generated, vendored): 157 lines
@@ -0,0 +1,157 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package predicates

import (
"fmt"

"k8s.io/api/core/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

// CSIMaxVolumeLimitChecker defines predicate needed for counting CSI volumes
type CSIMaxVolumeLimitChecker struct {
pvInfo PersistentVolumeInfo
pvcInfo PersistentVolumeClaimInfo
}

// NewCSIMaxVolumeLimitPredicate returns a predicate for counting CSI volumes
func NewCSIMaxVolumeLimitPredicate(
pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo) algorithm.FitPredicate {
c := &CSIMaxVolumeLimitChecker{
pvInfo: pvInfo,
pvcInfo: pvcInfo,
}
return c.attachableLimitPredicate
}

func (c *CSIMaxVolumeLimitChecker) attachableLimitPredicate(
pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {

// if feature gate is disable we return
if !utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) {
return true, nil, nil
}
// If a pod doesn't have any volume attached to it, the predicate will always be true.
// Thus we make a fast path for it, to avoid unnecessary computations in this case.
if len(pod.Spec.Volumes) == 0 {
return true, nil, nil
}

nodeVolumeLimits := nodeInfo.VolumeLimits()

// if node does not have volume limits this predicate should exit
if len(nodeVolumeLimits) == 0 {
return true, nil, nil
}

// a map of unique volume name/csi volume handle and volume limit key
newVolumes := make(map[string]string)
if err := c.filterAttachableVolumes(pod.Spec.Volumes, pod.Namespace, newVolumes); err != nil {
return false, nil, err
}

if len(newVolumes) == 0 {
return true, nil, nil
}

// a map of unique volume name/csi volume handle and volume limit key
attachedVolumes := make(map[string]string)
for _, existingPod := range nodeInfo.Pods() {
if err := c.filterAttachableVolumes(existingPod.Spec.Volumes, existingPod.Namespace, attachedVolumes); err != nil {
return false, nil, err
}
}

newVolumeCount := map[string]int{}
attachedVolumeCount := map[string]int{}

for volumeName, volumeLimitKey := range attachedVolumes {
if _, ok := newVolumes[volumeName]; ok {
delete(newVolumes, volumeName)
}
attachedVolumeCount[volumeLimitKey]++
}

for _, volumeLimitKey := range newVolumes {
newVolumeCount[volumeLimitKey]++
}

for volumeLimitKey, count := range newVolumeCount {
maxVolumeLimit, ok := nodeVolumeLimits[v1.ResourceName(volumeLimitKey)]
if ok {
currentVolumeCount := attachedVolumeCount[volumeLimitKey]
if currentVolumeCount+count > int(maxVolumeLimit) {
return false, []algorithm.PredicateFailureReason{ErrMaxVolumeCountExceeded}, nil
}
}
}

return true, nil, nil
}

func (c *CSIMaxVolumeLimitChecker) filterAttachableVolumes(
volumes []v1.Volume, namespace string, result map[string]string) error {

for _, vol := range volumes {
// CSI volumes can only be used as persistent volumes
if vol.PersistentVolumeClaim == nil {
continue
}
pvcName := vol.PersistentVolumeClaim.ClaimName

if pvcName == "" {
return fmt.Errorf("PersistentVolumeClaim had no name")
}

pvc, err := c.pvcInfo.GetPersistentVolumeClaimInfo(namespace, pvcName)

if err != nil {
klog.V(4).Infof("Unable to look up PVC info for %s/%s", namespace, pvcName)
continue
}

pvName := pvc.Spec.VolumeName
// TODO - the actual handling of unbound PVCs will be fixed by late binding design.
if pvName == "" {
klog.V(4).Infof("Persistent volume had no name for claim %s/%s", namespace, pvcName)
continue
}
pv, err := c.pvInfo.GetPersistentVolumeInfo(pvName)

if err != nil {
klog.V(4).Infof("Unable to look up PV info for PVC %s/%s and PV %s", namespace, pvcName, pvName)
continue
}

csiSource := pv.Spec.PersistentVolumeSource.CSI
if csiSource == nil {
klog.V(4).Infof("Not considering non-CSI volume %s/%s", namespace, pvcName)
continue
}
driverName := csiSource.Driver
volumeLimitKey := volumeutil.GetCSIAttachLimitKey(driverName)
result[csiSource.VolumeHandle] = volumeLimitKey

}
return nil
}
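For orientation, a minimal standalone sketch (not taken from the vendored file; names are illustrative) of the counting rule the predicate above applies: volume handles already attached on the node and handles requested by the incoming pod are grouped by per-driver limit key, a handle attached on both sides is counted once, and the pod fits only if no group would exceed the node's reported limit.

// Illustrative sketch only, mirroring attachableLimitPredicate above.
// newVols and attachedVols map volume handle -> limit key; limits maps limit key -> node capacity.
func fitsCSIVolumeLimits(newVols, attachedVols map[string]string, limits map[string]int64) bool {
    attachedCount := map[string]int{}
    newCount := map[string]int{}
    for handle, key := range attachedVols {
        // a volume already attached to the node is not counted again for the new pod
        delete(newVols, handle)
        attachedCount[key]++
    }
    for _, key := range newVols {
        newCount[key]++
    }
    for key, n := range newCount {
        if limit, ok := limits[key]; ok && attachedCount[key]+n > int(limit) {
            return false
        }
    }
    return true
}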
vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/csi_volume_predicate_test.go (new file, generated, vendored): 179 lines
@@ -0,0 +1,179 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package predicates

import (
"reflect"
"testing"

"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
)

func TestCSIVolumeCountPredicate(t *testing.T) {
// for pods with CSI pvcs
oneVolPod := &v1.Pod{
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: "csi-ebs",
},
},
},
},
},
}
twoVolPod := &v1.Pod{
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: "cs-ebs-1",
},
},
},
{
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: "csi-ebs-2",
},
},
},
},
},
}

runningPod := &v1.Pod{
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: "csi-ebs-3",
},
},
},
},
},
}

tests := []struct {
newPod *v1.Pod
existingPods []*v1.Pod
filterName string
maxVols int
fits bool
test string
}{
{
newPod: oneVolPod,
existingPods: []*v1.Pod{runningPod, twoVolPod},
filterName: "csi-ebs",
maxVols: 4,
fits: true,
test: "fits when node capacity >= new pods CSI volume",
},
{
newPod: oneVolPod,
existingPods: []*v1.Pod{runningPod, twoVolPod},
filterName: "csi-ebs",
maxVols: 2,
fits: false,
test: "doesn't when node capacity <= pods CSI volume",
},
}

defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, true)()
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrMaxVolumeCountExceeded}
// running attachable predicate tests with feature gate and limit present on nodes
for _, test := range tests {
node := getNodeWithPodAndVolumeLimits(test.existingPods, int64(test.maxVols), test.filterName)
pred := NewCSIMaxVolumeLimitPredicate(getFakeCSIPVInfo("csi-ebs", "csi-ebs"), getFakeCSIPVCInfo("csi-ebs"))
fits, reasons, err := pred(test.newPod, PredicateMetadata(test.newPod, nil), node)
if err != nil {
t.Errorf("Using allocatable [%s]%s: unexpected error: %v", test.filterName, test.test, err)
}
if !fits && !reflect.DeepEqual(reasons, expectedFailureReasons) {
t.Errorf("Using allocatable [%s]%s: unexpected failure reasons: %v, want: %v", test.filterName, test.test, reasons, expectedFailureReasons)
}
if fits != test.fits {
t.Errorf("Using allocatable [%s]%s: expected %v, got %v", test.filterName, test.test, test.fits, fits)
}
}
}

func getFakeCSIPVInfo(volumeName, driverName string) FakePersistentVolumeInfo {
return FakePersistentVolumeInfo{
{
ObjectMeta: metav1.ObjectMeta{Name: volumeName},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
CSI: &v1.CSIPersistentVolumeSource{
Driver: driverName,
VolumeHandle: volumeName,
},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{Name: volumeName + "-2"},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
CSI: &v1.CSIPersistentVolumeSource{
Driver: driverName,
VolumeHandle: volumeName + "-2",
},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{Name: volumeName + "-3"},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
CSI: &v1.CSIPersistentVolumeSource{
Driver: driverName,
VolumeHandle: volumeName + "-3",
},
},
},
},
}
}

func getFakeCSIPVCInfo(volumeName string) FakePersistentVolumeClaimInfo {
return FakePersistentVolumeClaimInfo{
{
ObjectMeta: metav1.ObjectMeta{Name: volumeName},
Spec: v1.PersistentVolumeClaimSpec{VolumeName: volumeName},
},
{
ObjectMeta: metav1.ObjectMeta{Name: volumeName + "-2"},
Spec: v1.PersistentVolumeClaimSpec{VolumeName: volumeName + "-2"},
},
{
ObjectMeta: metav1.ObjectMeta{Name: volumeName + "-3"},
Spec: v1.PersistentVolumeClaimSpec{VolumeName: volumeName + "-3"},
},
}
}
@@ -1,5 +1,5 @@
/*
Copyright 2016 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,23 +14,16 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package util
package predicates

import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"testing"

utilfeature "k8s.io/apiserver/pkg/util/feature"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
_ "k8s.io/kubernetes/pkg/features"
)

// GetControllerRef gets pod's owner controller reference from a pod object.
func GetControllerRef(pod *v1.Pod) *metav1.OwnerReference {
if len(pod.OwnerReferences) == 0 {
return nil
}
for i := range pod.OwnerReferences {
ref := &pod.OwnerReferences[i]
if ref.Controller != nil && *ref.Controller {
return ref
}
}
return nil
func TestMain(m *testing.M) {
utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run)
}
@@ -29,6 +29,7 @@ import (
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
"k8s.io/kubernetes/pkg/features"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
@@ -741,60 +742,12 @@ func TestVolumeCountConflicts(t *testing.T) {
},
}

pvInfo := func(filterName string) FakePersistentVolumeInfo {
return FakePersistentVolumeInfo{
{
ObjectMeta: metav1.ObjectMeta{Name: "some" + filterName + "Vol"},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: strings.ToLower(filterName) + "Vol"},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "someNon" + filterName + "Vol"},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{},
},
},
}
}

pvcInfo := func(filterName string) FakePersistentVolumeClaimInfo {
return FakePersistentVolumeClaimInfo{
{
ObjectMeta: metav1.ObjectMeta{Name: "some" + filterName + "Vol"},
Spec: v1.PersistentVolumeClaimSpec{VolumeName: "some" + filterName + "Vol"},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "someNon" + filterName + "Vol"},
Spec: v1.PersistentVolumeClaimSpec{VolumeName: "someNon" + filterName + "Vol"},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "pvcWithDeletedPV"},
Spec: v1.PersistentVolumeClaimSpec{VolumeName: "pvcWithDeletedPV"},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "anotherPVCWithDeletedPV"},
Spec: v1.PersistentVolumeClaimSpec{VolumeName: "anotherPVCWithDeletedPV"},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "unboundPVC"},
Spec: v1.PersistentVolumeClaimSpec{VolumeName: ""},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "anotherUnboundPVC"},
Spec: v1.PersistentVolumeClaimSpec{VolumeName: ""},
},
}
}

expectedFailureReasons := []algorithm.PredicateFailureReason{ErrMaxVolumeCountExceeded}

// running attachable predicate tests without feature gate and no limit present on nodes
for _, test := range tests {
os.Setenv(KubeMaxPDVols, strconv.Itoa(test.maxVols))
pred := NewMaxPDVolumeCountPredicate(test.filterName, pvInfo(test.filterName), pvcInfo(test.filterName))
pred := NewMaxPDVolumeCountPredicate(test.filterName, getFakePVInfo(test.filterName), getFakePVCInfo(test.filterName))
fits, reasons, err := pred(test.newPod, PredicateMetadata(test.newPod, nil), schedulercache.NewNodeInfo(test.existingPods...))
if err != nil {
t.Errorf("[%s]%s: unexpected error: %v", test.filterName, test.test, err)
@@ -812,7 +765,7 @@ func TestVolumeCountConflicts(t *testing.T) {
// running attachable predicate tests with feature gate and limit present on nodes
for _, test := range tests {
node := getNodeWithPodAndVolumeLimits(test.existingPods, int64(test.maxVols), test.filterName)
pred := NewMaxPDVolumeCountPredicate(test.filterName, pvInfo(test.filterName), pvcInfo(test.filterName))
pred := NewMaxPDVolumeCountPredicate(test.filterName, getFakePVInfo(test.filterName), getFakePVCInfo(test.filterName))
fits, reasons, err := pred(test.newPod, PredicateMetadata(test.newPod, nil), node)
if err != nil {
t.Errorf("Using allocatable [%s]%s: unexpected error: %v", test.filterName, test.test, err)
@@ -826,6 +779,122 @@ func TestVolumeCountConflicts(t *testing.T) {
}
}

func getFakePVInfo(filterName string) FakePersistentVolumeInfo {
return FakePersistentVolumeInfo{
{
ObjectMeta: metav1.ObjectMeta{Name: "some" + filterName + "Vol"},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: strings.ToLower(filterName) + "Vol"},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "someNon" + filterName + "Vol"},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{},
},
},
}
}

func getFakePVCInfo(filterName string) FakePersistentVolumeClaimInfo {
return FakePersistentVolumeClaimInfo{
{
ObjectMeta: metav1.ObjectMeta{Name: "some" + filterName + "Vol"},
Spec: v1.PersistentVolumeClaimSpec{VolumeName: "some" + filterName + "Vol"},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "someNon" + filterName + "Vol"},
Spec: v1.PersistentVolumeClaimSpec{VolumeName: "someNon" + filterName + "Vol"},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "pvcWithDeletedPV"},
Spec: v1.PersistentVolumeClaimSpec{VolumeName: "pvcWithDeletedPV"},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "anotherPVCWithDeletedPV"},
Spec: v1.PersistentVolumeClaimSpec{VolumeName: "anotherPVCWithDeletedPV"},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "unboundPVC"},
Spec: v1.PersistentVolumeClaimSpec{VolumeName: ""},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "anotherUnboundPVC"},
Spec: v1.PersistentVolumeClaimSpec{VolumeName: ""},
},
}
}

func TestMaxVolumeFuncM5(t *testing.T) {
node := &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node-for-m5-instance",
Labels: map[string]string{
kubeletapis.LabelInstanceType: "m5.large",
},
},
}
os.Unsetenv(KubeMaxPDVols)
maxVolumeFunc := getMaxVolumeFunc(EBSVolumeFilterType)
maxVolume := maxVolumeFunc(node)
if maxVolume != volumeutil.DefaultMaxEBSNitroVolumeLimit {
t.Errorf("Expected max volume to be %d got %d", volumeutil.DefaultMaxEBSNitroVolumeLimit, maxVolume)
}
}

func TestMaxVolumeFuncT3(t *testing.T) {
node := &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node-for-t3-instance",
Labels: map[string]string{
kubeletapis.LabelInstanceType: "t3.medium",
},
},
}
os.Unsetenv(KubeMaxPDVols)
maxVolumeFunc := getMaxVolumeFunc(EBSVolumeFilterType)
maxVolume := maxVolumeFunc(node)
if maxVolume != volumeutil.DefaultMaxEBSNitroVolumeLimit {
t.Errorf("Expected max volume to be %d got %d", volumeutil.DefaultMaxEBSNitroVolumeLimit, maxVolume)
}
}

func TestMaxVolumeFuncR5(t *testing.T) {
node := &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node-for-r5-instance",
Labels: map[string]string{
kubeletapis.LabelInstanceType: "r5d.xlarge",
},
},
}
os.Unsetenv(KubeMaxPDVols)
maxVolumeFunc := getMaxVolumeFunc(EBSVolumeFilterType)
maxVolume := maxVolumeFunc(node)
if maxVolume != volumeutil.DefaultMaxEBSNitroVolumeLimit {
t.Errorf("Expected max volume to be %d got %d", volumeutil.DefaultMaxEBSNitroVolumeLimit, maxVolume)
}
}

func TestMaxVolumeFuncM4(t *testing.T) {
node := &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node-for-m4-instance",
Labels: map[string]string{
kubeletapis.LabelInstanceType: "m4.2xlarge",
},
},
}
os.Unsetenv(KubeMaxPDVols)
maxVolumeFunc := getMaxVolumeFunc(EBSVolumeFilterType)
maxVolume := maxVolumeFunc(node)
if maxVolume != volumeutil.DefaultMaxEBSVolumes {
t.Errorf("Expected max volume to be %d got %d", volumeutil.DefaultMaxEBSVolumes, maxVolume)
}
}

func getNodeWithPodAndVolumeLimits(pods []*v1.Pod, limit int64, filter string) *schedulercache.NodeInfo {
nodeInfo := schedulercache.NewNodeInfo(pods...)
node := &v1.Node{
@@ -849,6 +918,6 @@ func getVolumeLimitKey(filterType string) v1.ResourceName {
case AzureDiskVolumeFilterType:
return v1.ResourceName(volumeutil.AzureVolumeLimitKey)
default:
return ""
return v1.ResourceName(volumeutil.GetCSIAttachLimitKey(filterType))
}
}
vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/metadata.go (generated, vendored): 357 lines changed
@ -17,10 +17,11 @@ limitations under the License.
|
||||
package predicates
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/klog"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@ -38,6 +39,12 @@ type PredicateMetadataFactory struct {
|
||||
podLister algorithm.PodLister
|
||||
}
|
||||
|
||||
// AntiAffinityTerm's topology key value used in predicate metadata
|
||||
type topologyPair struct {
|
||||
key string
|
||||
value string
|
||||
}
|
||||
|
||||
// Note that predicateMetadata and matchingPodAntiAffinityTerm need to be declared in the same file
|
||||
// due to the way declarations are processed in predicate declaration unit tests.
|
||||
type matchingPodAntiAffinityTerm struct {
|
||||
@ -45,6 +52,17 @@ type matchingPodAntiAffinityTerm struct {
|
||||
node *v1.Node
|
||||
}
|
||||
|
||||
type podSet map[*v1.Pod]struct{}
|
||||
|
||||
type topologyPairSet map[topologyPair]struct{}
|
||||
|
||||
// topologyPairsMaps keeps topologyPairToAntiAffinityPods and antiAffinityPodToTopologyPairs in sync
|
||||
// as they are the inverse of each others.
|
||||
type topologyPairsMaps struct {
|
||||
topologyPairToPods map[topologyPair]podSet
|
||||
podToTopologyPairs map[string]topologyPairSet
|
||||
}
|
||||
|
||||
// NOTE: When new fields are added/removed or logic is changed, please make sure that
|
||||
// RemovePod, AddPod, and ShallowCopy functions are updated to work with the new changes.
|
||||
type predicateMetadata struct {
|
||||
@ -52,17 +70,17 @@ type predicateMetadata struct {
|
||||
podBestEffort bool
|
||||
podRequest *schedulercache.Resource
|
||||
podPorts []*v1.ContainerPort
|
||||
//key is a pod full name with the anti-affinity rules.
|
||||
matchingAntiAffinityTerms map[string][]matchingPodAntiAffinityTerm
|
||||
// A map of node name to a list of Pods on the node that can potentially match
|
||||
// the affinity rules of the "pod".
|
||||
nodeNameToMatchingAffinityPods map[string][]*v1.Pod
|
||||
// A map of node name to a list of Pods on the node that can potentially match
|
||||
// the anti-affinity rules of the "pod".
|
||||
nodeNameToMatchingAntiAffinityPods map[string][]*v1.Pod
|
||||
serviceAffinityInUse bool
|
||||
serviceAffinityMatchingPodList []*v1.Pod
|
||||
serviceAffinityMatchingPodServices []*v1.Service
|
||||
|
||||
topologyPairsAntiAffinityPodsMap *topologyPairsMaps
|
||||
// A map of topology pairs to a list of Pods that can potentially match
|
||||
// the affinity terms of the "pod" and its inverse.
|
||||
topologyPairsPotentialAffinityPods *topologyPairsMaps
|
||||
// A map of topology pairs to a list of Pods that can potentially match
|
||||
// the anti-affinity terms of the "pod" and its inverse.
|
||||
topologyPairsPotentialAntiAffinityPods *topologyPairsMaps
|
||||
serviceAffinityInUse bool
|
||||
serviceAffinityMatchingPodList []*v1.Pod
|
||||
serviceAffinityMatchingPodServices []*v1.Service
|
||||
// ignoredExtendedResources is a set of extended resource names that will
|
||||
// be ignored in the PodFitsResources predicate.
|
||||
//
|
||||
@ -113,31 +131,74 @@ func (pfactory *PredicateMetadataFactory) GetMetadata(pod *v1.Pod, nodeNameToInf
|
||||
if pod == nil {
|
||||
return nil
|
||||
}
|
||||
matchingTerms, err := getMatchingAntiAffinityTerms(pod, nodeNameToInfoMap)
|
||||
// existingPodAntiAffinityMap will be used later for efficient check on existing pods' anti-affinity
|
||||
existingPodAntiAffinityMap, err := getTPMapMatchingExistingAntiAffinity(pod, nodeNameToInfoMap)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
affinityPods, antiAffinityPods, err := getPodsMatchingAffinity(pod, nodeNameToInfoMap)
|
||||
// incomingPodAffinityMap will be used later for efficient check on incoming pod's affinity
|
||||
// incomingPodAntiAffinityMap will be used later for efficient check on incoming pod's anti-affinity
|
||||
incomingPodAffinityMap, incomingPodAntiAffinityMap, err := getTPMapMatchingIncomingAffinityAntiAffinity(pod, nodeNameToInfoMap)
|
||||
if err != nil {
|
||||
glog.Errorf("[predicate meta data generation] error finding pods that match affinity terms: %v", err)
|
||||
klog.Errorf("[predicate meta data generation] error finding pods that match affinity terms: %v", err)
|
||||
return nil
|
||||
}
|
||||
predicateMetadata := &predicateMetadata{
|
||||
pod: pod,
|
||||
podBestEffort: isPodBestEffort(pod),
|
||||
podRequest: GetResourceRequest(pod),
|
||||
podPorts: schedutil.GetContainerPorts(pod),
|
||||
matchingAntiAffinityTerms: matchingTerms,
|
||||
nodeNameToMatchingAffinityPods: affinityPods,
|
||||
nodeNameToMatchingAntiAffinityPods: antiAffinityPods,
|
||||
pod: pod,
|
||||
podBestEffort: isPodBestEffort(pod),
|
||||
podRequest: GetResourceRequest(pod),
|
||||
podPorts: schedutil.GetContainerPorts(pod),
|
||||
topologyPairsPotentialAffinityPods: incomingPodAffinityMap,
|
||||
topologyPairsPotentialAntiAffinityPods: incomingPodAntiAffinityMap,
|
||||
topologyPairsAntiAffinityPodsMap: existingPodAntiAffinityMap,
|
||||
}
|
||||
for predicateName, precomputeFunc := range predicateMetadataProducers {
|
||||
glog.V(10).Infof("Precompute: %v", predicateName)
|
||||
klog.V(10).Infof("Precompute: %v", predicateName)
|
||||
precomputeFunc(predicateMetadata)
|
||||
}
|
||||
return predicateMetadata
|
||||
}
|
||||
|
||||
// returns a pointer to a new topologyPairsMaps
|
||||
func newTopologyPairsMaps() *topologyPairsMaps {
|
||||
return &topologyPairsMaps{topologyPairToPods: make(map[topologyPair]podSet),
|
||||
podToTopologyPairs: make(map[string]topologyPairSet)}
|
||||
}
|
||||
|
||||
func (topologyPairsMaps *topologyPairsMaps) addTopologyPair(pair topologyPair, pod *v1.Pod) {
|
||||
podFullName := schedutil.GetPodFullName(pod)
|
||||
if topologyPairsMaps.topologyPairToPods[pair] == nil {
|
||||
topologyPairsMaps.topologyPairToPods[pair] = make(map[*v1.Pod]struct{})
|
||||
}
|
||||
topologyPairsMaps.topologyPairToPods[pair][pod] = struct{}{}
|
||||
if topologyPairsMaps.podToTopologyPairs[podFullName] == nil {
|
||||
topologyPairsMaps.podToTopologyPairs[podFullName] = make(map[topologyPair]struct{})
|
||||
}
|
||||
topologyPairsMaps.podToTopologyPairs[podFullName][pair] = struct{}{}
|
||||
}
|
||||
|
||||
func (topologyPairsMaps *topologyPairsMaps) removePod(deletedPod *v1.Pod) {
|
||||
deletedPodFullName := schedutil.GetPodFullName(deletedPod)
|
||||
for pair := range topologyPairsMaps.podToTopologyPairs[deletedPodFullName] {
|
||||
delete(topologyPairsMaps.topologyPairToPods[pair], deletedPod)
|
||||
if len(topologyPairsMaps.topologyPairToPods[pair]) == 0 {
|
||||
delete(topologyPairsMaps.topologyPairToPods, pair)
|
||||
}
|
||||
}
|
||||
delete(topologyPairsMaps.podToTopologyPairs, deletedPodFullName)
|
||||
}
|
||||
|
||||
func (topologyPairsMaps *topologyPairsMaps) appendMaps(toAppend *topologyPairsMaps) {
|
||||
if toAppend == nil {
|
||||
return
|
||||
}
|
||||
for pair := range toAppend.topologyPairToPods {
|
||||
for pod := range toAppend.topologyPairToPods[pair] {
|
||||
topologyPairsMaps.addTopologyPair(pair, pod)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// RemovePod changes predicateMetadata assuming that the given `deletedPod` is
|
||||
// deleted from the system.
|
||||
func (meta *predicateMetadata) RemovePod(deletedPod *v1.Pod) error {
|
||||
@ -145,35 +206,10 @@ func (meta *predicateMetadata) RemovePod(deletedPod *v1.Pod) error {
|
||||
if deletedPodFullName == schedutil.GetPodFullName(meta.pod) {
|
||||
return fmt.Errorf("deletedPod and meta.pod must not be the same")
|
||||
}
|
||||
// Delete any anti-affinity rule from the deletedPod.
|
||||
delete(meta.matchingAntiAffinityTerms, deletedPodFullName)
|
||||
// Delete pod from the matching affinity or anti-affinity pods if exists.
|
||||
affinity := meta.pod.Spec.Affinity
|
||||
podNodeName := deletedPod.Spec.NodeName
|
||||
if affinity != nil && len(podNodeName) > 0 {
|
||||
if affinity.PodAffinity != nil {
|
||||
for i, p := range meta.nodeNameToMatchingAffinityPods[podNodeName] {
|
||||
if p == deletedPod {
|
||||
s := meta.nodeNameToMatchingAffinityPods[podNodeName]
|
||||
s[i] = s[len(s)-1]
|
||||
s = s[:len(s)-1]
|
||||
meta.nodeNameToMatchingAffinityPods[podNodeName] = s
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if affinity.PodAntiAffinity != nil {
|
||||
for i, p := range meta.nodeNameToMatchingAntiAffinityPods[podNodeName] {
|
||||
if p == deletedPod {
|
||||
s := meta.nodeNameToMatchingAntiAffinityPods[podNodeName]
|
||||
s[i] = s[len(s)-1]
|
||||
s = s[:len(s)-1]
|
||||
meta.nodeNameToMatchingAntiAffinityPods[podNodeName] = s
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
meta.topologyPairsAntiAffinityPodsMap.removePod(deletedPod)
|
||||
// Delete pod from the matching affinity or anti-affinity topology pairs maps.
|
||||
meta.topologyPairsPotentialAffinityPods.removePod(deletedPod)
|
||||
meta.topologyPairsPotentialAntiAffinityPods.removePod(deletedPod)
|
||||
// All pods in the serviceAffinityMatchingPodList are in the same namespace.
|
||||
// So, if the namespace of the first one is not the same as the namespace of the
|
||||
// deletedPod, we don't need to check the list, as deletedPod isn't in the list.
|
||||
@ -203,46 +239,36 @@ func (meta *predicateMetadata) AddPod(addedPod *v1.Pod, nodeInfo *schedulercache
|
||||
return fmt.Errorf("invalid node in nodeInfo")
|
||||
}
|
||||
// Add matching anti-affinity terms of the addedPod to the map.
|
||||
podMatchingTerms, err := getMatchingAntiAffinityTermsOfExistingPod(meta.pod, addedPod, nodeInfo.Node())
|
||||
topologyPairsMaps, err := getMatchingAntiAffinityTopologyPairsOfPod(meta.pod, addedPod, nodeInfo.Node())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(podMatchingTerms) > 0 {
|
||||
existingTerms, found := meta.matchingAntiAffinityTerms[addedPodFullName]
|
||||
if found {
|
||||
meta.matchingAntiAffinityTerms[addedPodFullName] = append(existingTerms,
|
||||
podMatchingTerms...)
|
||||
} else {
|
||||
meta.matchingAntiAffinityTerms[addedPodFullName] = podMatchingTerms
|
||||
}
|
||||
}
|
||||
meta.topologyPairsAntiAffinityPodsMap.appendMaps(topologyPairsMaps)
|
||||
// Add the pod to nodeNameToMatchingAffinityPods and nodeNameToMatchingAntiAffinityPods if needed.
|
||||
affinity := meta.pod.Spec.Affinity
|
||||
podNodeName := addedPod.Spec.NodeName
|
||||
if affinity != nil && len(podNodeName) > 0 {
|
||||
podNode := nodeInfo.Node()
|
||||
// It is assumed that when the added pod matches affinity of the meta.pod, all the terms must match,
|
||||
// this should be changed when the implementation of targetPodMatchesAffinityOfPod/podMatchesAffinityTermProperties
|
||||
// is changed
|
||||
if targetPodMatchesAffinityOfPod(meta.pod, addedPod) {
|
||||
found := false
|
||||
for _, p := range meta.nodeNameToMatchingAffinityPods[podNodeName] {
|
||||
if p == addedPod {
|
||||
found = true
|
||||
break
|
||||
affinityTerms := GetPodAffinityTerms(affinity.PodAffinity)
|
||||
for _, term := range affinityTerms {
|
||||
if topologyValue, ok := podNode.Labels[term.TopologyKey]; ok {
|
||||
pair := topologyPair{key: term.TopologyKey, value: topologyValue}
|
||||
meta.topologyPairsPotentialAffinityPods.addTopologyPair(pair, addedPod)
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
meta.nodeNameToMatchingAffinityPods[podNodeName] = append(meta.nodeNameToMatchingAffinityPods[podNodeName], addedPod)
|
||||
}
|
||||
}
|
||||
if targetPodMatchesAntiAffinityOfPod(meta.pod, addedPod) {
|
||||
found := false
|
||||
for _, p := range meta.nodeNameToMatchingAntiAffinityPods[podNodeName] {
|
||||
if p == addedPod {
|
||||
found = true
|
||||
break
|
||||
antiAffinityTerms := GetPodAntiAffinityTerms(affinity.PodAntiAffinity)
|
||||
for _, term := range antiAffinityTerms {
|
||||
if topologyValue, ok := podNode.Labels[term.TopologyKey]; ok {
|
||||
pair := topologyPair{key: term.TopologyKey, value: topologyValue}
|
||||
meta.topologyPairsPotentialAntiAffinityPods.addTopologyPair(pair, addedPod)
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
meta.nodeNameToMatchingAntiAffinityPods[podNodeName] = append(meta.nodeNameToMatchingAntiAffinityPods[podNodeName], addedPod)
|
||||
}
|
||||
}
|
||||
}
|
||||
// If addedPod is in the same namespace as the meta.pod, update the list
|
||||
@ -268,18 +294,12 @@ func (meta *predicateMetadata) ShallowCopy() algorithm.PredicateMetadata {
|
||||
ignoredExtendedResources: meta.ignoredExtendedResources,
|
||||
}
|
||||
newPredMeta.podPorts = append([]*v1.ContainerPort(nil), meta.podPorts...)
|
||||
newPredMeta.matchingAntiAffinityTerms = map[string][]matchingPodAntiAffinityTerm{}
|
||||
for k, v := range meta.matchingAntiAffinityTerms {
|
||||
newPredMeta.matchingAntiAffinityTerms[k] = append([]matchingPodAntiAffinityTerm(nil), v...)
|
||||
}
|
||||
newPredMeta.nodeNameToMatchingAffinityPods = make(map[string][]*v1.Pod)
|
||||
for k, v := range meta.nodeNameToMatchingAffinityPods {
|
||||
newPredMeta.nodeNameToMatchingAffinityPods[k] = append([]*v1.Pod(nil), v...)
|
||||
}
|
||||
newPredMeta.nodeNameToMatchingAntiAffinityPods = make(map[string][]*v1.Pod)
|
||||
for k, v := range meta.nodeNameToMatchingAntiAffinityPods {
|
||||
newPredMeta.nodeNameToMatchingAntiAffinityPods[k] = append([]*v1.Pod(nil), v...)
|
||||
}
|
||||
newPredMeta.topologyPairsPotentialAffinityPods = newTopologyPairsMaps()
|
||||
newPredMeta.topologyPairsPotentialAffinityPods.appendMaps(meta.topologyPairsPotentialAffinityPods)
|
||||
newPredMeta.topologyPairsPotentialAntiAffinityPods = newTopologyPairsMaps()
|
||||
newPredMeta.topologyPairsPotentialAntiAffinityPods.appendMaps(meta.topologyPairsPotentialAntiAffinityPods)
|
||||
newPredMeta.topologyPairsAntiAffinityPodsMap = newTopologyPairsMaps()
|
||||
newPredMeta.topologyPairsAntiAffinityPodsMap.appendMaps(meta.topologyPairsAntiAffinityPodsMap)
|
||||
newPredMeta.serviceAffinityMatchingPodServices = append([]*v1.Service(nil),
|
||||
meta.serviceAffinityMatchingPodServices...)
|
||||
newPredMeta.serviceAffinityMatchingPodList = append([]*v1.Pod(nil),
|
||||
@ -310,8 +330,8 @@ func getAffinityTermProperties(pod *v1.Pod, terms []v1.PodAffinityTerm) (propert
|
||||
return properties, nil
|
||||
}
|
||||
|
||||
// podMatchesAffinityTermProperties return true IFF the given pod matches all the given properties.
|
||||
func podMatchesAffinityTermProperties(pod *v1.Pod, properties []*affinityTermProperties) bool {
|
||||
// podMatchesAllAffinityTermProperties returns true IFF the given pod matches all the given properties.
|
||||
func podMatchesAllAffinityTermProperties(pod *v1.Pod, properties []*affinityTermProperties) bool {
|
||||
if len(properties) == 0 {
|
||||
return false
|
||||
}
|
||||
@ -323,34 +343,93 @@ func podMatchesAffinityTermProperties(pod *v1.Pod, properties []*affinityTermPro
|
||||
return true
|
||||
}
|
||||
|
||||
// getPodsMatchingAffinity finds existing Pods that match affinity terms of the given "pod".
|
||||
// It ignores topology. It returns a set of Pods that are checked later by the affinity
|
||||
// predicate. With this set of pods available, the affinity predicate does not
|
||||
// need to check all the pods in the cluster.
|
||||
func getPodsMatchingAffinity(pod *v1.Pod, nodeInfoMap map[string]*schedulercache.NodeInfo) (affinityPods map[string][]*v1.Pod, antiAffinityPods map[string][]*v1.Pod, err error) {
|
||||
allNodeNames := make([]string, 0, len(nodeInfoMap))
|
||||
|
||||
affinity := pod.Spec.Affinity
|
||||
if affinity == nil || (affinity.PodAffinity == nil && affinity.PodAntiAffinity == nil) {
|
||||
return nil, nil, nil
|
||||
// podMatchesAnyAffinityTermProperties returns true if the given pod matches any given property.
|
||||
func podMatchesAnyAffinityTermProperties(pod *v1.Pod, properties []*affinityTermProperties) bool {
|
||||
if len(properties) == 0 {
|
||||
return false
|
||||
}
|
||||
for _, property := range properties {
|
||||
if priorityutil.PodMatchesTermsNamespaceAndSelector(pod, property.namespaces, property.selector) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// getTPMapMatchingExistingAntiAffinity calculates the following for each existing pod on each node:
|
||||
// (1) Whether it has PodAntiAffinity
|
||||
// (2) Whether any AffinityTerm matches the incoming pod
|
||||
func getTPMapMatchingExistingAntiAffinity(pod *v1.Pod, nodeInfoMap map[string]*schedulercache.NodeInfo) (*topologyPairsMaps, error) {
|
||||
allNodeNames := make([]string, 0, len(nodeInfoMap))
|
||||
for name := range nodeInfoMap {
|
||||
allNodeNames = append(allNodeNames, name)
|
||||
}
|
||||
|
||||
var lock sync.Mutex
|
||||
var firstError error
|
||||
affinityPods = make(map[string][]*v1.Pod)
|
||||
antiAffinityPods = make(map[string][]*v1.Pod)
|
||||
appendResult := func(nodeName string, affPods, antiAffPods []*v1.Pod) {
|
||||
|
||||
topologyMaps := newTopologyPairsMaps()
|
||||
|
||||
appendTopologyPairsMaps := func(toAppend *topologyPairsMaps) {
|
||||
lock.Lock()
|
||||
defer lock.Unlock()
|
||||
if len(affPods) > 0 {
|
||||
affinityPods[nodeName] = affPods
|
||||
topologyMaps.appendMaps(toAppend)
|
||||
}
|
||||
catchError := func(err error) {
|
||||
lock.Lock()
|
||||
defer lock.Unlock()
|
||||
if firstError == nil {
|
||||
firstError = err
|
||||
}
|
||||
if len(antiAffPods) > 0 {
|
||||
antiAffinityPods[nodeName] = antiAffPods
|
||||
}
|
||||
|
||||
processNode := func(i int) {
|
||||
nodeInfo := nodeInfoMap[allNodeNames[i]]
|
||||
node := nodeInfo.Node()
|
||||
if node == nil {
|
||||
catchError(fmt.Errorf("node not found"))
|
||||
return
|
||||
}
|
||||
for _, existingPod := range nodeInfo.PodsWithAffinity() {
|
||||
existingPodTopologyMaps, err := getMatchingAntiAffinityTopologyPairsOfPod(pod, existingPod, node)
if err != nil {
catchError(err)
return
}
appendTopologyPairsMaps(existingPodTopologyMaps)
}
}
workqueue.ParallelizeUntil(context.TODO(), 16, len(allNodeNames), processNode)
return topologyMaps, firstError
}

// getTPMapMatchingIncomingAffinityAntiAffinity finds existing Pods that match affinity terms of the given "pod".
// It returns a topologyPairsMaps that are checked later by the affinity
// predicate. With this topologyPairsMaps available, the affinity predicate does not
// need to check all the pods in the cluster.
func getTPMapMatchingIncomingAffinityAntiAffinity(pod *v1.Pod, nodeInfoMap map[string]*schedulercache.NodeInfo) (topologyPairsAffinityPodsMaps *topologyPairsMaps, topologyPairsAntiAffinityPodsMaps *topologyPairsMaps, err error) {
affinity := pod.Spec.Affinity
if affinity == nil || (affinity.PodAffinity == nil && affinity.PodAntiAffinity == nil) {
return newTopologyPairsMaps(), newTopologyPairsMaps(), nil
}

allNodeNames := make([]string, 0, len(nodeInfoMap))
for name := range nodeInfoMap {
allNodeNames = append(allNodeNames, name)
}

var lock sync.Mutex
var firstError error
topologyPairsAffinityPodsMaps = newTopologyPairsMaps()
topologyPairsAntiAffinityPodsMaps = newTopologyPairsMaps()
appendResult := func(nodeName string, nodeTopologyPairsAffinityPodsMaps, nodeTopologyPairsAntiAffinityPodsMaps *topologyPairsMaps) {
lock.Lock()
defer lock.Unlock()
if len(nodeTopologyPairsAffinityPodsMaps.topologyPairToPods) > 0 {
topologyPairsAffinityPodsMaps.appendMaps(nodeTopologyPairsAffinityPodsMaps)
}
if len(nodeTopologyPairsAntiAffinityPodsMaps.topologyPairToPods) > 0 {
topologyPairsAntiAffinityPodsMaps.appendMaps(nodeTopologyPairsAntiAffinityPodsMaps)
}
}

@ -362,14 +441,12 @@ func getPodsMatchingAffinity(pod *v1.Pod, nodeInfoMap map[string]*schedulercache
}
}

affinityProperties, err := getAffinityTermProperties(pod, GetPodAffinityTerms(affinity.PodAffinity))
if err != nil {
return nil, nil, err
}
antiAffinityProperties, err := getAffinityTermProperties(pod, GetPodAntiAffinityTerms(affinity.PodAntiAffinity))
affinityTerms := GetPodAffinityTerms(affinity.PodAffinity)
affinityProperties, err := getAffinityTermProperties(pod, affinityTerms)
if err != nil {
return nil, nil, err
}
antiAffinityTerms := GetPodAntiAffinityTerms(affinity.PodAntiAffinity)

processNode := func(i int) {
nodeInfo := nodeInfoMap[allNodeNames[i]]
@ -378,28 +455,44 @@ func getPodsMatchingAffinity(pod *v1.Pod, nodeInfoMap map[string]*schedulercache
catchError(fmt.Errorf("nodeInfo.Node is nil"))
return
}
affPods := make([]*v1.Pod, 0, len(nodeInfo.Pods()))
antiAffPods := make([]*v1.Pod, 0, len(nodeInfo.Pods()))
nodeTopologyPairsAffinityPodsMaps := newTopologyPairsMaps()
nodeTopologyPairsAntiAffinityPodsMaps := newTopologyPairsMaps()
for _, existingPod := range nodeInfo.Pods() {
// Check affinity properties.
if podMatchesAffinityTermProperties(existingPod, affinityProperties) {
affPods = append(affPods, existingPod)
if podMatchesAllAffinityTermProperties(existingPod, affinityProperties) {
for _, term := range affinityTerms {
if topologyValue, ok := node.Labels[term.TopologyKey]; ok {
pair := topologyPair{key: term.TopologyKey, value: topologyValue}
nodeTopologyPairsAffinityPodsMaps.addTopologyPair(pair, existingPod)
}
}
}
// Check anti-affinity properties.
if podMatchesAffinityTermProperties(existingPod, antiAffinityProperties) {
antiAffPods = append(antiAffPods, existingPod)
for _, term := range antiAffinityTerms {
namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(pod, &term)
selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector)
if err != nil {
catchError(err)
return
}
if priorityutil.PodMatchesTermsNamespaceAndSelector(existingPod, namespaces, selector) {
if topologyValue, ok := node.Labels[term.TopologyKey]; ok {
pair := topologyPair{key: term.TopologyKey, value: topologyValue}
nodeTopologyPairsAntiAffinityPodsMaps.addTopologyPair(pair, existingPod)
}
}
}
}
if len(antiAffPods) > 0 || len(affPods) > 0 {
appendResult(node.Name, affPods, antiAffPods)
if len(nodeTopologyPairsAffinityPodsMaps.topologyPairToPods) > 0 || len(nodeTopologyPairsAntiAffinityPodsMaps.topologyPairToPods) > 0 {
appendResult(node.Name, nodeTopologyPairsAffinityPodsMaps, nodeTopologyPairsAntiAffinityPodsMaps)
}
}
workqueue.Parallelize(16, len(allNodeNames), processNode)
return affinityPods, antiAffinityPods, firstError
workqueue.ParallelizeUntil(context.TODO(), 16, len(allNodeNames), processNode)
return topologyPairsAffinityPodsMaps, topologyPairsAntiAffinityPodsMaps, firstError
}
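The function above is the "build" side of this refactor: instead of per-node pod lists, it indexes matching existing pods by (topologyKey, topologyValue) pair. As a rough, hedged illustration of why that lets the predicate answer per-node questions by lookups alone — with simplified types and names, not the vendored ones — the sketch below builds such an index from a few existing pods and their nodes' labels:

package main

import "fmt"

// Simplified stand-ins for the vendored types: the real topologyPairsMaps also
// keeps a reverse podToTopologyPairs index and stores pod objects, not names.
type topologyPair struct{ key, value string }

type pairIndex map[topologyPair][]string

// addPod records one existing pod under every topology pair derived from the
// labels of the node it runs on, for the given topology keys.
func addPod(idx pairIndex, podName string, nodeLabels map[string]string, topologyKeys []string) {
	for _, k := range topologyKeys {
		if v, ok := nodeLabels[k]; ok {
			p := topologyPair{key: k, value: v}
			idx[p] = append(idx[p], podName)
		}
	}
}

func main() {
	idx := pairIndex{}
	nodeA := map[string]string{"hostname": "nodeA", "zone": "z1"}
	nodeB := map[string]string{"hostname": "nodeB", "zone": "z1"}

	// Two existing pods that matched the incoming pod's terms.
	addPod(idx, "p1", nodeA, []string{"hostname", "zone"})
	addPod(idx, "p2", nodeB, []string{"hostname", "zone"})

	// Per-node questions become map lookups rather than a full pod scan.
	fmt.Println(idx[topologyPair{"hostname", "nodeA"}]) // [p1]
	fmt.Println(idx[topologyPair{"zone", "z1"}])        // [p1 p2]
}
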
// podMatchesAffinity returns true if "targetPod" matches any affinity rule of
// "pod". Similar to getPodsMatchingAffinity, this function does not check topology.
// targetPodMatchesAffinityOfPod returns true if "targetPod" matches ALL affinity terms of
// "pod". This function does not check topology.
// So, whether the targetPod actually matches or not needs further checks for a specific
// node.
func targetPodMatchesAffinityOfPod(pod, targetPod *v1.Pod) bool {
@ -409,14 +502,14 @@ func targetPodMatchesAffinityOfPod(pod, targetPod *v1.Pod) bool {
}
affinityProperties, err := getAffinityTermProperties(pod, GetPodAffinityTerms(affinity.PodAffinity))
if err != nil {
glog.Errorf("error in getting affinity properties of Pod %v", pod.Name)
klog.Errorf("error in getting affinity properties of Pod %v", pod.Name)
return false
}
return podMatchesAffinityTermProperties(targetPod, affinityProperties)
return podMatchesAllAffinityTermProperties(targetPod, affinityProperties)
}

// targetPodMatchesAntiAffinityOfPod returns true if "targetPod" matches any anti-affinity
// rule of "pod". Similar to getPodsMatchingAffinity, this function does not check topology.
// targetPodMatchesAntiAffinityOfPod returns true if "targetPod" matches ANY anti-affinity
// term of "pod". This function does not check topology.
// So, whether the targetPod actually matches or not needs further checks for a specific
// node.
func targetPodMatchesAntiAffinityOfPod(pod, targetPod *v1.Pod) bool {
@ -426,8 +519,8 @@ func targetPodMatchesAntiAffinityOfPod(pod, targetPod *v1.Pod) bool {
}
properties, err := getAffinityTermProperties(pod, GetPodAntiAffinityTerms(affinity.PodAntiAffinity))
if err != nil {
glog.Errorf("error in getting anti-affinity properties of Pod %v", pod.Name)
klog.Errorf("error in getting anti-affinity properties of Pod %v", pod.Name)
return false
}
return podMatchesAffinityTermProperties(targetPod, properties)
return podMatchesAnyAffinityTermProperties(targetPod, properties)
}

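The two helpers above differ only in their quantifier: affinity requires the target pod to satisfy ALL term properties, while anti-affinity records a conflict if it matches ANY one term. A minimal sketch of that distinction, using a made-up termProperty rather than the real namespace-plus-label-selector properties:

package main

import "fmt"

// termProperty is an illustrative stand-in: a label key that must exist on the pod.
type termProperty struct{ requiredLabel string }

func matches(podLabels map[string]string, p termProperty) bool {
	_, ok := podLabels[p.requiredLabel]
	return ok
}

// Affinity semantics: the target pod must satisfy every term.
func matchesAll(podLabels map[string]string, props []termProperty) bool {
	for _, p := range props {
		if !matches(podLabels, p) {
			return false
		}
	}
	return true
}

// Anti-affinity semantics: one matching term is enough to flag a conflict.
func matchesAny(podLabels map[string]string, props []termProperty) bool {
	for _, p := range props {
		if matches(podLabels, p) {
			return true
		}
	}
	return false
}

func main() {
	target := map[string]string{"aaa": ""}
	props := []termProperty{{"aaa"}, {"bbb"}}
	fmt.Println(matchesAll(target, props)) // false: "bbb" is missing
	fmt.Println(matchesAny(target, props)) // true: "aaa" matches
}
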
527
vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/metadata_test.go
generated
vendored
@ -28,42 +28,6 @@ import (
|
||||
schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing"
|
||||
)
|
||||
|
||||
// sortableAntiAffinityTerms lets us to sort anti-affinity terms.
|
||||
type sortableAntiAffinityTerms []matchingPodAntiAffinityTerm
|
||||
|
||||
// Less establishes some ordering between two matchingPodAntiAffinityTerms for
|
||||
// sorting.
|
||||
func (s sortableAntiAffinityTerms) Less(i, j int) bool {
|
||||
t1, t2 := s[i], s[j]
|
||||
if t1.node.Name != t2.node.Name {
|
||||
return t1.node.Name < t2.node.Name
|
||||
}
|
||||
if len(t1.term.Namespaces) != len(t2.term.Namespaces) {
|
||||
return len(t1.term.Namespaces) < len(t2.term.Namespaces)
|
||||
}
|
||||
if t1.term.TopologyKey != t2.term.TopologyKey {
|
||||
return t1.term.TopologyKey < t2.term.TopologyKey
|
||||
}
|
||||
if len(t1.term.LabelSelector.MatchLabels) != len(t2.term.LabelSelector.MatchLabels) {
|
||||
return len(t1.term.LabelSelector.MatchLabels) < len(t2.term.LabelSelector.MatchLabels)
|
||||
}
|
||||
return false
|
||||
}
|
||||
func (s sortableAntiAffinityTerms) Len() int { return len(s) }
|
||||
func (s sortableAntiAffinityTerms) Swap(i, j int) {
|
||||
s[i], s[j] = s[j], s[i]
|
||||
}
|
||||
|
||||
var _ = sort.Interface(sortableAntiAffinityTerms{})
|
||||
|
||||
func sortAntiAffinityTerms(terms map[string][]matchingPodAntiAffinityTerm) {
|
||||
for k, v := range terms {
|
||||
sortableTerms := sortableAntiAffinityTerms(v)
|
||||
sort.Sort(sortableTerms)
|
||||
terms[k] = sortableTerms
|
||||
}
|
||||
}
|
||||
|
||||
// sortablePods lets us to sort pods.
|
||||
type sortablePods []*v1.Pod
|
||||
|
||||
@ -88,13 +52,6 @@ func (s sortableServices) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
|
||||
var _ = sort.Interface(&sortableServices{})
|
||||
|
||||
func sortNodePodMap(np map[string][]*v1.Pod) {
|
||||
for _, pl := range np {
|
||||
sortablePods := sortablePods(pl)
|
||||
sort.Sort(sortablePods)
|
||||
}
|
||||
}
|
||||
|
||||
// predicateMetadataEquivalent returns true if the two metadata are equivalent.
|
||||
// Note: this function does not compare podRequest.
|
||||
func predicateMetadataEquivalent(meta1, meta2 *predicateMetadata) error {
|
||||
@ -113,20 +70,19 @@ func predicateMetadataEquivalent(meta1, meta2 *predicateMetadata) error {
|
||||
for !reflect.DeepEqual(meta1.podPorts, meta2.podPorts) {
|
||||
return fmt.Errorf("podPorts are not equal")
|
||||
}
|
||||
sortAntiAffinityTerms(meta1.matchingAntiAffinityTerms)
|
||||
sortAntiAffinityTerms(meta2.matchingAntiAffinityTerms)
|
||||
if !reflect.DeepEqual(meta1.matchingAntiAffinityTerms, meta2.matchingAntiAffinityTerms) {
|
||||
return fmt.Errorf("matchingAntiAffinityTerms are not euqal")
|
||||
if !reflect.DeepEqual(meta1.topologyPairsPotentialAffinityPods, meta2.topologyPairsPotentialAffinityPods) {
|
||||
return fmt.Errorf("topologyPairsPotentialAffinityPods are not equal")
|
||||
}
|
||||
sortNodePodMap(meta1.nodeNameToMatchingAffinityPods)
|
||||
sortNodePodMap(meta2.nodeNameToMatchingAffinityPods)
|
||||
if !reflect.DeepEqual(meta1.nodeNameToMatchingAffinityPods, meta2.nodeNameToMatchingAffinityPods) {
|
||||
return fmt.Errorf("nodeNameToMatchingAffinityPods are not euqal")
|
||||
if !reflect.DeepEqual(meta1.topologyPairsPotentialAntiAffinityPods, meta2.topologyPairsPotentialAntiAffinityPods) {
|
||||
return fmt.Errorf("topologyPairsPotentialAntiAffinityPods are not equal")
|
||||
}
|
||||
sortNodePodMap(meta1.nodeNameToMatchingAntiAffinityPods)
|
||||
sortNodePodMap(meta2.nodeNameToMatchingAntiAffinityPods)
|
||||
if !reflect.DeepEqual(meta1.nodeNameToMatchingAntiAffinityPods, meta2.nodeNameToMatchingAntiAffinityPods) {
|
||||
return fmt.Errorf("nodeNameToMatchingAntiAffinityPods are not euqal")
|
||||
if !reflect.DeepEqual(meta1.topologyPairsAntiAffinityPodsMap.podToTopologyPairs,
|
||||
meta2.topologyPairsAntiAffinityPodsMap.podToTopologyPairs) {
|
||||
return fmt.Errorf("topologyPairsAntiAffinityPodsMap.podToTopologyPairs are not equal")
|
||||
}
|
||||
if !reflect.DeepEqual(meta1.topologyPairsAntiAffinityPodsMap.topologyPairToPods,
|
||||
meta2.topologyPairsAntiAffinityPodsMap.topologyPairToPods) {
|
||||
return fmt.Errorf("topologyPairsAntiAffinityPodsMap.topologyPairToPods are not equal")
|
||||
}
|
||||
if meta1.serviceAffinityInUse {
|
||||
sortablePods1 := sortablePods(meta1.serviceAffinityMatchingPodList)
|
||||
@ -236,7 +192,7 @@ func TestPredicateMetadata_AddRemovePod(t *testing.T) {
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
description string
|
||||
name string
|
||||
pendingPod *v1.Pod
|
||||
addedPod *v1.Pod
|
||||
existingPods []*v1.Pod
|
||||
@ -244,7 +200,7 @@ func TestPredicateMetadata_AddRemovePod(t *testing.T) {
|
||||
services []*v1.Service
|
||||
}{
|
||||
{
|
||||
description: "no anti-affinity or service affinity exist",
|
||||
name: "no anti-affinity or service affinity exist",
|
||||
pendingPod: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pending", Labels: selector1},
|
||||
},
|
||||
@ -267,7 +223,7 @@ func TestPredicateMetadata_AddRemovePod(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "metadata anti-affinity terms are updated correctly after adding and removing a pod",
|
||||
name: "metadata anti-affinity terms are updated correctly after adding and removing a pod",
|
||||
pendingPod: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pending", Labels: selector1},
|
||||
},
|
||||
@ -300,7 +256,7 @@ func TestPredicateMetadata_AddRemovePod(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "metadata service-affinity data are updated correctly after adding and removing a pod",
|
||||
name: "metadata service-affinity data are updated correctly after adding and removing a pod",
|
||||
pendingPod: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pending", Labels: selector1},
|
||||
},
|
||||
@ -324,7 +280,7 @@ func TestPredicateMetadata_AddRemovePod(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "metadata anti-affinity terms and service affinity data are updated correctly after adding and removing a pod",
|
||||
name: "metadata anti-affinity terms and service affinity data are updated correctly after adding and removing a pod",
|
||||
pendingPod: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pending", Labels: selector1},
|
||||
},
|
||||
@ -358,7 +314,7 @@ func TestPredicateMetadata_AddRemovePod(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "metadata matching pod affinity and anti-affinity are updated correctly after adding and removing a pod",
|
||||
name: "metadata matching pod affinity and anti-affinity are updated correctly after adding and removing a pod",
|
||||
pendingPod: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pending", Labels: selector1},
|
||||
},
|
||||
@ -395,44 +351,46 @@ func TestPredicateMetadata_AddRemovePod(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
allPodLister := schedulertesting.FakePodLister(append(test.existingPods, test.addedPod))
|
||||
// getMeta creates predicate meta data given the list of pods.
|
||||
getMeta := func(lister schedulertesting.FakePodLister) (*predicateMetadata, map[string]*schedulercache.NodeInfo) {
|
||||
nodeInfoMap := schedulercache.CreateNodeNameToInfoMap(lister, test.nodes)
|
||||
// nodeList is a list of non-pointer nodes to feed to FakeNodeListInfo.
|
||||
nodeList := []v1.Node{}
|
||||
for _, n := range test.nodes {
|
||||
nodeList = append(nodeList, *n)
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
allPodLister := schedulertesting.FakePodLister(append(test.existingPods, test.addedPod))
|
||||
// getMeta creates predicate meta data given the list of pods.
|
||||
getMeta := func(lister schedulertesting.FakePodLister) (*predicateMetadata, map[string]*schedulercache.NodeInfo) {
|
||||
nodeInfoMap := schedulercache.CreateNodeNameToInfoMap(lister, test.nodes)
|
||||
// nodeList is a list of non-pointer nodes to feed to FakeNodeListInfo.
|
||||
nodeList := []v1.Node{}
|
||||
for _, n := range test.nodes {
|
||||
nodeList = append(nodeList, *n)
|
||||
}
|
||||
_, precompute := NewServiceAffinityPredicate(lister, schedulertesting.FakeServiceLister(test.services), FakeNodeListInfo(nodeList), nil)
|
||||
RegisterPredicateMetadataProducer("ServiceAffinityMetaProducer", precompute)
|
||||
pmf := PredicateMetadataFactory{lister}
|
||||
meta := pmf.GetMetadata(test.pendingPod, nodeInfoMap)
|
||||
return meta.(*predicateMetadata), nodeInfoMap
|
||||
}
|
||||
_, precompute := NewServiceAffinityPredicate(lister, schedulertesting.FakeServiceLister(test.services), FakeNodeListInfo(nodeList), nil)
|
||||
RegisterPredicateMetadataProducer("ServiceAffinityMetaProducer", precompute)
|
||||
pmf := PredicateMetadataFactory{lister}
|
||||
meta := pmf.GetMetadata(test.pendingPod, nodeInfoMap)
|
||||
return meta.(*predicateMetadata), nodeInfoMap
|
||||
}
|
||||
|
||||
// allPodsMeta is meta data produced when all pods, including test.addedPod
|
||||
// are given to the metadata producer.
|
||||
allPodsMeta, _ := getMeta(allPodLister)
|
||||
// existingPodsMeta1 is meta data produced for test.existingPods (without test.addedPod).
|
||||
existingPodsMeta1, nodeInfoMap := getMeta(schedulertesting.FakePodLister(test.existingPods))
|
||||
// Add test.addedPod to existingPodsMeta1 and make sure meta is equal to allPodsMeta
|
||||
nodeInfo := nodeInfoMap[test.addedPod.Spec.NodeName]
|
||||
if err := existingPodsMeta1.AddPod(test.addedPod, nodeInfo); err != nil {
|
||||
t.Errorf("test [%v]: error adding pod to meta: %v", test.description, err)
|
||||
}
|
||||
if err := predicateMetadataEquivalent(allPodsMeta, existingPodsMeta1); err != nil {
|
||||
t.Errorf("test [%v]: meta data are not equivalent: %v", test.description, err)
|
||||
}
|
||||
// Remove the added pod and from existingPodsMeta1 an make sure it is equal
|
||||
// to meta generated for existing pods.
|
||||
existingPodsMeta2, _ := getMeta(schedulertesting.FakePodLister(test.existingPods))
|
||||
if err := existingPodsMeta1.RemovePod(test.addedPod); err != nil {
|
||||
t.Errorf("test [%v]: error removing pod from meta: %v", test.description, err)
|
||||
}
|
||||
if err := predicateMetadataEquivalent(existingPodsMeta1, existingPodsMeta2); err != nil {
|
||||
t.Errorf("test [%v]: meta data are not equivalent: %v", test.description, err)
|
||||
}
|
||||
// allPodsMeta is meta data produced when all pods, including test.addedPod
|
||||
// are given to the metadata producer.
|
||||
allPodsMeta, _ := getMeta(allPodLister)
|
||||
// existingPodsMeta1 is meta data produced for test.existingPods (without test.addedPod).
|
||||
existingPodsMeta1, nodeInfoMap := getMeta(schedulertesting.FakePodLister(test.existingPods))
|
||||
// Add test.addedPod to existingPodsMeta1 and make sure meta is equal to allPodsMeta
|
||||
nodeInfo := nodeInfoMap[test.addedPod.Spec.NodeName]
|
||||
if err := existingPodsMeta1.AddPod(test.addedPod, nodeInfo); err != nil {
|
||||
t.Errorf("error adding pod to meta: %v", err)
|
||||
}
|
||||
if err := predicateMetadataEquivalent(allPodsMeta, existingPodsMeta1); err != nil {
|
||||
t.Errorf("meta data are not equivalent: %v", err)
|
||||
}
|
||||
// Remove the added pod and from existingPodsMeta1 an make sure it is equal
|
||||
// to meta generated for existing pods.
|
||||
existingPodsMeta2, _ := getMeta(schedulertesting.FakePodLister(test.existingPods))
|
||||
if err := existingPodsMeta1.RemovePod(test.addedPod); err != nil {
|
||||
t.Errorf("error removing pod from meta: %v", err)
|
||||
}
|
||||
if err := predicateMetadataEquivalent(existingPodsMeta1, existingPodsMeta2); err != nil {
|
||||
t.Errorf("meta data are not equivalent: %v", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@ -463,52 +421,93 @@ func TestPredicateMetadata_ShallowCopy(t *testing.T) {
|
||||
HostIP: "1.2.3.4",
|
||||
},
|
||||
},
|
||||
matchingAntiAffinityTerms: map[string][]matchingPodAntiAffinityTerm{
|
||||
"term1": {
|
||||
{
|
||||
term: &v1.PodAffinityTerm{TopologyKey: "node"},
|
||||
node: &v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
|
||||
},
|
||||
topologyPairsAntiAffinityPodsMap: &topologyPairsMaps{
|
||||
topologyPairToPods: map[topologyPair]podSet{
|
||||
{key: "name", value: "machine1"}: {
|
||||
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p2", Labels: selector1},
|
||||
Spec: v1.PodSpec{NodeName: "nodeC"},
|
||||
}: struct{}{},
|
||||
},
|
||||
{key: "name", value: "machine2"}: {
|
||||
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p1", Labels: selector1},
|
||||
Spec: v1.PodSpec{NodeName: "nodeA"},
|
||||
}: struct{}{},
|
||||
},
|
||||
},
|
||||
podToTopologyPairs: map[string]topologyPairSet{
|
||||
"p2_": {
|
||||
topologyPair{key: "name", value: "machine1"}: struct{}{},
|
||||
},
|
||||
"p1_": {
|
||||
topologyPair{key: "name", value: "machine2"}: struct{}{},
|
||||
},
|
||||
},
|
||||
},
|
||||
nodeNameToMatchingAffinityPods: map[string][]*v1.Pod{
|
||||
"nodeA": {
|
||||
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p1", Labels: selector1},
|
||||
Spec: v1.PodSpec{NodeName: "nodeA"},
|
||||
topologyPairsPotentialAffinityPods: &topologyPairsMaps{
|
||||
topologyPairToPods: map[topologyPair]podSet{
|
||||
{key: "name", value: "nodeA"}: {
|
||||
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p1", Labels: selector1},
|
||||
Spec: v1.PodSpec{NodeName: "nodeA"},
|
||||
}: struct{}{},
|
||||
},
|
||||
{key: "name", value: "nodeC"}: {
|
||||
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p2"},
|
||||
Spec: v1.PodSpec{
|
||||
NodeName: "nodeC",
|
||||
},
|
||||
}: struct{}{},
|
||||
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p6", Labels: selector1},
|
||||
Spec: v1.PodSpec{NodeName: "nodeC"},
|
||||
}: struct{}{},
|
||||
},
|
||||
},
|
||||
"nodeC": {
|
||||
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p2"},
|
||||
Spec: v1.PodSpec{
|
||||
NodeName: "nodeC",
|
||||
},
|
||||
podToTopologyPairs: map[string]topologyPairSet{
|
||||
"p1_": {
|
||||
topologyPair{key: "name", value: "nodeA"}: struct{}{},
|
||||
},
|
||||
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p6", Labels: selector1},
|
||||
Spec: v1.PodSpec{NodeName: "nodeC"},
|
||||
"p2_": {
|
||||
topologyPair{key: "name", value: "nodeC"}: struct{}{},
|
||||
},
|
||||
"p6_": {
|
||||
topologyPair{key: "name", value: "nodeC"}: struct{}{},
|
||||
},
|
||||
},
|
||||
},
|
||||
nodeNameToMatchingAntiAffinityPods: map[string][]*v1.Pod{
|
||||
"nodeN": {
|
||||
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p1", Labels: selector1},
|
||||
Spec: v1.PodSpec{NodeName: "nodeN"},
|
||||
topologyPairsPotentialAntiAffinityPods: &topologyPairsMaps{
|
||||
topologyPairToPods: map[topologyPair]podSet{
|
||||
{key: "name", value: "nodeN"}: {
|
||||
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p1", Labels: selector1},
|
||||
Spec: v1.PodSpec{NodeName: "nodeN"},
|
||||
}: struct{}{},
|
||||
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p2"},
|
||||
Spec: v1.PodSpec{
|
||||
NodeName: "nodeM",
|
||||
},
|
||||
}: struct{}{},
|
||||
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p3"},
|
||||
Spec: v1.PodSpec{
|
||||
NodeName: "nodeM",
|
||||
},
|
||||
}: struct{}{},
|
||||
},
|
||||
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p2"},
|
||||
Spec: v1.PodSpec{
|
||||
NodeName: "nodeM",
|
||||
},
|
||||
},
|
||||
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p3"},
|
||||
Spec: v1.PodSpec{
|
||||
NodeName: "nodeM",
|
||||
},
|
||||
{key: "name", value: "nodeM"}: {
|
||||
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p6", Labels: selector1},
|
||||
Spec: v1.PodSpec{NodeName: "nodeM"},
|
||||
}: struct{}{},
|
||||
},
|
||||
},
|
||||
"nodeM": {
|
||||
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p6", Labels: selector1},
|
||||
Spec: v1.PodSpec{NodeName: "nodeM"},
|
||||
podToTopologyPairs: map[string]topologyPairSet{
|
||||
"p1_": {
|
||||
topologyPair{key: "name", value: "nodeN"}: struct{}{},
|
||||
},
|
||||
"p2_": {
|
||||
topologyPair{key: "name", value: "nodeN"}: struct{}{},
|
||||
},
|
||||
"p3_": {
|
||||
topologyPair{key: "name", value: "nodeN"}: struct{}{},
|
||||
},
|
||||
"p6_": {
|
||||
topologyPair{key: "name", value: "nodeM"}: struct{}{},
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -526,3 +525,269 @@ func TestPredicateMetadata_ShallowCopy(t *testing.T) {
|
||||
t.Errorf("Copy is not equal to source!")
|
||||
}
|
||||
}
|
||||
|
||||
// TestGetTPMapMatchingIncomingAffinityAntiAffinity tests against method getTPMapMatchingIncomingAffinityAntiAffinity
|
||||
// on Anti Affinity cases
|
||||
func TestGetTPMapMatchingIncomingAffinityAntiAffinity(t *testing.T) {
|
||||
newPodAffinityTerms := func(keys ...string) []v1.PodAffinityTerm {
|
||||
var terms []v1.PodAffinityTerm
|
||||
for _, key := range keys {
|
||||
terms = append(terms, v1.PodAffinityTerm{
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: key,
|
||||
Operator: metav1.LabelSelectorOpExists,
|
||||
},
|
||||
},
|
||||
},
|
||||
TopologyKey: "hostname",
|
||||
})
|
||||
}
|
||||
return terms
|
||||
}
|
||||
newPod := func(labels ...string) *v1.Pod {
|
||||
labelMap := make(map[string]string)
|
||||
for _, l := range labels {
|
||||
labelMap[l] = ""
|
||||
}
|
||||
return &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "normal", Labels: labelMap},
|
||||
Spec: v1.PodSpec{NodeName: "nodeA"},
|
||||
}
|
||||
}
|
||||
normalPodA := newPod("aaa")
|
||||
normalPodB := newPod("bbb")
|
||||
normalPodAB := newPod("aaa", "bbb")
|
||||
nodeA := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"hostname": "nodeA"}}}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
existingPods []*v1.Pod
|
||||
nodes []*v1.Node
|
||||
pod *v1.Pod
|
||||
wantAffinityPodsMaps *topologyPairsMaps
|
||||
wantAntiAffinityPodsMaps *topologyPairsMaps
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "nil test",
|
||||
nodes: []*v1.Node{nodeA},
|
||||
pod: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "aaa-normal"},
|
||||
},
|
||||
wantAffinityPodsMaps: newTopologyPairsMaps(),
|
||||
wantAntiAffinityPodsMaps: newTopologyPairsMaps(),
|
||||
},
|
||||
{
|
||||
name: "incoming pod without affinity/anti-affinity causes a no-op",
|
||||
existingPods: []*v1.Pod{normalPodA},
|
||||
nodes: []*v1.Node{nodeA},
|
||||
pod: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "aaa-normal"},
|
||||
},
|
||||
wantAffinityPodsMaps: newTopologyPairsMaps(),
|
||||
wantAntiAffinityPodsMaps: newTopologyPairsMaps(),
|
||||
},
|
||||
{
|
||||
name: "no pod has label that violates incoming pod's affinity and anti-affinity",
|
||||
existingPods: []*v1.Pod{normalPodB},
|
||||
nodes: []*v1.Node{nodeA},
|
||||
pod: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "aaa-anti"},
|
||||
Spec: v1.PodSpec{
|
||||
Affinity: &v1.Affinity{
|
||||
PodAffinity: &v1.PodAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: newPodAffinityTerms("aaa"),
|
||||
},
|
||||
PodAntiAffinity: &v1.PodAntiAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: newPodAffinityTerms("aaa"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantAffinityPodsMaps: newTopologyPairsMaps(),
|
||||
wantAntiAffinityPodsMaps: newTopologyPairsMaps(),
|
||||
},
|
||||
{
|
||||
name: "existing pod matches incoming pod's affinity and anti-affinity - single term case",
|
||||
existingPods: []*v1.Pod{normalPodA},
|
||||
nodes: []*v1.Node{nodeA},
|
||||
pod: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "affi-antiaffi"},
|
||||
Spec: v1.PodSpec{
|
||||
Affinity: &v1.Affinity{
|
||||
PodAffinity: &v1.PodAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: newPodAffinityTerms("aaa"),
|
||||
},
|
||||
PodAntiAffinity: &v1.PodAntiAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: newPodAffinityTerms("aaa"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantAffinityPodsMaps: &topologyPairsMaps{
|
||||
topologyPairToPods: map[topologyPair]podSet{
|
||||
{key: "hostname", value: "nodeA"}: {normalPodA: struct{}{}},
|
||||
},
|
||||
podToTopologyPairs: map[string]topologyPairSet{
|
||||
"normal_": {
|
||||
topologyPair{key: "hostname", value: "nodeA"}: struct{}{},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantAntiAffinityPodsMaps: &topologyPairsMaps{
|
||||
topologyPairToPods: map[topologyPair]podSet{
|
||||
{key: "hostname", value: "nodeA"}: {normalPodA: struct{}{}},
|
||||
},
|
||||
podToTopologyPairs: map[string]topologyPairSet{
|
||||
"normal_": {
|
||||
topologyPair{key: "hostname", value: "nodeA"}: struct{}{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "existing pod matches incoming pod's affinity and anti-affinity - mutiple terms case",
|
||||
existingPods: []*v1.Pod{normalPodAB},
|
||||
nodes: []*v1.Node{nodeA},
|
||||
pod: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "affi-antiaffi"},
|
||||
Spec: v1.PodSpec{
|
||||
Affinity: &v1.Affinity{
|
||||
PodAffinity: &v1.PodAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: newPodAffinityTerms("aaa", "bbb"),
|
||||
},
|
||||
PodAntiAffinity: &v1.PodAntiAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: newPodAffinityTerms("aaa"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantAffinityPodsMaps: &topologyPairsMaps{
|
||||
topologyPairToPods: map[topologyPair]podSet{
|
||||
{key: "hostname", value: "nodeA"}: {normalPodAB: struct{}{}},
|
||||
},
|
||||
podToTopologyPairs: map[string]topologyPairSet{
|
||||
"normal_": {
|
||||
topologyPair{key: "hostname", value: "nodeA"}: struct{}{},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantAntiAffinityPodsMaps: &topologyPairsMaps{
|
||||
topologyPairToPods: map[topologyPair]podSet{
|
||||
{key: "hostname", value: "nodeA"}: {normalPodAB: struct{}{}},
|
||||
},
|
||||
podToTopologyPairs: map[string]topologyPairSet{
|
||||
"normal_": {
|
||||
topologyPair{key: "hostname", value: "nodeA"}: struct{}{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "existing pod not match incoming pod's affinity but matches anti-affinity",
|
||||
existingPods: []*v1.Pod{normalPodA},
|
||||
nodes: []*v1.Node{nodeA},
|
||||
pod: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "affi-antiaffi"},
|
||||
Spec: v1.PodSpec{
|
||||
Affinity: &v1.Affinity{
|
||||
PodAffinity: &v1.PodAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: newPodAffinityTerms("aaa", "bbb"),
|
||||
},
|
||||
PodAntiAffinity: &v1.PodAntiAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: newPodAffinityTerms("aaa", "bbb"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantAffinityPodsMaps: newTopologyPairsMaps(),
|
||||
wantAntiAffinityPodsMaps: &topologyPairsMaps{
|
||||
topologyPairToPods: map[topologyPair]podSet{
|
||||
{key: "hostname", value: "nodeA"}: {normalPodA: struct{}{}},
|
||||
},
|
||||
podToTopologyPairs: map[string]topologyPairSet{
|
||||
"normal_": {
|
||||
topologyPair{key: "hostname", value: "nodeA"}: struct{}{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "incoming pod's anti-affinity has more than one term - existing pod violates partial term - case 1",
|
||||
existingPods: []*v1.Pod{normalPodAB},
|
||||
nodes: []*v1.Node{nodeA},
|
||||
pod: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "anaffi-antiaffiti"},
|
||||
Spec: v1.PodSpec{
|
||||
Affinity: &v1.Affinity{
|
||||
PodAffinity: &v1.PodAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: newPodAffinityTerms("aaa", "ccc"),
|
||||
},
|
||||
PodAntiAffinity: &v1.PodAntiAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: newPodAffinityTerms("aaa", "ccc"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantAffinityPodsMaps: newTopologyPairsMaps(),
|
||||
wantAntiAffinityPodsMaps: &topologyPairsMaps{
|
||||
topologyPairToPods: map[topologyPair]podSet{
|
||||
{key: "hostname", value: "nodeA"}: {normalPodAB: struct{}{}},
|
||||
},
|
||||
podToTopologyPairs: map[string]topologyPairSet{
|
||||
"normal_": {
|
||||
topologyPair{key: "hostname", value: "nodeA"}: struct{}{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "incoming pod's anti-affinity has more than one term - existing pod violates partial term - case 2",
|
||||
existingPods: []*v1.Pod{normalPodB},
|
||||
nodes: []*v1.Node{nodeA},
|
||||
pod: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "affi-antiaffi"},
|
||||
Spec: v1.PodSpec{
|
||||
Affinity: &v1.Affinity{
|
||||
PodAffinity: &v1.PodAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: newPodAffinityTerms("aaa", "bbb"),
|
||||
},
|
||||
PodAntiAffinity: &v1.PodAntiAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: newPodAffinityTerms("aaa", "bbb"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantAffinityPodsMaps: newTopologyPairsMaps(),
|
||||
wantAntiAffinityPodsMaps: &topologyPairsMaps{
|
||||
topologyPairToPods: map[topologyPair]podSet{
|
||||
{key: "hostname", value: "nodeA"}: {normalPodB: struct{}{}},
|
||||
},
|
||||
podToTopologyPairs: map[string]topologyPairSet{
|
||||
"normal_": {
|
||||
topologyPair{key: "hostname", value: "nodeA"}: struct{}{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
nodeInfoMap := schedulercache.CreateNodeNameToInfoMap(tt.existingPods, tt.nodes)
|
||||
|
||||
gotAffinityPodsMaps, gotAntiAffinityPodsMaps, err := getTPMapMatchingIncomingAffinityAntiAffinity(tt.pod, nodeInfoMap)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("getTPMapMatchingIncomingAffinityAntiAffinity() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(gotAffinityPodsMaps, tt.wantAffinityPodsMaps) {
|
||||
t.Errorf("getTPMapMatchingIncomingAffinityAntiAffinity() gotAffinityPodsMaps = %#v, want %#v", gotAffinityPodsMaps, tt.wantAffinityPodsMaps)
|
||||
}
|
||||
if !reflect.DeepEqual(gotAntiAffinityPodsMaps, tt.wantAntiAffinityPodsMaps) {
|
||||
t.Errorf("getTPMapMatchingIncomingAffinityAntiAffinity() gotAntiAffinityPodsMaps = %#v, want %#v", gotAntiAffinityPodsMaps, tt.wantAntiAffinityPodsMaps)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
372
vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/predicates.go
generated
vendored
@ -20,10 +20,10 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/klog"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
storagev1 "k8s.io/api/storage/v1"
|
||||
@ -36,13 +36,13 @@ import (
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
corelisters "k8s.io/client-go/listers/core/v1"
|
||||
storagelisters "k8s.io/client-go/listers/storage/v1"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
|
||||
v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm"
|
||||
priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
|
||||
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
|
||||
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
|
||||
schedutil "k8s.io/kubernetes/pkg/scheduler/util"
|
||||
"k8s.io/kubernetes/pkg/scheduler/volumebinder"
|
||||
@ -84,6 +84,8 @@ const (
|
||||
MaxGCEPDVolumeCountPred = "MaxGCEPDVolumeCount"
|
||||
// MaxAzureDiskVolumeCountPred defines the name of predicate MaxAzureDiskVolumeCount.
|
||||
MaxAzureDiskVolumeCountPred = "MaxAzureDiskVolumeCount"
|
||||
// MaxCSIVolumeCountPred defines the predicate that decides how many CSI volumes should be attached
|
||||
MaxCSIVolumeCountPred = "MaxCSIVolumeCountPred"
|
||||
// NoVolumeZoneConflictPred defines the name of predicate NoVolumeZoneConflict.
|
||||
NoVolumeZoneConflictPred = "NoVolumeZoneConflict"
|
||||
// CheckNodeMemoryPressurePred defines the name of predicate CheckNodeMemoryPressure.
|
||||
@ -93,10 +95,6 @@ const (
|
||||
// CheckNodePIDPressurePred defines the name of predicate CheckNodePIDPressure.
|
||||
CheckNodePIDPressurePred = "CheckNodePIDPressure"
|
||||
|
||||
// DefaultMaxEBSVolumes is the limit for volumes attached to an instance.
|
||||
// Amazon recommends no more than 40; the system root volume uses at least one.
|
||||
// See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/volume_limits.html#linux-specific-volume-limits
|
||||
DefaultMaxEBSVolumes = 39
|
||||
// DefaultMaxGCEPDVolumes defines the maximum number of PD Volumes for GCE
|
||||
// GCE instances can have up to 16 PD volumes attached.
|
||||
DefaultMaxGCEPDVolumes = 16
|
||||
@ -134,7 +132,7 @@ var (
|
||||
GeneralPred, HostNamePred, PodFitsHostPortsPred,
|
||||
MatchNodeSelectorPred, PodFitsResourcesPred, NoDiskConflictPred,
|
||||
PodToleratesNodeTaintsPred, PodToleratesNodeNoExecuteTaintsPred, CheckNodeLabelPresencePred,
|
||||
CheckServiceAffinityPred, MaxEBSVolumeCountPred, MaxGCEPDVolumeCountPred,
|
||||
CheckServiceAffinityPred, MaxEBSVolumeCountPred, MaxGCEPDVolumeCountPred, MaxCSIVolumeCountPred,
|
||||
MaxAzureDiskVolumeCountPred, CheckVolumeBindingPred, NoVolumeZoneConflictPred,
|
||||
CheckNodeMemoryPressurePred, CheckNodePIDPressurePred, CheckNodeDiskPressurePred, MatchInterPodAffinityPred}
|
||||
)
|
||||
@ -291,7 +289,7 @@ func NoDiskConflict(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *sch
|
||||
type MaxPDVolumeCountChecker struct {
|
||||
filter VolumeFilter
|
||||
volumeLimitKey v1.ResourceName
|
||||
maxVolumes int
|
||||
maxVolumeFunc func(node *v1.Node) int
|
||||
pvInfo PersistentVolumeInfo
|
||||
pvcInfo PersistentVolumeClaimInfo
|
||||
|
||||
@ -317,7 +315,6 @@ type VolumeFilter struct {
|
||||
func NewMaxPDVolumeCountPredicate(
|
||||
filterName string, pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo) algorithm.FitPredicate {
|
||||
var filter VolumeFilter
|
||||
var maxVolumes int
|
||||
var volumeLimitKey v1.ResourceName
|
||||
|
||||
switch filterName {
|
||||
@ -325,17 +322,14 @@ func NewMaxPDVolumeCountPredicate(
|
||||
case EBSVolumeFilterType:
|
||||
filter = EBSVolumeFilter
|
||||
volumeLimitKey = v1.ResourceName(volumeutil.EBSVolumeLimitKey)
|
||||
maxVolumes = getMaxVols(DefaultMaxEBSVolumes)
|
||||
case GCEPDVolumeFilterType:
|
||||
filter = GCEPDVolumeFilter
|
||||
volumeLimitKey = v1.ResourceName(volumeutil.GCEVolumeLimitKey)
|
||||
maxVolumes = getMaxVols(DefaultMaxGCEPDVolumes)
|
||||
case AzureDiskVolumeFilterType:
|
||||
filter = AzureDiskVolumeFilter
|
||||
volumeLimitKey = v1.ResourceName(volumeutil.AzureVolumeLimitKey)
|
||||
maxVolumes = getMaxVols(DefaultMaxAzureDiskVolumes)
|
||||
default:
|
||||
glog.Fatalf("Wrong filterName, Only Support %v %v %v ", EBSVolumeFilterType,
|
||||
klog.Fatalf("Wrong filterName, Only Support %v %v %v ", EBSVolumeFilterType,
|
||||
GCEPDVolumeFilterType, AzureDiskVolumeFilterType)
|
||||
return nil
|
||||
|
||||
@ -343,7 +337,7 @@ func NewMaxPDVolumeCountPredicate(
|
||||
c := &MaxPDVolumeCountChecker{
|
||||
filter: filter,
|
||||
volumeLimitKey: volumeLimitKey,
|
||||
maxVolumes: maxVolumes,
|
||||
maxVolumeFunc: getMaxVolumeFunc(filterName),
|
||||
pvInfo: pvInfo,
|
||||
pvcInfo: pvcInfo,
|
||||
randomVolumeIDPrefix: rand.String(32),
|
||||
@ -352,19 +346,52 @@ func NewMaxPDVolumeCountPredicate(
|
||||
return c.predicate
|
||||
}
|
||||
|
||||
// getMaxVols checks the max PD volumes environment variable, otherwise returning a default value
func getMaxVols(defaultVal int) int {
func getMaxVolumeFunc(filterName string) func(node *v1.Node) int {
return func(node *v1.Node) int {
maxVolumesFromEnv := getMaxVolLimitFromEnv()
if maxVolumesFromEnv > 0 {
return maxVolumesFromEnv
}

var nodeInstanceType string
for k, v := range node.ObjectMeta.Labels {
if k == kubeletapis.LabelInstanceType {
nodeInstanceType = v
}
}
switch filterName {
case EBSVolumeFilterType:
return getMaxEBSVolume(nodeInstanceType)
case GCEPDVolumeFilterType:
return DefaultMaxGCEPDVolumes
case AzureDiskVolumeFilterType:
return DefaultMaxAzureDiskVolumes
default:
return -1
}
}
}

func getMaxEBSVolume(nodeInstanceType string) int {
if ok, _ := regexp.MatchString(volumeutil.EBSNitroLimitRegex, nodeInstanceType); ok {
return volumeutil.DefaultMaxEBSNitroVolumeLimit
}
return volumeutil.DefaultMaxEBSVolumes
}

// getMaxVolLimitFromEnv checks the max PD volumes environment variable, otherwise returning a default value
func getMaxVolLimitFromEnv() int {
if rawMaxVols := os.Getenv(KubeMaxPDVols); rawMaxVols != "" {
if parsedMaxVols, err := strconv.Atoi(rawMaxVols); err != nil {
glog.Errorf("Unable to parse maximum PD volumes value, using default of %v: %v", defaultVal, err)
klog.Errorf("Unable to parse maximum PD volumes value, using default: %v", err)
} else if parsedMaxVols <= 0 {
glog.Errorf("Maximum PD volumes must be a positive value, using default of %v", defaultVal)
klog.Errorf("Maximum PD volumes must be a positive value, using default ")
} else {
return parsedMaxVols
}
}

return defaultVal
return -1
}
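In the helpers above the effective attach limit is resolved as: a positive KUBE_MAX_PD_VOLS environment override (read via the KubeMaxPDVols constant) wins, otherwise the per-filter default applies. A hedged sketch of just that decision flow, with placeholder defaults rather than the vendored constants:

package main

import (
	"fmt"
	"os"
	"strconv"
)

// maxVolumes is illustrative only: it mirrors the "env var overrides the
// per-cloud default" flow used by the volume-count predicate.
func maxVolumes(defaultPerCloud int) int {
	if raw := os.Getenv("KUBE_MAX_PD_VOLS"); raw != "" {
		if n, err := strconv.Atoi(raw); err == nil && n > 0 {
			return n
		}
		// On parse errors or non-positive values the default wins, as in the predicate.
	}
	return defaultPerCloud
}

func main() {
	os.Setenv("KUBE_MAX_PD_VOLS", "25")
	fmt.Println(maxVolumes(39)) // 25: the environment override wins
	os.Unsetenv("KUBE_MAX_PD_VOLS")
	fmt.Println(maxVolumes(39)) // 39: fall back to the per-cloud default
}
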
func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []v1.Volume, namespace string, filteredVolumes map[string]bool) error {
|
||||
@ -386,7 +413,7 @@ func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []v1.Volume, namespace s
|
||||
pvc, err := c.pvcInfo.GetPersistentVolumeClaimInfo(namespace, pvcName)
|
||||
if err != nil || pvc == nil {
|
||||
// if the PVC is not found, log the error and count the PV towards the PV limit
|
||||
glog.V(4).Infof("Unable to look up PVC info for %s/%s, assuming PVC matches predicate when counting limits: %v", namespace, pvcName, err)
|
||||
klog.V(4).Infof("Unable to look up PVC info for %s/%s, assuming PVC matches predicate when counting limits: %v", namespace, pvcName, err)
|
||||
filteredVolumes[pvID] = true
|
||||
continue
|
||||
}
|
||||
@ -397,7 +424,7 @@ func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []v1.Volume, namespace s
|
||||
// it was forcefully unbound by admin. The pod can still use the
|
||||
// original PV where it was bound to -> log the error and count
|
||||
// the PV towards the PV limit
|
||||
glog.V(4).Infof("PVC %s/%s is not bound, assuming PVC matches predicate when counting limits", namespace, pvcName)
|
||||
klog.V(4).Infof("PVC %s/%s is not bound, assuming PVC matches predicate when counting limits", namespace, pvcName)
|
||||
filteredVolumes[pvID] = true
|
||||
continue
|
||||
}
|
||||
@ -406,7 +433,7 @@ func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []v1.Volume, namespace s
|
||||
if err != nil || pv == nil {
|
||||
// if the PV is not found, log the error
|
||||
// and count the PV towards the PV limit
|
||||
glog.V(4).Infof("Unable to look up PV info for %s/%s/%s, assuming PV matches predicate when counting limits: %v", namespace, pvcName, pvName, err)
|
||||
klog.V(4).Infof("Unable to look up PV info for %s/%s/%s, assuming PV matches predicate when counting limits: %v", namespace, pvcName, pvName, err)
|
||||
filteredVolumes[pvID] = true
|
||||
continue
|
||||
}
|
||||
@ -454,7 +481,7 @@ func (c *MaxPDVolumeCountChecker) predicate(pod *v1.Pod, meta algorithm.Predicat
|
||||
}
|
||||
|
||||
numNewVolumes := len(newVolumes)
|
||||
maxAttachLimit := c.maxVolumes
|
||||
maxAttachLimit := c.maxVolumeFunc(nodeInfo.Node())
|
||||
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) {
|
||||
volumeLimits := nodeInfo.VolumeLimits()
|
||||
@ -638,12 +665,12 @@ func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMetad
|
||||
nodeV, _ := nodeConstraints[k]
|
||||
volumeVSet, err := volumeutil.LabelZonesToSet(v)
|
||||
if err != nil {
|
||||
glog.Warningf("Failed to parse label for %q: %q. Ignoring the label. err=%v. ", k, v, err)
|
||||
klog.Warningf("Failed to parse label for %q: %q. Ignoring the label. err=%v. ", k, v, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if !volumeVSet.Has(nodeV) {
|
||||
glog.V(10).Infof("Won't schedule pod %q onto node %q due to volume %q (mismatch on %q)", pod.Name, node.Name, pvName, k)
|
||||
klog.V(10).Infof("Won't schedule pod %q onto node %q due to volume %q (mismatch on %q)", pod.Name, node.Name, pvName, k)
|
||||
return false, []algorithm.PredicateFailureReason{ErrVolumeZoneConflict}, nil
|
||||
}
|
||||
}
|
||||
@ -754,11 +781,11 @@ func PodFitsResources(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *s
|
||||
}
|
||||
}
|
||||
|
||||
if glog.V(10) {
|
||||
if klog.V(10) {
|
||||
if len(predicateFails) == 0 {
|
||||
// We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is
|
||||
// We explicitly don't do klog.V(10).Infof() to avoid computing all the parameters if this is
|
||||
// not logged. There is visible performance gain from it.
|
||||
glog.Infof("Schedule Pod %+v on Node %+v is allowed, Node is running only %v out of %v Pods.",
|
||||
klog.Infof("Schedule Pod %+v on Node %+v is allowed, Node is running only %v out of %v Pods.",
|
||||
podName(pod), node.Name, len(nodeInfo.Pods()), allowedPodNumber)
|
||||
}
|
||||
}
|
||||
@ -807,14 +834,14 @@ func podMatchesNodeSelectorAndAffinityTerms(pod *v1.Pod, node *v1.Node) bool {
|
||||
// TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution.
|
||||
// if nodeAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil {
|
||||
// nodeSelectorTerms := nodeAffinity.RequiredDuringSchedulingRequiredDuringExecution.NodeSelectorTerms
|
||||
// glog.V(10).Infof("Match for RequiredDuringSchedulingRequiredDuringExecution node selector terms %+v", nodeSelectorTerms)
|
||||
// klog.V(10).Infof("Match for RequiredDuringSchedulingRequiredDuringExecution node selector terms %+v", nodeSelectorTerms)
|
||||
// nodeAffinityMatches = nodeMatchesNodeSelectorTerms(node, nodeSelectorTerms)
|
||||
// }
|
||||
|
||||
// Match node selector for requiredDuringSchedulingIgnoredDuringExecution.
|
||||
if nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
|
||||
nodeSelectorTerms := nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
|
||||
glog.V(10).Infof("Match for RequiredDuringSchedulingIgnoredDuringExecution node selector terms %+v", nodeSelectorTerms)
|
||||
klog.V(10).Infof("Match for RequiredDuringSchedulingIgnoredDuringExecution node selector terms %+v", nodeSelectorTerms)
|
||||
nodeAffinityMatches = nodeAffinityMatches && nodeMatchesNodeSelectorTerms(node, nodeSelectorTerms)
|
||||
}
|
||||
|
||||
@ -906,7 +933,7 @@ type ServiceAffinity struct {
|
||||
// only should be referenced by NewServiceAffinityPredicate.
|
||||
func (s *ServiceAffinity) serviceAffinityMetadataProducer(pm *predicateMetadata) {
|
||||
if pm.pod == nil {
|
||||
glog.Errorf("Cannot precompute service affinity, a pod is required to calculate service affinity.")
|
||||
klog.Errorf("Cannot precompute service affinity, a pod is required to calculate service affinity.")
|
||||
return
|
||||
}
|
||||
pm.serviceAffinityInUse = true
|
||||
@ -918,7 +945,7 @@ func (s *ServiceAffinity) serviceAffinityMetadataProducer(pm *predicateMetadata)
|
||||
|
||||
// In the future maybe we will return them as part of the function.
|
||||
if errSvc != nil || errList != nil {
|
||||
glog.Errorf("Some Error were found while precomputing svc affinity: \nservices:%v , \npods:%v", errSvc, errList)
|
||||
klog.Errorf("Some Error were found while precomputing svc affinity: \nservices:%v , \npods:%v", errSvc, errList)
|
||||
}
|
||||
// consider only the pods that belong to the same namespace
|
||||
pm.serviceAffinityMatchingPodList = FilterPodsByNamespace(allMatches, pm.pod.Namespace)
|
||||
@ -1145,10 +1172,10 @@ func (c *PodAffinityChecker) InterPodAffinityMatches(pod *v1.Pod, meta algorithm
|
||||
return false, failedPredicates, error
|
||||
}
|
||||
|
||||
if glog.V(10) {
|
||||
// We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is
|
||||
if klog.V(10) {
|
||||
// We explicitly don't do klog.V(10).Infof() to avoid computing all the parameters if this is
|
||||
// not logged. There is visible performance gain from it.
|
||||
glog.Infof("Schedule Pod %+v on Node %+v is allowed, pod (anti)affinity constraints satisfied",
|
||||
klog.Infof("Schedule Pod %+v on Node %+v is allowed, pod (anti)affinity constraints satisfied",
|
||||
podName(pod), node.Name)
|
||||
}
|
||||
return true, nil, nil
|
||||
@ -1159,7 +1186,7 @@ func (c *PodAffinityChecker) InterPodAffinityMatches(pod *v1.Pod, meta algorithm
|
||||
// targetPod matches all the terms and their topologies, 2) whether targetPod
|
||||
// matches all the terms label selector and namespaces (AKA term properties),
|
||||
// 3) any error.
|
||||
func (c *PodAffinityChecker) podMatchesPodAffinityTerms(pod *v1.Pod, targetPod *v1.Pod, nodeInfo *schedulercache.NodeInfo, terms []v1.PodAffinityTerm) (bool, bool, error) {
|
||||
func (c *PodAffinityChecker) podMatchesPodAffinityTerms(pod, targetPod *v1.Pod, nodeInfo *schedulercache.NodeInfo, terms []v1.PodAffinityTerm) (bool, bool, error) {
|
||||
if len(terms) == 0 {
|
||||
return false, false, fmt.Errorf("terms array is empty")
|
||||
}
|
||||
@ -1167,7 +1194,7 @@ func (c *PodAffinityChecker) podMatchesPodAffinityTerms(pod *v1.Pod, targetPod *
|
||||
if err != nil {
|
||||
return false, false, err
|
||||
}
|
||||
if !podMatchesAffinityTermProperties(targetPod, props) {
|
||||
if !podMatchesAllAffinityTermProperties(targetPod, props) {
|
||||
return false, false, nil
|
||||
}
|
||||
// Namespace and selector of the terms have matched. Now we check topology of the terms.
|
||||
@ -1214,186 +1241,129 @@ func GetPodAntiAffinityTerms(podAntiAffinity *v1.PodAntiAffinity) (terms []v1.Po
return terms
}

func getMatchingAntiAffinityTerms(pod *v1.Pod, nodeInfoMap map[string]*schedulercache.NodeInfo) (map[string][]matchingPodAntiAffinityTerm, error) {
allNodeNames := make([]string, 0, len(nodeInfoMap))
for name := range nodeInfoMap {
allNodeNames = append(allNodeNames, name)
// getMatchingAntiAffinityTopologyPairs calculates the following for "existingPod" on given node:
// (1) Whether it has PodAntiAffinity
// (2) Whether ANY AffinityTerm matches the incoming pod
func getMatchingAntiAffinityTopologyPairsOfPod(newPod *v1.Pod, existingPod *v1.Pod, node *v1.Node) (*topologyPairsMaps, error) {
affinity := existingPod.Spec.Affinity
if affinity == nil || affinity.PodAntiAffinity == nil {
return nil, nil
}

var lock sync.Mutex
var firstError error
result := make(map[string][]matchingPodAntiAffinityTerm)
appendResult := func(toAppend map[string][]matchingPodAntiAffinityTerm) {
lock.Lock()
defer lock.Unlock()
for uid, terms := range toAppend {
result[uid] = append(result[uid], terms...)
}
}
catchError := func(err error) {
lock.Lock()
defer lock.Unlock()
if firstError == nil {
firstError = err
topologyMaps := newTopologyPairsMaps()
for _, term := range GetPodAntiAffinityTerms(affinity.PodAntiAffinity) {
namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(existingPod, &term)
selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector)
if err != nil {
return nil, err
}
if priorityutil.PodMatchesTermsNamespaceAndSelector(newPod, namespaces, selector) {
if topologyValue, ok := node.Labels[term.TopologyKey]; ok {
pair := topologyPair{key: term.TopologyKey, value: topologyValue}
topologyMaps.addTopologyPair(pair, existingPod)
}
}
}
return topologyMaps, nil
}

processNode := func(i int) {
nodeInfo := nodeInfoMap[allNodeNames[i]]
node := nodeInfo.Node()
if node == nil {
catchError(fmt.Errorf("node not found"))
return
}
nodeResult := make(map[string][]matchingPodAntiAffinityTerm)
for _, existingPod := range nodeInfo.PodsWithAffinity() {
affinity := existingPod.Spec.Affinity
if affinity == nil {
func (c *PodAffinityChecker) getMatchingAntiAffinityTopologyPairsOfPods(pod *v1.Pod, existingPods []*v1.Pod) (*topologyPairsMaps, error) {
topologyMaps := newTopologyPairsMaps()

for _, existingPod := range existingPods {
existingPodNode, err := c.info.GetNodeInfo(existingPod.Spec.NodeName)
if err != nil {
if apierrors.IsNotFound(err) {
klog.Errorf("Node not found, %v", existingPod.Spec.NodeName)
continue
}
for _, term := range GetPodAntiAffinityTerms(affinity.PodAntiAffinity) {
namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(existingPod, &term)
selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector)
if err != nil {
catchError(err)
return
}
if priorityutil.PodMatchesTermsNamespaceAndSelector(pod, namespaces, selector) {
existingPodFullName := schedutil.GetPodFullName(existingPod)
nodeResult[existingPodFullName] = append(
nodeResult[existingPodFullName],
matchingPodAntiAffinityTerm{term: &term, node: node})
}
}
return nil, err
}
if len(nodeResult) > 0 {
appendResult(nodeResult)
existingPodTopologyMaps, err := getMatchingAntiAffinityTopologyPairsOfPod(pod, existingPod, existingPodNode)
if err != nil {
return nil, err
}
topologyMaps.appendMaps(existingPodTopologyMaps)
}
workqueue.Parallelize(16, len(allNodeNames), processNode)
return result, firstError
}

func getMatchingAntiAffinityTermsOfExistingPod(newPod *v1.Pod, existingPod *v1.Pod, node *v1.Node) ([]matchingPodAntiAffinityTerm, error) {
var result []matchingPodAntiAffinityTerm
affinity := existingPod.Spec.Affinity
if affinity != nil && affinity.PodAntiAffinity != nil {
for _, term := range GetPodAntiAffinityTerms(affinity.PodAntiAffinity) {
namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(existingPod, &term)
selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector)
if err != nil {
return nil, err
}
if priorityutil.PodMatchesTermsNamespaceAndSelector(newPod, namespaces, selector) {
result = append(result, matchingPodAntiAffinityTerm{term: &term, node: node})
}
}
}
return result, nil
}

func (c *PodAffinityChecker) getMatchingAntiAffinityTerms(pod *v1.Pod, allPods []*v1.Pod) (map[string][]matchingPodAntiAffinityTerm, error) {
result := make(map[string][]matchingPodAntiAffinityTerm)
for _, existingPod := range allPods {
affinity := existingPod.Spec.Affinity
if affinity != nil && affinity.PodAntiAffinity != nil {
existingPodNode, err := c.info.GetNodeInfo(existingPod.Spec.NodeName)
if err != nil {
if apierrors.IsNotFound(err) {
glog.Errorf("Node not found, %v", existingPod.Spec.NodeName)
continue
}
return nil, err
}
existingPodMatchingTerms, err := getMatchingAntiAffinityTermsOfExistingPod(pod, existingPod, existingPodNode)
if err != nil {
return nil, err
}
if len(existingPodMatchingTerms) > 0 {
existingPodFullName := schedutil.GetPodFullName(existingPod)
result[existingPodFullName] = existingPodMatchingTerms
}
}
}
return result, nil
return topologyMaps, nil
}

// Checks if scheduling the pod onto this node would break any anti-affinity
// rules indicated by the existing pods.
// terms indicated by the existing pods.
func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (algorithm.PredicateFailureReason, error) {
node := nodeInfo.Node()
if node == nil {
return ErrExistingPodsAntiAffinityRulesNotMatch, fmt.Errorf("Node is nil")
}
var matchingTerms map[string][]matchingPodAntiAffinityTerm
var topologyMaps *topologyPairsMaps
if predicateMeta, ok := meta.(*predicateMetadata); ok {
matchingTerms = predicateMeta.matchingAntiAffinityTerms
topologyMaps = predicateMeta.topologyPairsAntiAffinityPodsMap
} else {
// Filter out pods whose nodeName is equal to nodeInfo.node.Name, but are not
// present in nodeInfo. Pods on other nodes pass the filter.
filteredPods, err := c.podLister.FilteredList(nodeInfo.Filter, labels.Everything())
if err != nil {
errMessage := fmt.Sprintf("Failed to get all pods, %+v", err)
glog.Error(errMessage)
klog.Error(errMessage)
return ErrExistingPodsAntiAffinityRulesNotMatch, errors.New(errMessage)
}
if matchingTerms, err = c.getMatchingAntiAffinityTerms(pod, filteredPods); err != nil {
if topologyMaps, err = c.getMatchingAntiAffinityTopologyPairsOfPods(pod, filteredPods); err != nil {
errMessage := fmt.Sprintf("Failed to get all terms that pod %+v matches, err: %+v", podName(pod), err)
glog.Error(errMessage)
klog.Error(errMessage)
return ErrExistingPodsAntiAffinityRulesNotMatch, errors.New(errMessage)
}
}
for _, terms := range matchingTerms {
for i := range terms {
term := &terms[i]
if len(term.term.TopologyKey) == 0 {
errMessage := fmt.Sprintf("Empty topologyKey is not allowed except for PreferredDuringScheduling pod anti-affinity")
glog.Error(errMessage)
return ErrExistingPodsAntiAffinityRulesNotMatch, errors.New(errMessage)
}
if priorityutil.NodesHaveSameTopologyKey(node, term.node, term.term.TopologyKey) {
glog.V(10).Infof("Cannot schedule pod %+v onto node %v,because of PodAntiAffinityTerm %v",
podName(pod), node.Name, term.term)
return ErrExistingPodsAntiAffinityRulesNotMatch, nil
}

// Iterate over topology pairs to get any of the pods being affected by
// the scheduled pod anti-affinity terms
for topologyKey, topologyValue := range node.Labels {
if topologyMaps.topologyPairToPods[topologyPair{key: topologyKey, value: topologyValue}] != nil {
klog.V(10).Infof("Cannot schedule pod %+v onto node %v", podName(pod), node.Name)
return ErrExistingPodsAntiAffinityRulesNotMatch, nil
}
}
if glog.V(10) {
// We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is
if klog.V(10) {
// We explicitly don't do klog.V(10).Infof() to avoid computing all the parameters if this is
// not logged. There is visible performance gain from it.
glog.Infof("Schedule Pod %+v on Node %+v is allowed, existing pods anti-affinity rules satisfied.",
klog.Infof("Schedule Pod %+v on Node %+v is allowed, existing pods anti-affinity terms satisfied.",
podName(pod), node.Name)
}
return nil, nil
}

// anyPodsMatchingTopologyTerms checks whether any of the nodes given via
// "targetPods" matches topology of all the "terms" for the give "pod" and "nodeInfo".
func (c *PodAffinityChecker) anyPodsMatchingTopologyTerms(pod *v1.Pod, targetPods map[string][]*v1.Pod, nodeInfo *schedulercache.NodeInfo, terms []v1.PodAffinityTerm) (bool, error) {
for nodeName, targetPods := range targetPods {
targetPodNodeInfo, err := c.info.GetNodeInfo(nodeName)
if err != nil {
return false, err
}
if len(targetPods) > 0 {
allTermsMatched := true
for _, term := range terms {
if !priorityutil.NodesHaveSameTopologyKey(nodeInfo.Node(), targetPodNodeInfo, term.TopologyKey) {
allTermsMatched = false
break
}
// nodeMatchesAllTopologyTerms checks whether "nodeInfo" matches
// topology of all the "terms" for the given "pod".
func (c *PodAffinityChecker) nodeMatchesAllTopologyTerms(pod *v1.Pod, topologyPairs *topologyPairsMaps, nodeInfo *schedulercache.NodeInfo, terms []v1.PodAffinityTerm) bool {
node := nodeInfo.Node()
for _, term := range terms {
if topologyValue, ok := node.Labels[term.TopologyKey]; ok {
pair := topologyPair{key: term.TopologyKey, value: topologyValue}
if _, ok := topologyPairs.topologyPairToPods[pair]; !ok {
return false
}
if allTermsMatched {
// We have 1 or more pods on the target node that have already matched namespace and selector
// and all of the terms topologies matched the target node. So, there is at least 1 matching pod on the node.
return true, nil
} else {
return false
}
}
return true
}

// nodeMatchesAnyTopologyTerm checks whether "nodeInfo" matches
// topology of any "term" for the given "pod".
func (c *PodAffinityChecker) nodeMatchesAnyTopologyTerm(pod *v1.Pod, topologyPairs *topologyPairsMaps, nodeInfo *schedulercache.NodeInfo, terms []v1.PodAffinityTerm) bool {
node := nodeInfo.Node()
for _, term := range terms {
if topologyValue, ok := node.Labels[term.TopologyKey]; ok {
pair := topologyPair{key: term.TopologyKey, value: topologyValue}
if _, ok := topologyPairs.topologyPairToPods[pair]; ok {
return true
}
}
}
return false, nil
return false
}

// Checks if scheduling the pod onto this node would break any rules of this pod.
|
||||
// Checks if scheduling the pod onto this node would break any term of this pod.
|
||||
func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod,
|
||||
meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo,
|
||||
affinity *v1.Affinity) (algorithm.PredicateFailureReason, error) {
|
||||
@ -1403,21 +1373,16 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod,
|
||||
}
|
||||
if predicateMeta, ok := meta.(*predicateMetadata); ok {
|
||||
// Check all affinity terms.
|
||||
matchingPods := predicateMeta.nodeNameToMatchingAffinityPods
|
||||
topologyPairsPotentialAffinityPods := predicateMeta.topologyPairsPotentialAffinityPods
|
||||
if affinityTerms := GetPodAffinityTerms(affinity.PodAffinity); len(affinityTerms) > 0 {
|
||||
matchExists, err := c.anyPodsMatchingTopologyTerms(pod, matchingPods, nodeInfo, affinityTerms)
|
||||
if err != nil {
|
||||
errMessage := fmt.Sprintf("Cannot schedule pod %+v onto node %v, because of PodAffinity, err: %v", podName(pod), node.Name, err)
|
||||
glog.Errorf(errMessage)
|
||||
return ErrPodAffinityRulesNotMatch, errors.New(errMessage)
|
||||
}
|
||||
matchExists := c.nodeMatchesAllTopologyTerms(pod, topologyPairsPotentialAffinityPods, nodeInfo, affinityTerms)
|
||||
if !matchExists {
|
||||
// This pod may the first pod in a series that have affinity to themselves. In order
|
||||
// to not leave such pods in pending state forever, we check that if no other pod
|
||||
// in the cluster matches the namespace and selector of this pod and the pod matches
|
||||
// its own terms, then we allow the pod to pass the affinity check.
|
||||
if !(len(matchingPods) == 0 && targetPodMatchesAffinityOfPod(pod, pod)) {
|
||||
glog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAffinity",
|
||||
if !(len(topologyPairsPotentialAffinityPods.topologyPairToPods) == 0 && targetPodMatchesAffinityOfPod(pod, pod)) {
|
||||
klog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAffinity",
|
||||
podName(pod), node.Name)
|
||||
return ErrPodAffinityRulesNotMatch, nil
|
||||
}
|
||||
@ -1425,16 +1390,16 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod,
|
||||
}
|
||||
|
||||
// Check all anti-affinity terms.
|
||||
matchingPods = predicateMeta.nodeNameToMatchingAntiAffinityPods
|
||||
topologyPairsPotentialAntiAffinityPods := predicateMeta.topologyPairsPotentialAntiAffinityPods
|
||||
if antiAffinityTerms := GetPodAntiAffinityTerms(affinity.PodAntiAffinity); len(antiAffinityTerms) > 0 {
|
||||
matchExists, err := c.anyPodsMatchingTopologyTerms(pod, matchingPods, nodeInfo, antiAffinityTerms)
|
||||
if err != nil || matchExists {
|
||||
glog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAntiAffinity, err: %v",
|
||||
podName(pod), node.Name, err)
|
||||
matchExists := c.nodeMatchesAnyTopologyTerm(pod, topologyPairsPotentialAntiAffinityPods, nodeInfo, antiAffinityTerms)
|
||||
if matchExists {
|
||||
klog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAntiAffinity",
|
||||
podName(pod), node.Name)
|
||||
return ErrPodAntiAffinityRulesNotMatch, nil
|
||||
}
|
||||
}
|
||||
} else { // We don't have precomputed metadata. We have to follow a slow path to check affinity rules.
|
||||
} else { // We don't have precomputed metadata. We have to follow a slow path to check affinity terms.
|
||||
filteredPods, err := c.podLister.FilteredList(nodeInfo.Filter, labels.Everything())
|
||||
if err != nil {
|
||||
return ErrPodAffinityRulesNotMatch, err
|
||||
@ -1449,7 +1414,7 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod,
|
||||
affTermsMatch, termsSelectorMatch, err := c.podMatchesPodAffinityTerms(pod, targetPod, nodeInfo, affinityTerms)
|
||||
if err != nil {
|
||||
errMessage := fmt.Sprintf("Cannot schedule pod %+v onto node %v, because of PodAffinity, err: %v", podName(pod), node.Name, err)
|
||||
glog.Error(errMessage)
|
||||
klog.Error(errMessage)
|
||||
return ErrPodAffinityRulesNotMatch, errors.New(errMessage)
|
||||
}
|
||||
if termsSelectorMatch {
|
||||
@ -1464,7 +1429,7 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod,
|
||||
if len(antiAffinityTerms) > 0 {
|
||||
antiAffTermsMatch, _, err := c.podMatchesPodAffinityTerms(pod, targetPod, nodeInfo, antiAffinityTerms)
|
||||
if err != nil || antiAffTermsMatch {
|
||||
glog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAntiAffinityTerm, err: %v",
|
||||
klog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAntiAffinityTerm, err: %v",
|
||||
podName(pod), node.Name, err)
|
||||
return ErrPodAntiAffinityRulesNotMatch, nil
|
||||
}
|
||||
@ -1472,29 +1437,29 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod,
|
||||
}
|
||||
|
||||
if !matchFound && len(affinityTerms) > 0 {
|
||||
// We have not been able to find any matches for the pod's affinity rules.
|
||||
// We have not been able to find any matches for the pod's affinity terms.
|
||||
// This pod may be the first pod in a series that have affinity to themselves. In order
|
||||
// to not leave such pods in pending state forever, we check that if no other pod
|
||||
// in the cluster matches the namespace and selector of this pod and the pod matches
|
||||
// its own terms, then we allow the pod to pass the affinity check.
|
||||
if termsSelectorMatchFound {
|
||||
glog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAffinity",
|
||||
klog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAffinity",
|
||||
podName(pod), node.Name)
|
||||
return ErrPodAffinityRulesNotMatch, nil
|
||||
}
|
||||
// Check if pod matches its own affinity properties (namespace and label selector).
|
||||
if !targetPodMatchesAffinityOfPod(pod, pod) {
|
||||
glog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAffinity",
|
||||
klog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAffinity",
|
||||
podName(pod), node.Name)
|
||||
return ErrPodAffinityRulesNotMatch, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if glog.V(10) {
|
||||
// We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is
|
||||
if klog.V(10) {
|
||||
// We explicitly don't do klog.V(10).Infof() to avoid computing all the parameters if this is
|
||||
// not logged. There is visible performance gain from it.
|
||||
glog.Infof("Schedule Pod %+v on Node %+v is allowed, pod affinity/anti-affinity constraints satisfied.",
|
||||
klog.Infof("Schedule Pod %+v on Node %+v is allowed, pod affinity/anti-affinity constraints satisfied.",
|
||||
podName(pod), node.Name)
|
||||
}
|
||||
return nil, nil
|
||||
@ -1506,7 +1471,14 @@ func CheckNodeUnschedulablePredicate(pod *v1.Pod, meta algorithm.PredicateMetada
return false, []algorithm.PredicateFailureReason{ErrNodeUnknownCondition}, nil
}

if nodeInfo.Node().Spec.Unschedulable {
// If pod tolerate unschedulable taint, it's also tolerate `node.Spec.Unschedulable`.
podToleratesUnschedulable := v1helper.TolerationsTolerateTaint(pod.Spec.Tolerations, &v1.Taint{
Key: schedulerapi.TaintNodeUnschedulable,
Effect: v1.TaintEffectNoSchedule,
})

// TODO (k82cn): deprecates `node.Spec.Unschedulable` in 1.13.
if nodeInfo.Node().Spec.Unschedulable && !podToleratesUnschedulable {
return false, []algorithm.PredicateFailureReason{ErrNodeUnschedulable}, nil
}

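A minimal sketch (not part of the diff) of the new rule seen from the pod's side: a toleration for the unschedulable NoSchedule taint now lets a pod pass CheckNodeUnschedulablePredicate on a cordoned node. The package aliases mirror the ones imported by predicates.go; the main function is illustrative only.

package sketch

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
    schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
)

func main() {
    // A pod toleration matching the unschedulable taint by key and effect.
    tolerations := []v1.Toleration{{
        Key:      schedulerapi.TaintNodeUnschedulable,
        Operator: v1.TolerationOpExists,
        Effect:   v1.TaintEffectNoSchedule,
    }}
    // Same helper call as in the hunk above.
    ok := v1helper.TolerationsTolerateTaint(tolerations, &v1.Taint{
        Key:    schedulerapi.TaintNodeUnschedulable,
        Effect: v1.TaintEffectNoSchedule,
    })
    fmt.Println("tolerates cordoned node:", ok) // true
}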
@ -1662,12 +1634,12 @@ func (c *VolumeBindingChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMe

failReasons := []algorithm.PredicateFailureReason{}
if !boundSatisfied {
glog.V(5).Infof("Bound PVs not satisfied for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
klog.V(5).Infof("Bound PVs not satisfied for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
failReasons = append(failReasons, ErrVolumeNodeConflict)
}

if !unboundSatisfied {
glog.V(5).Infof("Couldn't find matching PVs for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
klog.V(5).Infof("Couldn't find matching PVs for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
failReasons = append(failReasons, ErrVolumeBindConflict)
}

@ -1676,6 +1648,6 @@ func (c *VolumeBindingChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMe
}

// All volumes bound or matching PVs found for all unbound PVCs
glog.V(5).Infof("All PVCs found matches for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
klog.V(5).Infof("All PVCs found matches for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
return true, nil, nil
}
2193 vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/predicates_test.go generated vendored (diff suppressed because it is too large)
4 vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/utils.go generated vendored
@ -19,7 +19,7 @@ package predicates
import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
schedutil "k8s.io/kubernetes/pkg/scheduler/util"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
)

// FindLabelsInSet gets as many key/value pairs as possible out of a label set.
@ -68,7 +68,7 @@ func CreateSelectorFromLabels(aL map[string]string) labels.Selector {

// portsConflict check whether existingPorts and wantPorts conflict with each other
// return true if we have a conflict
func portsConflict(existingPorts schedutil.HostPortInfo, wantPorts []*v1.ContainerPort) bool {
func portsConflict(existingPorts schedulercache.HostPortInfo, wantPorts []*v1.ContainerPort) bool {
for _, cp := range wantPorts {
if existingPorts.CheckConflict(cp.HostIP, string(cp.Protocol), cp.HostPort) {
return true
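portsConflict above reduces to asking, for each wanted container port, whether the node already has something claimed on the same (hostIP, protocol, hostPort) triple. A self-contained sketch of that check with hypothetical types (a simplified stand-in for the real schedulercache.HostPortInfo, which additionally handles 0.0.0.0 wildcard IPs):

package sketch

import "fmt"

type portKey struct {
    ip       string
    protocol string
    port     int32
}

// hostPorts is a stand-in for the set of host ports already claimed on a node.
type hostPorts map[portKey]bool

func (h hostPorts) checkConflict(ip, protocol string, port int32) bool {
    return h[portKey{ip, protocol, port}]
}

func main() {
    existing := hostPorts{{ip: "0.0.0.0", protocol: "TCP", port: 8080}: true}
    fmt.Println(existing.checkConflict("0.0.0.0", "TCP", 8080)) // conflict: true
    fmt.Println(existing.checkConflict("0.0.0.0", "TCP", 9090)) // conflict: false
}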
29 vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/BUILD generated vendored
@ -37,14 +37,14 @@ go_library(
|
||||
"//pkg/scheduler/cache:go_default_library",
|
||||
"//pkg/util/node:go_default_library",
|
||||
"//pkg/util/parsers:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
|
||||
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
|
||||
"//vendor/k8s.io/klog:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@ -55,6 +55,7 @@ go_test(
|
||||
"image_locality_test.go",
|
||||
"interpod_affinity_test.go",
|
||||
"least_requested_test.go",
|
||||
"main_test.go",
|
||||
"metadata_test.go",
|
||||
"most_requested_test.go",
|
||||
"node_affinity_test.go",
|
||||
@ -74,13 +75,13 @@ go_test(
|
||||
"//pkg/scheduler/cache:go_default_library",
|
||||
"//pkg/scheduler/testing:go_default_library",
|
||||
"//pkg/util/parsers:go_default_library",
|
||||
"//staging/src/k8s.io/api/apps/v1:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
"//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/assert:go_default_library",
|
||||
"//vendor/k8s.io/api/apps/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
|
@ -32,7 +32,7 @@ var (
// BalancedResourceAllocationMap should **NOT** be used alone, and **MUST** be used together
// with LeastRequestedPriority. It calculates the difference between the cpu and memory fraction
// of capacity, and prioritizes the host based on how close the two metrics are to each other.
// Detail: score = 10 - abs(cpuFraction-memoryFraction)*10. The algorithm is partly inspired by:
// Detail: score = 10 - variance(cpuFraction,memoryFraction,volumeFraction)*10. The algorithm is partly inspired by:
// "Wei Huang et al. An Energy Efficient Virtual Machine Placement Algorithm with Balanced
// Resource Utilization"
BalancedResourceAllocationMap = balancedResourcePriority.PriorityMap
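The before/after comment lines above capture the formula change: the old score was 10 - abs(cpuFraction-memoryFraction)*10, while the new one folds the volume fraction in through a variance-style spread measure. A small standalone computation of the old two-resource form, for intuition (the balancedScore helper is hypothetical, not taken from the diff):

package sketch

import (
    "fmt"
    "math"
)

// balancedScore mirrors the two-resource formula quoted in the old comment:
// score = 10 - abs(cpuFraction-memoryFraction)*10.
func balancedScore(cpuRequested, cpuCapacity, memRequested, memCapacity float64) float64 {
    cpuFraction := cpuRequested / cpuCapacity
    memFraction := memRequested / memCapacity
    return 10 - math.Abs(cpuFraction-memFraction)*10
}

func main() {
    // 2 of 4 cores and 6 of 8 GiB requested: fractions 0.5 and 0.75,
    // so the node scores 10 - 0.25*10 = 7.5 before rounding.
    fmt.Println(balancedScore(2, 4, 6, 8))
}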
@ -17,7 +17,6 @@ limitations under the License.
|
||||
package priorities
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
@ -25,6 +24,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
|
||||
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
|
||||
@ -44,7 +44,7 @@ func getExistingVolumeCountForNode(pods []*v1.Pod, maxVolumes int) int {
|
||||
|
||||
func TestBalancedResourceAllocation(t *testing.T) {
|
||||
// Enable volumesOnNodeForBalancing to do balanced resource allocation
|
||||
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.BalanceAttachedNodeVolumes))
|
||||
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BalanceAttachedNodeVolumes, true)()
|
||||
podwithVol1 := v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
|
66 vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/image_locality.go generated vendored
@ -26,11 +26,12 @@ import (
"k8s.io/kubernetes/pkg/util/parsers"
)

// This is a reasonable size range of all container images. 90%ile of images on dockerhub drops into this range.
// The two thresholds are used as bounds for the image score range. They correspond to a reasonable size range for
// container images compressed and stored in registries; 90%ile of images on dockerhub drops into this range.
const (
mb int64 = 1024 * 1024
minImgSize int64 = 23 * mb
maxImgSize int64 = 1000 * mb
mb int64 = 1024 * 1024
minThreshold int64 = 23 * mb
maxThreshold int64 = 1000 * mb
)

// ImageLocalityPriorityMap is a priority function that favors nodes that already have requested pod container's images.
@ -44,44 +45,55 @@ func ImageLocalityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *scheduler
return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
}

sumSize := totalImageSize(nodeInfo, pod.Spec.Containers)
var score int
if priorityMeta, ok := meta.(*priorityMetadata); ok {
score = calculatePriority(sumImageScores(nodeInfo, pod.Spec.Containers, priorityMeta.totalNumNodes))
} else {
// if we are not able to parse priority meta data, skip this priority
score = 0
}

return schedulerapi.HostPriority{
Host: node.Name,
Score: calculateScoreFromSize(sumSize),
Score: score,
}, nil
}

// calculateScoreFromSize calculates the priority of a node. sumSize is sum size of requested images on this node.
// 1. Split image size range into 10 buckets.
// 2. Decide the priority of a given sumSize based on which bucket it belongs to.
func calculateScoreFromSize(sumSize int64) int {
switch {
case sumSize == 0 || sumSize < minImgSize:
// 0 means none of the images required by this pod are present on this
// node or the total size of the images present is too small to be taken into further consideration.
return 0

case sumSize >= maxImgSize:
// If existing images' total size is larger than max, just make it highest priority.
return schedulerapi.MaxPriority
// calculatePriority returns the priority of a node. Given the sumScores of requested images on the node, the node's
// priority is obtained by scaling the maximum priority value with a ratio proportional to the sumScores.
func calculatePriority(sumScores int64) int {
if sumScores < minThreshold {
sumScores = minThreshold
} else if sumScores > maxThreshold {
sumScores = maxThreshold
}

return int((int64(schedulerapi.MaxPriority) * (sumSize - minImgSize) / (maxImgSize - minImgSize)) + 1)
return int(int64(schedulerapi.MaxPriority) * (sumScores - minThreshold) / (maxThreshold - minThreshold))
}

// totalImageSize returns the total image size of all the containers that are already on the node.
func totalImageSize(nodeInfo *schedulercache.NodeInfo, containers []v1.Container) int64 {
var total int64
// sumImageScores returns the sum of image scores of all the containers that are already on the node.
// Each image receives a raw score of its size, scaled by scaledImageScore. The raw scores are later used to calculate
// the final score. Note that the init containers are not considered for it's rare for users to deploy huge init containers.
func sumImageScores(nodeInfo *schedulercache.NodeInfo, containers []v1.Container, totalNumNodes int) int64 {
var sum int64
imageStates := nodeInfo.ImageStates()

imageSizes := nodeInfo.ImageSizes()
for _, container := range containers {
if size, ok := imageSizes[normalizedImageName(container.Image)]; ok {
total += size
if state, ok := imageStates[normalizedImageName(container.Image)]; ok {
sum += scaledImageScore(state, totalNumNodes)
}
}

return total
return sum
}

// scaledImageScore returns an adaptively scaled score for the given state of an image.
// The size of the image is used as the base score, scaled by a factor which considers how much nodes the image has "spread" to.
// This heuristic aims to mitigate the undesirable "node heating problem", i.e., pods get assigned to the same or
// a few nodes due to image locality.
func scaledImageScore(imageState *schedulercache.ImageStateSummary, totalNumNodes int) int64 {
spread := float64(imageState.NumNodes) / float64(totalNumNodes)
return int64(float64(imageState.Size) * spread)
}

// normalizedImageName returns the CRI compliant name for a given image.
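As an illustration of the new scoring path above (a standalone sketch, not part of the diff): an image of 300 MB present on one of two nodes gets a scaled score of 300 MB * 1/2 = 150 MB, which calculatePriority then maps into the 0..10 range. The constants are copied from the hunk; the max priority value of 10 matches the scores quoted in the test file that follows.

package sketch

import "fmt"

const (
    mb           int64 = 1024 * 1024
    minThreshold int64 = 23 * mb
    maxThreshold int64 = 1000 * mb
    maxPriority  int64 = 10
)

// calculatePriority clamps the summed image score into [minThreshold, maxThreshold]
// and scales it linearly into 0..maxPriority, as in the diff above.
func calculatePriority(sumScores int64) int {
    if sumScores < minThreshold {
        sumScores = minThreshold
    } else if sumScores > maxThreshold {
        sumScores = maxThreshold
    }
    return int(maxPriority * (sumScores - minThreshold) / (maxThreshold - minThreshold))
}

func main() {
    // A 300 MB image present on 1 of 2 nodes: spread = 0.5, raw score 150 MB.
    scaled := int64(float64(300*mb) * 0.5)
    fmt.Println(calculatePriority(scaled)) // 10*(150-23)/(1000-23) = 1
}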
40 vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/image_locality_test.go generated vendored
@ -42,13 +42,13 @@ func TestImageLocalityPriority(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
test40140 := v1.PodSpec{
|
||||
test40300 := v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: "gcr.io/40",
|
||||
},
|
||||
{
|
||||
Image: "gcr.io/140",
|
||||
Image: "gcr.io/300",
|
||||
},
|
||||
},
|
||||
}
|
||||
@ -64,7 +64,7 @@ func TestImageLocalityPriority(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
node401402000 := v1.NodeStatus{
|
||||
node403002000 := v1.NodeStatus{
|
||||
Images: []v1.ContainerImage{
|
||||
{
|
||||
Names: []string{
|
||||
@ -76,10 +76,10 @@ func TestImageLocalityPriority(t *testing.T) {
|
||||
},
|
||||
{
|
||||
Names: []string{
|
||||
"gcr.io/140:" + parsers.DefaultImageTag,
|
||||
"gcr.io/140:v1",
|
||||
"gcr.io/300:" + parsers.DefaultImageTag,
|
||||
"gcr.io/300:v1",
|
||||
},
|
||||
SizeBytes: int64(140 * mb),
|
||||
SizeBytes: int64(300 * mb),
|
||||
},
|
||||
{
|
||||
Names: []string{
|
||||
@ -120,29 +120,29 @@ func TestImageLocalityPriority(t *testing.T) {
|
||||
|
||||
// Node1
|
||||
// Image: gcr.io/40:latest 40MB
|
||||
// Score: (40M-23M)/97.7M + 1 = 1
|
||||
// Score: 0 (40M/2 < 23M, min-threshold)
|
||||
|
||||
// Node2
|
||||
// Image: gcr.io/250:latest 250MB
|
||||
// Score: (250M-23M)/97.7M + 1 = 3
|
||||
// Score: 10 * (250M/2 - 23M)/(1000M - 23M) = 1
|
||||
pod: &v1.Pod{Spec: test40250},
|
||||
nodes: []*v1.Node{makeImageNode("machine1", node401402000), makeImageNode("machine2", node25010)},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 1}, {Host: "machine2", Score: 3}},
|
||||
nodes: []*v1.Node{makeImageNode("machine1", node403002000), makeImageNode("machine2", node25010)},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 1}},
|
||||
name: "two images spread on two nodes, prefer the larger image one",
|
||||
},
|
||||
{
|
||||
// Pod: gcr.io/40 gcr.io/140
|
||||
// Pod: gcr.io/40 gcr.io/300
|
||||
|
||||
// Node1
|
||||
// Image: gcr.io/40:latest 40MB, gcr.io/140:latest 140MB
|
||||
// Score: (40M+140M-23M)/97.7M + 1 = 2
|
||||
// Image: gcr.io/40:latest 40MB, gcr.io/300:latest 300MB
|
||||
// Score: 10 * ((40M + 300M)/2 - 23M)/(1000M - 23M) = 1
|
||||
|
||||
// Node2
|
||||
// Image: not present
|
||||
// Score: 0
|
||||
pod: &v1.Pod{Spec: test40140},
|
||||
nodes: []*v1.Node{makeImageNode("machine1", node401402000), makeImageNode("machine2", node25010)},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 2}, {Host: "machine2", Score: 0}},
|
||||
pod: &v1.Pod{Spec: test40300},
|
||||
nodes: []*v1.Node{makeImageNode("machine1", node403002000), makeImageNode("machine2", node25010)},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 1}, {Host: "machine2", Score: 0}},
|
||||
name: "two images on one node, prefer this node",
|
||||
},
|
||||
{
|
||||
@ -150,13 +150,13 @@ func TestImageLocalityPriority(t *testing.T) {
|
||||
|
||||
// Node1
|
||||
// Image: gcr.io/2000:latest 2000MB
|
||||
// Score: 2000 > max score = 10
|
||||
// Score: 10 (2000M/2 >= 1000M, max-threshold)
|
||||
|
||||
// Node2
|
||||
// Image: gcr.io/10:latest 10MB
|
||||
// Score: 10 < min score = 0
|
||||
// Score: 0 (10M/2 < 23M, min-threshold)
|
||||
pod: &v1.Pod{Spec: testMinMax},
|
||||
nodes: []*v1.Node{makeImageNode("machine1", node401402000), makeImageNode("machine2", node25010)},
|
||||
nodes: []*v1.Node{makeImageNode("machine1", node403002000), makeImageNode("machine2", node25010)},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}},
|
||||
name: "if exceed limit, use limit",
|
||||
},
|
||||
@ -165,7 +165,7 @@ func TestImageLocalityPriority(t *testing.T) {
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes)
|
||||
list, err := priorityFunction(ImageLocalityPriorityMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes)
|
||||
list, err := priorityFunction(ImageLocalityPriorityMap, nil, &priorityMetadata{totalNumNodes: len(test.nodes)})(test.pod, nodeNameToInfo, test.nodes)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
|
11 vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/interpod_affinity.go generated vendored
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package priorities
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
@ -29,7 +30,7 @@ import (
|
||||
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
|
||||
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/klog"
|
||||
)
|
||||
|
||||
// InterPodAffinity contains information to calculate inter pod affinity.
|
||||
@ -136,7 +137,7 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, node
|
||||
existingPodNode, err := ipa.info.GetNodeInfo(existingPod.Spec.NodeName)
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
glog.Errorf("Node not found, %v", existingPod.Spec.NodeName)
|
||||
klog.Errorf("Node not found, %v", existingPod.Spec.NodeName)
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
@ -210,7 +211,7 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, node
|
||||
}
|
||||
}
|
||||
}
|
||||
workqueue.Parallelize(16, len(allNodeNames), processNode)
|
||||
workqueue.ParallelizeUntil(context.TODO(), 16, len(allNodeNames), processNode)
|
||||
if pm.firstError != nil {
|
||||
return nil, pm.firstError
|
||||
}
|
||||
@ -232,8 +233,8 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, node
fScore = float64(schedulerapi.MaxPriority) * ((pm.counts[node.Name] - minCount) / (maxCount - minCount))
}
result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: int(fScore)})
if glog.V(10) {
glog.Infof("%v -> %v: InterPodAffinityPriority, Score: (%d)", pod.Name, node.Name, int(fScore))
if klog.V(10) {
klog.Infof("%v -> %v: InterPodAffinityPriority, Score: (%d)", pod.Name, node.Name, int(fScore))
}
}
return result, nil
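The fScore line above min-max normalizes each node's accumulated affinity weight into the 0..MaxPriority band. A tiny sketch of that normalization (not from the diff; the normalize helper and the zero-range guard are hypothetical):

package sketch

import "fmt"

// normalize maps a raw count into 0..maxPriority using the same
// (count-min)/(max-min) scaling as the priority function above.
func normalize(count, minCount, maxCount, maxPriority float64) int {
    if maxCount == minCount {
        return 0
    }
    return int(maxPriority * (count - minCount) / (maxCount - minCount))
}

func main() {
    // Counts 2, 5 and 8 across three nodes map to scores 0, 5 and 10.
    for _, c := range []float64{2, 5, 8} {
        fmt.Println(normalize(c, 2, 8, 10))
    }
}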
2 vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/least_requested.go generated vendored
@ -29,7 +29,7 @@ var (
// prioritizes based on the minimum of the average of the fraction of requested to capacity.
//
// Details:
// cpu((capacity-sum(requested))*10/capacity) + memory((capacity-sum(requested))*10/capacity)/2
// (cpu((capacity-sum(requested))*10/capacity) + memory((capacity-sum(requested))*10/capacity))/2
LeastRequestedPriorityMap = leastResourcePriority.PriorityMap
)
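The corrected comment above adds the parentheses so that the /2 averages the two per-resource scores rather than only the memory term. A worked sketch of that arithmetic (standalone, not from the diff; the helper is hypothetical):

package sketch

import "fmt"

// leastRequestedScore follows the formula in the comment above:
// ((capacity-requested)*10/capacity for cpu + the same for memory) / 2.
func leastRequestedScore(cpuReq, cpuCap, memReq, memCap int64) int64 {
    cpu := (cpuCap - cpuReq) * 10 / cpuCap
    mem := (memCap - memReq) * 10 / memCap
    return (cpu + mem) / 2
}

func main() {
    // 1000m of 4000m CPU and 2 GiB of 8 GiB memory requested:
    // cpu score 7, memory score 7, averaged to 7.
    fmt.Println(leastRequestedScore(1000, 4000, 2, 8))
}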
29 vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/main_test.go generated vendored (new file)
@ -0,0 +1,29 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package priorities
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
|
||||
_ "k8s.io/kubernetes/pkg/features"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run)
|
||||
}
|
5 vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/metadata.go generated vendored
@ -21,7 +21,6 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm"
|
||||
priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
|
||||
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
|
||||
)
|
||||
|
||||
@ -52,6 +51,7 @@ type priorityMetadata struct {
|
||||
podSelectors []labels.Selector
|
||||
controllerRef *metav1.OwnerReference
|
||||
podFirstServiceSelector labels.Selector
|
||||
totalNumNodes int
|
||||
}
|
||||
|
||||
// PriorityMetadata is a PriorityMetadataProducer. Node info can be nil.
|
||||
@ -65,8 +65,9 @@ func (pmf *PriorityMetadataFactory) PriorityMetadata(pod *v1.Pod, nodeNameToInfo
|
||||
podTolerations: getAllTolerationPreferNoSchedule(pod.Spec.Tolerations),
|
||||
affinity: pod.Spec.Affinity,
|
||||
podSelectors: getSelectors(pod, pmf.serviceLister, pmf.controllerLister, pmf.replicaSetLister, pmf.statefulSetLister),
|
||||
controllerRef: priorityutil.GetControllerRef(pod),
|
||||
controllerRef: metav1.GetControllerOf(pod),
|
||||
podFirstServiceSelector: getFirstServiceSelector(pod, pmf.serviceLister),
|
||||
totalNumNodes: len(nodeNameToInfo),
|
||||
}
|
||||
}
|
||||
|
||||
|
9 vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/metadata_test.go generated vendored
@ -20,9 +20,8 @@ import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
apps "k8s.io/api/apps/v1beta1"
|
||||
apps "k8s.io/api/apps/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
|
||||
@ -153,14 +152,14 @@ func TestPriorityMetadata(t *testing.T) {
|
||||
name: "Produce a priorityMetadata with specified requests",
|
||||
},
|
||||
}
|
||||
mataDataProducer := NewPriorityMetadataFactory(
|
||||
metaDataProducer := NewPriorityMetadataFactory(
|
||||
schedulertesting.FakeServiceLister([]*v1.Service{}),
|
||||
schedulertesting.FakeControllerLister([]*v1.ReplicationController{}),
|
||||
schedulertesting.FakeReplicaSetLister([]*extensions.ReplicaSet{}),
|
||||
schedulertesting.FakeReplicaSetLister([]*apps.ReplicaSet{}),
|
||||
schedulertesting.FakeStatefulSetLister([]*apps.StatefulSet{}))
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
ptData := mataDataProducer(test.pod, nil)
|
||||
ptData := metaDataProducer(test.pod, nil)
|
||||
if !reflect.DeepEqual(test.expected, ptData) {
|
||||
t.Errorf("expected %#v, got %#v", test.expected, ptData)
|
||||
}
|
||||
|
4 vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/most_requested.go generated vendored
@ -39,10 +39,10 @@ func mostResourceScorer(requested, allocable *schedulercache.Resource, includeVo
// The used capacity is calculated on a scale of 0-10
// 0 being the lowest priority and 10 being the highest.
// The more resources are used the higher the score is. This function
// is almost a reversed version of least_requested_priority.calculatUnusedScore
// is almost a reversed version of least_requested_priority.calculateUnusedScore
// (10 - calculateUnusedScore). The main difference is in rounding. It was added to
// keep the final formula clean and not to modify the widely used (by users
// in their default scheduling policies) calculateUSedScore.
// in their default scheduling policies) calculateUsedScore.
func mostRequestedScore(requested, capacity int64) int64 {
if capacity == 0 {
return 0
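Per the comment above, the most-requested score is essentially the mirror of the least-requested one: roughly requested*10/capacity, so fuller nodes score higher. A standalone sketch of that relationship (not copied from the diff; the overflow guard is an assumption):

package sketch

import "fmt"

// mostRequestedScore mirrors the behaviour described in the comment above:
// the fuller the node, the higher the score, on a 0-10 scale.
func mostRequestedScore(requested, capacity int64) int64 {
    if capacity == 0 {
        return 0
    }
    if requested > capacity {
        return 0
    }
    return requested * 10 / capacity
}

func main() {
    // 3000m requested of a 4000m node scores 7; an idle node scores 0.
    fmt.Println(mostRequestedScore(3000, 4000), mostRequestedScore(0, 4000))
}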
@ -22,7 +22,6 @@ import (
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
|
||||
priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
|
||||
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
|
||||
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
|
||||
)
|
||||
@ -39,7 +38,7 @@ func CalculateNodePreferAvoidPodsPriorityMap(pod *v1.Pod, meta interface{}, node
|
||||
controllerRef = priorityMeta.controllerRef
|
||||
} else {
|
||||
// We couldn't parse metadata - fallback to the podspec.
|
||||
controllerRef = priorityutil.GetControllerRef(pod)
|
||||
controllerRef = metav1.GetControllerOf(pod)
|
||||
}
|
||||
|
||||
if controllerRef != nil {
|
||||
|
@ -209,19 +209,19 @@ func TestRequestedToCapacityRatio(t *testing.T) {
|
||||
|
||||
for _, test := range tests {
|
||||
|
||||
nodeNames := make([]string, 0)
|
||||
var nodeNames []string
|
||||
for nodeName := range test.nodes {
|
||||
nodeNames = append(nodeNames, nodeName)
|
||||
}
|
||||
sort.Strings(nodeNames)
|
||||
|
||||
nodes := make([]*v1.Node, 0)
|
||||
var nodes []*v1.Node
|
||||
for _, nodeName := range nodeNames {
|
||||
node := test.nodes[nodeName]
|
||||
nodes = append(nodes, makeNode(nodeName, node.capacity.cpu, node.capacity.mem))
|
||||
}
|
||||
|
||||
scheduledPods := make([]*v1.Pod, 0)
|
||||
var scheduledPods []*v1.Pod
|
||||
for name, node := range test.nodes {
|
||||
scheduledPods = append(scheduledPods,
|
||||
buildResourcesPod(name, node.used))
|
||||
|
33 vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/resource_allocation.go generated vendored
@ -19,8 +19,10 @@ package priorities
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/api/core/v1"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/klog"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
|
||||
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
|
||||
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
|
||||
@ -56,20 +58,31 @@ func (r *ResourceAllocationPriority) PriorityMap(
|
||||
requested.Memory += nodeInfo.NonZeroRequest().Memory
|
||||
var score int64
|
||||
// Check if the pod has volumes and this could be added to scorer function for balanced resource allocation.
|
||||
if len(pod.Spec.Volumes) >= 0 && nodeInfo.TransientInfo != nil {
|
||||
if len(pod.Spec.Volumes) >= 0 && utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes) && nodeInfo.TransientInfo != nil {
|
||||
score = r.scorer(&requested, &allocatable, true, nodeInfo.TransientInfo.TransNodeInfo.RequestedVolumes, nodeInfo.TransientInfo.TransNodeInfo.AllocatableVolumesCount)
|
||||
} else {
|
||||
score = r.scorer(&requested, &allocatable, false, 0, 0)
|
||||
}
|
||||
|
||||
if glog.V(10) {
|
||||
glog.Infof(
|
||||
"%v -> %v: %v, capacity %d millicores %d memory bytes, total request %d millicores %d memory bytes, score %d",
|
||||
pod.Name, node.Name, r.Name,
|
||||
allocatable.MilliCPU, allocatable.Memory,
|
||||
requested.MilliCPU+allocatable.MilliCPU, requested.Memory+allocatable.Memory,
|
||||
score,
|
||||
)
|
||||
if klog.V(10) {
|
||||
if len(pod.Spec.Volumes) >= 0 && utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes) && nodeInfo.TransientInfo != nil {
|
||||
klog.Infof(
|
||||
"%v -> %v: %v, capacity %d millicores %d memory bytes, %d volumes, total request %d millicores %d memory bytes %d volumes, score %d",
|
||||
pod.Name, node.Name, r.Name,
|
||||
allocatable.MilliCPU, allocatable.Memory, nodeInfo.TransientInfo.TransNodeInfo.AllocatableVolumesCount,
|
||||
requested.MilliCPU, requested.Memory,
|
||||
nodeInfo.TransientInfo.TransNodeInfo.RequestedVolumes,
|
||||
score,
|
||||
)
|
||||
} else {
|
||||
klog.Infof(
|
||||
"%v -> %v: %v, capacity %d millicores %d memory bytes, total request %d millicores %d memory bytes, score %d",
|
||||
pod.Name, node.Name, r.Name,
|
||||
allocatable.MilliCPU, allocatable.Memory,
|
||||
requested.MilliCPU, requested.Memory,
|
||||
score,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
return schedulerapi.HostPriority{
|
||||
|
10 vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/resource_limits.go generated vendored
@ -23,7 +23,7 @@ import (
|
||||
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
|
||||
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/klog"
|
||||
)
|
||||
|
||||
// ResourceLimitsPriorityMap is a priority function that increases score of input node by 1 if the node satisfies
|
||||
@ -52,10 +52,10 @@ func ResourceLimitsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedule
|
||||
score = 1
|
||||
}
|
||||
|
||||
if glog.V(10) {
|
||||
// We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is
|
||||
if klog.V(10) {
|
||||
// We explicitly don't do klog.V(10).Infof() to avoid computing all the parameters if this is
|
||||
// not logged. There is visible performance gain from it.
|
||||
glog.Infof(
|
||||
klog.Infof(
|
||||
"%v -> %v: Resource Limits Priority, allocatable %d millicores %d memory bytes, pod limits %d millicores %d memory bytes, score %d",
|
||||
pod.Name, node.Name,
|
||||
allocatableResources.MilliCPU, allocatableResources.Memory,
|
||||
@ -70,7 +70,7 @@ func ResourceLimitsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedule
|
||||
}, nil
|
||||
}
|
||||
|
||||
// computeScore return 1 if limit value is less than or equal to allocable
|
||||
// computeScore returns 1 if limit value is less than or equal to allocatable
|
||||
// value, otherwise it returns 0.
|
||||
func computeScore(limit, allocatable int64) int64 {
|
||||
if limit != 0 && allocatable != 0 && limit <= allocatable {
|
||||
|
14 vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/selector_spreading.go generated vendored
@ -26,7 +26,7 @@ import (
|
||||
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
|
||||
utilnode "k8s.io/kubernetes/pkg/util/node"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/klog"
|
||||
)
|
||||
|
||||
// When zone information is present, give 2/3 of the weighting to zone spreading, 1/3 to node spreading
|
||||
@ -94,19 +94,15 @@ func (s *SelectorSpread) CalculateSpreadPriorityMap(pod *v1.Pod, meta interface{
|
||||
// Ignore the previous deleted version for spreading purposes
|
||||
// (it can still be considered for resource restrictions etc.)
|
||||
if nodePod.DeletionTimestamp != nil {
|
||||
glog.V(4).Infof("skipping pending-deleted pod: %s/%s", nodePod.Namespace, nodePod.Name)
|
||||
klog.V(4).Infof("skipping pending-deleted pod: %s/%s", nodePod.Namespace, nodePod.Name)
|
||||
continue
|
||||
}
|
||||
matches := false
|
||||
for _, selector := range selectors {
|
||||
if selector.Matches(labels.Set(nodePod.ObjectMeta.Labels)) {
|
||||
matches = true
|
||||
count++
|
||||
break
|
||||
}
|
||||
}
|
||||
if matches {
|
||||
count++
|
||||
}
|
||||
}
|
||||
return schedulerapi.HostPriority{
|
||||
Host: node.Name,
|
||||
@ -164,8 +160,8 @@ func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interfa
|
||||
}
|
||||
}
|
||||
result[i].Score = int(fScore)
|
||||
if glog.V(10) {
|
||||
glog.Infof(
|
||||
if klog.V(10) {
|
||||
klog.Infof(
|
||||
"%v -> %v: SelectorSpreadPriority, Score: (%d)", pod.Name, result[i].Host, int(fScore),
|
||||
)
|
||||
}
|
||||
|
37 vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/selector_spreading_test.go generated vendored
@ -21,9 +21,8 @@ import (
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
apps "k8s.io/api/apps/v1beta1"
|
||||
apps "k8s.io/api/apps/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
|
||||
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
|
||||
@ -60,7 +59,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
|
||||
pods []*v1.Pod
|
||||
nodes []string
|
||||
rcs []*v1.ReplicationController
|
||||
rss []*extensions.ReplicaSet
|
||||
rss []*apps.ReplicaSet
|
||||
services []*v1.Service
|
||||
sss []*apps.StatefulSet
|
||||
expectedList schedulerapi.HostPriorityList
|
||||
@ -200,7 +199,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
|
||||
},
|
||||
nodes: []string{"machine1", "machine2"},
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
|
||||
rss: []*extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
|
||||
rss: []*apps.ReplicaSet{{Spec: apps.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
|
||||
// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
|
||||
name: "service with partial pod label matches with service and replica set",
|
||||
@ -241,7 +240,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
|
||||
},
|
||||
nodes: []string{"machine1", "machine2"},
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}},
|
||||
rss: []*extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
|
||||
rss: []*apps.ReplicaSet{{Spec: apps.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
|
||||
// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
|
||||
name: "disjoined service and replica set should be treated equally",
|
||||
@ -280,7 +279,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
|
||||
{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
||||
},
|
||||
nodes: []string{"machine1", "machine2"},
|
||||
rss: []*extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
|
||||
rss: []*apps.ReplicaSet{{Spec: apps.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
|
||||
// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
|
||||
name: "Replica set with partial pod label matches",
|
||||
@ -318,7 +317,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
|
||||
{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
||||
},
|
||||
nodes: []string{"machine1", "machine2"},
|
||||
rss: []*extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"baz": "blah"}}}}},
|
||||
rss: []*apps.ReplicaSet{{Spec: apps.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"baz": "blah"}}}}},
|
||||
// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
|
||||
name: "Another replication set with partial pod label matches",
|
||||
@ -348,14 +347,14 @@ func TestSelectorSpreadPriority(t *testing.T) {
|
||||
statefulSetLister: schedulertesting.FakeStatefulSetLister(test.sss),
|
||||
}
|
||||
|
||||
mataDataProducer := NewPriorityMetadataFactory(
|
||||
metaDataProducer := NewPriorityMetadataFactory(
|
||||
schedulertesting.FakeServiceLister(test.services),
|
||||
schedulertesting.FakeControllerLister(test.rcs),
|
||||
schedulertesting.FakeReplicaSetLister(test.rss),
|
||||
schedulertesting.FakeStatefulSetLister(test.sss))
|
||||
mataData := mataDataProducer(test.pod, nodeNameToInfo)
|
||||
metaData := metaDataProducer(test.pod, nodeNameToInfo)
|
||||
|
||||
ttp := priorityFunction(selectorSpread.CalculateSpreadPriorityMap, selectorSpread.CalculateSpreadPriorityReduce, mataData)
|
||||
ttp := priorityFunction(selectorSpread.CalculateSpreadPriorityMap, selectorSpread.CalculateSpreadPriorityReduce, metaData)
|
||||
list, err := ttp(test.pod, nodeNameToInfo, makeNodeList(test.nodes))
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v \n", err)
|
||||
@ -410,7 +409,7 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
|
||||
pod *v1.Pod
|
||||
pods []*v1.Pod
|
||||
rcs []*v1.ReplicationController
|
||||
rss []*extensions.ReplicaSet
|
||||
rss []*apps.ReplicaSet
|
||||
services []*v1.Service
|
||||
sss []*apps.StatefulSet
|
||||
expectedList schedulerapi.HostPriorityList
|
||||
@ -584,13 +583,13 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
|
||||
statefulSetLister: schedulertesting.FakeStatefulSetLister(test.sss),
|
||||
}
|
||||
|
||||
mataDataProducer := NewPriorityMetadataFactory(
|
||||
metaDataProducer := NewPriorityMetadataFactory(
|
||||
schedulertesting.FakeServiceLister(test.services),
|
||||
schedulertesting.FakeControllerLister(test.rcs),
|
||||
schedulertesting.FakeReplicaSetLister(test.rss),
|
||||
schedulertesting.FakeStatefulSetLister(test.sss))
|
||||
mataData := mataDataProducer(test.pod, nodeNameToInfo)
|
||||
ttp := priorityFunction(selectorSpread.CalculateSpreadPriorityMap, selectorSpread.CalculateSpreadPriorityReduce, mataData)
|
||||
metaData := metaDataProducer(test.pod, nodeNameToInfo)
|
||||
ttp := priorityFunction(selectorSpread.CalculateSpreadPriorityMap, selectorSpread.CalculateSpreadPriorityReduce, metaData)
|
||||
list, err := ttp(test.pod, nodeNameToInfo, makeLabeledNodeList(labeledNodes))
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
@ -761,23 +760,23 @@ func TestZoneSpreadPriority(t *testing.T) {
|
||||
},
|
||||
}
|
||||
// these local variables just make sure controllerLister\replicaSetLister\statefulSetLister not nil
|
||||
// when construct mataDataProducer
|
||||
// when construct metaDataProducer
|
||||
sss := []*apps.StatefulSet{{Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}}
|
||||
rcs := []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}}
|
||||
rss := []*extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}}
|
||||
rss := []*apps.ReplicaSet{{Spec: apps.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, makeLabeledNodeList(test.nodes))
|
||||
zoneSpread := ServiceAntiAffinity{podLister: schedulertesting.FakePodLister(test.pods), serviceLister: schedulertesting.FakeServiceLister(test.services), label: "zone"}
|
||||
|
||||
mataDataProducer := NewPriorityMetadataFactory(
|
||||
metaDataProducer := NewPriorityMetadataFactory(
|
||||
schedulertesting.FakeServiceLister(test.services),
|
||||
schedulertesting.FakeControllerLister(rcs),
|
||||
schedulertesting.FakeReplicaSetLister(rss),
|
||||
schedulertesting.FakeStatefulSetLister(sss))
|
||||
mataData := mataDataProducer(test.pod, nodeNameToInfo)
|
||||
ttp := priorityFunction(zoneSpread.CalculateAntiAffinityPriorityMap, zoneSpread.CalculateAntiAffinityPriorityReduce, mataData)
|
||||
metaData := metaDataProducer(test.pod, nodeNameToInfo)
|
||||
ttp := priorityFunction(zoneSpread.CalculateAntiAffinityPriorityMap, zoneSpread.CalculateAntiAffinityPriorityReduce, metaData)
|
||||
list, err := ttp(test.pod, nodeNameToInfo, makeLabeledNodeList(test.nodes))
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
|
6 vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/test_util.go generated vendored
@ -41,18 +41,18 @@ func makeNode(node string, milliCPU, memory int64) *v1.Node {
|
||||
}
|
||||
}
|
||||
|
||||
func priorityFunction(mapFn algorithm.PriorityMapFunction, reduceFn algorithm.PriorityReduceFunction, mataData interface{}) algorithm.PriorityFunction {
|
||||
func priorityFunction(mapFn algorithm.PriorityMapFunction, reduceFn algorithm.PriorityReduceFunction, metaData interface{}) algorithm.PriorityFunction {
|
||||
return func(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
|
||||
result := make(schedulerapi.HostPriorityList, 0, len(nodes))
|
||||
for i := range nodes {
|
||||
hostResult, err := mapFn(pod, mataData, nodeNameToInfo[nodes[i].Name])
|
||||
hostResult, err := mapFn(pod, metaData, nodeNameToInfo[nodes[i].Name])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
result = append(result, hostResult)
|
||||
}
|
||||
if reduceFn != nil {
|
||||
if err := reduceFn(pod, mataData, nodeNameToInfo, result); err != nil {
|
||||
if err := reduceFn(pod, metaData, nodeNameToInfo, result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
21 vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util/BUILD generated vendored
@ -11,17 +11,16 @@ go_test(
|
||||
srcs = [
|
||||
"non_zero_test.go",
|
||||
"topologies_test.go",
|
||||
"util_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/selection:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/assert:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/selection:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@ -30,14 +29,12 @@ go_library(
|
||||
srcs = [
|
||||
"non_zero.go",
|
||||
"topologies.go",
|
||||
"util.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util",
|
||||
deps = [
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
|
12 vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util/non_zero_test.go generated vendored
@ -26,7 +26,7 @@ import (
|
||||
)
|
||||
|
||||
func TestGetNonzeroRequests(t *testing.T) {
|
||||
tds := []struct {
|
||||
tests := []struct {
|
||||
name string
|
||||
requests v1.ResourceList
|
||||
expectedCPU int64
|
||||
@ -65,9 +65,11 @@ func TestGetNonzeroRequests(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
for _, td := range tds {
|
||||
realCPU, realMemory := GetNonzeroRequests(&td.requests)
|
||||
assert.EqualValuesf(t, td.expectedCPU, realCPU, "Failed to test: %s", td.name)
|
||||
assert.EqualValuesf(t, td.expectedMemory, realMemory, "Failed to test: %s", td.name)
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
realCPU, realMemory := GetNonzeroRequests(&test.requests)
|
||||
assert.EqualValuesf(t, test.expectedCPU, realCPU, "Failed to test: %s", test.name)
|
||||
assert.EqualValuesf(t, test.expectedMemory, realMemory, "Failed to test: %s", test.name)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
24 vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util/topologies_test.go generated vendored
@ -59,8 +59,10 @@ func TestGetNamespacesFromPodAffinityTerm(t *testing.T) {
}

for _, test := range tests {
realValue := GetNamespacesFromPodAffinityTerm(fakePod(), test.podAffinityTerm)
assert.EqualValuesf(t, test.expectedValue, realValue, "Failed to test: %s", test.name)
t.Run(test.name, func(t *testing.T) {
realValue := GetNamespacesFromPodAffinityTerm(fakePod(), test.podAffinityTerm)
assert.EqualValuesf(t, test.expectedValue, realValue, "Failed to test: %s", test.name)
})
}
}

@ -96,12 +98,14 @@ func TestPodMatchesTermsNamespaceAndSelector(t *testing.T) {
}

for _, test := range tests {
fakeTestPod := fakePod()
fakeTestPod.Namespace = test.podNamespaces
fakeTestPod.Labels = test.podLabels
t.Run(test.name, func(t *testing.T) {
fakeTestPod := fakePod()
fakeTestPod.Namespace = test.podNamespaces
fakeTestPod.Labels = test.podLabels

realValue := PodMatchesTermsNamespaceAndSelector(fakeTestPod, fakeNamespaces, fakeSelector)
assert.EqualValuesf(t, test.expectedResult, realValue, "Faild to test: %s", test.name)
realValue := PodMatchesTermsNamespaceAndSelector(fakeTestPod, fakeNamespaces, fakeSelector)
assert.EqualValuesf(t, test.expectedResult, realValue, "Faild to test: %s", test.name)
})
}

}
@ -248,7 +252,9 @@ func TestNodesHaveSameTopologyKey(t *testing.T) {
}

for _, test := range tests {
got := NodesHaveSameTopologyKey(test.nodeA, test.nodeB, test.topologyKey)
assert.Equalf(t, test.expected, got, "Failed to test: %s", test.name)
t.Run(test.name, func(t *testing.T) {
got := NodesHaveSameTopologyKey(test.nodeA, test.nodeB, test.topologyKey)
assert.Equalf(t, test.expected, got, "Failed to test: %s", test.name)
})
}
}

119
vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util/util_test.go
generated
vendored
@ -1,119 +0,0 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package util
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
func TestGetControllerRef(t *testing.T) {
|
||||
fakeBlockOwnerDeletion := true
|
||||
fakeFalseController := false
|
||||
fakeTrueController := true
|
||||
fakeEmptyOwnerReference := metav1.OwnerReference{}
|
||||
|
||||
tds := []struct {
|
||||
name string
|
||||
pod v1.Pod
|
||||
expectedNil bool
|
||||
expectedOR metav1.OwnerReference
|
||||
}{
|
||||
{
|
||||
"ownerreference_not_exist",
|
||||
v1.Pod{},
|
||||
true,
|
||||
fakeEmptyOwnerReference,
|
||||
},
|
||||
{
|
||||
"ownerreference_controller_is_nil",
|
||||
v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: "extensions/v1beta1",
|
||||
Kind: "ReplicaSet",
|
||||
Name: "or-unit-test-5b9cffccff",
|
||||
UID: "a46372ea-b254-11e7-8373-fa163e25bfb5",
|
||||
BlockOwnerDeletion: &fakeBlockOwnerDeletion,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
true,
|
||||
fakeEmptyOwnerReference,
|
||||
},
|
||||
{
|
||||
"ownerreference_controller_is_false",
|
||||
v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: "extensions/v1beta1",
|
||||
Kind: "ReplicaSet",
|
||||
Name: "or-unit-test-5b9cffccff",
|
||||
UID: "a46372ea-b254-11e7-8373-fa163e25bfb5",
|
||||
Controller: &fakeFalseController,
|
||||
BlockOwnerDeletion: &fakeBlockOwnerDeletion,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
true,
|
||||
fakeEmptyOwnerReference,
|
||||
},
|
||||
{
|
||||
"ownerreference_controller_is_true",
|
||||
v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: "extensions/v1beta1",
|
||||
Kind: "ReplicaSet",
|
||||
Name: "or-unit-test-5b9cffccff",
|
||||
UID: "a46372ea-b254-11e7-8373-fa163e25bfb5",
|
||||
BlockOwnerDeletion: &fakeBlockOwnerDeletion,
|
||||
Controller: &fakeTrueController,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
false,
|
||||
metav1.OwnerReference{
|
||||
APIVersion: "extensions/v1beta1",
|
||||
Kind: "ReplicaSet",
|
||||
Name: "or-unit-test-5b9cffccff",
|
||||
UID: "a46372ea-b254-11e7-8373-fa163e25bfb5",
|
||||
BlockOwnerDeletion: &fakeBlockOwnerDeletion,
|
||||
Controller: &fakeTrueController,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, td := range tds {
|
||||
realOR := GetControllerRef(&td.pod)
|
||||
if td.expectedNil {
|
||||
assert.Nilf(t, realOR, "Failed to test: %s", td.name)
|
||||
} else {
|
||||
assert.Equalf(t, &td.expectedOR, realOR, "Failed to test: %s", td.name)
|
||||
}
|
||||
}
|
||||
}
|
3
vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/scheduler_interface.go
generated
vendored
@ -26,6 +26,9 @@ import (
// decisions made by Kubernetes. This is typically needed for resources not directly
// managed by Kubernetes.
type SchedulerExtender interface {
// Name returns a unique name that identifies the extender.
Name() string

// Filter based on extender-implemented predicate functions. The filtered list is
// expected to be a subset of the supplied list. failedNodesMap optionally contains
// the list of failed nodes and failure reasons.

60
vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/scheduler_interface_test.go
generated
vendored
@ -1,60 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package algorithm

import (
"testing"

"k8s.io/api/core/v1"
)

// Some functions used by multiple scheduler tests.

type schedulerTester struct {
t *testing.T
scheduler ScheduleAlgorithm
nodeLister NodeLister
}

// Call if you know exactly where pod should get scheduled.
func (st *schedulerTester) expectSchedule(pod *v1.Pod, expected string) {
actual, err := st.scheduler.Schedule(pod, st.nodeLister)
if err != nil {
st.t.Errorf("Unexpected error %v\nTried to schedule: %#v", err, pod)
return
}
if actual != expected {
st.t.Errorf("Unexpected scheduling value: %v, expected %v", actual, expected)
}
}

// Call if you can't predict where pod will be scheduled.
func (st *schedulerTester) expectSuccess(pod *v1.Pod) {
_, err := st.scheduler.Schedule(pod, st.nodeLister)
if err != nil {
st.t.Errorf("Unexpected error %v\nTried to schedule: %#v", err, pod)
return
}
}

// Call if pod should *not* schedule.
func (st *schedulerTester) expectFailure(pod *v1.Pod) {
_, err := st.scheduler.Schedule(pod, st.nodeLister)
if err == nil {
st.t.Error("Unexpected non-error")
}
}

19
vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/types.go
generated
vendored
@ -17,18 +17,19 @@ limitations under the License.
package algorithm

import (
apps "k8s.io/api/apps/v1beta1"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
policyv1beta1 "k8s.io/api/policy/v1beta1"
"k8s.io/apimachinery/pkg/labels"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
)

// NodeFieldSelectorKeys is a map that: the key are node field selector keys; the values are
// the functions to get the value of the node field.
var NodeFieldSelectorKeys = map[string]func(*v1.Node) string{
NodeFieldSelectorKeyNodeName: func(n *v1.Node) string { return n.Name },
schedulerapi.NodeFieldSelectorKeyNodeName: func(n *v1.Node) string { return n.Name },
}

// FitPredicate is a function that indicates if a pod fits into an existing node.
@ -98,7 +99,7 @@ type PodLister interface {
List(labels.Selector) ([]*v1.Pod, error)
// This is similar to "List()", but the returned slice does not
// contain pods that don't pass `podFilter`.
FilteredList(podFilter schedulercache.PodFilter, selector labels.Selector) ([]*v1.Pod, error)
FilteredList(podFilter schedulerinternalcache.PodFilter, selector labels.Selector) ([]*v1.Pod, error)
}

// ServiceLister interface represents anything that can produce a list of services; the list is consumed by a scheduler.
@ -120,7 +121,13 @@ type ControllerLister interface {
// ReplicaSetLister interface represents anything that can produce a list of ReplicaSet; the list is consumed by a scheduler.
type ReplicaSetLister interface {
// Gets the replicasets for the given pod
GetPodReplicaSets(*v1.Pod) ([]*extensions.ReplicaSet, error)
GetPodReplicaSets(*v1.Pod) ([]*apps.ReplicaSet, error)
}

// PDBLister interface represents anything that can list PodDisruptionBudget objects.
type PDBLister interface {
// List() returns a list of PodDisruptionBudgets matching the selector.
List(labels.Selector) ([]*policyv1beta1.PodDisruptionBudget, error)
}

var _ ControllerLister = &EmptyControllerLister{}
@ -144,7 +151,7 @@ var _ ReplicaSetLister = &EmptyReplicaSetLister{}
type EmptyReplicaSetLister struct{}

// GetPodReplicaSets returns nil
func (f EmptyReplicaSetLister) GetPodReplicaSets(pod *v1.Pod) (rss []*extensions.ReplicaSet, err error) {
func (f EmptyReplicaSetLister) GetPodReplicaSets(pod *v1.Pod) (rss []*apps.ReplicaSet, err error) {
return nil, nil
}

9
vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/BUILD
generated
vendored
@ -15,11 +15,16 @@ go_library(

go_test(
name = "go_default_test",
srcs = ["plugins_test.go"],
srcs = [
"main_test.go",
"plugins_test.go",
],
embed = [":go_default_library"],
deps = [
"//pkg/features:go_default_library",
"//pkg/scheduler/factory:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
],
)

22
vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/defaults/BUILD
generated
vendored
@ -17,9 +17,9 @@ go_library(
"//pkg/scheduler/algorithm/priorities:go_default_library",
"//pkg/scheduler/core:go_default_library",
"//pkg/scheduler/factory:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@ -36,14 +36,14 @@ go_test(
"//pkg/scheduler/api:go_default_library",
"//pkg/scheduler/api/latest:go_default_library",
"//pkg/scheduler/factory:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
],
)

159
vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/defaults/compatibility_test.go
generated
vendored
@ -814,6 +814,130 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
|
||||
}},
|
||||
},
|
||||
},
|
||||
// Do not change this JSON after the corresponding release has been tagged.
|
||||
// A failure indicates backwards compatibility with the specified release was broken.
|
||||
"1.12": {
|
||||
JSON: `{
|
||||
"kind": "Policy",
|
||||
"apiVersion": "v1",
|
||||
"predicates": [
|
||||
{"name": "MatchNodeSelector"},
|
||||
{"name": "PodFitsResources"},
|
||||
{"name": "PodFitsHostPorts"},
|
||||
{"name": "HostName"},
|
||||
{"name": "NoDiskConflict"},
|
||||
{"name": "NoVolumeZoneConflict"},
|
||||
{"name": "PodToleratesNodeTaints"},
|
||||
{"name": "CheckNodeMemoryPressure"},
|
||||
{"name": "CheckNodeDiskPressure"},
|
||||
{"name": "CheckNodePIDPressure"},
|
||||
{"name": "CheckNodeCondition"},
|
||||
{"name": "MaxEBSVolumeCount"},
|
||||
{"name": "MaxGCEPDVolumeCount"},
|
||||
{"name": "MaxAzureDiskVolumeCount"},
|
||||
{"name": "MaxCSIVolumeCountPred"},
|
||||
{"name": "MatchInterPodAffinity"},
|
||||
{"name": "GeneralPredicates"},
|
||||
{"name": "CheckVolumeBinding"},
|
||||
{"name": "TestServiceAffinity", "argument": {"serviceAffinity" : {"labels" : ["region"]}}},
|
||||
{"name": "TestLabelsPresence", "argument": {"labelsPresence" : {"labels" : ["foo"], "presence":true}}}
|
||||
],"priorities": [
|
||||
{"name": "EqualPriority", "weight": 2},
|
||||
{"name": "ImageLocalityPriority", "weight": 2},
|
||||
{"name": "LeastRequestedPriority", "weight": 2},
|
||||
{"name": "BalancedResourceAllocation", "weight": 2},
|
||||
{"name": "SelectorSpreadPriority", "weight": 2},
|
||||
{"name": "NodePreferAvoidPodsPriority", "weight": 2},
|
||||
{"name": "NodeAffinityPriority", "weight": 2},
|
||||
{"name": "TaintTolerationPriority", "weight": 2},
|
||||
{"name": "InterPodAffinityPriority", "weight": 2},
|
||||
{"name": "MostRequestedPriority", "weight": 2},
|
||||
{
|
||||
"name": "RequestedToCapacityRatioPriority",
|
||||
"weight": 2,
|
||||
"argument": {
|
||||
"requestedToCapacityRatioArguments": {
|
||||
"shape": [
|
||||
{"utilization": 0, "score": 0},
|
||||
{"utilization": 50, "score": 7}
|
||||
]
|
||||
}
|
||||
}}
|
||||
],"extenders": [{
|
||||
"urlPrefix": "/prefix",
|
||||
"filterVerb": "filter",
|
||||
"prioritizeVerb": "prioritize",
|
||||
"weight": 1,
|
||||
"bindVerb": "bind",
|
||||
"enableHttps": true,
|
||||
"tlsConfig": {"Insecure":true},
|
||||
"httpTimeout": 1,
|
||||
"nodeCacheCapable": true,
|
||||
"managedResources": [{"name":"example.com/foo","ignoredByScheduler":true}],
|
||||
"ignorable":true
|
||||
}]
|
||||
}`,
|
||||
ExpectedPolicy: schedulerapi.Policy{
|
||||
Predicates: []schedulerapi.PredicatePolicy{
|
||||
{Name: "MatchNodeSelector"},
|
||||
{Name: "PodFitsResources"},
|
||||
{Name: "PodFitsHostPorts"},
|
||||
{Name: "HostName"},
|
||||
{Name: "NoDiskConflict"},
|
||||
{Name: "NoVolumeZoneConflict"},
|
||||
{Name: "PodToleratesNodeTaints"},
|
||||
{Name: "CheckNodeMemoryPressure"},
|
||||
{Name: "CheckNodeDiskPressure"},
|
||||
{Name: "CheckNodePIDPressure"},
|
||||
{Name: "CheckNodeCondition"},
|
||||
{Name: "MaxEBSVolumeCount"},
|
||||
{Name: "MaxGCEPDVolumeCount"},
|
||||
{Name: "MaxAzureDiskVolumeCount"},
|
||||
{Name: "MaxCSIVolumeCountPred"},
|
||||
{Name: "MatchInterPodAffinity"},
|
||||
{Name: "GeneralPredicates"},
|
||||
{Name: "CheckVolumeBinding"},
|
||||
{Name: "TestServiceAffinity", Argument: &schedulerapi.PredicateArgument{ServiceAffinity: &schedulerapi.ServiceAffinity{Labels: []string{"region"}}}},
|
||||
{Name: "TestLabelsPresence", Argument: &schedulerapi.PredicateArgument{LabelsPresence: &schedulerapi.LabelsPresence{Labels: []string{"foo"}, Presence: true}}},
|
||||
},
|
||||
Priorities: []schedulerapi.PriorityPolicy{
|
||||
{Name: "EqualPriority", Weight: 2},
|
||||
{Name: "ImageLocalityPriority", Weight: 2},
|
||||
{Name: "LeastRequestedPriority", Weight: 2},
|
||||
{Name: "BalancedResourceAllocation", Weight: 2},
|
||||
{Name: "SelectorSpreadPriority", Weight: 2},
|
||||
{Name: "NodePreferAvoidPodsPriority", Weight: 2},
|
||||
{Name: "NodeAffinityPriority", Weight: 2},
|
||||
{Name: "TaintTolerationPriority", Weight: 2},
|
||||
{Name: "InterPodAffinityPriority", Weight: 2},
|
||||
{Name: "MostRequestedPriority", Weight: 2},
|
||||
{
|
||||
Name: "RequestedToCapacityRatioPriority",
|
||||
Weight: 2,
|
||||
Argument: &schedulerapi.PriorityArgument{
|
||||
RequestedToCapacityRatioArguments: &schedulerapi.RequestedToCapacityRatioArguments{
|
||||
UtilizationShape: []schedulerapi.UtilizationShapePoint{
|
||||
{Utilization: 0, Score: 0},
|
||||
{Utilization: 50, Score: 7},
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
ExtenderConfigs: []schedulerapi.ExtenderConfig{{
|
||||
URLPrefix: "/prefix",
|
||||
FilterVerb: "filter",
|
||||
PrioritizeVerb: "prioritize",
|
||||
Weight: 1,
|
||||
BindVerb: "bind", // 1.11 restored case-sensitivity, but allowed either "BindVerb" or "bindVerb"
|
||||
EnableHTTPS: true,
|
||||
TLSConfig: &restclient.TLSClientConfig{Insecure: true},
|
||||
HTTPTimeout: 1,
|
||||
NodeCacheCapable: true,
|
||||
ManagedResources: []schedulerapi.ExtenderManagedResource{{Name: v1.ResourceName("example.com/foo"), IgnoredByScheduler: true}},
|
||||
Ignorable: true,
|
||||
}},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
registeredPredicates := sets.NewString(factory.ListRegisteredFitPredicates()...)
|
||||
@ -849,23 +973,24 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
|
||||
informerFactory := informers.NewSharedInformerFactory(client, 0)
|
||||
|
||||
if _, err := factory.NewConfigFactory(
|
||||
"some-scheduler-name",
|
||||
client,
|
||||
informerFactory.Core().V1().Nodes(),
|
||||
informerFactory.Core().V1().Pods(),
|
||||
informerFactory.Core().V1().PersistentVolumes(),
|
||||
informerFactory.Core().V1().PersistentVolumeClaims(),
|
||||
informerFactory.Core().V1().ReplicationControllers(),
|
||||
informerFactory.Extensions().V1beta1().ReplicaSets(),
|
||||
informerFactory.Apps().V1beta1().StatefulSets(),
|
||||
informerFactory.Core().V1().Services(),
|
||||
informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
|
||||
informerFactory.Storage().V1().StorageClasses(),
|
||||
v1.DefaultHardPodAffinitySymmetricWeight,
|
||||
enableEquivalenceCache,
|
||||
false,
|
||||
).CreateFromConfig(policy); err != nil {
|
||||
if _, err := factory.NewConfigFactory(&factory.ConfigFactoryArgs{
|
||||
SchedulerName: "some-scheduler-name",
|
||||
Client: client,
|
||||
NodeInformer: informerFactory.Core().V1().Nodes(),
|
||||
PodInformer: informerFactory.Core().V1().Pods(),
|
||||
PvInformer: informerFactory.Core().V1().PersistentVolumes(),
|
||||
PvcInformer: informerFactory.Core().V1().PersistentVolumeClaims(),
|
||||
ReplicationControllerInformer: informerFactory.Core().V1().ReplicationControllers(),
|
||||
ReplicaSetInformer: informerFactory.Apps().V1().ReplicaSets(),
|
||||
StatefulSetInformer: informerFactory.Apps().V1().StatefulSets(),
|
||||
ServiceInformer: informerFactory.Core().V1().Services(),
|
||||
PdbInformer: informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
|
||||
StorageClassInformer: informerFactory.Storage().V1().StorageClasses(),
|
||||
HardPodAffinitySymmetricWeight: v1.DefaultHardPodAffinitySymmetricWeight,
|
||||
EnableEquivalenceClassCache: enableEquivalenceCache,
|
||||
DisablePreemption: false,
|
||||
PercentageOfNodesToScore: schedulerapi.DefaultPercentageOfNodesToScore,
|
||||
}).CreateFromConfig(policy); err != nil {
|
||||
t.Errorf("%s: Error constructing: %v", v, err)
|
||||
continue
|
||||
}
|
||||
|
24
vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/defaults/defaults.go
generated
vendored
@ -17,7 +17,7 @@ limitations under the License.
package defaults

import (
"github.com/golang/glog"
"k8s.io/klog"

"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
@ -94,10 +94,6 @@ func init() {
// Register the priority function so that its available
// but do not include it as part of the default priorities
factory.RegisterPriorityFunction2("EqualPriority", core.EqualPriorityMap, nil, 1)
// ImageLocalityPriority prioritizes nodes based on locality of images requested by a pod. Nodes with larger size
// of already-installed packages required by the pod will be preferred over nodes with no already-installed
// packages required by the pod or a small total size of already-installed packages required by the pod.
factory.RegisterPriorityFunction2("ImageLocalityPriority", priorities.ImageLocalityPriorityMap, nil, 1)
// Optional, cluster-autoscaler friendly priority function - give used nodes higher priority.
factory.RegisterPriorityFunction2("MostRequestedPriority", priorities.MostRequestedPriorityMap, nil, 1)
factory.RegisterPriorityFunction2(
@ -137,6 +133,12 @@ func defaultPredicates() sets.String {
return predicates.NewMaxPDVolumeCountPredicate(predicates.AzureDiskVolumeFilterType, args.PVInfo, args.PVCInfo)
},
),
factory.RegisterFitPredicateFactory(
predicates.MaxCSIVolumeCountPred,
func(args factory.PluginFactoryArgs) algorithm.FitPredicate {
return predicates.NewCSIMaxVolumeLimitPredicate(args.PVInfo, args.PVCInfo)
},
),
// Fit is determined by inter-pod affinity.
factory.RegisterFitPredicateFactory(
predicates.MatchInterPodAffinityPred,
@ -197,19 +199,24 @@ func ApplyFeatureGates() {

// Fit is determined based on whether a pod can tolerate all of the node's taints
factory.RegisterMandatoryFitPredicate(predicates.PodToleratesNodeTaintsPred, predicates.PodToleratesNodeTaints)
// Fit is determined based on whether a pod can tolerate unschedulable of node
factory.RegisterMandatoryFitPredicate(predicates.CheckNodeUnschedulablePred, predicates.CheckNodeUnschedulablePredicate)
// Insert Key "PodToleratesNodeTaints" and "CheckNodeUnschedulable" To All Algorithm Provider
// The key will insert to all providers which in algorithmProviderMap[]
// if you just want insert to specific provider, call func InsertPredicateKeyToAlgoProvider()
factory.InsertPredicateKeyToAlgorithmProviderMap(predicates.PodToleratesNodeTaintsPred)
factory.InsertPredicateKeyToAlgorithmProviderMap(predicates.CheckNodeUnschedulablePred)

glog.Warningf("TaintNodesByCondition is enabled, PodToleratesNodeTaints predicate is mandatory")
klog.Infof("TaintNodesByCondition is enabled, PodToleratesNodeTaints predicate is mandatory")
}

// Prioritizes nodes that satisfy pod's resource limits
if utilfeature.DefaultFeatureGate.Enabled(features.ResourceLimitsPriorityFunction) {
klog.Infof("Registering resourcelimits priority function")
factory.RegisterPriorityFunction2("ResourceLimitsPriority", priorities.ResourceLimitsPriorityMap, nil, 1)
// Register the priority function to specific provider too.
factory.InsertPriorityKeyToAlgorithmProviderMap(factory.RegisterPriorityFunction2("ResourceLimitsPriority", priorities.ResourceLimitsPriorityMap, nil, 1))
}

}

func registerAlgorithmProvider(predSet, priSet sets.String) {
@ -260,6 +267,9 @@ func defaultPriorities() sets.String {

// Prioritizes nodes that marked with taint which pod can tolerate.
factory.RegisterPriorityFunction2("TaintTolerationPriority", priorities.ComputeTaintTolerationPriorityMap, priorities.ComputeTaintTolerationPriorityReduce, 1),

// ImageLocalityPriority prioritizes nodes that have images requested by the pod present.
factory.RegisterPriorityFunction2("ImageLocalityPriority", priorities.ImageLocalityPriorityMap, nil, 1),
)
}

28
vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/defaults/defaults_test.go
generated
vendored
@ -59,7 +59,8 @@ func TestDefaultPriorities(t *testing.T) {
"BalancedResourceAllocation",
"NodePreferAvoidPodsPriority",
"NodeAffinityPriority",
"TaintTolerationPriority")
"TaintTolerationPriority",
"ImageLocalityPriority")
if expected := defaultPriorities(); !result.Equal(expected) {
t.Errorf("expected %v got %v", expected, result)
}
@ -67,18 +68,19 @@ func TestDefaultPriorities(t *testing.T) {

func TestDefaultPredicates(t *testing.T) {
result := sets.NewString(
"NoVolumeZoneConflict",
"MaxEBSVolumeCount",
"MaxGCEPDVolumeCount",
"MaxAzureDiskVolumeCount",
"MatchInterPodAffinity",
"NoDiskConflict",
"GeneralPredicates",
"CheckNodeMemoryPressure",
"CheckNodeDiskPressure",
"CheckNodePIDPressure",
"CheckNodeCondition",
"PodToleratesNodeTaints",
predicates.NoVolumeZoneConflictPred,
predicates.MaxEBSVolumeCountPred,
predicates.MaxGCEPDVolumeCountPred,
predicates.MaxAzureDiskVolumeCountPred,
predicates.MaxCSIVolumeCountPred,
predicates.MatchInterPodAffinityPred,
predicates.NoDiskConflictPred,
predicates.GeneralPred,
predicates.CheckNodeMemoryPressurePred,
predicates.CheckNodeDiskPressurePred,
predicates.CheckNodePIDPressurePred,
predicates.CheckNodeConditionPred,
predicates.PodToleratesNodeTaintsPred,
predicates.CheckVolumeBindingPred,
)

29
vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/main_test.go
generated
vendored
Normal file
@ -0,0 +1,29 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package algorithmprovider

import (
"testing"

utilfeature "k8s.io/apiserver/pkg/util/feature"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
_ "k8s.io/kubernetes/pkg/features"
)

func TestMain(m *testing.M) {
utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run)
}
92
vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/plugins_test.go
generated
vendored
@ -17,9 +17,12 @@ limitations under the License.
|
||||
package algorithmprovider
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/kubernetes/pkg/scheduler/factory"
|
||||
)
|
||||
|
||||
@ -44,66 +47,69 @@ func TestDefaultConfigExists(t *testing.T) {
|
||||
|
||||
func TestAlgorithmProviders(t *testing.T) {
|
||||
for _, pn := range algorithmProviderNames {
|
||||
p, err := factory.GetAlgorithmProvider(pn)
|
||||
if err != nil {
|
||||
t.Errorf("error retrieving '%s' provider: %v", pn, err)
|
||||
break
|
||||
}
|
||||
if len(p.PriorityFunctionKeys) == 0 {
|
||||
t.Errorf("%s algorithm provider shouldn't have 0 priority functions", pn)
|
||||
}
|
||||
for _, pf := range p.PriorityFunctionKeys.List() {
|
||||
if !factory.IsPriorityFunctionRegistered(pf) {
|
||||
t.Errorf("priority function %s is not registered but is used in the %s algorithm provider", pf, pn)
|
||||
t.Run(pn, func(t *testing.T) {
|
||||
p, err := factory.GetAlgorithmProvider(pn)
|
||||
if err != nil {
|
||||
t.Fatalf("error retrieving provider: %v", err)
|
||||
}
|
||||
}
|
||||
for _, fp := range p.FitPredicateKeys.List() {
|
||||
if !factory.IsFitPredicateRegistered(fp) {
|
||||
t.Errorf("fit predicate %s is not registered but is used in the %s algorithm provider", fp, pn)
|
||||
if len(p.PriorityFunctionKeys) == 0 {
|
||||
t.Errorf("algorithm provider shouldn't have 0 priority functions")
|
||||
}
|
||||
}
|
||||
for _, pf := range p.PriorityFunctionKeys.List() {
|
||||
t.Run(fmt.Sprintf("priorityfunction/%s", pf), func(t *testing.T) {
|
||||
if !factory.IsPriorityFunctionRegistered(pf) {
|
||||
t.Errorf("priority function is not registered but is used in the algorithm provider")
|
||||
}
|
||||
})
|
||||
}
|
||||
for _, fp := range p.FitPredicateKeys.List() {
|
||||
t.Run(fmt.Sprintf("fitpredicate/%s", fp), func(t *testing.T) {
|
||||
if !factory.IsFitPredicateRegistered(fp) {
|
||||
t.Errorf("fit predicate is not registered but is used in the algorithm provider")
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestApplyFeatureGates(t *testing.T) {
|
||||
for _, pn := range algorithmProviderNames {
|
||||
p, err := factory.GetAlgorithmProvider(pn)
|
||||
if err != nil {
|
||||
t.Errorf("Error retrieving '%s' provider: %v", pn, err)
|
||||
break
|
||||
}
|
||||
t.Run(pn, func(t *testing.T) {
|
||||
p, err := factory.GetAlgorithmProvider(pn)
|
||||
if err != nil {
|
||||
t.Fatalf("Error retrieving provider: %v", err)
|
||||
}
|
||||
|
||||
if !p.FitPredicateKeys.Has("CheckNodeCondition") {
|
||||
t.Errorf("Failed to find predicate: 'CheckNodeCondition'")
|
||||
break
|
||||
}
|
||||
if !p.FitPredicateKeys.Has("CheckNodeCondition") {
|
||||
t.Fatalf("Failed to find predicate: 'CheckNodeCondition'")
|
||||
}
|
||||
|
||||
if !p.FitPredicateKeys.Has("PodToleratesNodeTaints") {
|
||||
t.Errorf("Failed to find predicate: 'PodToleratesNodeTaints'")
|
||||
break
|
||||
}
|
||||
if !p.FitPredicateKeys.Has("PodToleratesNodeTaints") {
|
||||
t.Fatalf("Failed to find predicate: 'PodToleratesNodeTaints'")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Apply features for algorithm providers.
|
||||
utilfeature.DefaultFeatureGate.Set("TaintNodesByCondition=True")
|
||||
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.TaintNodesByCondition, true)()
|
||||
|
||||
ApplyFeatureGates()
|
||||
|
||||
for _, pn := range algorithmProviderNames {
|
||||
p, err := factory.GetAlgorithmProvider(pn)
|
||||
if err != nil {
|
||||
t.Errorf("Error retrieving '%s' provider: %v", pn, err)
|
||||
break
|
||||
}
|
||||
t.Run(pn, func(t *testing.T) {
|
||||
p, err := factory.GetAlgorithmProvider(pn)
|
||||
if err != nil {
|
||||
t.Fatalf("Error retrieving '%s' provider: %v", pn, err)
|
||||
}
|
||||
|
||||
if !p.FitPredicateKeys.Has("PodToleratesNodeTaints") {
|
||||
t.Errorf("Failed to find predicate: 'PodToleratesNodeTaints'")
|
||||
break
|
||||
}
|
||||
if !p.FitPredicateKeys.Has("PodToleratesNodeTaints") {
|
||||
t.Fatalf("Failed to find predicate: 'PodToleratesNodeTaints'")
|
||||
}
|
||||
|
||||
if p.FitPredicateKeys.Has("CheckNodeCondition") {
|
||||
t.Errorf("Unexpected predicate: 'CheckNodeCondition'")
|
||||
break
|
||||
}
|
||||
if p.FitPredicateKeys.Has("CheckNodeCondition") {
|
||||
t.Fatalf("Unexpected predicate: 'CheckNodeCondition'")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
14
vendor/k8s.io/kubernetes/pkg/scheduler/api/BUILD
generated
vendored
@ -11,16 +11,18 @@ go_library(
"doc.go",
"register.go",
"types.go",
"well_known_labels.go",
"zz_generated.deepcopy.go",
],
importpath = "k8s.io/kubernetes/pkg/scheduler/api",
deps = [
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//pkg/apis/core:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
],
)

8
vendor/k8s.io/kubernetes/pkg/scheduler/api/latest/BUILD
generated
vendored
@ -12,10 +12,10 @@ go_library(
deps = [
"//pkg/scheduler/api:go_default_library",
"//pkg/scheduler/api/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer/json:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/serializer/json:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/serializer/versioning:go_default_library",
],
)

3
vendor/k8s.io/kubernetes/pkg/scheduler/api/types.go
generated
vendored
@ -36,6 +36,9 @@ const (
MaxPriority = 10
// MaxWeight defines the max weight value.
MaxWeight = MaxInt / MaxPriority
// DefaultPercentageOfNodesToScore defines the percentage of nodes of all nodes
// that once found feasible, the scheduler stops looking for more nodes.
DefaultPercentageOfNodesToScore = 50
)

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

12
vendor/k8s.io/kubernetes/pkg/scheduler/api/v1/BUILD
generated
vendored
@ -16,12 +16,12 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/scheduler/api/v1",
deps = [
"//pkg/scheduler/api:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
],
)

167
vendor/k8s.io/kubernetes/pkg/scheduler/api/v1/zz_generated.deepcopy.go
generated
vendored
@ -21,7 +21,7 @@ limitations under the License.
|
||||
package v1
|
||||
|
||||
import (
|
||||
core_v1 "k8s.io/api/core/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
rest "k8s.io/client-go/rest"
|
||||
)
|
||||
@ -31,33 +31,21 @@ func (in *ExtenderArgs) DeepCopyInto(out *ExtenderArgs) {
|
||||
*out = *in
|
||||
if in.Pod != nil {
|
||||
in, out := &in.Pod, &out.Pod
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(core_v1.Pod)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
*out = new(corev1.Pod)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.Nodes != nil {
|
||||
in, out := &in.Nodes, &out.Nodes
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(core_v1.NodeList)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
*out = new(corev1.NodeList)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.NodeNames != nil {
|
||||
in, out := &in.NodeNames, &out.NodeNames
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new([]string)
|
||||
if **in != nil {
|
||||
in, out := *in, *out
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
*out = new([]string)
|
||||
if **in != nil {
|
||||
in, out := *in, *out
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
}
|
||||
return
|
||||
@ -110,12 +98,8 @@ func (in *ExtenderConfig) DeepCopyInto(out *ExtenderConfig) {
|
||||
*out = *in
|
||||
if in.TLSConfig != nil {
|
||||
in, out := &in.TLSConfig, &out.TLSConfig
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(rest.TLSClientConfig)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
*out = new(rest.TLSClientConfig)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.ManagedResources != nil {
|
||||
in, out := &in.ManagedResources, &out.ManagedResources
|
||||
@ -140,24 +124,16 @@ func (in *ExtenderFilterResult) DeepCopyInto(out *ExtenderFilterResult) {
|
||||
*out = *in
|
||||
if in.Nodes != nil {
|
||||
in, out := &in.Nodes, &out.Nodes
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(core_v1.NodeList)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
*out = new(corev1.NodeList)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.NodeNames != nil {
|
||||
in, out := &in.NodeNames, &out.NodeNames
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new([]string)
|
||||
if **in != nil {
|
||||
in, out := *in, *out
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
*out = new([]string)
|
||||
if **in != nil {
|
||||
in, out := *in, *out
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
}
|
||||
if in.FailedNodes != nil {
|
||||
@ -201,35 +177,37 @@ func (in *ExtenderPreemptionArgs) DeepCopyInto(out *ExtenderPreemptionArgs) {
|
||||
*out = *in
|
||||
if in.Pod != nil {
|
||||
in, out := &in.Pod, &out.Pod
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(core_v1.Pod)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
*out = new(corev1.Pod)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.NodeNameToVictims != nil {
|
||||
in, out := &in.NodeNameToVictims, &out.NodeNameToVictims
|
||||
*out = make(map[string]*Victims, len(*in))
|
||||
for key, val := range *in {
|
||||
var outVal *Victims
|
||||
if val == nil {
|
||||
(*out)[key] = nil
|
||||
} else {
|
||||
(*out)[key] = new(Victims)
|
||||
val.DeepCopyInto((*out)[key])
|
||||
in, out := &val, &outVal
|
||||
*out = new(Victims)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
(*out)[key] = outVal
|
||||
}
|
||||
}
|
||||
if in.NodeNameToMetaVictims != nil {
|
||||
in, out := &in.NodeNameToMetaVictims, &out.NodeNameToMetaVictims
|
||||
*out = make(map[string]*MetaVictims, len(*in))
|
||||
for key, val := range *in {
|
||||
var outVal *MetaVictims
|
||||
if val == nil {
|
||||
(*out)[key] = nil
|
||||
} else {
|
||||
(*out)[key] = new(MetaVictims)
|
||||
val.DeepCopyInto((*out)[key])
|
||||
in, out := &val, &outVal
|
||||
*out = new(MetaVictims)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
(*out)[key] = outVal
|
||||
}
|
||||
}
|
||||
return
|
||||
@ -252,12 +230,15 @@ func (in *ExtenderPreemptionResult) DeepCopyInto(out *ExtenderPreemptionResult)
|
||||
in, out := &in.NodeNameToMetaVictims, &out.NodeNameToMetaVictims
|
||||
*out = make(map[string]*MetaVictims, len(*in))
|
||||
for key, val := range *in {
|
||||
var outVal *MetaVictims
|
||||
if val == nil {
|
||||
(*out)[key] = nil
|
||||
} else {
|
||||
(*out)[key] = new(MetaVictims)
|
||||
val.DeepCopyInto((*out)[key])
|
||||
in, out := &val, &outVal
|
||||
*out = new(MetaVictims)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
(*out)[key] = outVal
|
||||
}
|
||||
}
|
||||
return
|
||||
@ -391,11 +372,10 @@ func (in *MetaVictims) DeepCopyInto(out *MetaVictims) {
|
||||
in, out := &in.Pods, &out.Pods
|
||||
*out = make([]*MetaPod, len(*in))
|
||||
for i := range *in {
|
||||
if (*in)[i] == nil {
|
||||
(*out)[i] = nil
|
||||
} else {
|
||||
(*out)[i] = new(MetaPod)
|
||||
(*in)[i].DeepCopyInto((*out)[i])
|
||||
if (*in)[i] != nil {
|
||||
in, out := &(*in)[i], &(*out)[i]
|
||||
*out = new(MetaPod)
|
||||
**out = **in
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -463,21 +443,13 @@ func (in *PredicateArgument) DeepCopyInto(out *PredicateArgument) {
|
||||
*out = *in
|
||||
if in.ServiceAffinity != nil {
|
||||
in, out := &in.ServiceAffinity, &out.ServiceAffinity
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(ServiceAffinity)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
*out = new(ServiceAffinity)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.LabelsPresence != nil {
|
||||
in, out := &in.LabelsPresence, &out.LabelsPresence
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(LabelsPresence)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
*out = new(LabelsPresence)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
@ -497,12 +469,8 @@ func (in *PredicatePolicy) DeepCopyInto(out *PredicatePolicy) {
|
||||
*out = *in
|
||||
if in.Argument != nil {
|
||||
in, out := &in.Argument, &out.Argument
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(PredicateArgument)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
*out = new(PredicateArgument)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
@ -522,30 +490,18 @@ func (in *PriorityArgument) DeepCopyInto(out *PriorityArgument) {
|
||||
*out = *in
|
||||
if in.ServiceAntiAffinity != nil {
|
||||
in, out := &in.ServiceAntiAffinity, &out.ServiceAntiAffinity
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(ServiceAntiAffinity)
|
||||
**out = **in
|
||||
}
|
||||
*out = new(ServiceAntiAffinity)
|
||||
**out = **in
|
||||
}
|
||||
if in.LabelPreference != nil {
|
||||
in, out := &in.LabelPreference, &out.LabelPreference
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(LabelPreference)
|
||||
**out = **in
|
||||
}
|
||||
*out = new(LabelPreference)
|
||||
**out = **in
|
||||
}
|
||||
if in.RequestedToCapacityRatioArguments != nil {
|
||||
in, out := &in.RequestedToCapacityRatioArguments, &out.RequestedToCapacityRatioArguments
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(RequestedToCapacityRatioArguments)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
*out = new(RequestedToCapacityRatioArguments)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
@ -565,12 +521,8 @@ func (in *PriorityPolicy) DeepCopyInto(out *PriorityPolicy) {
|
||||
*out = *in
|
||||
if in.Argument != nil {
|
||||
in, out := &in.Argument, &out.Argument
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(PriorityArgument)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
*out = new(PriorityArgument)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
@ -664,13 +616,12 @@ func (in *Victims) DeepCopyInto(out *Victims) {
|
||||
*out = *in
|
||||
if in.Pods != nil {
|
||||
in, out := &in.Pods, &out.Pods
|
||||
*out = make([]*core_v1.Pod, len(*in))
|
||||
*out = make([]*corev1.Pod, len(*in))
|
||||
for i := range *in {
|
||||
if (*in)[i] == nil {
|
||||
(*out)[i] = nil
|
||||
} else {
|
||||
(*out)[i] = new(core_v1.Pod)
|
||||
(*in)[i].DeepCopyInto((*out)[i])
|
||||
if (*in)[i] != nil {
|
||||
in, out := &(*in)[i], &(*out)[i]
|
||||
*out = new(corev1.Pod)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
8
vendor/k8s.io/kubernetes/pkg/scheduler/api/validation/BUILD
generated
vendored
@ -13,10 +13,10 @@ go_library(
deps = [
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/scheduler/api:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/validation:go_default_library",
],
)

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package algorithm
package api

import (
api "k8s.io/kubernetes/pkg/apis/core"
163
vendor/k8s.io/kubernetes/pkg/scheduler/api/zz_generated.deepcopy.go
generated
vendored
@ -31,33 +31,21 @@ func (in *ExtenderArgs) DeepCopyInto(out *ExtenderArgs) {
|
||||
*out = *in
|
||||
if in.Pod != nil {
|
||||
in, out := &in.Pod, &out.Pod
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(v1.Pod)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
*out = new(v1.Pod)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.Nodes != nil {
|
||||
in, out := &in.Nodes, &out.Nodes
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(v1.NodeList)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
*out = new(v1.NodeList)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.NodeNames != nil {
|
||||
in, out := &in.NodeNames, &out.NodeNames
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new([]string)
|
||||
if **in != nil {
|
||||
in, out := *in, *out
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
*out = new([]string)
|
||||
if **in != nil {
|
||||
in, out := *in, *out
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
}
|
||||
return
|
||||
@ -110,12 +98,8 @@ func (in *ExtenderConfig) DeepCopyInto(out *ExtenderConfig) {
|
||||
*out = *in
|
||||
if in.TLSConfig != nil {
|
||||
in, out := &in.TLSConfig, &out.TLSConfig
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(rest.TLSClientConfig)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
*out = new(rest.TLSClientConfig)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.ManagedResources != nil {
|
||||
in, out := &in.ManagedResources, &out.ManagedResources
|
||||
@ -140,24 +124,16 @@ func (in *ExtenderFilterResult) DeepCopyInto(out *ExtenderFilterResult) {
|
||||
*out = *in
|
||||
if in.Nodes != nil {
|
||||
in, out := &in.Nodes, &out.Nodes
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(v1.NodeList)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
*out = new(v1.NodeList)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.NodeNames != nil {
|
||||
in, out := &in.NodeNames, &out.NodeNames
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new([]string)
|
||||
if **in != nil {
|
||||
in, out := *in, *out
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
*out = new([]string)
|
||||
if **in != nil {
|
||||
in, out := *in, *out
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
}
|
||||
if in.FailedNodes != nil {
|
||||
@ -201,35 +177,37 @@ func (in *ExtenderPreemptionArgs) DeepCopyInto(out *ExtenderPreemptionArgs) {
|
||||
*out = *in
|
||||
if in.Pod != nil {
|
||||
in, out := &in.Pod, &out.Pod
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(v1.Pod)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
*out = new(v1.Pod)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.NodeNameToVictims != nil {
|
||||
in, out := &in.NodeNameToVictims, &out.NodeNameToVictims
|
||||
*out = make(map[string]*Victims, len(*in))
|
||||
for key, val := range *in {
|
||||
var outVal *Victims
|
||||
if val == nil {
|
||||
(*out)[key] = nil
|
||||
} else {
|
||||
(*out)[key] = new(Victims)
|
||||
val.DeepCopyInto((*out)[key])
|
||||
in, out := &val, &outVal
|
||||
*out = new(Victims)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
(*out)[key] = outVal
|
||||
}
|
||||
}
|
||||
if in.NodeNameToMetaVictims != nil {
|
||||
in, out := &in.NodeNameToMetaVictims, &out.NodeNameToMetaVictims
|
||||
*out = make(map[string]*MetaVictims, len(*in))
|
||||
for key, val := range *in {
|
||||
var outVal *MetaVictims
|
||||
if val == nil {
|
||||
(*out)[key] = nil
|
||||
} else {
|
||||
(*out)[key] = new(MetaVictims)
|
||||
val.DeepCopyInto((*out)[key])
|
||||
in, out := &val, &outVal
|
||||
*out = new(MetaVictims)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
(*out)[key] = outVal
|
||||
}
|
||||
}
|
||||
return
|
||||
@ -252,12 +230,15 @@ func (in *ExtenderPreemptionResult) DeepCopyInto(out *ExtenderPreemptionResult)
|
||||
in, out := &in.NodeNameToMetaVictims, &out.NodeNameToMetaVictims
|
||||
*out = make(map[string]*MetaVictims, len(*in))
|
||||
for key, val := range *in {
|
||||
var outVal *MetaVictims
|
||||
if val == nil {
|
||||
(*out)[key] = nil
|
||||
} else {
|
||||
(*out)[key] = new(MetaVictims)
|
||||
val.DeepCopyInto((*out)[key])
|
||||
in, out := &val, &outVal
|
||||
*out = new(MetaVictims)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
(*out)[key] = outVal
|
||||
}
|
||||
}
|
||||
return
|
||||
@ -391,11 +372,10 @@ func (in *MetaVictims) DeepCopyInto(out *MetaVictims) {
|
||||
in, out := &in.Pods, &out.Pods
|
||||
*out = make([]*MetaPod, len(*in))
|
||||
for i := range *in {
|
||||
if (*in)[i] == nil {
|
||||
(*out)[i] = nil
|
||||
} else {
|
||||
(*out)[i] = new(MetaPod)
|
||||
(*in)[i].DeepCopyInto((*out)[i])
|
||||
if (*in)[i] != nil {
|
||||
in, out := &(*in)[i], &(*out)[i]
|
||||
*out = new(MetaPod)
|
||||
**out = **in
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -463,21 +443,13 @@ func (in *PredicateArgument) DeepCopyInto(out *PredicateArgument) {
|
||||
*out = *in
|
||||
if in.ServiceAffinity != nil {
|
||||
in, out := &in.ServiceAffinity, &out.ServiceAffinity
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(ServiceAffinity)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
*out = new(ServiceAffinity)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.LabelsPresence != nil {
|
||||
in, out := &in.LabelsPresence, &out.LabelsPresence
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(LabelsPresence)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
*out = new(LabelsPresence)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
@ -497,12 +469,8 @@ func (in *PredicatePolicy) DeepCopyInto(out *PredicatePolicy) {
|
||||
*out = *in
|
||||
if in.Argument != nil {
|
||||
in, out := &in.Argument, &out.Argument
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(PredicateArgument)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
*out = new(PredicateArgument)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
@ -522,30 +490,18 @@ func (in *PriorityArgument) DeepCopyInto(out *PriorityArgument) {
|
||||
*out = *in
|
||||
if in.ServiceAntiAffinity != nil {
|
||||
in, out := &in.ServiceAntiAffinity, &out.ServiceAntiAffinity
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(ServiceAntiAffinity)
|
||||
**out = **in
|
||||
}
|
||||
*out = new(ServiceAntiAffinity)
|
||||
**out = **in
|
||||
}
|
||||
if in.LabelPreference != nil {
|
||||
in, out := &in.LabelPreference, &out.LabelPreference
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(LabelPreference)
|
||||
**out = **in
|
||||
}
|
||||
*out = new(LabelPreference)
|
||||
**out = **in
|
||||
}
|
||||
if in.RequestedToCapacityRatioArguments != nil {
|
||||
in, out := &in.RequestedToCapacityRatioArguments, &out.RequestedToCapacityRatioArguments
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(RequestedToCapacityRatioArguments)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
*out = new(RequestedToCapacityRatioArguments)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
@ -565,12 +521,8 @@ func (in *PriorityPolicy) DeepCopyInto(out *PriorityPolicy) {
|
||||
*out = *in
|
||||
if in.Argument != nil {
|
||||
in, out := &in.Argument, &out.Argument
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(PriorityArgument)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
*out = new(PriorityArgument)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
@ -666,11 +618,10 @@ func (in *Victims) DeepCopyInto(out *Victims) {
|
||||
in, out := &in.Pods, &out.Pods
|
||||
*out = make([]*v1.Pod, len(*in))
|
||||
for i := range *in {
|
||||
if (*in)[i] == nil {
|
||||
(*out)[i] = nil
|
||||
} else {
|
||||
(*out)[i] = new(v1.Pod)
|
||||
(*in)[i].DeepCopyInto((*out)[i])
|
||||
if (*in)[i] != nil {
|
||||
in, out := &(*in)[i], &(*out)[i]
|
||||
*out = new(v1.Pod)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
39
vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/BUILD
generated
vendored
Normal file
@ -0,0 +1,39 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
name = "go_default_library",
srcs = [
"doc.go",
"register.go",
"types.go",
"zz_generated.deepcopy.go",
],
importpath = "k8s.io/kubernetes/pkg/scheduler/apis/config",
visibility = ["//visibility:public"],
deps = [
"//staging/src/k8s.io/apimachinery/pkg/apis/config:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/apis/config:go_default_library",
],
)

filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)

filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/scheduler/apis/config/scheme:all-srcs",
"//pkg/scheduler/apis/config/v1alpha1:all-srcs",
"//pkg/scheduler/apis/config/validation:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
11
vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/OWNERS
generated
vendored
Executable file
@ -0,0 +1,11 @@
approvers:
- api-approvers
- sig-scheduling-maintainers
- sttts
- luxas
reviewers:
- sig-scheduling
- api-reviewers
- dixudx
- luxas
- sttts
20
vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/doc.go
generated
vendored
Normal file
@ -0,0 +1,20 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// +k8s:deepcopy-gen=package
|
||||
// +groupName=kubescheduler.config.k8s.io
|
||||
|
||||
package config // import "k8s.io/kubernetes/pkg/scheduler/apis/config"
|
43
vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/register.go
generated
vendored
Normal file
@ -0,0 +1,43 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package config
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
)
|
||||
|
||||
// GroupName is the group name used in this package
|
||||
const GroupName = "kubescheduler.config.k8s.io"
|
||||
|
||||
// SchemeGroupVersion is group version used to register these objects
|
||||
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
|
||||
|
||||
var (
|
||||
// SchemeBuilder is the scheme builder with scheme init functions to run for this API package
|
||||
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
|
||||
// AddToScheme is a global function that registers this API group & version to a scheme
|
||||
AddToScheme = SchemeBuilder.AddToScheme
|
||||
)
|
||||
|
||||
// addKnownTypes registers known types to the given scheme
|
||||
func addKnownTypes(scheme *runtime.Scheme) error {
|
||||
scheme.AddKnownTypes(SchemeGroupVersion,
|
||||
&KubeSchedulerConfiguration{},
|
||||
)
|
||||
return nil
|
||||
}
|
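The register.go above wires KubeSchedulerConfiguration into a runtime.Scheme through SchemeBuilder/AddToScheme. A minimal sketch of how a caller might consume that registration (the main package here is purely illustrative and assumes the vendored import paths resolve as shown):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
)

func main() {
	scheme := runtime.NewScheme()
	// AddToScheme registers the internal KubeSchedulerConfiguration type
	// under the kubescheduler.config.k8s.io group.
	if err := kubeschedulerconfig.AddToScheme(scheme); err != nil {
		panic(err)
	}
	gvk := kubeschedulerconfig.SchemeGroupVersion.WithKind("KubeSchedulerConfiguration")
	fmt.Println(scheme.Recognizes(gvk)) // expected: true
}
```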
29
vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/scheme/BUILD
generated
vendored
Normal file
@ -0,0 +1,29 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["scheme.go"],
|
||||
importpath = "k8s.io/kubernetes/pkg/scheduler/apis/config/scheme",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//pkg/scheduler/apis/config:go_default_library",
|
||||
"//pkg/scheduler/apis/config/v1alpha1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
44
vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/scheme/scheme.go
generated
vendored
Normal file
@ -0,0 +1,44 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package scheme
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/serializer"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
|
||||
kubeschedulerconfigv1alpha1 "k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1"
|
||||
)
|
||||
|
||||
var (
|
||||
// Scheme is the runtime.Scheme to which all kubescheduler api types are registered.
|
||||
Scheme = runtime.NewScheme()
|
||||
|
||||
// Codecs provides access to encoding and decoding for the scheme.
|
||||
Codecs = serializer.NewCodecFactory(Scheme)
|
||||
)
|
||||
|
||||
func init() {
|
||||
AddToScheme(Scheme)
|
||||
}
|
||||
|
||||
// AddToScheme builds the kubescheduler scheme using all known versions of the kubescheduler api.
|
||||
func AddToScheme(scheme *runtime.Scheme) {
|
||||
utilruntime.Must(kubeschedulerconfig.AddToScheme(Scheme))
|
||||
utilruntime.Must(kubeschedulerconfigv1alpha1.AddToScheme(Scheme))
|
||||
utilruntime.Must(scheme.SetVersionPriority(kubeschedulerconfigv1alpha1.SchemeGroupVersion))
|
||||
}
|
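scheme.go exposes a package-level Scheme and Codecs with both the internal and v1alpha1 versions registered. A minimal decoding sketch (the YAML literal is a hypothetical input, not taken from this repository):

```go
package main

import (
	"fmt"

	kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
	kubeschedulerscheme "k8s.io/kubernetes/pkg/scheduler/apis/config/scheme"
)

func main() {
	data := []byte(`apiVersion: kubescheduler.config.k8s.io/v1alpha1
kind: KubeSchedulerConfiguration
schedulerName: example-scheduler
`)
	// UniversalDecoder converts the versioned input into the internal type
	// registered above.
	obj, _, err := kubeschedulerscheme.Codecs.UniversalDecoder().Decode(data, nil, nil)
	if err != nil {
		panic(err)
	}
	cfg := obj.(*kubeschedulerconfig.KubeSchedulerConfiguration)
	fmt.Println(cfg.SchedulerName) // expected: example-scheduler
}
```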
137
vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/types.go
generated
vendored
Normal file
@ -0,0 +1,137 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package config
|
||||
|
||||
import (
|
||||
apimachineryconfig "k8s.io/apimachinery/pkg/apis/config"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
apiserverconfig "k8s.io/apiserver/pkg/apis/config"
|
||||
)
|
||||
|
||||
const (
|
||||
// SchedulerDefaultLockObjectNamespace defines default scheduler lock object namespace ("kube-system")
|
||||
SchedulerDefaultLockObjectNamespace string = metav1.NamespaceSystem
|
||||
|
||||
// SchedulerDefaultLockObjectName defines default scheduler lock object name ("kube-scheduler")
|
||||
SchedulerDefaultLockObjectName = "kube-scheduler"
|
||||
|
||||
// SchedulerPolicyConfigMapKey defines the key of the element in the
|
||||
// scheduler's policy ConfigMap that contains scheduler's policy config.
|
||||
SchedulerPolicyConfigMapKey = "policy.cfg"
|
||||
|
||||
// SchedulerDefaultProviderName defines the default provider name
|
||||
SchedulerDefaultProviderName = "DefaultProvider"
|
||||
)
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// KubeSchedulerConfiguration configures a scheduler
|
||||
type KubeSchedulerConfiguration struct {
|
||||
metav1.TypeMeta
|
||||
|
||||
// SchedulerName is the name of the scheduler, used to select which pods
|
||||
// will be processed by this scheduler, based on pod's "spec.SchedulerName".
|
||||
SchedulerName string
|
||||
// AlgorithmSource specifies the scheduler algorithm source.
|
||||
AlgorithmSource SchedulerAlgorithmSource
|
||||
// RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule
|
||||
// corresponding to every RequiredDuringScheduling affinity rule.
|
||||
// HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 0-100.
|
||||
HardPodAffinitySymmetricWeight int32
|
||||
|
||||
// LeaderElection defines the configuration of leader election client.
|
||||
LeaderElection KubeSchedulerLeaderElectionConfiguration
|
||||
|
||||
// ClientConnection specifies the kubeconfig file and client connection
|
||||
// settings for the proxy server to use when communicating with the apiserver.
|
||||
ClientConnection apimachineryconfig.ClientConnectionConfiguration
|
||||
// HealthzBindAddress is the IP address and port for the health check server to serve on,
|
||||
// defaulting to 0.0.0.0:10251
|
||||
HealthzBindAddress string
|
||||
// MetricsBindAddress is the IP address and port for the metrics server to
|
||||
// serve on, defaulting to 0.0.0.0:10251.
|
||||
MetricsBindAddress string
|
||||
|
||||
// DebuggingConfiguration holds configuration for Debugging related features
|
||||
// TODO: We might wanna make this a substruct like Debugging apiserverconfig.DebuggingConfiguration
|
||||
apiserverconfig.DebuggingConfiguration
|
||||
|
||||
// DisablePreemption disables the pod preemption feature.
|
||||
DisablePreemption bool
|
||||
|
||||
// PercentageOfNodesToScore is the percentage of all nodes that once found feasible
|
||||
// for running a pod, the scheduler stops its search for more feasible nodes in
|
||||
// the cluster. This helps improve scheduler's performance. Scheduler always tries to find
|
||||
// at least "minFeasibleNodesToFind" feasible nodes no matter what the value of this flag is.
|
||||
// Example: if the cluster size is 500 nodes and the value of this flag is 30,
|
||||
// then scheduler stops finding further feasible nodes once it finds 150 feasible ones.
|
||||
// When the value is 0, default percentage (50%) of the nodes will be scored.
|
||||
PercentageOfNodesToScore int32
|
||||
|
||||
// DEPRECATED.
|
||||
// Indicate the "all topologies" set for empty topologyKey when it's used for PreferredDuringScheduling pod anti-affinity.
|
||||
FailureDomains string
|
||||
|
||||
// Duration to wait for a binding operation to complete before timing out
|
||||
// Value must be non-negative integer. The value zero indicates no waiting.
|
||||
// If this value is nil, the default value will be used.
|
||||
BindTimeoutSeconds *int64
|
||||
}
|
||||
|
||||
// SchedulerAlgorithmSource is the source of a scheduler algorithm. One source
|
||||
// field must be specified, and source fields are mutually exclusive.
|
||||
type SchedulerAlgorithmSource struct {
|
||||
// Policy is a policy based algorithm source.
|
||||
Policy *SchedulerPolicySource
|
||||
// Provider is the name of a scheduling algorithm provider to use.
|
||||
Provider *string
|
||||
}
|
||||
|
||||
// SchedulerPolicySource configures a means to obtain a scheduler Policy. One
|
||||
// source field must be specified, and source fields are mutually exclusive.
|
||||
type SchedulerPolicySource struct {
|
||||
// File is a file policy source.
|
||||
File *SchedulerPolicyFileSource
|
||||
// ConfigMap is a config map policy source.
|
||||
ConfigMap *SchedulerPolicyConfigMapSource
|
||||
}
|
||||
|
||||
// SchedulerPolicyFileSource is a policy serialized to disk and accessed via
|
||||
// path.
|
||||
type SchedulerPolicyFileSource struct {
|
||||
// Path is the location of a serialized policy.
|
||||
Path string
|
||||
}
|
||||
|
||||
// SchedulerPolicyConfigMapSource is a policy serialized into a config map value
|
||||
// under the SchedulerPolicyConfigMapKey key.
|
||||
type SchedulerPolicyConfigMapSource struct {
|
||||
// Namespace is the namespace of the policy config map.
|
||||
Namespace string
|
||||
// Name is the name of the policy config map.
|
||||
Name string
|
||||
}
|
||||
|
||||
// KubeSchedulerLeaderElectionConfiguration expands LeaderElectionConfiguration
|
||||
// to include scheduler specific configuration.
|
||||
type KubeSchedulerLeaderElectionConfiguration struct {
|
||||
apiserverconfig.LeaderElectionConfiguration
|
||||
// LockObjectNamespace defines the namespace of the lock object
|
||||
LockObjectNamespace string
|
||||
// LockObjectName defines the lock object name
|
||||
LockObjectName string
|
||||
}
|
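types.go defines the internal configuration object used by the scheduler. A sketch of populating it directly in Go (the concrete values are illustrative defaults, not mandated by this file):

```go
package main

import (
	"fmt"

	kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
)

func main() {
	provider := kubeschedulerconfig.SchedulerDefaultProviderName
	bindTimeout := int64(600)

	cfg := kubeschedulerconfig.KubeSchedulerConfiguration{
		SchedulerName: "default-scheduler",
		AlgorithmSource: kubeschedulerconfig.SchedulerAlgorithmSource{
			// Provider and Policy are mutually exclusive; only one is set here.
			Provider: &provider,
		},
		HardPodAffinitySymmetricWeight: 1,
		HealthzBindAddress:             "0.0.0.0:10251",
		MetricsBindAddress:             "0.0.0.0:10251",
		PercentageOfNodesToScore:       50,
		BindTimeoutSeconds:             &bindTimeout,
	}
	fmt.Printf("%+v\n", cfg)
}
```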
53
vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1/BUILD
generated
vendored
Normal file
@ -0,0 +1,53 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"defaults.go",
|
||||
"doc.go",
|
||||
"register.go",
|
||||
"zz_generated.conversion.go",
|
||||
"zz_generated.deepcopy.go",
|
||||
"zz_generated.defaults.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//pkg/apis/core:go_default_library",
|
||||
"//pkg/kubelet/apis:go_default_library",
|
||||
"//pkg/master/ports:go_default_library",
|
||||
"//pkg/scheduler/apis/config:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/config/v1alpha1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/conversion:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||
"//staging/src/k8s.io/apiserver/pkg/apis/config/v1alpha1:go_default_library",
|
||||
"//staging/src/k8s.io/kube-scheduler/config/v1alpha1:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["defaults_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
"//staging/src/k8s.io/kube-scheduler/config/v1alpha1:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
111
vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1/defaults.go
generated
vendored
Normal file
@ -0,0 +1,111 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
"net"
|
||||
"strconv"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
apiserverconfigv1alpha1 "k8s.io/apiserver/pkg/apis/config/v1alpha1"
|
||||
kubescedulerconfigv1alpha1 "k8s.io/kube-scheduler/config/v1alpha1"
|
||||
|
||||
// this package shouldn't really depend on other k8s.io/kubernetes code
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
|
||||
"k8s.io/kubernetes/pkg/master/ports"
|
||||
)
|
||||
|
||||
func addDefaultingFuncs(scheme *runtime.Scheme) error {
|
||||
return RegisterDefaults(scheme)
|
||||
}
|
||||
|
||||
// SetDefaults_KubeSchedulerConfiguration sets additional defaults
|
||||
func SetDefaults_KubeSchedulerConfiguration(obj *kubescedulerconfigv1alpha1.KubeSchedulerConfiguration) {
|
||||
if len(obj.SchedulerName) == 0 {
|
||||
obj.SchedulerName = api.DefaultSchedulerName
|
||||
}
|
||||
|
||||
if obj.HardPodAffinitySymmetricWeight == 0 {
|
||||
obj.HardPodAffinitySymmetricWeight = api.DefaultHardPodAffinitySymmetricWeight
|
||||
}
|
||||
|
||||
if obj.AlgorithmSource.Policy == nil &&
|
||||
(obj.AlgorithmSource.Provider == nil || len(*obj.AlgorithmSource.Provider) == 0) {
|
||||
val := kubescedulerconfigv1alpha1.SchedulerDefaultProviderName
|
||||
obj.AlgorithmSource.Provider = &val
|
||||
}
|
||||
|
||||
if policy := obj.AlgorithmSource.Policy; policy != nil {
|
||||
if policy.ConfigMap != nil && len(policy.ConfigMap.Namespace) == 0 {
|
||||
obj.AlgorithmSource.Policy.ConfigMap.Namespace = api.NamespaceSystem
|
||||
}
|
||||
}
|
||||
|
||||
if host, port, err := net.SplitHostPort(obj.HealthzBindAddress); err == nil {
|
||||
if len(host) == 0 {
|
||||
host = "0.0.0.0"
|
||||
}
|
||||
obj.HealthzBindAddress = net.JoinHostPort(host, port)
|
||||
} else {
|
||||
obj.HealthzBindAddress = net.JoinHostPort("0.0.0.0", strconv.Itoa(ports.InsecureSchedulerPort))
|
||||
}
|
||||
|
||||
if host, port, err := net.SplitHostPort(obj.MetricsBindAddress); err == nil {
|
||||
if len(host) == 0 {
|
||||
host = "0.0.0.0"
|
||||
}
|
||||
obj.MetricsBindAddress = net.JoinHostPort(host, port)
|
||||
} else {
|
||||
obj.MetricsBindAddress = net.JoinHostPort("0.0.0.0", strconv.Itoa(ports.InsecureSchedulerPort))
|
||||
}
|
||||
|
||||
if len(obj.LeaderElection.LockObjectNamespace) == 0 {
|
||||
obj.LeaderElection.LockObjectNamespace = kubescedulerconfigv1alpha1.SchedulerDefaultLockObjectNamespace
|
||||
}
|
||||
if len(obj.LeaderElection.LockObjectName) == 0 {
|
||||
obj.LeaderElection.LockObjectName = kubescedulerconfigv1alpha1.SchedulerDefaultLockObjectName
|
||||
}
|
||||
|
||||
if obj.PercentageOfNodesToScore == 0 {
|
||||
// by default, stop finding feasible nodes once the number of feasible nodes is 50% of the cluster.
|
||||
obj.PercentageOfNodesToScore = 50
|
||||
}
|
||||
|
||||
if len(obj.FailureDomains) == 0 {
|
||||
obj.FailureDomains = kubeletapis.DefaultFailureDomains
|
||||
}
|
||||
|
||||
if len(obj.ClientConnection.ContentType) == 0 {
|
||||
obj.ClientConnection.ContentType = "application/vnd.kubernetes.protobuf"
|
||||
}
|
||||
// Scheduler has an opinion about QPS/Burst, setting specific defaults for itself, instead of generic settings.
|
||||
if obj.ClientConnection.QPS == 0.0 {
|
||||
obj.ClientConnection.QPS = 50.0
|
||||
}
|
||||
if obj.ClientConnection.Burst == 0 {
|
||||
obj.ClientConnection.Burst = 100
|
||||
}
|
||||
|
||||
// Use the default LeaderElectionConfiguration options
|
||||
apiserverconfigv1alpha1.RecommendedDefaultLeaderElectionConfiguration(&obj.LeaderElection.LeaderElectionConfiguration)
|
||||
|
||||
if obj.BindTimeoutSeconds == nil {
|
||||
defaultBindTimeoutSeconds := int64(600)
|
||||
obj.BindTimeoutSeconds = &defaultBindTimeoutSeconds
|
||||
}
|
||||
}
|
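SetDefaults_KubeSchedulerConfiguration above fills in the scheduler name, bind addresses, algorithm provider, lock object names, client connection settings, and bind timeout when they are unset. A minimal sketch of applying it to an empty versioned object (expected values follow from the code above):

```go
package main

import (
	"fmt"

	kubeschedulerconfigv1alpha1 "k8s.io/kube-scheduler/config/v1alpha1"
	schedulerconfigv1alpha1 "k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1"
)

func main() {
	obj := &kubeschedulerconfigv1alpha1.KubeSchedulerConfiguration{}
	schedulerconfigv1alpha1.SetDefaults_KubeSchedulerConfiguration(obj)

	fmt.Println(obj.SchedulerName)             // expected: default-scheduler
	fmt.Println(obj.HealthzBindAddress)        // expected: 0.0.0.0:10251
	fmt.Println(*obj.AlgorithmSource.Provider) // expected: DefaultProvider
	fmt.Println(*obj.BindTimeoutSeconds)       // expected: 600
}
```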
64
vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1/defaults_test.go
generated
vendored
Normal file
@ -0,0 +1,64 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
kubeschedulerconfigv1alpha1 "k8s.io/kube-scheduler/config/v1alpha1"
|
||||
)
|
||||
|
||||
func TestSchedulerDefaults(t *testing.T) {
|
||||
ks1 := &kubeschedulerconfigv1alpha1.KubeSchedulerConfiguration{}
|
||||
SetDefaults_KubeSchedulerConfiguration(ks1)
|
||||
cm, err := convertObjToConfigMap("KubeSchedulerConfiguration", ks1)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected ConvertObjToConfigMap error %v", err)
|
||||
}
|
||||
|
||||
ks2 := &kubeschedulerconfigv1alpha1.KubeSchedulerConfiguration{}
|
||||
if err = json.Unmarshal([]byte(cm.Data["KubeSchedulerConfiguration"]), ks2); err != nil {
|
||||
t.Errorf("unexpected error unserializing scheduler config %v", err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(ks2, ks1) {
|
||||
t.Errorf("Expected:\n%#v\n\nGot:\n%#v", ks1, ks2)
|
||||
}
|
||||
}
|
||||
|
||||
// convertObjToConfigMap converts an object to a ConfigMap.
|
||||
// This is specifically meant for ComponentConfigs.
|
||||
func convertObjToConfigMap(name string, obj runtime.Object) (*v1.ConfigMap, error) {
|
||||
eJSONBytes, err := json.Marshal(obj)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cm := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Data: map[string]string{
|
||||
name: string(eJSONBytes[:]),
|
||||
},
|
||||
}
|
||||
return cm, nil
|
||||
}
|
24
vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1/doc.go
generated
vendored
Normal file
@ -0,0 +1,24 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// +k8s:deepcopy-gen=package
|
||||
// +k8s:conversion-gen=k8s.io/kubernetes/pkg/scheduler/apis/config
|
||||
// +k8s:conversion-gen-external-types=k8s.io/kube-scheduler/config/v1alpha1
|
||||
// +k8s:defaulter-gen=TypeMeta
|
||||
// +k8s:defaulter-gen-input=../../../../../vendor/k8s.io/kube-scheduler/config/v1alpha1
|
||||
// +groupName=kubescheduler.config.k8s.io
|
||||
|
||||
package v1alpha1 // import "k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1"
|
43
vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1/register.go
generated
vendored
Normal file
@ -0,0 +1,43 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
kubeschedulerconfigv1alpha1 "k8s.io/kube-scheduler/config/v1alpha1"
|
||||
)
|
||||
|
||||
// GroupName is the group name used in this package
|
||||
const GroupName = "kubescheduler.config.k8s.io"
|
||||
|
||||
// SchemeGroupVersion is group version used to register these objects
|
||||
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
|
||||
|
||||
var (
|
||||
// localSchemeBuilder extends the SchemeBuilder instance with the external types. In this package,
|
||||
// defaulting and conversion init funcs are registered as well.
|
||||
localSchemeBuilder = &kubeschedulerconfigv1alpha1.SchemeBuilder
|
||||
// AddToScheme is a global function that registers this API group & version to a scheme
|
||||
AddToScheme = localSchemeBuilder.AddToScheme
|
||||
)
|
||||
|
||||
func init() {
|
||||
// We only register manually written functions here. The registration of the
|
||||
// generated functions takes place in the generated files. The separation
|
||||
// makes the code compile even when the generated files are missing.
|
||||
localSchemeBuilder.Register(addDefaultingFuncs)
|
||||
}
|
274
vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1/zz_generated.conversion.go
generated
vendored
Normal file
@ -0,0 +1,274 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by conversion-gen. DO NOT EDIT.
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
unsafe "unsafe"
|
||||
|
||||
configv1alpha1 "k8s.io/apimachinery/pkg/apis/config/v1alpha1"
|
||||
conversion "k8s.io/apimachinery/pkg/conversion"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
apisconfigv1alpha1 "k8s.io/apiserver/pkg/apis/config/v1alpha1"
|
||||
v1alpha1 "k8s.io/kube-scheduler/config/v1alpha1"
|
||||
config "k8s.io/kubernetes/pkg/scheduler/apis/config"
|
||||
)
|
||||
|
||||
func init() {
|
||||
localSchemeBuilder.Register(RegisterConversions)
|
||||
}
|
||||
|
||||
// RegisterConversions adds conversion functions to the given scheme.
|
||||
// Public to allow building arbitrary schemes.
|
||||
func RegisterConversions(s *runtime.Scheme) error {
|
||||
if err := s.AddGeneratedConversionFunc((*v1alpha1.KubeSchedulerConfiguration)(nil), (*config.KubeSchedulerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1alpha1_KubeSchedulerConfiguration_To_config_KubeSchedulerConfiguration(a.(*v1alpha1.KubeSchedulerConfiguration), b.(*config.KubeSchedulerConfiguration), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*config.KubeSchedulerConfiguration)(nil), (*v1alpha1.KubeSchedulerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_config_KubeSchedulerConfiguration_To_v1alpha1_KubeSchedulerConfiguration(a.(*config.KubeSchedulerConfiguration), b.(*v1alpha1.KubeSchedulerConfiguration), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*v1alpha1.KubeSchedulerLeaderElectionConfiguration)(nil), (*config.KubeSchedulerLeaderElectionConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1alpha1_KubeSchedulerLeaderElectionConfiguration_To_config_KubeSchedulerLeaderElectionConfiguration(a.(*v1alpha1.KubeSchedulerLeaderElectionConfiguration), b.(*config.KubeSchedulerLeaderElectionConfiguration), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*config.KubeSchedulerLeaderElectionConfiguration)(nil), (*v1alpha1.KubeSchedulerLeaderElectionConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_config_KubeSchedulerLeaderElectionConfiguration_To_v1alpha1_KubeSchedulerLeaderElectionConfiguration(a.(*config.KubeSchedulerLeaderElectionConfiguration), b.(*v1alpha1.KubeSchedulerLeaderElectionConfiguration), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*v1alpha1.SchedulerAlgorithmSource)(nil), (*config.SchedulerAlgorithmSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1alpha1_SchedulerAlgorithmSource_To_config_SchedulerAlgorithmSource(a.(*v1alpha1.SchedulerAlgorithmSource), b.(*config.SchedulerAlgorithmSource), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*config.SchedulerAlgorithmSource)(nil), (*v1alpha1.SchedulerAlgorithmSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_config_SchedulerAlgorithmSource_To_v1alpha1_SchedulerAlgorithmSource(a.(*config.SchedulerAlgorithmSource), b.(*v1alpha1.SchedulerAlgorithmSource), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*v1alpha1.SchedulerPolicyConfigMapSource)(nil), (*config.SchedulerPolicyConfigMapSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1alpha1_SchedulerPolicyConfigMapSource_To_config_SchedulerPolicyConfigMapSource(a.(*v1alpha1.SchedulerPolicyConfigMapSource), b.(*config.SchedulerPolicyConfigMapSource), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*config.SchedulerPolicyConfigMapSource)(nil), (*v1alpha1.SchedulerPolicyConfigMapSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_config_SchedulerPolicyConfigMapSource_To_v1alpha1_SchedulerPolicyConfigMapSource(a.(*config.SchedulerPolicyConfigMapSource), b.(*v1alpha1.SchedulerPolicyConfigMapSource), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*v1alpha1.SchedulerPolicyFileSource)(nil), (*config.SchedulerPolicyFileSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1alpha1_SchedulerPolicyFileSource_To_config_SchedulerPolicyFileSource(a.(*v1alpha1.SchedulerPolicyFileSource), b.(*config.SchedulerPolicyFileSource), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*config.SchedulerPolicyFileSource)(nil), (*v1alpha1.SchedulerPolicyFileSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_config_SchedulerPolicyFileSource_To_v1alpha1_SchedulerPolicyFileSource(a.(*config.SchedulerPolicyFileSource), b.(*v1alpha1.SchedulerPolicyFileSource), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*v1alpha1.SchedulerPolicySource)(nil), (*config.SchedulerPolicySource)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1alpha1_SchedulerPolicySource_To_config_SchedulerPolicySource(a.(*v1alpha1.SchedulerPolicySource), b.(*config.SchedulerPolicySource), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*config.SchedulerPolicySource)(nil), (*v1alpha1.SchedulerPolicySource)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_config_SchedulerPolicySource_To_v1alpha1_SchedulerPolicySource(a.(*config.SchedulerPolicySource), b.(*v1alpha1.SchedulerPolicySource), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func autoConvert_v1alpha1_KubeSchedulerConfiguration_To_config_KubeSchedulerConfiguration(in *v1alpha1.KubeSchedulerConfiguration, out *config.KubeSchedulerConfiguration, s conversion.Scope) error {
|
||||
out.SchedulerName = in.SchedulerName
|
||||
if err := Convert_v1alpha1_SchedulerAlgorithmSource_To_config_SchedulerAlgorithmSource(&in.AlgorithmSource, &out.AlgorithmSource, s); err != nil {
|
||||
return err
|
||||
}
|
||||
out.HardPodAffinitySymmetricWeight = in.HardPodAffinitySymmetricWeight
|
||||
if err := Convert_v1alpha1_KubeSchedulerLeaderElectionConfiguration_To_config_KubeSchedulerLeaderElectionConfiguration(&in.LeaderElection, &out.LeaderElection, s); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := configv1alpha1.Convert_v1alpha1_ClientConnectionConfiguration_To_config_ClientConnectionConfiguration(&in.ClientConnection, &out.ClientConnection, s); err != nil {
|
||||
return err
|
||||
}
|
||||
out.HealthzBindAddress = in.HealthzBindAddress
|
||||
out.MetricsBindAddress = in.MetricsBindAddress
|
||||
if err := apisconfigv1alpha1.Convert_v1alpha1_DebuggingConfiguration_To_config_DebuggingConfiguration(&in.DebuggingConfiguration, &out.DebuggingConfiguration, s); err != nil {
|
||||
return err
|
||||
}
|
||||
out.DisablePreemption = in.DisablePreemption
|
||||
out.PercentageOfNodesToScore = in.PercentageOfNodesToScore
|
||||
out.FailureDomains = in.FailureDomains
|
||||
out.BindTimeoutSeconds = (*int64)(unsafe.Pointer(in.BindTimeoutSeconds))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1alpha1_KubeSchedulerConfiguration_To_config_KubeSchedulerConfiguration is an autogenerated conversion function.
|
||||
func Convert_v1alpha1_KubeSchedulerConfiguration_To_config_KubeSchedulerConfiguration(in *v1alpha1.KubeSchedulerConfiguration, out *config.KubeSchedulerConfiguration, s conversion.Scope) error {
|
||||
return autoConvert_v1alpha1_KubeSchedulerConfiguration_To_config_KubeSchedulerConfiguration(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_config_KubeSchedulerConfiguration_To_v1alpha1_KubeSchedulerConfiguration(in *config.KubeSchedulerConfiguration, out *v1alpha1.KubeSchedulerConfiguration, s conversion.Scope) error {
|
||||
out.SchedulerName = in.SchedulerName
|
||||
if err := Convert_config_SchedulerAlgorithmSource_To_v1alpha1_SchedulerAlgorithmSource(&in.AlgorithmSource, &out.AlgorithmSource, s); err != nil {
|
||||
return err
|
||||
}
|
||||
out.HardPodAffinitySymmetricWeight = in.HardPodAffinitySymmetricWeight
|
||||
if err := Convert_config_KubeSchedulerLeaderElectionConfiguration_To_v1alpha1_KubeSchedulerLeaderElectionConfiguration(&in.LeaderElection, &out.LeaderElection, s); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := configv1alpha1.Convert_config_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration(&in.ClientConnection, &out.ClientConnection, s); err != nil {
|
||||
return err
|
||||
}
|
||||
out.HealthzBindAddress = in.HealthzBindAddress
|
||||
out.MetricsBindAddress = in.MetricsBindAddress
|
||||
if err := apisconfigv1alpha1.Convert_config_DebuggingConfiguration_To_v1alpha1_DebuggingConfiguration(&in.DebuggingConfiguration, &out.DebuggingConfiguration, s); err != nil {
|
||||
return err
|
||||
}
|
||||
out.DisablePreemption = in.DisablePreemption
|
||||
out.PercentageOfNodesToScore = in.PercentageOfNodesToScore
|
||||
out.FailureDomains = in.FailureDomains
|
||||
out.BindTimeoutSeconds = (*int64)(unsafe.Pointer(in.BindTimeoutSeconds))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_config_KubeSchedulerConfiguration_To_v1alpha1_KubeSchedulerConfiguration is an autogenerated conversion function.
|
||||
func Convert_config_KubeSchedulerConfiguration_To_v1alpha1_KubeSchedulerConfiguration(in *config.KubeSchedulerConfiguration, out *v1alpha1.KubeSchedulerConfiguration, s conversion.Scope) error {
|
||||
return autoConvert_config_KubeSchedulerConfiguration_To_v1alpha1_KubeSchedulerConfiguration(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1alpha1_KubeSchedulerLeaderElectionConfiguration_To_config_KubeSchedulerLeaderElectionConfiguration(in *v1alpha1.KubeSchedulerLeaderElectionConfiguration, out *config.KubeSchedulerLeaderElectionConfiguration, s conversion.Scope) error {
|
||||
if err := apisconfigv1alpha1.Convert_v1alpha1_LeaderElectionConfiguration_To_config_LeaderElectionConfiguration(&in.LeaderElectionConfiguration, &out.LeaderElectionConfiguration, s); err != nil {
|
||||
return err
|
||||
}
|
||||
out.LockObjectNamespace = in.LockObjectNamespace
|
||||
out.LockObjectName = in.LockObjectName
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1alpha1_KubeSchedulerLeaderElectionConfiguration_To_config_KubeSchedulerLeaderElectionConfiguration is an autogenerated conversion function.
|
||||
func Convert_v1alpha1_KubeSchedulerLeaderElectionConfiguration_To_config_KubeSchedulerLeaderElectionConfiguration(in *v1alpha1.KubeSchedulerLeaderElectionConfiguration, out *config.KubeSchedulerLeaderElectionConfiguration, s conversion.Scope) error {
|
||||
return autoConvert_v1alpha1_KubeSchedulerLeaderElectionConfiguration_To_config_KubeSchedulerLeaderElectionConfiguration(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_config_KubeSchedulerLeaderElectionConfiguration_To_v1alpha1_KubeSchedulerLeaderElectionConfiguration(in *config.KubeSchedulerLeaderElectionConfiguration, out *v1alpha1.KubeSchedulerLeaderElectionConfiguration, s conversion.Scope) error {
|
||||
if err := apisconfigv1alpha1.Convert_config_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(&in.LeaderElectionConfiguration, &out.LeaderElectionConfiguration, s); err != nil {
|
||||
return err
|
||||
}
|
||||
out.LockObjectNamespace = in.LockObjectNamespace
|
||||
out.LockObjectName = in.LockObjectName
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_config_KubeSchedulerLeaderElectionConfiguration_To_v1alpha1_KubeSchedulerLeaderElectionConfiguration is an autogenerated conversion function.
|
||||
func Convert_config_KubeSchedulerLeaderElectionConfiguration_To_v1alpha1_KubeSchedulerLeaderElectionConfiguration(in *config.KubeSchedulerLeaderElectionConfiguration, out *v1alpha1.KubeSchedulerLeaderElectionConfiguration, s conversion.Scope) error {
|
||||
return autoConvert_config_KubeSchedulerLeaderElectionConfiguration_To_v1alpha1_KubeSchedulerLeaderElectionConfiguration(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1alpha1_SchedulerAlgorithmSource_To_config_SchedulerAlgorithmSource(in *v1alpha1.SchedulerAlgorithmSource, out *config.SchedulerAlgorithmSource, s conversion.Scope) error {
|
||||
out.Policy = (*config.SchedulerPolicySource)(unsafe.Pointer(in.Policy))
|
||||
out.Provider = (*string)(unsafe.Pointer(in.Provider))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1alpha1_SchedulerAlgorithmSource_To_config_SchedulerAlgorithmSource is an autogenerated conversion function.
|
||||
func Convert_v1alpha1_SchedulerAlgorithmSource_To_config_SchedulerAlgorithmSource(in *v1alpha1.SchedulerAlgorithmSource, out *config.SchedulerAlgorithmSource, s conversion.Scope) error {
|
||||
return autoConvert_v1alpha1_SchedulerAlgorithmSource_To_config_SchedulerAlgorithmSource(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_config_SchedulerAlgorithmSource_To_v1alpha1_SchedulerAlgorithmSource(in *config.SchedulerAlgorithmSource, out *v1alpha1.SchedulerAlgorithmSource, s conversion.Scope) error {
|
||||
out.Policy = (*v1alpha1.SchedulerPolicySource)(unsafe.Pointer(in.Policy))
|
||||
out.Provider = (*string)(unsafe.Pointer(in.Provider))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_config_SchedulerAlgorithmSource_To_v1alpha1_SchedulerAlgorithmSource is an autogenerated conversion function.
|
||||
func Convert_config_SchedulerAlgorithmSource_To_v1alpha1_SchedulerAlgorithmSource(in *config.SchedulerAlgorithmSource, out *v1alpha1.SchedulerAlgorithmSource, s conversion.Scope) error {
|
||||
return autoConvert_config_SchedulerAlgorithmSource_To_v1alpha1_SchedulerAlgorithmSource(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1alpha1_SchedulerPolicyConfigMapSource_To_config_SchedulerPolicyConfigMapSource(in *v1alpha1.SchedulerPolicyConfigMapSource, out *config.SchedulerPolicyConfigMapSource, s conversion.Scope) error {
|
||||
out.Namespace = in.Namespace
|
||||
out.Name = in.Name
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1alpha1_SchedulerPolicyConfigMapSource_To_config_SchedulerPolicyConfigMapSource is an autogenerated conversion function.
|
||||
func Convert_v1alpha1_SchedulerPolicyConfigMapSource_To_config_SchedulerPolicyConfigMapSource(in *v1alpha1.SchedulerPolicyConfigMapSource, out *config.SchedulerPolicyConfigMapSource, s conversion.Scope) error {
|
||||
return autoConvert_v1alpha1_SchedulerPolicyConfigMapSource_To_config_SchedulerPolicyConfigMapSource(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_config_SchedulerPolicyConfigMapSource_To_v1alpha1_SchedulerPolicyConfigMapSource(in *config.SchedulerPolicyConfigMapSource, out *v1alpha1.SchedulerPolicyConfigMapSource, s conversion.Scope) error {
|
||||
out.Namespace = in.Namespace
|
||||
out.Name = in.Name
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_config_SchedulerPolicyConfigMapSource_To_v1alpha1_SchedulerPolicyConfigMapSource is an autogenerated conversion function.
|
||||
func Convert_config_SchedulerPolicyConfigMapSource_To_v1alpha1_SchedulerPolicyConfigMapSource(in *config.SchedulerPolicyConfigMapSource, out *v1alpha1.SchedulerPolicyConfigMapSource, s conversion.Scope) error {
|
||||
return autoConvert_config_SchedulerPolicyConfigMapSource_To_v1alpha1_SchedulerPolicyConfigMapSource(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1alpha1_SchedulerPolicyFileSource_To_config_SchedulerPolicyFileSource(in *v1alpha1.SchedulerPolicyFileSource, out *config.SchedulerPolicyFileSource, s conversion.Scope) error {
|
||||
out.Path = in.Path
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1alpha1_SchedulerPolicyFileSource_To_config_SchedulerPolicyFileSource is an autogenerated conversion function.
|
||||
func Convert_v1alpha1_SchedulerPolicyFileSource_To_config_SchedulerPolicyFileSource(in *v1alpha1.SchedulerPolicyFileSource, out *config.SchedulerPolicyFileSource, s conversion.Scope) error {
|
||||
return autoConvert_v1alpha1_SchedulerPolicyFileSource_To_config_SchedulerPolicyFileSource(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_config_SchedulerPolicyFileSource_To_v1alpha1_SchedulerPolicyFileSource(in *config.SchedulerPolicyFileSource, out *v1alpha1.SchedulerPolicyFileSource, s conversion.Scope) error {
|
||||
out.Path = in.Path
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_config_SchedulerPolicyFileSource_To_v1alpha1_SchedulerPolicyFileSource is an autogenerated conversion function.
|
||||
func Convert_config_SchedulerPolicyFileSource_To_v1alpha1_SchedulerPolicyFileSource(in *config.SchedulerPolicyFileSource, out *v1alpha1.SchedulerPolicyFileSource, s conversion.Scope) error {
|
||||
return autoConvert_config_SchedulerPolicyFileSource_To_v1alpha1_SchedulerPolicyFileSource(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1alpha1_SchedulerPolicySource_To_config_SchedulerPolicySource(in *v1alpha1.SchedulerPolicySource, out *config.SchedulerPolicySource, s conversion.Scope) error {
|
||||
out.File = (*config.SchedulerPolicyFileSource)(unsafe.Pointer(in.File))
|
||||
out.ConfigMap = (*config.SchedulerPolicyConfigMapSource)(unsafe.Pointer(in.ConfigMap))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1alpha1_SchedulerPolicySource_To_config_SchedulerPolicySource is an autogenerated conversion function.
|
||||
func Convert_v1alpha1_SchedulerPolicySource_To_config_SchedulerPolicySource(in *v1alpha1.SchedulerPolicySource, out *config.SchedulerPolicySource, s conversion.Scope) error {
|
||||
return autoConvert_v1alpha1_SchedulerPolicySource_To_config_SchedulerPolicySource(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_config_SchedulerPolicySource_To_v1alpha1_SchedulerPolicySource(in *config.SchedulerPolicySource, out *v1alpha1.SchedulerPolicySource, s conversion.Scope) error {
|
||||
out.File = (*v1alpha1.SchedulerPolicyFileSource)(unsafe.Pointer(in.File))
|
||||
out.ConfigMap = (*v1alpha1.SchedulerPolicyConfigMapSource)(unsafe.Pointer(in.ConfigMap))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_config_SchedulerPolicySource_To_v1alpha1_SchedulerPolicySource is an autogenerated conversion function.
|
||||
func Convert_config_SchedulerPolicySource_To_v1alpha1_SchedulerPolicySource(in *config.SchedulerPolicySource, out *v1alpha1.SchedulerPolicySource, s conversion.Scope) error {
|
||||
return autoConvert_config_SchedulerPolicySource_To_v1alpha1_SchedulerPolicySource(in, out, s)
|
||||
}
|
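The generated conversion functions above are registered into the scheme by RegisterConversions, so a round trip between the internal and v1alpha1 representations can go through runtime.Scheme.Convert. A minimal sketch:

```go
package main

import (
	"fmt"

	kubeschedulerconfigv1alpha1 "k8s.io/kube-scheduler/config/v1alpha1"
	kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
	kubeschedulerscheme "k8s.io/kubernetes/pkg/scheduler/apis/config/scheme"
)

func main() {
	internal := &kubeschedulerconfig.KubeSchedulerConfiguration{SchedulerName: "example-scheduler"}
	versioned := &kubeschedulerconfigv1alpha1.KubeSchedulerConfiguration{}

	// Convert picks up the conversion functions registered by RegisterConversions.
	if err := kubeschedulerscheme.Scheme.Convert(internal, versioned, nil); err != nil {
		panic(err)
	}
	fmt.Println(versioned.SchedulerName) // expected: example-scheduler
}
```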
21
vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1/zz_generated.deepcopy.go
generated
vendored
Normal file
@ -0,0 +1,21 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by deepcopy-gen. DO NOT EDIT.
|
||||
|
||||
package v1alpha1
|
40
vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1/zz_generated.defaults.go
generated
vendored
Normal file
@ -0,0 +1,40 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by defaulter-gen. DO NOT EDIT.
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
v1alpha1 "k8s.io/kube-scheduler/config/v1alpha1"
|
||||
)
|
||||
|
||||
// RegisterDefaults adds defaulter functions to the given scheme.
|
||||
// Public to allow building arbitrary schemes.
|
||||
// All generated defaulters are covering - they call all nested defaulters.
|
||||
func RegisterDefaults(scheme *runtime.Scheme) error {
|
||||
scheme.AddTypeDefaultingFunc(&v1alpha1.KubeSchedulerConfiguration{}, func(obj interface{}) {
|
||||
SetObjectDefaults_KubeSchedulerConfiguration(obj.(*v1alpha1.KubeSchedulerConfiguration))
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
func SetObjectDefaults_KubeSchedulerConfiguration(in *v1alpha1.KubeSchedulerConfiguration) {
|
||||
SetDefaults_KubeSchedulerConfiguration(in)
|
||||
}
|
41
vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/validation/BUILD
generated
vendored
Normal file
@ -0,0 +1,41 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["validation.go"],
|
||||
importpath = "k8s.io/kubernetes/pkg/scheduler/apis/config/validation",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//pkg/scheduler/apis/config:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/config/validation:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/validation:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
|
||||
"//staging/src/k8s.io/apiserver/pkg/apis/config/validation:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["validation_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//pkg/scheduler/apis/config:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/config:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apiserver/pkg/apis/config:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
68
vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/validation/validation.go
generated
vendored
Normal file
@ -0,0 +1,68 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package validation
|
||||
|
||||
import (
|
||||
apimachinery "k8s.io/apimachinery/pkg/apis/config/validation"
|
||||
"k8s.io/apimachinery/pkg/util/validation"
|
||||
"k8s.io/apimachinery/pkg/util/validation/field"
|
||||
apiserver "k8s.io/apiserver/pkg/apis/config/validation"
|
||||
"k8s.io/kubernetes/pkg/scheduler/apis/config"
|
||||
)
|
||||
|
||||
// ValidateKubeSchedulerConfiguration ensures validation of the KubeSchedulerConfiguration struct
|
||||
func ValidateKubeSchedulerConfiguration(cc *config.KubeSchedulerConfiguration) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
allErrs = append(allErrs, apimachinery.ValidateClientConnectionConfiguration(&cc.ClientConnection, field.NewPath("clientConnection"))...)
|
||||
allErrs = append(allErrs, ValidateKubeSchedulerLeaderElectionConfiguration(&cc.LeaderElection, field.NewPath("leaderElection"))...)
|
||||
if len(cc.SchedulerName) == 0 {
|
||||
allErrs = append(allErrs, field.Required(field.NewPath("schedulerName"), ""))
|
||||
}
|
||||
for _, msg := range validation.IsValidSocketAddr(cc.HealthzBindAddress) {
|
||||
allErrs = append(allErrs, field.Invalid(field.NewPath("healthzBindAddress"), cc.HealthzBindAddress, msg))
|
||||
}
|
||||
for _, msg := range validation.IsValidSocketAddr(cc.MetricsBindAddress) {
|
||||
allErrs = append(allErrs, field.Invalid(field.NewPath("metricsBindAddress"), cc.MetricsBindAddress, msg))
|
||||
}
|
||||
if cc.HardPodAffinitySymmetricWeight < 0 || cc.HardPodAffinitySymmetricWeight > 100 {
|
||||
allErrs = append(allErrs, field.Invalid(field.NewPath("hardPodAffinitySymmetricWeight"), cc.HardPodAffinitySymmetricWeight, "not in valid range 0-100"))
|
||||
}
|
||||
if cc.BindTimeoutSeconds == nil {
|
||||
allErrs = append(allErrs, field.Required(field.NewPath("bindTimeoutSeconds"), ""))
|
||||
}
|
||||
if cc.PercentageOfNodesToScore < 0 || cc.PercentageOfNodesToScore > 100 {
|
||||
allErrs = append(allErrs, field.Invalid(field.NewPath("percentageOfNodesToScore"),
|
||||
cc.PercentageOfNodesToScore, "not in valid range 0-100"))
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// ValidateKubeSchedulerLeaderElectionConfiguration ensures validation of the KubeSchedulerLeaderElectionConfiguration struct
|
||||
func ValidateKubeSchedulerLeaderElectionConfiguration(cc *config.KubeSchedulerLeaderElectionConfiguration, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
if !cc.LeaderElectionConfiguration.LeaderElect {
|
||||
return allErrs
|
||||
}
|
||||
allErrs = append(allErrs, apiserver.ValidateLeaderElectionConfiguration(&cc.LeaderElectionConfiguration, field.NewPath("leaderElectionConfiguration"))...)
|
||||
if len(cc.LockObjectNamespace) == 0 {
|
||||
allErrs = append(allErrs, field.Required(fldPath.Child("lockObjectNamespace"), ""))
|
||||
}
|
||||
if len(cc.LockObjectName) == 0 {
|
||||
allErrs = append(allErrs, field.Required(fldPath.Child("lockObjectName"), ""))
|
||||
}
|
||||
return allErrs
|
||||
}
|
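ValidateKubeSchedulerConfiguration above rejects configurations with missing required fields or out-of-range values. A minimal sketch of invoking it (the deliberately incomplete config is illustrative):

```go
package main

import (
	"fmt"

	kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
	"k8s.io/kubernetes/pkg/scheduler/apis/config/validation"
)

func main() {
	cfg := &kubeschedulerconfig.KubeSchedulerConfiguration{
		HealthzBindAddress: "0.0.0.0:10251",
		MetricsBindAddress: "0.0.0.0:10251",
		// SchedulerName and BindTimeoutSeconds are left unset, so validation
		// is expected to flag them as required.
	}
	for _, err := range validation.ValidateKubeSchedulerConfiguration(cfg) {
		fmt.Println(err)
	}
}
```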
158
vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/validation/validation_test.go
generated
vendored
Normal file
@ -0,0 +1,158 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package validation
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
apimachinery "k8s.io/apimachinery/pkg/apis/config"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
apiserver "k8s.io/apiserver/pkg/apis/config"
|
||||
"k8s.io/kubernetes/pkg/scheduler/apis/config"
|
||||
)
|
||||
|
||||
func TestValidateKubeSchedulerConfiguration(t *testing.T) {
|
||||
testTimeout := int64(0)
|
||||
validConfig := &config.KubeSchedulerConfiguration{
|
||||
SchedulerName: "me",
|
||||
HealthzBindAddress: "0.0.0.0:10254",
|
||||
MetricsBindAddress: "0.0.0.0:10254",
|
||||
HardPodAffinitySymmetricWeight: 80,
|
||||
ClientConnection: apimachinery.ClientConnectionConfiguration{
|
||||
AcceptContentTypes: "application/json",
|
||||
ContentType: "application/json",
|
||||
QPS: 10,
|
||||
Burst: 10,
|
||||
},
|
||||
AlgorithmSource: config.SchedulerAlgorithmSource{
|
||||
Policy: &config.SchedulerPolicySource{
|
||||
ConfigMap: &config.SchedulerPolicyConfigMapSource{
|
||||
Namespace: "name",
|
||||
Name: "name",
|
||||
},
|
||||
},
|
||||
},
|
||||
LeaderElection: config.KubeSchedulerLeaderElectionConfiguration{
|
||||
LockObjectNamespace: "name",
|
||||
LockObjectName: "name",
|
||||
LeaderElectionConfiguration: apiserver.LeaderElectionConfiguration{
|
||||
ResourceLock: "configmap",
|
||||
LeaderElect: true,
|
||||
LeaseDuration: metav1.Duration{Duration: 30 * time.Second},
|
||||
RenewDeadline: metav1.Duration{Duration: 15 * time.Second},
|
||||
RetryPeriod: metav1.Duration{Duration: 5 * time.Second},
|
||||
},
|
||||
},
|
||||
BindTimeoutSeconds: &testTimeout,
|
||||
PercentageOfNodesToScore: 35,
|
||||
}
|
||||
|
||||
HardPodAffinitySymmetricWeightGt100 := validConfig.DeepCopy()
|
||||
HardPodAffinitySymmetricWeightGt100.HardPodAffinitySymmetricWeight = 120
|
||||
|
||||
HardPodAffinitySymmetricWeightLt0 := validConfig.DeepCopy()
|
||||
HardPodAffinitySymmetricWeightLt0.HardPodAffinitySymmetricWeight = -1
|
||||
|
||||
lockObjectNameNotSet := validConfig.DeepCopy()
|
||||
lockObjectNameNotSet.LeaderElection.LockObjectName = ""
|
||||
|
||||
lockObjectNamespaceNotSet := validConfig.DeepCopy()
|
||||
lockObjectNamespaceNotSet.LeaderElection.LockObjectNamespace = ""
|
||||
|
||||
metricsBindAddrHostInvalid := validConfig.DeepCopy()
|
||||
metricsBindAddrHostInvalid.MetricsBindAddress = "0.0.0.0.0:9090"
|
||||
|
||||
metricsBindAddrPortInvalid := validConfig.DeepCopy()
|
||||
metricsBindAddrPortInvalid.MetricsBindAddress = "0.0.0.0:909090"
|
||||
|
||||
healthzBindAddrHostInvalid := validConfig.DeepCopy()
|
||||
healthzBindAddrHostInvalid.HealthzBindAddress = "0.0.0.0.0:9090"
|
||||
|
||||
healthzBindAddrPortInvalid := validConfig.DeepCopy()
|
||||
healthzBindAddrPortInvalid.HealthzBindAddress = "0.0.0.0:909090"
|
||||
|
||||
enableContentProfilingSetWithoutEnableProfiling := validConfig.DeepCopy()
|
||||
enableContentProfilingSetWithoutEnableProfiling.EnableProfiling = false
|
||||
enableContentProfilingSetWithoutEnableProfiling.EnableContentionProfiling = true
|
||||
|
||||
bindTimeoutUnset := validConfig.DeepCopy()
|
||||
bindTimeoutUnset.BindTimeoutSeconds = nil
|
||||
|
||||
percentageOfNodesToScore101 := validConfig.DeepCopy()
|
||||
percentageOfNodesToScore101.PercentageOfNodesToScore = int32(101)
|
||||
|
||||
scenarios := map[string]struct {
|
||||
expectedToFail bool
|
||||
config *config.KubeSchedulerConfiguration
|
||||
}{
|
||||
"good": {
|
||||
expectedToFail: false,
|
||||
config: validConfig,
|
||||
},
|
||||
"bad-lock-object-names-not-set": {
|
||||
expectedToFail: true,
|
||||
config: lockObjectNameNotSet,
|
||||
},
|
||||
"bad-lock-object-namespace-not-set": {
|
||||
expectedToFail: true,
|
||||
config: lockObjectNamespaceNotSet,
|
||||
},
|
||||
"bad-healthz-port-invalid": {
|
||||
expectedToFail: true,
|
||||
config: healthzBindAddrPortInvalid,
|
||||
},
|
||||
"bad-healthz-host-invalid": {
|
||||
expectedToFail: true,
|
||||
config: healthzBindAddrHostInvalid,
|
||||
},
|
||||
"bad-metrics-port-invalid": {
|
||||
expectedToFail: true,
|
||||
config: metricsBindAddrPortInvalid,
|
||||
},
|
||||
"bad-metrics-host-invalid": {
|
||||
expectedToFail: true,
|
||||
config: metricsBindAddrHostInvalid,
|
||||
},
|
||||
"bad-hard-pod-affinity-symmetric-weight-lt-0": {
|
||||
expectedToFail: true,
|
||||
config: HardPodAffinitySymmetricWeightGt100,
|
||||
},
|
||||
"bad-hard-pod-affinity-symmetric-weight-gt-100": {
|
||||
expectedToFail: true,
|
||||
config: HardPodAffinitySymmetricWeightLt0,
|
||||
},
|
||||
"bind-timeout-unset": {
|
||||
expectedToFail: true,
|
||||
config: bindTimeoutUnset,
|
||||
},
|
||||
"bad-percentage-of-nodes-to-score": {
|
||||
expectedToFail: true,
|
||||
config: percentageOfNodesToScore101,
|
||||
},
|
||||
}
|
||||
|
||||
for name, scenario := range scenarios {
|
||||
errs := ValidateKubeSchedulerConfiguration(scenario.config)
|
||||
if len(errs) == 0 && scenario.expectedToFail {
|
||||
t.Errorf("Unexpected success for scenario: %s", name)
|
||||
}
|
||||
if len(errs) > 0 && !scenario.expectedToFail {
|
||||
t.Errorf("Unexpected failure for scenario: %s - %+v", name, errs)
|
||||
}
|
||||
}
|
||||
}
|
160
vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/zz_generated.deepcopy.go
generated
vendored
Normal file
@ -0,0 +1,160 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by deepcopy-gen. DO NOT EDIT.
|
||||
|
||||
package config
|
||||
|
||||
import (
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *KubeSchedulerConfiguration) DeepCopyInto(out *KubeSchedulerConfiguration) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.AlgorithmSource.DeepCopyInto(&out.AlgorithmSource)
|
||||
out.LeaderElection = in.LeaderElection
|
||||
out.ClientConnection = in.ClientConnection
|
||||
out.DebuggingConfiguration = in.DebuggingConfiguration
|
||||
if in.BindTimeoutSeconds != nil {
|
||||
in, out := &in.BindTimeoutSeconds, &out.BindTimeoutSeconds
|
||||
*out = new(int64)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerConfiguration.
|
||||
func (in *KubeSchedulerConfiguration) DeepCopy() *KubeSchedulerConfiguration {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(KubeSchedulerConfiguration)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *KubeSchedulerConfiguration) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *KubeSchedulerLeaderElectionConfiguration) DeepCopyInto(out *KubeSchedulerLeaderElectionConfiguration) {
|
||||
*out = *in
|
||||
out.LeaderElectionConfiguration = in.LeaderElectionConfiguration
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerLeaderElectionConfiguration.
|
||||
func (in *KubeSchedulerLeaderElectionConfiguration) DeepCopy() *KubeSchedulerLeaderElectionConfiguration {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(KubeSchedulerLeaderElectionConfiguration)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *SchedulerAlgorithmSource) DeepCopyInto(out *SchedulerAlgorithmSource) {
|
||||
*out = *in
|
||||
if in.Policy != nil {
|
||||
in, out := &in.Policy, &out.Policy
|
||||
*out = new(SchedulerPolicySource)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.Provider != nil {
|
||||
in, out := &in.Provider, &out.Provider
|
||||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerAlgorithmSource.
|
||||
func (in *SchedulerAlgorithmSource) DeepCopy() *SchedulerAlgorithmSource {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(SchedulerAlgorithmSource)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *SchedulerPolicyConfigMapSource) DeepCopyInto(out *SchedulerPolicyConfigMapSource) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerPolicyConfigMapSource.
|
||||
func (in *SchedulerPolicyConfigMapSource) DeepCopy() *SchedulerPolicyConfigMapSource {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(SchedulerPolicyConfigMapSource)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *SchedulerPolicyFileSource) DeepCopyInto(out *SchedulerPolicyFileSource) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerPolicyFileSource.
|
||||
func (in *SchedulerPolicyFileSource) DeepCopy() *SchedulerPolicyFileSource {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(SchedulerPolicyFileSource)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *SchedulerPolicySource) DeepCopyInto(out *SchedulerPolicySource) {
|
||||
*out = *in
|
||||
if in.File != nil {
|
||||
in, out := &in.File, &out.File
|
||||
*out = new(SchedulerPolicyFileSource)
|
||||
**out = **in
|
||||
}
|
||||
if in.ConfigMap != nil {
|
||||
in, out := &in.ConfigMap, &out.ConfigMap
|
||||
*out = new(SchedulerPolicyConfigMapSource)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerPolicySource.
|
||||
func (in *SchedulerPolicySource) DeepCopy() *SchedulerPolicySource {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(SchedulerPolicySource)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
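Note: the generated DeepCopy above allocates a fresh BindTimeoutSeconds instead of sharing the pointer, so mutating the copy never leaks into the original. A small illustrative sketch of that behaviour (the field values are made up):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/scheduler/apis/config"
)

func main() {
	timeout := int64(100)
	in := &config.KubeSchedulerConfiguration{
		SchedulerName:      "default-scheduler",
		BindTimeoutSeconds: &timeout,
	}

	out := in.DeepCopy()
	*out.BindTimeoutSeconds = 5 // only touches the copy's own int64

	fmt.Println(*in.BindTimeoutSeconds, *out.BindTimeoutSeconds) // 100 5
}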
37
vendor/k8s.io/kubernetes/pkg/scheduler/cache/BUILD
generated
vendored
@ -3,8 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"cache.go",
|
||||
"interface.go",
|
||||
"host_ports.go",
|
||||
"node_info.go",
|
||||
"util.go",
|
||||
],
|
||||
@ -12,40 +11,28 @@ go_library(
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//pkg/apis/core/v1/helper:go_default_library",
|
||||
"//pkg/features:go_default_library",
|
||||
"//pkg/scheduler/algorithm/priorities/util:go_default_library",
|
||||
"//pkg/scheduler/util:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//vendor/k8s.io/klog:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"cache_test.go",
|
||||
"host_ports_test.go",
|
||||
"node_info_test.go",
|
||||
"util_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//pkg/features:go_default_library",
|
||||
"//pkg/scheduler/algorithm/priorities/util:go_default_library",
|
||||
"//pkg/scheduler/util:go_default_library",
|
||||
"//pkg/util/parsers:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
|
135
vendor/k8s.io/kubernetes/pkg/scheduler/cache/host_ports.go
generated
vendored
Normal file
@ -0,0 +1,135 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
	"k8s.io/api/core/v1"
)

// DefaultBindAllHostIP defines the default ip address used to bind to all host.
const DefaultBindAllHostIP = "0.0.0.0"

// ProtocolPort represents a protocol port pair, e.g. tcp:80.
type ProtocolPort struct {
	Protocol string
	Port     int32
}

// NewProtocolPort creates a ProtocolPort instance.
func NewProtocolPort(protocol string, port int32) *ProtocolPort {
	pp := &ProtocolPort{
		Protocol: protocol,
		Port:     port,
	}

	if len(pp.Protocol) == 0 {
		pp.Protocol = string(v1.ProtocolTCP)
	}

	return pp
}

// HostPortInfo stores mapping from ip to a set of ProtocolPort
type HostPortInfo map[string]map[ProtocolPort]struct{}

// Add adds (ip, protocol, port) to HostPortInfo
func (h HostPortInfo) Add(ip, protocol string, port int32) {
	if port <= 0 {
		return
	}

	h.sanitize(&ip, &protocol)

	pp := NewProtocolPort(protocol, port)
	if _, ok := h[ip]; !ok {
		h[ip] = map[ProtocolPort]struct{}{
			*pp: {},
		}
		return
	}

	h[ip][*pp] = struct{}{}
}

// Remove removes (ip, protocol, port) from HostPortInfo
func (h HostPortInfo) Remove(ip, protocol string, port int32) {
	if port <= 0 {
		return
	}

	h.sanitize(&ip, &protocol)

	pp := NewProtocolPort(protocol, port)
	if m, ok := h[ip]; ok {
		delete(m, *pp)
		if len(h[ip]) == 0 {
			delete(h, ip)
		}
	}
}

// Len returns the total number of (ip, protocol, port) tuple in HostPortInfo
func (h HostPortInfo) Len() int {
	length := 0
	for _, m := range h {
		length += len(m)
	}
	return length
}

// CheckConflict checks if the input (ip, protocol, port) conflicts with the existing
// ones in HostPortInfo.
func (h HostPortInfo) CheckConflict(ip, protocol string, port int32) bool {
	if port <= 0 {
		return false
	}

	h.sanitize(&ip, &protocol)

	pp := NewProtocolPort(protocol, port)

	// If ip is 0.0.0.0 check all IP's (protocol, port) pair
	if ip == DefaultBindAllHostIP {
		for _, m := range h {
			if _, ok := m[*pp]; ok {
				return true
			}
		}
		return false
	}

	// If ip isn't 0.0.0.0, only check IP and 0.0.0.0's (protocol, port) pair
	for _, key := range []string{DefaultBindAllHostIP, ip} {
		if m, ok := h[key]; ok {
			if _, ok2 := m[*pp]; ok2 {
				return true
			}
		}
	}

	return false
}

// sanitize the parameters
func (h HostPortInfo) sanitize(ip, protocol *string) {
	if len(*ip) == 0 {
		*ip = DefaultBindAllHostIP
	}
	if len(*protocol) == 0 {
		*protocol = string(v1.ProtocolTCP)
	}
}
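Note: a brief usage sketch of the HostPortInfo API defined above, relying only on the exported Add/Remove/Len shown in this file; empty ip/protocol default to 0.0.0.0/TCP and non-positive ports are ignored. The values are illustrative.

package main

import (
	"fmt"

	schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
)

func main() {
	hp := make(schedulercache.HostPortInfo)

	hp.Add("127.0.0.1", "TCP", 80) // stored under 127.0.0.1 as {TCP, 80}
	hp.Add("", "", 80)             // sanitized to 0.0.0.0 / TCP
	hp.Add("127.0.0.1", "UDP", 0)  // ignored: port must be > 0

	fmt.Println(hp.Len()) // 2

	hp.Remove("127.0.0.1", "", 80) // empty protocol also defaults to TCP
	fmt.Println(hp.Len())          // 1
}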
231
vendor/k8s.io/kubernetes/pkg/scheduler/cache/host_ports_test.go
generated
vendored
Normal file
@ -0,0 +1,231 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
type hostPortInfoParam struct {
|
||||
protocol, ip string
|
||||
port int32
|
||||
}
|
||||
|
||||
func TestHostPortInfo_AddRemove(t *testing.T) {
|
||||
tests := []struct {
|
||||
desc string
|
||||
added []hostPortInfoParam
|
||||
removed []hostPortInfoParam
|
||||
length int
|
||||
}{
|
||||
{
|
||||
desc: "normal add case",
|
||||
added: []hostPortInfoParam{
|
||||
{"TCP", "127.0.0.1", 79},
|
||||
{"UDP", "127.0.0.1", 80},
|
||||
{"TCP", "127.0.0.1", 81},
|
||||
{"TCP", "127.0.0.1", 82},
|
||||
// this might not make sense in real case, but the struct doesn't forbid it.
|
||||
{"TCP", "0.0.0.0", 79},
|
||||
{"UDP", "0.0.0.0", 80},
|
||||
{"TCP", "0.0.0.0", 81},
|
||||
{"TCP", "0.0.0.0", 82},
|
||||
{"TCP", "0.0.0.0", 0},
|
||||
{"TCP", "0.0.0.0", -1},
|
||||
},
|
||||
length: 8,
|
||||
},
|
||||
{
|
||||
desc: "empty ip and protocol add should work",
|
||||
added: []hostPortInfoParam{
|
||||
{"", "127.0.0.1", 79},
|
||||
{"UDP", "127.0.0.1", 80},
|
||||
{"", "127.0.0.1", 81},
|
||||
{"", "127.0.0.1", 82},
|
||||
{"", "", 79},
|
||||
{"UDP", "", 80},
|
||||
{"", "", 81},
|
||||
{"", "", 82},
|
||||
{"", "", 0},
|
||||
{"", "", -1},
|
||||
},
|
||||
length: 8,
|
||||
},
|
||||
{
|
||||
desc: "normal remove case",
|
||||
added: []hostPortInfoParam{
|
||||
{"TCP", "127.0.0.1", 79},
|
||||
{"UDP", "127.0.0.1", 80},
|
||||
{"TCP", "127.0.0.1", 81},
|
||||
{"TCP", "127.0.0.1", 82},
|
||||
{"TCP", "0.0.0.0", 79},
|
||||
{"UDP", "0.0.0.0", 80},
|
||||
{"TCP", "0.0.0.0", 81},
|
||||
{"TCP", "0.0.0.0", 82},
|
||||
},
|
||||
removed: []hostPortInfoParam{
|
||||
{"TCP", "127.0.0.1", 79},
|
||||
{"UDP", "127.0.0.1", 80},
|
||||
{"TCP", "127.0.0.1", 81},
|
||||
{"TCP", "127.0.0.1", 82},
|
||||
{"TCP", "0.0.0.0", 79},
|
||||
{"UDP", "0.0.0.0", 80},
|
||||
{"TCP", "0.0.0.0", 81},
|
||||
{"TCP", "0.0.0.0", 82},
|
||||
},
|
||||
length: 0,
|
||||
},
|
||||
{
|
||||
desc: "empty ip and protocol remove should work",
|
||||
added: []hostPortInfoParam{
|
||||
{"TCP", "127.0.0.1", 79},
|
||||
{"UDP", "127.0.0.1", 80},
|
||||
{"TCP", "127.0.0.1", 81},
|
||||
{"TCP", "127.0.0.1", 82},
|
||||
{"TCP", "0.0.0.0", 79},
|
||||
{"UDP", "0.0.0.0", 80},
|
||||
{"TCP", "0.0.0.0", 81},
|
||||
{"TCP", "0.0.0.0", 82},
|
||||
},
|
||||
removed: []hostPortInfoParam{
|
||||
{"", "127.0.0.1", 79},
|
||||
{"", "127.0.0.1", 81},
|
||||
{"", "127.0.0.1", 82},
|
||||
{"UDP", "127.0.0.1", 80},
|
||||
{"", "", 79},
|
||||
{"", "", 81},
|
||||
{"", "", 82},
|
||||
{"UDP", "", 80},
|
||||
},
|
||||
length: 0,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
hp := make(HostPortInfo)
|
||||
for _, param := range test.added {
|
||||
hp.Add(param.ip, param.protocol, param.port)
|
||||
}
|
||||
for _, param := range test.removed {
|
||||
hp.Remove(param.ip, param.protocol, param.port)
|
||||
}
|
||||
if hp.Len() != test.length {
|
||||
t.Errorf("%v failed: expect length %d; got %d", test.desc, test.length, hp.Len())
|
||||
t.Error(hp)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestHostPortInfo_Check(t *testing.T) {
|
||||
tests := []struct {
|
||||
desc string
|
||||
added []hostPortInfoParam
|
||||
check hostPortInfoParam
|
||||
expect bool
|
||||
}{
|
||||
{
|
||||
desc: "empty check should check 0.0.0.0 and TCP",
|
||||
added: []hostPortInfoParam{
|
||||
{"TCP", "127.0.0.1", 80},
|
||||
},
|
||||
check: hostPortInfoParam{"", "", 81},
|
||||
expect: false,
|
||||
},
|
||||
{
|
||||
desc: "empty check should check 0.0.0.0 and TCP (conflicted)",
|
||||
added: []hostPortInfoParam{
|
||||
{"TCP", "127.0.0.1", 80},
|
||||
},
|
||||
check: hostPortInfoParam{"", "", 80},
|
||||
expect: true,
|
||||
},
|
||||
{
|
||||
desc: "empty port check should pass",
|
||||
added: []hostPortInfoParam{
|
||||
{"TCP", "127.0.0.1", 80},
|
||||
},
|
||||
check: hostPortInfoParam{"", "", 0},
|
||||
expect: false,
|
||||
},
|
||||
{
|
||||
desc: "0.0.0.0 should check all registered IPs",
|
||||
added: []hostPortInfoParam{
|
||||
{"TCP", "127.0.0.1", 80},
|
||||
},
|
||||
check: hostPortInfoParam{"TCP", "0.0.0.0", 80},
|
||||
expect: true,
|
||||
},
|
||||
{
|
||||
desc: "0.0.0.0 with different protocol should be allowed",
|
||||
added: []hostPortInfoParam{
|
||||
{"UDP", "127.0.0.1", 80},
|
||||
},
|
||||
check: hostPortInfoParam{"TCP", "0.0.0.0", 80},
|
||||
expect: false,
|
||||
},
|
||||
{
|
||||
desc: "0.0.0.0 with different port should be allowed",
|
||||
added: []hostPortInfoParam{
|
||||
{"TCP", "127.0.0.1", 79},
|
||||
{"TCP", "127.0.0.1", 81},
|
||||
{"TCP", "127.0.0.1", 82},
|
||||
},
|
||||
check: hostPortInfoParam{"TCP", "0.0.0.0", 80},
|
||||
expect: false,
|
||||
},
|
||||
{
|
||||
desc: "normal ip should check all registered 0.0.0.0",
|
||||
added: []hostPortInfoParam{
|
||||
{"TCP", "0.0.0.0", 80},
|
||||
},
|
||||
check: hostPortInfoParam{"TCP", "127.0.0.1", 80},
|
||||
expect: true,
|
||||
},
|
||||
{
|
||||
desc: "normal ip with different port/protocol should be allowed (0.0.0.0)",
|
||||
added: []hostPortInfoParam{
|
||||
{"TCP", "0.0.0.0", 79},
|
||||
{"UDP", "0.0.0.0", 80},
|
||||
{"TCP", "0.0.0.0", 81},
|
||||
{"TCP", "0.0.0.0", 82},
|
||||
},
|
||||
check: hostPortInfoParam{"TCP", "127.0.0.1", 80},
|
||||
expect: false,
|
||||
},
|
||||
{
|
||||
desc: "normal ip with different port/protocol should be allowed",
|
||||
added: []hostPortInfoParam{
|
||||
{"TCP", "127.0.0.1", 79},
|
||||
{"UDP", "127.0.0.1", 80},
|
||||
{"TCP", "127.0.0.1", 81},
|
||||
{"TCP", "127.0.0.1", 82},
|
||||
},
|
||||
check: hostPortInfoParam{"TCP", "127.0.0.1", 80},
|
||||
expect: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
hp := make(HostPortInfo)
|
||||
for _, param := range test.added {
|
||||
hp.Add(param.ip, param.protocol, param.port)
|
||||
}
|
||||
if hp.CheckConflict(test.check.ip, test.check.protocol, test.check.port) != test.expect {
|
||||
t.Errorf("%v failed, expected %t; got %t", test.desc, test.expect, !test.expect)
|
||||
}
|
||||
}
|
||||
}
|
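Note: the cases above pivot on the 0.0.0.0 wildcard handling in CheckConflict, which applies in both directions. A small illustrative sketch (addresses and ports are arbitrary):

package main

import (
	"fmt"

	schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
)

func main() {
	hp := make(schedulercache.HostPortInfo)
	hp.Add("127.0.0.1", "TCP", 80)

	// A 0.0.0.0 probe checks every registered IP for the same protocol/port.
	fmt.Println(hp.CheckConflict("0.0.0.0", "TCP", 80)) // true
	// A different protocol or port on the wildcard address does not conflict.
	fmt.Println(hp.CheckConflict("0.0.0.0", "UDP", 80)) // false
	fmt.Println(hp.CheckConflict("0.0.0.0", "TCP", 81)) // false

	hp2 := make(schedulercache.HostPortInfo)
	hp2.Add("0.0.0.0", "TCP", 80)
	// A concrete IP probe also checks what is registered under 0.0.0.0.
	fmt.Println(hp2.CheckConflict("127.0.0.1", "TCP", 80)) // true
}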
146
vendor/k8s.io/kubernetes/pkg/scheduler/cache/node_info.go
generated
vendored
@ -22,13 +22,12 @@ import (
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/klog"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
|
||||
priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
|
||||
"k8s.io/kubernetes/pkg/scheduler/util"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -36,6 +35,14 @@ var (
|
||||
generation int64
|
||||
)
|
||||
|
||||
// ImageStateSummary provides summarized information about the state of an image.
|
||||
type ImageStateSummary struct {
|
||||
// Size of the image
|
||||
Size int64
|
||||
// Used to track how many nodes have this image
|
||||
NumNodes int
|
||||
}
|
||||
|
||||
// NodeInfo is node level aggregated information.
|
||||
type NodeInfo struct {
|
||||
// Overall node information.
|
||||
@ -43,7 +50,7 @@ type NodeInfo struct {
|
||||
|
||||
pods []*v1.Pod
|
||||
podsWithAffinity []*v1.Pod
|
||||
usedPorts util.HostPortInfo
|
||||
usedPorts HostPortInfo
|
||||
|
||||
// Total requested resource of all pods on this node.
|
||||
// It includes assumed pods which scheduler sends binding to apiserver but
|
||||
@ -58,14 +65,15 @@ type NodeInfo struct {
|
||||
taints []v1.Taint
|
||||
taintsErr error
|
||||
|
||||
// This is a map from image name to image size, also for checking image existence on the node
|
||||
// Cache it here to avoid rebuilding the map during scheduling, e.g., in image_locality.go
|
||||
imageSizes map[string]int64
|
||||
// imageStates holds the entry of an image if and only if this image is on the node. The entry can be used for
|
||||
// checking an image's existence and advanced usage (e.g., image locality scheduling policy) based on the image
|
||||
// state information.
|
||||
imageStates map[string]*ImageStateSummary
|
||||
|
||||
// TransientInfo holds the information pertaining to a scheduling cycle. This will be destructed at the end of
|
||||
// scheduling cycle.
|
||||
// TODO: @ravig. Remove this once we have a clear approach for message passing across predicates and priorities.
|
||||
TransientInfo *transientSchedulerInfo
|
||||
TransientInfo *TransientSchedulerInfo
|
||||
|
||||
// Cached conditions of node for faster lookup.
|
||||
memoryPressureCondition v1.ConditionStatus
|
||||
@ -98,28 +106,28 @@ type nodeTransientInfo struct {
|
||||
RequestedVolumes int
|
||||
}
|
||||
|
||||
// transientSchedulerInfo is a transient structure which is destructed at the end of each scheduling cycle.
|
||||
// TransientSchedulerInfo is a transient structure which is destructed at the end of each scheduling cycle.
|
||||
// It consists of items that are valid for a scheduling cycle and is used for message passing across predicates and
|
||||
// priorities. Some examples which could be used as fields are number of volumes being used on node, current utilization
|
||||
// on node etc.
|
||||
// IMPORTANT NOTE: Make sure that each field in this structure is documented along with usage. Expand this structure
|
||||
// only when absolutely needed as this data structure will be created and destroyed during every scheduling cycle.
|
||||
type transientSchedulerInfo struct {
|
||||
type TransientSchedulerInfo struct {
|
||||
TransientLock sync.Mutex
|
||||
// NodeTransInfo holds the information related to nodeTransientInformation. NodeName is the key here.
|
||||
TransNodeInfo nodeTransientInfo
|
||||
}
|
||||
|
||||
// newTransientSchedulerInfo returns a new scheduler transient structure with initialized values.
|
||||
func newTransientSchedulerInfo() *transientSchedulerInfo {
|
||||
tsi := &transientSchedulerInfo{
|
||||
// NewTransientSchedulerInfo returns a new scheduler transient structure with initialized values.
|
||||
func NewTransientSchedulerInfo() *TransientSchedulerInfo {
|
||||
tsi := &TransientSchedulerInfo{
|
||||
TransNodeInfo: initializeNodeTransientInfo(),
|
||||
}
|
||||
return tsi
|
||||
}
|
||||
|
||||
// resetTransientSchedulerInfo resets the transientSchedulerInfo.
|
||||
func (transientSchedInfo *transientSchedulerInfo) resetTransientSchedulerInfo() {
|
||||
// ResetTransientSchedulerInfo resets the TransientSchedulerInfo.
|
||||
func (transientSchedInfo *TransientSchedulerInfo) ResetTransientSchedulerInfo() {
|
||||
transientSchedInfo.TransientLock.Lock()
|
||||
defer transientSchedInfo.TransientLock.Unlock()
|
||||
// Reset TransientNodeInfo.
|
||||
@ -258,10 +266,10 @@ func NewNodeInfo(pods ...*v1.Pod) *NodeInfo {
|
||||
requestedResource: &Resource{},
|
||||
nonzeroRequest: &Resource{},
|
||||
allocatableResource: &Resource{},
|
||||
TransientInfo: newTransientSchedulerInfo(),
|
||||
TransientInfo: NewTransientSchedulerInfo(),
|
||||
generation: nextGeneration(),
|
||||
usedPorts: make(util.HostPortInfo),
|
||||
imageSizes: make(map[string]int64),
|
||||
usedPorts: make(HostPortInfo),
|
||||
imageStates: make(map[string]*ImageStateSummary),
|
||||
}
|
||||
for _, pod := range pods {
|
||||
ni.AddPod(pod)
|
||||
@ -285,20 +293,35 @@ func (n *NodeInfo) Pods() []*v1.Pod {
|
||||
return n.pods
|
||||
}
|
||||
|
||||
// SetPods sets all pods scheduled (including assumed to be) on this node.
|
||||
func (n *NodeInfo) SetPods(pods []*v1.Pod) {
|
||||
n.pods = pods
|
||||
}
|
||||
|
||||
// UsedPorts returns used ports on this node.
|
||||
func (n *NodeInfo) UsedPorts() util.HostPortInfo {
|
||||
func (n *NodeInfo) UsedPorts() HostPortInfo {
|
||||
if n == nil {
|
||||
return nil
|
||||
}
|
||||
return n.usedPorts
|
||||
}
|
||||
|
||||
// ImageSizes returns the image size information on this node.
|
||||
func (n *NodeInfo) ImageSizes() map[string]int64 {
|
||||
// SetUsedPorts sets the used ports on this node.
|
||||
func (n *NodeInfo) SetUsedPorts(newUsedPorts HostPortInfo) {
|
||||
n.usedPorts = newUsedPorts
|
||||
}
|
||||
|
||||
// ImageStates returns the state information of all images.
|
||||
func (n *NodeInfo) ImageStates() map[string]*ImageStateSummary {
|
||||
if n == nil {
|
||||
return nil
|
||||
}
|
||||
return n.imageSizes
|
||||
return n.imageStates
|
||||
}
|
||||
|
||||
// SetImageStates sets the state information of all images.
|
||||
func (n *NodeInfo) SetImageStates(newImageStates map[string]*ImageStateSummary) {
|
||||
n.imageStates = newImageStates
|
||||
}
|
||||
|
||||
// PodsWithAffinity return all pods with (anti)affinity constraints on this node.
|
||||
@ -325,6 +348,11 @@ func (n *NodeInfo) Taints() ([]v1.Taint, error) {
|
||||
return n.taints, n.taintsErr
|
||||
}
|
||||
|
||||
// SetTaints sets the taints list on this node.
|
||||
func (n *NodeInfo) SetTaints(newTaints []v1.Taint) {
|
||||
n.taints = newTaints
|
||||
}
|
||||
|
||||
// MemoryPressureCondition returns the memory pressure condition status on this node.
|
||||
func (n *NodeInfo) MemoryPressureCondition() v1.ConditionStatus {
|
||||
if n == nil {
|
||||
@ -357,6 +385,11 @@ func (n *NodeInfo) RequestedResource() Resource {
|
||||
return *n.requestedResource
|
||||
}
|
||||
|
||||
// SetRequestedResource sets the aggregated resource request of pods on this node.
|
||||
func (n *NodeInfo) SetRequestedResource(newResource *Resource) {
|
||||
n.requestedResource = newResource
|
||||
}
|
||||
|
||||
// NonZeroRequest returns aggregated nonzero resource request of pods on this node.
|
||||
func (n *NodeInfo) NonZeroRequest() Resource {
|
||||
if n == nil {
|
||||
@ -365,6 +398,11 @@ func (n *NodeInfo) NonZeroRequest() Resource {
|
||||
return *n.nonzeroRequest
|
||||
}
|
||||
|
||||
// SetNonZeroRequest sets the aggregated nonzero resource request of pods on this node.
|
||||
func (n *NodeInfo) SetNonZeroRequest(newResource *Resource) {
|
||||
n.nonzeroRequest = newResource
|
||||
}
|
||||
|
||||
// AllocatableResource returns allocatable resources on a given node.
|
||||
func (n *NodeInfo) AllocatableResource() Resource {
|
||||
if n == nil {
|
||||
@ -379,6 +417,19 @@ func (n *NodeInfo) SetAllocatableResource(allocatableResource *Resource) {
|
||||
n.generation = nextGeneration()
|
||||
}
|
||||
|
||||
// GetGeneration returns the generation on this node.
|
||||
func (n *NodeInfo) GetGeneration() int64 {
|
||||
if n == nil {
|
||||
return 0
|
||||
}
|
||||
return n.generation
|
||||
}
|
||||
|
||||
// SetGeneration sets the generation on this node. This is for testing only.
|
||||
func (n *NodeInfo) SetGeneration(newGeneration int64) {
|
||||
n.generation = newGeneration
|
||||
}
|
||||
|
||||
// Clone returns a copy of this node.
|
||||
func (n *NodeInfo) Clone() *NodeInfo {
|
||||
clone := &NodeInfo{
|
||||
@ -391,16 +442,21 @@ func (n *NodeInfo) Clone() *NodeInfo {
|
||||
memoryPressureCondition: n.memoryPressureCondition,
|
||||
diskPressureCondition: n.diskPressureCondition,
|
||||
pidPressureCondition: n.pidPressureCondition,
|
||||
usedPorts: make(util.HostPortInfo),
|
||||
imageSizes: n.imageSizes,
|
||||
usedPorts: make(HostPortInfo),
|
||||
imageStates: n.imageStates,
|
||||
generation: n.generation,
|
||||
}
|
||||
if len(n.pods) > 0 {
|
||||
clone.pods = append([]*v1.Pod(nil), n.pods...)
|
||||
}
|
||||
if len(n.usedPorts) > 0 {
|
||||
for k, v := range n.usedPorts {
|
||||
clone.usedPorts[k] = v
|
||||
// HostPortInfo is a map-in-map struct
|
||||
// make sure it's deep copied
|
||||
for ip, portMap := range n.usedPorts {
|
||||
clone.usedPorts[ip] = make(map[ProtocolPort]struct{})
|
||||
for protocolPort, v := range portMap {
|
||||
clone.usedPorts[ip][protocolPort] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(n.podsWithAffinity) > 0 {
|
||||
@ -458,22 +514,22 @@ func (n *NodeInfo) AddPod(pod *v1.Pod) {
|
||||
}
|
||||
|
||||
// Consume ports when pods added.
|
||||
n.updateUsedPorts(pod, true)
|
||||
n.UpdateUsedPorts(pod, true)
|
||||
|
||||
n.generation = nextGeneration()
|
||||
}
|
||||
|
||||
// RemovePod subtracts pod information from this NodeInfo.
|
||||
func (n *NodeInfo) RemovePod(pod *v1.Pod) error {
|
||||
k1, err := getPodKey(pod)
|
||||
k1, err := GetPodKey(pod)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for i := range n.podsWithAffinity {
|
||||
k2, err := getPodKey(n.podsWithAffinity[i])
|
||||
k2, err := GetPodKey(n.podsWithAffinity[i])
|
||||
if err != nil {
|
||||
glog.Errorf("Cannot get pod key, err: %v", err)
|
||||
klog.Errorf("Cannot get pod key, err: %v", err)
|
||||
continue
|
||||
}
|
||||
if k1 == k2 {
|
||||
@ -484,9 +540,9 @@ func (n *NodeInfo) RemovePod(pod *v1.Pod) error {
|
||||
}
|
||||
}
|
||||
for i := range n.pods {
|
||||
k2, err := getPodKey(n.pods[i])
|
||||
k2, err := GetPodKey(n.pods[i])
|
||||
if err != nil {
|
||||
glog.Errorf("Cannot get pod key, err: %v", err)
|
||||
klog.Errorf("Cannot get pod key, err: %v", err)
|
||||
continue
|
||||
}
|
||||
if k1 == k2 {
|
||||
@ -509,7 +565,7 @@ func (n *NodeInfo) RemovePod(pod *v1.Pod) error {
|
||||
n.nonzeroRequest.Memory -= non0Mem
|
||||
|
||||
// Release ports when remove Pods.
|
||||
n.updateUsedPorts(pod, false)
|
||||
n.UpdateUsedPorts(pod, false)
|
||||
|
||||
n.generation = nextGeneration()
|
||||
|
||||
@ -533,7 +589,8 @@ func calculateResource(pod *v1.Pod) (res Resource, non0CPU int64, non0Mem int64)
|
||||
return
|
||||
}
|
||||
|
||||
func (n *NodeInfo) updateUsedPorts(pod *v1.Pod, add bool) {
|
||||
// UpdateUsedPorts updates the UsedPorts of NodeInfo.
|
||||
func (n *NodeInfo) UpdateUsedPorts(pod *v1.Pod, add bool) {
|
||||
for j := range pod.Spec.Containers {
|
||||
container := &pod.Spec.Containers[j]
|
||||
for k := range container.Ports {
|
||||
@ -547,17 +604,6 @@ func (n *NodeInfo) updateUsedPorts(pod *v1.Pod, add bool) {
|
||||
}
|
||||
}
|
||||
|
||||
func (n *NodeInfo) updateImageSizes() {
|
||||
node := n.Node()
|
||||
imageSizes := make(map[string]int64)
|
||||
for _, image := range node.Status.Images {
|
||||
for _, name := range image.Names {
|
||||
imageSizes[name] = image.SizeBytes
|
||||
}
|
||||
}
|
||||
n.imageSizes = imageSizes
|
||||
}
|
||||
|
||||
// SetNode sets the overall node information.
|
||||
func (n *NodeInfo) SetNode(node *v1.Node) error {
|
||||
n.node = node
|
||||
@ -578,8 +624,7 @@ func (n *NodeInfo) SetNode(node *v1.Node) error {
|
||||
// We ignore other conditions.
|
||||
}
|
||||
}
|
||||
n.TransientInfo = newTransientSchedulerInfo()
|
||||
n.updateImageSizes()
|
||||
n.TransientInfo = NewTransientSchedulerInfo()
|
||||
n.generation = nextGeneration()
|
||||
return nil
|
||||
}
|
||||
@ -596,6 +641,7 @@ func (n *NodeInfo) RemoveNode(node *v1.Node) error {
|
||||
n.memoryPressureCondition = v1.ConditionUnknown
|
||||
n.diskPressureCondition = v1.ConditionUnknown
|
||||
n.pidPressureCondition = v1.ConditionUnknown
|
||||
n.imageStates = make(map[string]*ImageStateSummary)
|
||||
n.generation = nextGeneration()
|
||||
return nil
|
||||
}
|
||||
@ -619,9 +665,9 @@ func (n *NodeInfo) FilterOutPods(pods []*v1.Pod) []*v1.Pod {
|
||||
continue
|
||||
}
|
||||
// If pod is on the given node, add it to 'filtered' only if it is present in nodeInfo.
|
||||
podKey, _ := getPodKey(p)
|
||||
podKey, _ := GetPodKey(p)
|
||||
for _, np := range n.Pods() {
|
||||
npodkey, _ := getPodKey(np)
|
||||
npodkey, _ := GetPodKey(np)
|
||||
if npodkey == podKey {
|
||||
filtered = append(filtered, p)
|
||||
break
|
||||
@ -631,8 +677,8 @@ func (n *NodeInfo) FilterOutPods(pods []*v1.Pod) []*v1.Pod {
|
||||
return filtered
|
||||
}
|
||||
|
||||
// getPodKey returns the string key of a pod.
|
||||
func getPodKey(pod *v1.Pod) (string, error) {
|
||||
// GetPodKey returns the string key of a pod.
|
||||
func GetPodKey(pod *v1.Pod) (string, error) {
|
||||
uid := string(pod.UID)
|
||||
if len(uid) == 0 {
|
||||
return "", errors.New("Cannot get cache key for pod with empty UID")
|
||||
|
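Note: Clone above now copies usedPorts per IP because HostPortInfo is a map of maps; copying only the outer map would leave the clone sharing the inner port sets with the original. A standalone sketch of that deep-copy idiom, reusing the ProtocolPort/HostPortInfo shapes shown earlier:

package main

import "fmt"

// ProtocolPort and HostPortInfo mirror the scheduler cache types shown above.
type ProtocolPort struct {
	Protocol string
	Port     int32
}

type HostPortInfo map[string]map[ProtocolPort]struct{}

// deepCopy copies both map levels so the clone cannot be mutated through the original.
func deepCopy(in HostPortInfo) HostPortInfo {
	out := make(HostPortInfo, len(in))
	for ip, portMap := range in {
		out[ip] = make(map[ProtocolPort]struct{}, len(portMap))
		for pp, v := range portMap {
			out[ip][pp] = v
		}
	}
	return out
}

func main() {
	orig := HostPortInfo{"127.0.0.1": {{Protocol: "TCP", Port: 80}: {}}}
	clone := deepCopy(orig)
	delete(orig["127.0.0.1"], ProtocolPort{Protocol: "TCP", Port: 80})
	fmt.Println(len(clone["127.0.0.1"])) // 1: the clone keeps its own inner map
}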
125
vendor/k8s.io/kubernetes/pkg/scheduler/cache/node_info_test.go
generated
vendored
@ -19,14 +19,13 @@ package cache
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/scheduler/util"
|
||||
"k8s.io/kubernetes/pkg/util/parsers"
|
||||
)
|
||||
|
||||
func TestNewResource(t *testing.T) {
|
||||
@ -241,44 +240,41 @@ func TestSetMaxResource(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestImageSizes(t *testing.T) {
|
||||
ni := fakeNodeInfo()
|
||||
ni.node = &v1.Node{
|
||||
type testingMode interface {
|
||||
Fatalf(format string, args ...interface{})
|
||||
}
|
||||
|
||||
func makeBasePod(t testingMode, nodeName, objName, cpu, mem, extended string, ports []v1.ContainerPort) *v1.Pod {
|
||||
req := v1.ResourceList{}
|
||||
if cpu != "" {
|
||||
req = v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse(cpu),
|
||||
v1.ResourceMemory: resource.MustParse(mem),
|
||||
}
|
||||
if extended != "" {
|
||||
parts := strings.Split(extended, ":")
|
||||
if len(parts) != 2 {
|
||||
t.Fatalf("Invalid extended resource string: \"%s\"", extended)
|
||||
}
|
||||
req[v1.ResourceName(parts[0])] = resource.MustParse(parts[1])
|
||||
}
|
||||
}
|
||||
return &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-node",
|
||||
UID: types.UID(objName),
|
||||
Namespace: "node_info_cache_test",
|
||||
Name: objName,
|
||||
},
|
||||
Status: v1.NodeStatus{
|
||||
Images: []v1.ContainerImage{
|
||||
{
|
||||
Names: []string{
|
||||
"gcr.io/10:" + parsers.DefaultImageTag,
|
||||
"gcr.io/10:v1",
|
||||
},
|
||||
SizeBytes: int64(10 * 1024 * 1024),
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: req,
|
||||
},
|
||||
{
|
||||
Names: []string{
|
||||
"gcr.io/50:" + parsers.DefaultImageTag,
|
||||
"gcr.io/50:v1",
|
||||
},
|
||||
SizeBytes: int64(50 * 1024 * 1024),
|
||||
},
|
||||
},
|
||||
Ports: ports,
|
||||
}},
|
||||
NodeName: nodeName,
|
||||
},
|
||||
}
|
||||
|
||||
ni.updateImageSizes()
|
||||
expected := map[string]int64{
|
||||
"gcr.io/10:" + parsers.DefaultImageTag: 10 * 1024 * 1024,
|
||||
"gcr.io/10:v1": 10 * 1024 * 1024,
|
||||
"gcr.io/50:" + parsers.DefaultImageTag: 50 * 1024 * 1024,
|
||||
"gcr.io/50:v1": 50 * 1024 * 1024,
|
||||
}
|
||||
|
||||
imageSizes := ni.ImageSizes()
|
||||
if !reflect.DeepEqual(expected, imageSizes) {
|
||||
t.Errorf("expected: %#v, got: %#v", expected, imageSizes)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewNodeInfo(t *testing.T) {
|
||||
@ -303,16 +299,16 @@ func TestNewNodeInfo(t *testing.T) {
|
||||
AllowedPodNumber: 0,
|
||||
ScalarResources: map[v1.ResourceName]int64(nil),
|
||||
},
|
||||
TransientInfo: newTransientSchedulerInfo(),
|
||||
TransientInfo: NewTransientSchedulerInfo(),
|
||||
allocatableResource: &Resource{},
|
||||
generation: 2,
|
||||
usedPorts: util.HostPortInfo{
|
||||
"127.0.0.1": map[util.ProtocolPort]struct{}{
|
||||
usedPorts: HostPortInfo{
|
||||
"127.0.0.1": map[ProtocolPort]struct{}{
|
||||
{Protocol: "TCP", Port: 80}: {},
|
||||
{Protocol: "TCP", Port: 8080}: {},
|
||||
},
|
||||
},
|
||||
imageSizes: map[string]int64{},
|
||||
imageStates: map[string]*ImageStateSummary{},
|
||||
pods: []*v1.Pod{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@ -392,18 +388,16 @@ func TestNodeInfoClone(t *testing.T) {
|
||||
nodeInfo: &NodeInfo{
|
||||
requestedResource: &Resource{},
|
||||
nonzeroRequest: &Resource{},
|
||||
TransientInfo: newTransientSchedulerInfo(),
|
||||
TransientInfo: NewTransientSchedulerInfo(),
|
||||
allocatableResource: &Resource{},
|
||||
generation: 2,
|
||||
usedPorts: util.HostPortInfo{
|
||||
"127.0.0.1": map[util.ProtocolPort]struct{}{
|
||||
usedPorts: HostPortInfo{
|
||||
"127.0.0.1": map[ProtocolPort]struct{}{
|
||||
{Protocol: "TCP", Port: 80}: {},
|
||||
{Protocol: "TCP", Port: 8080}: {},
|
||||
},
|
||||
},
|
||||
imageSizes: map[string]int64{
|
||||
"gcr.io/10": 10 * 1024 * 1024,
|
||||
},
|
||||
imageStates: map[string]*ImageStateSummary{},
|
||||
pods: []*v1.Pod{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@ -464,18 +458,16 @@ func TestNodeInfoClone(t *testing.T) {
|
||||
expected: &NodeInfo{
|
||||
requestedResource: &Resource{},
|
||||
nonzeroRequest: &Resource{},
|
||||
TransientInfo: newTransientSchedulerInfo(),
|
||||
TransientInfo: NewTransientSchedulerInfo(),
|
||||
allocatableResource: &Resource{},
|
||||
generation: 2,
|
||||
usedPorts: util.HostPortInfo{
|
||||
"127.0.0.1": map[util.ProtocolPort]struct{}{
|
||||
usedPorts: HostPortInfo{
|
||||
"127.0.0.1": map[ProtocolPort]struct{}{
|
||||
{Protocol: "TCP", Port: 80}: {},
|
||||
{Protocol: "TCP", Port: 8080}: {},
|
||||
},
|
||||
},
|
||||
imageSizes: map[string]int64{
|
||||
"gcr.io/10": 10 * 1024 * 1024,
|
||||
},
|
||||
imageStates: map[string]*ImageStateSummary{},
|
||||
pods: []*v1.Pod{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@ -540,6 +532,7 @@ func TestNodeInfoClone(t *testing.T) {
|
||||
ni := test.nodeInfo.Clone()
|
||||
// Modify the field to check if the result is a clone of the origin one.
|
||||
test.nodeInfo.generation += 10
|
||||
test.nodeInfo.usedPorts.Remove("127.0.0.1", "TCP", 80)
|
||||
if !reflect.DeepEqual(test.expected, ni) {
|
||||
t.Errorf("expected: %#v, got: %#v", test.expected, ni)
|
||||
}
|
||||
@ -624,16 +617,16 @@ func TestNodeInfoAddPod(t *testing.T) {
|
||||
AllowedPodNumber: 0,
|
||||
ScalarResources: map[v1.ResourceName]int64(nil),
|
||||
},
|
||||
TransientInfo: newTransientSchedulerInfo(),
|
||||
TransientInfo: NewTransientSchedulerInfo(),
|
||||
allocatableResource: &Resource{},
|
||||
generation: 2,
|
||||
usedPorts: util.HostPortInfo{
|
||||
"127.0.0.1": map[util.ProtocolPort]struct{}{
|
||||
usedPorts: HostPortInfo{
|
||||
"127.0.0.1": map[ProtocolPort]struct{}{
|
||||
{Protocol: "TCP", Port: 80}: {},
|
||||
{Protocol: "TCP", Port: 8080}: {},
|
||||
},
|
||||
},
|
||||
imageSizes: map[string]int64{},
|
||||
imageStates: map[string]*ImageStateSummary{},
|
||||
pods: []*v1.Pod{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@ -743,16 +736,16 @@ func TestNodeInfoRemovePod(t *testing.T) {
|
||||
AllowedPodNumber: 0,
|
||||
ScalarResources: map[v1.ResourceName]int64(nil),
|
||||
},
|
||||
TransientInfo: newTransientSchedulerInfo(),
|
||||
TransientInfo: NewTransientSchedulerInfo(),
|
||||
allocatableResource: &Resource{},
|
||||
generation: 2,
|
||||
usedPorts: util.HostPortInfo{
|
||||
"127.0.0.1": map[util.ProtocolPort]struct{}{
|
||||
usedPorts: HostPortInfo{
|
||||
"127.0.0.1": map[ProtocolPort]struct{}{
|
||||
{Protocol: "TCP", Port: 80}: {},
|
||||
{Protocol: "TCP", Port: 8080}: {},
|
||||
},
|
||||
},
|
||||
imageSizes: map[string]int64{},
|
||||
imageStates: map[string]*ImageStateSummary{},
|
||||
pods: []*v1.Pod{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@ -860,15 +853,15 @@ func TestNodeInfoRemovePod(t *testing.T) {
|
||||
AllowedPodNumber: 0,
|
||||
ScalarResources: map[v1.ResourceName]int64(nil),
|
||||
},
|
||||
TransientInfo: newTransientSchedulerInfo(),
|
||||
TransientInfo: NewTransientSchedulerInfo(),
|
||||
allocatableResource: &Resource{},
|
||||
generation: 3,
|
||||
usedPorts: util.HostPortInfo{
|
||||
"127.0.0.1": map[util.ProtocolPort]struct{}{
|
||||
usedPorts: HostPortInfo{
|
||||
"127.0.0.1": map[ProtocolPort]struct{}{
|
||||
{Protocol: "TCP", Port: 8080}: {},
|
||||
},
|
||||
},
|
||||
imageSizes: map[string]int64{},
|
||||
imageStates: map[string]*ImageStateSummary{},
|
||||
pods: []*v1.Pod{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@ -909,7 +902,7 @@ func TestNodeInfoRemovePod(t *testing.T) {
|
||||
err := ni.RemovePod(test.pod)
|
||||
if err != nil {
|
||||
if test.errExpected {
|
||||
expectedErrorMsg := fmt.Errorf("no corresponding pod %s in pods of node %s", test.pod.Name, ni.node.Name)
|
||||
expectedErrorMsg := fmt.Errorf("no corresponding pod %s in pods of node %s", test.pod.Name, ni.Node().Name)
|
||||
if expectedErrorMsg == err {
|
||||
t.Errorf("expected error: %v, got: %v", expectedErrorMsg, err)
|
||||
}
|
||||
@ -931,10 +924,10 @@ func TestNodeInfoRemovePod(t *testing.T) {
|
||||
|
||||
func fakeNodeInfo(pods ...*v1.Pod) *NodeInfo {
|
||||
ni := NewNodeInfo(pods...)
|
||||
ni.node = &v1.Node{
|
||||
ni.SetNode(&v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-node",
|
||||
},
|
||||
}
|
||||
})
|
||||
return ni
|
||||
}
|
||||
|
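Note: the node_info.go diff above exports helpers that were previously package-private (GetPodKey, UpdateUsedPorts, SetNode and the other accessors). A hedged sketch of how an outside caller might use that exported surface; the pod and node contents are illustrative.

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
)

func main() {
	node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-0"}}
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p", UID: types.UID("uid-1")}}

	ni := schedulercache.NewNodeInfo(pod) // aggregates the pod's requests and host ports
	if err := ni.SetNode(node); err != nil {
		panic(err)
	}

	key, err := schedulercache.GetPodKey(pod) // exported replacement for getPodKey
	if err != nil {
		panic(err)
	}
	fmt.Println(key, len(ni.Pods()), ni.Node().Name)
}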
43
vendor/k8s.io/kubernetes/pkg/scheduler/cache/util.go
generated
vendored
@ -16,7 +16,10 @@ limitations under the License.

package cache

import "k8s.io/api/core/v1"
import (
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/sets"
)

// CreateNodeNameToInfoMap obtains a list of pods and pivots that list into a map where the keys are node names
// and the values are the aggregated information for that node.
@ -29,11 +32,47 @@ func CreateNodeNameToInfoMap(pods []*v1.Pod, nodes []*v1.Node) map[string]*NodeI
		}
		nodeNameToInfo[nodeName].AddPod(pod)
	}
	imageExistenceMap := createImageExistenceMap(nodes)

	for _, node := range nodes {
		if _, ok := nodeNameToInfo[node.Name]; !ok {
			nodeNameToInfo[node.Name] = NewNodeInfo()
		}
		nodeNameToInfo[node.Name].SetNode(node)
		nodeInfo := nodeNameToInfo[node.Name]
		nodeInfo.SetNode(node)
		nodeInfo.imageStates = getNodeImageStates(node, imageExistenceMap)
	}
	return nodeNameToInfo
}

// getNodeImageStates returns the given node's image states based on the given imageExistence map.
func getNodeImageStates(node *v1.Node, imageExistenceMap map[string]sets.String) map[string]*ImageStateSummary {
	imageStates := make(map[string]*ImageStateSummary)

	for _, image := range node.Status.Images {
		for _, name := range image.Names {
			imageStates[name] = &ImageStateSummary{
				Size:     image.SizeBytes,
				NumNodes: len(imageExistenceMap[name]),
			}
		}
	}
	return imageStates
}

// createImageExistenceMap returns a map recording on which nodes the images exist, keyed by the images' names.
func createImageExistenceMap(nodes []*v1.Node) map[string]sets.String {
	imageExistenceMap := make(map[string]sets.String)
	for _, node := range nodes {
		for _, image := range node.Status.Images {
			for _, name := range image.Names {
				if _, ok := imageExistenceMap[name]; !ok {
					imageExistenceMap[name] = sets.NewString(node.Name)
				} else {
					imageExistenceMap[name].Insert(node.Name)
				}
			}
		}
	}
	return imageExistenceMap
}
134
vendor/k8s.io/kubernetes/pkg/scheduler/cache/util_test.go
generated
vendored
Normal file
@ -0,0 +1,134 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
)
|
||||
|
||||
const mb int64 = 1024 * 1024
|
||||
|
||||
func TestGetNodeImageStates(t *testing.T) {
|
||||
tests := []struct {
|
||||
node *v1.Node
|
||||
imageExistenceMap map[string]sets.String
|
||||
expected map[string]*ImageStateSummary
|
||||
}{
|
||||
{
|
||||
node: &v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "node-0"},
|
||||
Status: v1.NodeStatus{
|
||||
Images: []v1.ContainerImage{
|
||||
{
|
||||
Names: []string{
|
||||
"gcr.io/10:v1",
|
||||
},
|
||||
SizeBytes: int64(10 * mb),
|
||||
},
|
||||
{
|
||||
Names: []string{
|
||||
"gcr.io/200:v1",
|
||||
},
|
||||
SizeBytes: int64(200 * mb),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
imageExistenceMap: map[string]sets.String{
|
||||
"gcr.io/10:v1": sets.NewString("node-0", "node-1"),
|
||||
"gcr.io/200:v1": sets.NewString("node-0"),
|
||||
},
|
||||
expected: map[string]*ImageStateSummary{
|
||||
"gcr.io/10:v1": {
|
||||
Size: int64(10 * mb),
|
||||
NumNodes: 2,
|
||||
},
|
||||
"gcr.io/200:v1": {
|
||||
Size: int64(200 * mb),
|
||||
NumNodes: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
imageStates := getNodeImageStates(test.node, test.imageExistenceMap)
|
||||
if !reflect.DeepEqual(test.expected, imageStates) {
|
||||
t.Errorf("expected: %#v, got: %#v", test.expected, imageStates)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateImageExistenceMap(t *testing.T) {
|
||||
tests := []struct {
|
||||
nodes []*v1.Node
|
||||
expected map[string]sets.String
|
||||
}{
|
||||
{
|
||||
nodes: []*v1.Node{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "node-0"},
|
||||
Status: v1.NodeStatus{
|
||||
Images: []v1.ContainerImage{
|
||||
{
|
||||
Names: []string{
|
||||
"gcr.io/10:v1",
|
||||
},
|
||||
SizeBytes: int64(10 * mb),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "node-1"},
|
||||
Status: v1.NodeStatus{
|
||||
Images: []v1.ContainerImage{
|
||||
{
|
||||
Names: []string{
|
||||
"gcr.io/10:v1",
|
||||
},
|
||||
SizeBytes: int64(10 * mb),
|
||||
},
|
||||
{
|
||||
Names: []string{
|
||||
"gcr.io/200:v1",
|
||||
},
|
||||
SizeBytes: int64(200 * mb),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: map[string]sets.String{
|
||||
"gcr.io/10:v1": sets.NewString("node-0", "node-1"),
|
||||
"gcr.io/200:v1": sets.NewString("node-1"),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
imageMap := createImageExistenceMap(test.nodes)
|
||||
if !reflect.DeepEqual(test.expected, imageMap) {
|
||||
t.Errorf("expected: %#v, got: %#v", test.expected, imageMap)
|
||||
}
|
||||
}
|
||||
}
|
101
vendor/k8s.io/kubernetes/pkg/scheduler/core/BUILD
generated
vendored
@ -1,18 +1,44 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"extender.go",
|
||||
"generic_scheduler.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/pkg/scheduler/core",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//pkg/scheduler/algorithm:go_default_library",
|
||||
"//pkg/scheduler/algorithm/predicates:go_default_library",
|
||||
"//pkg/scheduler/api:go_default_library",
|
||||
"//pkg/scheduler/cache:go_default_library",
|
||||
"//pkg/scheduler/core/equivalence:go_default_library",
|
||||
"//pkg/scheduler/internal/cache:go_default_library",
|
||||
"//pkg/scheduler/internal/queue:go_default_library",
|
||||
"//pkg/scheduler/metrics:go_default_library",
|
||||
"//pkg/scheduler/util:go_default_library",
|
||||
"//pkg/scheduler/volumebinder:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//staging/src/k8s.io/apiserver/pkg/util/trace:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/rest:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
|
||||
"//vendor/k8s.io/klog:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"equivalence_cache_test.go",
|
||||
"extender_test.go",
|
||||
"generic_scheduler_test.go",
|
||||
"scheduling_queue_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
@ -22,52 +48,19 @@ go_test(
|
||||
"//pkg/scheduler/algorithm/priorities/util:go_default_library",
|
||||
"//pkg/scheduler/api:go_default_library",
|
||||
"//pkg/scheduler/cache:go_default_library",
|
||||
"//pkg/scheduler/core/equivalence:go_default_library",
|
||||
"//pkg/scheduler/internal/cache:go_default_library",
|
||||
"//pkg/scheduler/internal/queue:go_default_library",
|
||||
"//pkg/scheduler/testing:go_default_library",
|
||||
"//pkg/scheduler/util:go_default_library",
|
||||
"//vendor/k8s.io/api/apps/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"equivalence_cache.go",
|
||||
"extender.go",
|
||||
"generic_scheduler.go",
|
||||
"scheduling_queue.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/pkg/scheduler/core",
|
||||
deps = [
|
||||
"//pkg/api/v1/pod:go_default_library",
|
||||
"//pkg/scheduler/algorithm:go_default_library",
|
||||
"//pkg/scheduler/algorithm/predicates:go_default_library",
|
||||
"//pkg/scheduler/algorithm/priorities/util:go_default_library",
|
||||
"//pkg/scheduler/api:go_default_library",
|
||||
"//pkg/scheduler/cache:go_default_library",
|
||||
"//pkg/scheduler/metrics:go_default_library",
|
||||
"//pkg/scheduler/util:go_default_library",
|
||||
"//pkg/scheduler/volumebinder:go_default_library",
|
||||
"//pkg/util/hash:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/util/trace:go_default_library",
|
||||
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/rest:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
|
||||
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
|
||||
"//staging/src/k8s.io/api/apps/v1:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@ -80,6 +73,10 @@ filegroup(
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
srcs = [
|
||||
":package-srcs",
|
||||
"//pkg/scheduler/core/equivalence:all-srcs",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
49
vendor/k8s.io/kubernetes/pkg/scheduler/core/equivalence/BUILD
generated
vendored
Normal file
49
vendor/k8s.io/kubernetes/pkg/scheduler/core/equivalence/BUILD
generated
vendored
Normal file
@ -0,0 +1,49 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["eqivalence.go"],
|
||||
importpath = "k8s.io/kubernetes/pkg/scheduler/core/equivalence",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//pkg/features:go_default_library",
|
||||
"//pkg/scheduler/algorithm:go_default_library",
|
||||
"//pkg/scheduler/algorithm/predicates:go_default_library",
|
||||
"//pkg/scheduler/cache:go_default_library",
|
||||
"//pkg/scheduler/metrics:go_default_library",
|
||||
"//pkg/util/hash:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
"//vendor/k8s.io/klog:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["eqivalence_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//pkg/scheduler/algorithm:go_default_library",
|
||||
"//pkg/scheduler/algorithm/predicates:go_default_library",
|
||||
"//pkg/scheduler/cache:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
443
vendor/k8s.io/kubernetes/pkg/scheduler/core/equivalence/eqivalence.go
generated
vendored
Normal file
443
vendor/k8s.io/kubernetes/pkg/scheduler/core/equivalence/eqivalence.go
generated
vendored
Normal file
@ -0,0 +1,443 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package equivalence defines Pod equivalence classes and the equivalence class
|
||||
// cache.
|
||||
package equivalence
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"sync"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/klog"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
|
||||
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
|
||||
"k8s.io/kubernetes/pkg/scheduler/metrics"
|
||||
hashutil "k8s.io/kubernetes/pkg/util/hash"
|
||||
)
|
||||
|
||||
// nodeMap stores a *NodeCache for each node.
|
||||
type nodeMap map[string]*NodeCache
|
||||
|
||||
// Cache is a thread safe map saves and reuses the output of predicate functions,
|
||||
// it uses node name as key to access those cached results.
|
||||
//
|
||||
// Internally, results are keyed by predicate name, and "equivalence
|
||||
// class". (Equivalence class is defined in the `Class` type.) Saved results
|
||||
// will be reused until an appropriate invalidation function is called.
|
||||
type Cache struct {
|
||||
// NOTE(harry): Theoretically sync.Map has better performance in machine with 8+ CPUs, while
|
||||
// the reality is lock contention in first level cache is rare.
|
||||
mu sync.RWMutex
|
||||
nodeToCache nodeMap
|
||||
predicateIDMap map[string]int
|
||||
}
|
||||
|
||||
// NewCache create an empty equiv class cache.
|
||||
func NewCache(predicates []string) *Cache {
|
||||
predicateIDMap := make(map[string]int, len(predicates))
|
||||
for id, predicate := range predicates {
|
||||
predicateIDMap[predicate] = id
|
||||
}
|
||||
return &Cache{
|
||||
nodeToCache: make(nodeMap),
|
||||
predicateIDMap: predicateIDMap,
|
||||
}
|
||||
}
|
||||
|
||||
// NodeCache saves and reuses the output of predicate functions. Use RunPredicate to
|
||||
// get or update the cached results. An appropriate Invalidate* function should
|
||||
// be called when some predicate results are no longer valid.
|
||||
//
|
||||
// Internally, results are keyed by predicate name, and "equivalence
|
||||
// class". (Equivalence class is defined in the `Class` type.) Saved results
|
||||
// will be reused until an appropriate invalidation function is called.
|
||||
//
|
||||
// NodeCache objects are thread safe within the context of NodeCache,
|
||||
type NodeCache struct {
|
||||
mu sync.RWMutex
|
||||
cache predicateMap
|
||||
// generation is current generation of node cache, incremented on node
|
||||
// invalidation.
|
||||
generation uint64
|
||||
// snapshotGeneration saves snapshot of generation of node cache.
|
||||
snapshotGeneration uint64
|
||||
// predicateGenerations stores generation numbers for predicates, incremented on
|
||||
// predicate invalidation. Created on first update. Use 0 if does not
|
||||
// exist.
|
||||
predicateGenerations []uint64
|
||||
// snapshotPredicateGenerations saves snapshot of generation numbers for predicates.
|
||||
snapshotPredicateGenerations []uint64
|
||||
}
|
||||
|
||||
// newNodeCache returns an empty NodeCache.
|
||||
func newNodeCache(n int) *NodeCache {
|
||||
return &NodeCache{
|
||||
cache: make(predicateMap, n),
|
||||
predicateGenerations: make([]uint64, n),
|
||||
snapshotPredicateGenerations: make([]uint64, n),
|
||||
}
|
||||
}
|
||||
|
||||
// Snapshot snapshots current generations of cache.
|
||||
// NOTE: We snapshot generations of all node caches before using it and these
|
||||
// operations are serialized, we can save snapshot as member of node cache
|
||||
// itself.
|
||||
func (c *Cache) Snapshot() {
|
||||
c.mu.RLock()
|
||||
defer c.mu.RUnlock()
|
||||
for _, n := range c.nodeToCache {
|
||||
n.mu.Lock()
|
||||
// snapshot predicate generations
|
||||
copy(n.snapshotPredicateGenerations, n.predicateGenerations)
|
||||
// snapshot node generation
|
||||
n.snapshotGeneration = n.generation
|
||||
n.mu.Unlock()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetNodeCache returns the existing NodeCache for given node if present. Otherwise,
|
||||
// it creates the NodeCache and returns it.
|
||||
// The boolean flag is true if the value was loaded, false if created.
|
||||
func (c *Cache) GetNodeCache(name string) (nodeCache *NodeCache, exists bool) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
if nodeCache, exists = c.nodeToCache[name]; !exists {
|
||||
nodeCache = newNodeCache(len(c.predicateIDMap))
|
||||
c.nodeToCache[name] = nodeCache
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// LoadNodeCache returns the existing NodeCache for given node, nil if not
|
||||
// present.
|
||||
func (c *Cache) LoadNodeCache(node string) *NodeCache {
|
||||
c.mu.RLock()
|
||||
defer c.mu.RUnlock()
|
||||
return c.nodeToCache[node]
|
||||
}
|
||||
|
||||
func (c *Cache) predicateKeysToIDs(predicateKeys sets.String) []int {
|
||||
predicateIDs := make([]int, 0, len(predicateKeys))
|
||||
for predicateKey := range predicateKeys {
|
||||
if id, ok := c.predicateIDMap[predicateKey]; ok {
|
||||
predicateIDs = append(predicateIDs, id)
|
||||
} else {
|
||||
klog.Errorf("predicate key %q not found", predicateKey)
|
||||
}
|
||||
}
|
||||
return predicateIDs
|
||||
}
|
||||
|
||||
// InvalidatePredicates clears all cached results for the given predicates.
|
||||
func (c *Cache) InvalidatePredicates(predicateKeys sets.String) {
|
||||
if len(predicateKeys) == 0 {
|
||||
return
|
||||
}
|
||||
c.mu.RLock()
|
||||
defer c.mu.RUnlock()
|
||||
predicateIDs := c.predicateKeysToIDs(predicateKeys)
|
||||
for _, n := range c.nodeToCache {
|
||||
n.invalidatePreds(predicateIDs)
|
||||
}
|
||||
klog.V(5).Infof("Cache invalidation: node=*,predicates=%v", predicateKeys)
|
||||
|
||||
}
|
||||
|
||||
// InvalidatePredicatesOnNode clears cached results for the given predicates on one node.
|
||||
func (c *Cache) InvalidatePredicatesOnNode(nodeName string, predicateKeys sets.String) {
|
||||
if len(predicateKeys) == 0 {
|
||||
return
|
||||
}
|
||||
c.mu.RLock()
|
||||
defer c.mu.RUnlock()
|
||||
predicateIDs := c.predicateKeysToIDs(predicateKeys)
|
||||
if n, ok := c.nodeToCache[nodeName]; ok {
|
||||
n.invalidatePreds(predicateIDs)
|
||||
}
|
||||
klog.V(5).Infof("Cache invalidation: node=%s,predicates=%v", nodeName, predicateKeys)
|
||||
}
|
||||
|
||||
// InvalidateAllPredicatesOnNode clears all cached results for one node.
|
||||
func (c *Cache) InvalidateAllPredicatesOnNode(nodeName string) {
|
||||
c.mu.RLock()
|
||||
defer c.mu.RUnlock()
|
||||
if node, ok := c.nodeToCache[nodeName]; ok {
|
||||
node.invalidate()
|
||||
}
|
||||
klog.V(5).Infof("Cache invalidation: node=%s,predicates=*", nodeName)
|
||||
}
|
||||
|
||||
// InvalidateCachedPredicateItemForPodAdd is a wrapper of
|
||||
// InvalidateCachedPredicateItem for pod add case
|
||||
// TODO: This does not belong with the equivalence cache implementation.
|
||||
func (c *Cache) InvalidateCachedPredicateItemForPodAdd(pod *v1.Pod, nodeName string) {
|
||||
// MatchInterPodAffinity: we assume scheduler can make sure newly bound pod
|
||||
// will not break the existing inter pod affinity. So we does not need to
|
||||
// invalidate MatchInterPodAffinity when pod added.
|
||||
//
|
||||
// But when a pod is deleted, existing inter pod affinity may become invalid.
|
||||
// (e.g. this pod was preferred by some else, or vice versa)
|
||||
//
|
||||
// NOTE: assumptions above will not stand when we implemented features like
|
||||
// RequiredDuringSchedulingRequiredDuringExecutioc.
|
||||
|
||||
// NoDiskConflict: the newly scheduled pod fits to existing pods on this node,
|
||||
// it will also fits to equivalence class of existing pods
|
||||
|
||||
// GeneralPredicates: will always be affected by adding a new pod
|
||||
invalidPredicates := sets.NewString(predicates.GeneralPred)
|
||||
|
||||
// MaxPDVolumeCountPredicate: we check the volumes of pod to make decisioc.
|
||||
for _, vol := range pod.Spec.Volumes {
|
||||
if vol.PersistentVolumeClaim != nil {
|
||||
invalidPredicates.Insert(
|
||||
predicates.MaxEBSVolumeCountPred,
|
||||
predicates.MaxGCEPDVolumeCountPred,
|
||||
predicates.MaxAzureDiskVolumeCountPred)
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) {
|
||||
invalidPredicates.Insert(predicates.MaxCSIVolumeCountPred)
|
||||
}
|
||||
} else {
|
||||
// We do not consider CSI volumes here because CSI
|
||||
// volumes can not be used inline.
|
||||
if vol.AWSElasticBlockStore != nil {
|
||||
invalidPredicates.Insert(predicates.MaxEBSVolumeCountPred)
|
||||
}
|
||||
if vol.GCEPersistentDisk != nil {
|
||||
invalidPredicates.Insert(predicates.MaxGCEPDVolumeCountPred)
|
||||
}
|
||||
if vol.AzureDisk != nil {
|
||||
invalidPredicates.Insert(predicates.MaxAzureDiskVolumeCountPred)
|
||||
}
|
||||
}
|
||||
}
|
||||
c.InvalidatePredicatesOnNode(nodeName, invalidPredicates)
|
||||
}
|
||||
|
||||
// Class represents a set of pods which are equivalent from the perspective of
|
||||
// the scheduler. i.e. the scheduler would make the same decision for any pod
|
||||
// from the same class.
|
||||
type Class struct {
|
||||
// Equivalence hash
|
||||
hash uint64
|
||||
}
|
||||
|
||||
// NewClass returns the equivalence class for a given Pod. The returned Class
|
||||
// objects will be equal for two Pods in the same class. nil values should not
|
||||
// be considered equal to each other.
|
||||
//
|
||||
// NOTE: Make sure to compare types of Class and not *Class.
|
||||
// TODO(misterikkit): Return error instead of nil *Class.
|
||||
func NewClass(pod *v1.Pod) *Class {
|
||||
equivalencePod := getEquivalencePod(pod)
|
||||
if equivalencePod != nil {
|
||||
hash := fnv.New32a()
|
||||
hashutil.DeepHashObject(hash, equivalencePod)
|
||||
return &Class{
|
||||
hash: uint64(hash.Sum32()),
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// predicateMap stores resultMaps with predicate ID as the key.
|
||||
type predicateMap []resultMap
|
||||
|
||||
// resultMap stores PredicateResult with pod equivalence hash as the key.
|
||||
type resultMap map[uint64]predicateResult
|
||||
|
||||
// predicateResult stores the output of a FitPredicate.
|
||||
type predicateResult struct {
|
||||
Fit bool
|
||||
FailReasons []algorithm.PredicateFailureReason
|
||||
}
|
||||
|
||||
// RunPredicate returns a cached predicate result. In case of a cache miss, the predicate will be
|
||||
// run and its results cached for the next call.
|
||||
//
|
||||
// NOTE: RunPredicate will not update the equivalence cache if generation does not match live version.
|
||||
func (n *NodeCache) RunPredicate(
|
||||
pred algorithm.FitPredicate,
|
||||
predicateKey string,
|
||||
predicateID int,
|
||||
pod *v1.Pod,
|
||||
meta algorithm.PredicateMetadata,
|
||||
nodeInfo *schedulercache.NodeInfo,
|
||||
equivClass *Class,
|
||||
) (bool, []algorithm.PredicateFailureReason, error) {
|
||||
if nodeInfo == nil || nodeInfo.Node() == nil {
|
||||
// This may happen during tests.
|
||||
return false, []algorithm.PredicateFailureReason{}, fmt.Errorf("nodeInfo is nil or node is invalid")
|
||||
}
|
||||
|
||||
result, ok := n.lookupResult(pod.GetName(), nodeInfo.Node().GetName(), predicateKey, predicateID, equivClass.hash)
|
||||
if ok {
|
||||
return result.Fit, result.FailReasons, nil
|
||||
}
|
||||
fit, reasons, err := pred(pod, meta, nodeInfo)
|
||||
if err != nil {
|
||||
return fit, reasons, err
|
||||
}
|
||||
n.updateResult(pod.GetName(), predicateKey, predicateID, fit, reasons, equivClass.hash, nodeInfo)
|
||||
return fit, reasons, nil
|
||||
}
|
||||
|
||||
// updateResult updates the cached result of a predicate.
|
||||
func (n *NodeCache) updateResult(
|
||||
podName, predicateKey string,
|
||||
predicateID int,
|
||||
fit bool,
|
||||
reasons []algorithm.PredicateFailureReason,
|
||||
equivalenceHash uint64,
|
||||
nodeInfo *schedulercache.NodeInfo,
|
||||
) {
|
||||
if nodeInfo == nil || nodeInfo.Node() == nil {
|
||||
// This may happen during tests.
|
||||
metrics.EquivalenceCacheWrites.WithLabelValues("discarded_bad_node").Inc()
|
||||
return
|
||||
}
|
||||
|
||||
predicateItem := predicateResult{
|
||||
Fit: fit,
|
||||
FailReasons: reasons,
|
||||
}
|
||||
|
||||
n.mu.Lock()
|
||||
defer n.mu.Unlock()
|
||||
if (n.snapshotGeneration != n.generation) || (n.snapshotPredicateGenerations[predicateID] != n.predicateGenerations[predicateID]) {
|
||||
// Generation of node or predicate has been updated since we last took
|
||||
// a snapshot, this indicates that we received a invalidation request
|
||||
// during this time. Cache may be stale, skip update.
|
||||
metrics.EquivalenceCacheWrites.WithLabelValues("discarded_stale").Inc()
|
||||
return
|
||||
}
|
||||
// If cached predicate map already exists, just update the predicate by key
|
||||
if predicates := n.cache[predicateID]; predicates != nil {
|
||||
// maps in golang are references, no need to add them back
|
||||
predicates[equivalenceHash] = predicateItem
|
||||
} else {
|
||||
n.cache[predicateID] =
|
||||
resultMap{
|
||||
equivalenceHash: predicateItem,
|
||||
}
|
||||
}
|
||||
n.predicateGenerations[predicateID]++
|
||||
|
||||
klog.V(5).Infof("Cache update: node=%s, predicate=%s,pod=%s,value=%v",
|
||||
nodeInfo.Node().Name, predicateKey, podName, predicateItem)
|
||||
}
|
||||
|
||||
// lookupResult returns cached predicate results and a bool saying whether a
|
||||
// cache entry was found.
|
||||
func (n *NodeCache) lookupResult(
|
||||
podName, nodeName, predicateKey string,
|
||||
predicateID int,
|
||||
equivalenceHash uint64,
|
||||
) (value predicateResult, ok bool) {
|
||||
n.mu.RLock()
|
||||
defer n.mu.RUnlock()
|
||||
value, ok = n.cache[predicateID][equivalenceHash]
|
||||
if ok {
|
||||
metrics.EquivalenceCacheHits.Inc()
|
||||
} else {
|
||||
metrics.EquivalenceCacheMisses.Inc()
|
||||
}
|
||||
return value, ok
|
||||
}
|
||||
|
||||
// invalidatePreds deletes cached predicates by given IDs.
|
||||
func (n *NodeCache) invalidatePreds(predicateIDs []int) {
|
||||
n.mu.Lock()
|
||||
defer n.mu.Unlock()
|
||||
for _, predicateID := range predicateIDs {
|
||||
n.cache[predicateID] = nil
|
||||
n.predicateGenerations[predicateID]++
|
||||
}
|
||||
}
|
||||
|
||||
// invalidate invalidates node cache.
|
||||
func (n *NodeCache) invalidate() {
|
||||
n.mu.Lock()
|
||||
defer n.mu.Unlock()
|
||||
n.cache = make(predicateMap, len(n.cache))
|
||||
n.generation++
|
||||
}
|
||||
|
||||
// equivalencePod is the set of pod attributes which must match for two pods to
|
||||
// be considered equivalent for scheduling purposes. For correctness, this must
|
||||
// include any Pod field which is used by a FitPredicate.
|
||||
//
|
||||
// NOTE: For equivalence hash to be formally correct, lists and maps in the
|
||||
// equivalencePod should be normalized. (e.g. by sorting them) However, the vast
|
||||
// majority of equivalent pod classes are expected to be created from a single
|
||||
// pod template, so they will all have the same ordering.
|
||||
type equivalencePod struct {
|
||||
Namespace *string
|
||||
Labels map[string]string
|
||||
Affinity *v1.Affinity
|
||||
Containers []v1.Container // See note about ordering
|
||||
InitContainers []v1.Container // See note about ordering
|
||||
NodeName *string
|
||||
NodeSelector map[string]string
|
||||
Tolerations []v1.Toleration
|
||||
Volumes []v1.Volume // See note about ordering
|
||||
}
|
||||
|
||||
// getEquivalencePod returns a normalized representation of a pod so that two
|
||||
// "equivalent" pods will hash to the same value.
|
||||
func getEquivalencePod(pod *v1.Pod) *equivalencePod {
|
||||
ep := &equivalencePod{
|
||||
Namespace: &pod.Namespace,
|
||||
Labels: pod.Labels,
|
||||
Affinity: pod.Spec.Affinity,
|
||||
Containers: pod.Spec.Containers,
|
||||
InitContainers: pod.Spec.InitContainers,
|
||||
NodeName: &pod.Spec.NodeName,
|
||||
NodeSelector: pod.Spec.NodeSelector,
|
||||
Tolerations: pod.Spec.Tolerations,
|
||||
Volumes: pod.Spec.Volumes,
|
||||
}
|
||||
// DeepHashObject considers nil and empty slices to be different. Normalize them.
|
||||
if len(ep.Containers) == 0 {
|
||||
ep.Containers = nil
|
||||
}
|
||||
if len(ep.InitContainers) == 0 {
|
||||
ep.InitContainers = nil
|
||||
}
|
||||
if len(ep.Tolerations) == 0 {
|
||||
ep.Tolerations = nil
|
||||
}
|
||||
if len(ep.Volumes) == 0 {
|
||||
ep.Volumes = nil
|
||||
}
|
||||
// Normalize empty maps also.
|
||||
if len(ep.Labels) == 0 {
|
||||
ep.Labels = nil
|
||||
}
|
||||
if len(ep.NodeSelector) == 0 {
|
||||
ep.NodeSelector = nil
|
||||
}
|
||||
// TODO(misterikkit): Also normalize nested maps and slices.
|
||||
return ep
|
||||
}
|
@ -14,24 +14,20 @@ See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package core
|
||||
package equivalence
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"reflect"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
|
||||
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
|
||||
schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing"
|
||||
)
|
||||
|
||||
// makeBasicPod returns a Pod object with many of the fields populated.
|
||||
@ -158,16 +154,6 @@ type predicateItemType struct {
|
||||
reasons []algorithm.PredicateFailureReason
|
||||
}
|
||||
|
||||
// upToDateCache is a fake Cache where IsUpToDate always returns true.
|
||||
type upToDateCache = schedulertesting.FakeCache
|
||||
|
||||
// staleNodeCache is a fake Cache where IsUpToDate always returns false.
|
||||
type staleNodeCache struct {
|
||||
schedulertesting.FakeCache
|
||||
}
|
||||
|
||||
func (c *staleNodeCache) IsUpToDate(*schedulercache.NodeInfo) bool { return false }
|
||||
|
||||
// mockPredicate provides an algorithm.FitPredicate with pre-set return values.
|
||||
type mockPredicate struct {
|
||||
fit bool
|
||||
@ -185,7 +171,6 @@ func TestRunPredicate(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
pred mockPredicate
|
||||
cache schedulercache.Cache
|
||||
expectFit, expectCacheHit, expectCacheWrite bool
|
||||
expectedReasons []algorithm.PredicateFailureReason
|
||||
expectedError string
|
||||
@ -193,7 +178,6 @@ func TestRunPredicate(t *testing.T) {
|
||||
{
|
||||
name: "pod fits/cache hit",
|
||||
pred: mockPredicate{},
|
||||
cache: &upToDateCache{},
|
||||
expectFit: true,
|
||||
expectCacheHit: true,
|
||||
expectCacheWrite: false,
|
||||
@ -201,23 +185,13 @@ func TestRunPredicate(t *testing.T) {
|
||||
{
|
||||
name: "pod fits/cache miss",
|
||||
pred: mockPredicate{fit: true},
|
||||
cache: &upToDateCache{},
|
||||
expectFit: true,
|
||||
expectCacheHit: false,
|
||||
expectCacheWrite: true,
|
||||
},
|
||||
{
|
||||
name: "pod fits/cache miss/no write",
|
||||
pred: mockPredicate{fit: true},
|
||||
cache: &staleNodeCache{},
|
||||
expectFit: true,
|
||||
expectCacheHit: false,
|
||||
expectCacheWrite: false,
|
||||
},
|
||||
{
|
||||
name: "pod doesn't fit/cache miss",
|
||||
pred: mockPredicate{reasons: []algorithm.PredicateFailureReason{predicates.ErrFakePredicate}},
|
||||
cache: &upToDateCache{},
|
||||
expectFit: false,
|
||||
expectCacheHit: false,
|
||||
expectCacheWrite: true,
|
||||
@ -226,7 +200,6 @@ func TestRunPredicate(t *testing.T) {
|
||||
{
|
||||
name: "pod doesn't fit/cache hit",
|
||||
pred: mockPredicate{},
|
||||
cache: &upToDateCache{},
|
||||
expectFit: false,
|
||||
expectCacheHit: true,
|
||||
expectCacheWrite: false,
|
||||
@ -235,7 +208,6 @@ func TestRunPredicate(t *testing.T) {
|
||||
{
|
||||
name: "predicate error",
|
||||
pred: mockPredicate{err: errors.New("This is expected")},
|
||||
cache: &upToDateCache{},
|
||||
expectFit: false,
|
||||
expectCacheHit: false,
|
||||
expectCacheWrite: false,
|
||||
@ -243,20 +215,27 @@ func TestRunPredicate(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
predicatesOrdering := []string{"testPredicate"}
|
||||
predicateID := 0
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
node := schedulercache.NewNodeInfo()
|
||||
node.SetNode(&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "n1"}})
|
||||
testNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "n1"}}
|
||||
node.SetNode(testNode)
|
||||
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p1"}}
|
||||
meta := algorithm.EmptyPredicateMetadataProducer(nil, nil)
|
||||
|
||||
ecache := NewEquivalenceCache()
|
||||
equivClass := ecache.getEquivalenceClassInfo(pod)
|
||||
// Initialize and populate equivalence class cache.
|
||||
ecache := NewCache(predicatesOrdering)
|
||||
ecache.Snapshot()
|
||||
nodeCache, _ := ecache.GetNodeCache(testNode.Name)
|
||||
|
||||
equivClass := NewClass(pod)
|
||||
if test.expectCacheHit {
|
||||
ecache.updateResult(pod.Name, "testPredicate", test.expectFit, test.expectedReasons, equivClass.hash, test.cache, node)
|
||||
nodeCache.updateResult(pod.Name, "testPredicate", predicateID, test.expectFit, test.expectedReasons, equivClass.hash, node)
|
||||
}
|
||||
|
||||
fit, reasons, err := ecache.RunPredicate(test.pred.predicate, "testPredicate", pod, meta, node, equivClass, test.cache)
|
||||
fit, reasons, err := nodeCache.RunPredicate(test.pred.predicate, "testPredicate", predicateID, pod, meta, node, equivClass)
|
||||
|
||||
if err != nil {
|
||||
if err.Error() != test.expectedError {
|
||||
@ -287,14 +266,14 @@ func TestRunPredicate(t *testing.T) {
|
||||
if !test.expectCacheHit && test.pred.callCount == 0 {
|
||||
t.Errorf("Predicate should be called")
|
||||
}
|
||||
_, _, invalid := ecache.lookupResult(pod.Name, node.Node().Name, "testPredicate", equivClass.hash)
|
||||
if invalid && test.expectCacheWrite {
|
||||
_, ok := nodeCache.lookupResult(pod.Name, node.Node().Name, "testPredicate", predicateID, equivClass.hash)
|
||||
if !ok && test.expectCacheWrite {
|
||||
t.Errorf("Cache write should happen")
|
||||
}
|
||||
if !test.expectCacheHit && test.expectCacheWrite && invalid {
|
||||
if !test.expectCacheHit && test.expectCacheWrite && !ok {
|
||||
t.Errorf("Cache write should happen")
|
||||
}
|
||||
if !test.expectCacheHit && !test.expectCacheWrite && !invalid {
|
||||
if !test.expectCacheHit && !test.expectCacheWrite && ok {
|
||||
t.Errorf("Cache write should not happen")
|
||||
}
|
||||
})
|
||||
@ -302,80 +281,86 @@ func TestRunPredicate(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestUpdateResult(t *testing.T) {
|
||||
predicatesOrdering := []string{"GeneralPredicates"}
|
||||
tests := []struct {
|
||||
name string
|
||||
pod string
|
||||
predicateKey string
|
||||
predicateID int
|
||||
nodeName string
|
||||
fit bool
|
||||
reasons []algorithm.PredicateFailureReason
|
||||
equivalenceHash uint64
|
||||
expectPredicateMap bool
|
||||
expectCacheItem HostPredicate
|
||||
cache schedulercache.Cache
|
||||
expectCacheItem predicateResult
|
||||
}{
|
||||
{
|
||||
name: "test 1",
|
||||
pod: "testPod",
|
||||
predicateKey: "GeneralPredicates",
|
||||
predicateID: 0,
|
||||
nodeName: "node1",
|
||||
fit: true,
|
||||
equivalenceHash: 123,
|
||||
expectPredicateMap: false,
|
||||
expectCacheItem: HostPredicate{
|
||||
expectCacheItem: predicateResult{
|
||||
Fit: true,
|
||||
},
|
||||
cache: &upToDateCache{},
|
||||
},
|
||||
{
|
||||
name: "test 2",
|
||||
pod: "testPod",
|
||||
predicateKey: "GeneralPredicates",
|
||||
predicateID: 0,
|
||||
nodeName: "node2",
|
||||
fit: false,
|
||||
equivalenceHash: 123,
|
||||
expectPredicateMap: true,
|
||||
expectCacheItem: HostPredicate{
|
||||
expectCacheItem: predicateResult{
|
||||
Fit: false,
|
||||
},
|
||||
cache: &upToDateCache{},
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
ecache := NewEquivalenceCache()
|
||||
if test.expectPredicateMap {
|
||||
ecache.algorithmCache[test.nodeName] = AlgorithmCache{}
|
||||
predicateItem := HostPredicate{
|
||||
Fit: true,
|
||||
}
|
||||
ecache.algorithmCache[test.nodeName][test.predicateKey] =
|
||||
PredicateMap{
|
||||
test.equivalenceHash: predicateItem,
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
node := schedulercache.NewNodeInfo()
|
||||
testNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: test.nodeName}}
|
||||
node.SetNode(testNode)
|
||||
|
||||
// Initialize and populate equivalence class cache.
|
||||
ecache := NewCache(predicatesOrdering)
|
||||
nodeCache, _ := ecache.GetNodeCache(testNode.Name)
|
||||
|
||||
if test.expectPredicateMap {
|
||||
predicateItem := predicateResult{
|
||||
Fit: true,
|
||||
}
|
||||
}
|
||||
|
||||
node := schedulercache.NewNodeInfo()
|
||||
node.SetNode(&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: test.nodeName}})
|
||||
ecache.updateResult(
|
||||
test.pod,
|
||||
test.predicateKey,
|
||||
test.fit,
|
||||
test.reasons,
|
||||
test.equivalenceHash,
|
||||
test.cache,
|
||||
node,
|
||||
)
|
||||
|
||||
cachedMapItem, ok := ecache.algorithmCache[test.nodeName][test.predicateKey]
|
||||
if !ok {
|
||||
t.Errorf("Failed: %s, can't find expected cache item: %v",
|
||||
test.name, test.expectCacheItem)
|
||||
} else {
|
||||
if !reflect.DeepEqual(cachedMapItem[test.equivalenceHash], test.expectCacheItem) {
|
||||
t.Errorf("Failed: %s, expected cached item: %v, but got: %v",
|
||||
test.name, test.expectCacheItem, cachedMapItem[test.equivalenceHash])
|
||||
nodeCache.cache[test.predicateID] =
|
||||
resultMap{
|
||||
test.equivalenceHash: predicateItem,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
nodeCache.updateResult(
|
||||
test.pod,
|
||||
test.predicateKey,
|
||||
test.predicateID,
|
||||
test.fit,
|
||||
test.reasons,
|
||||
test.equivalenceHash,
|
||||
node,
|
||||
)
|
||||
|
||||
cachedMapItem := nodeCache.cache[test.predicateID]
|
||||
if cachedMapItem == nil {
|
||||
t.Errorf("can't find expected cache item: %v", test.expectCacheItem)
|
||||
} else {
|
||||
if !reflect.DeepEqual(cachedMapItem[test.equivalenceHash], test.expectCacheItem) {
|
||||
t.Errorf("expected cached item: %v, but got: %v",
|
||||
test.expectCacheItem, cachedMapItem[test.equivalenceHash])
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@ -388,146 +373,157 @@ func slicesEqual(a, b []algorithm.PredicateFailureReason) bool {
|
||||
}
|
||||
|
||||
func TestLookupResult(t *testing.T) {
|
||||
predicatesOrdering := []string{"GeneralPredicates"}
|
||||
tests := []struct {
|
||||
name string
|
||||
podName string
|
||||
nodeName string
|
||||
predicateKey string
|
||||
predicateID int
|
||||
equivalenceHashForUpdatePredicate uint64
|
||||
equivalenceHashForCalPredicate uint64
|
||||
cachedItem predicateItemType
|
||||
expectedInvalidPredicateKey bool
|
||||
expectedInvalidEquivalenceHash bool
|
||||
expectedPredicateKeyMiss bool
|
||||
expectedEquivalenceHashMiss bool
|
||||
expectedPredicateItem predicateItemType
|
||||
cache schedulercache.Cache
|
||||
}{
|
||||
{
|
||||
name: "test 1",
|
||||
podName: "testPod",
|
||||
nodeName: "node1",
|
||||
name: "test 1",
|
||||
podName: "testPod",
|
||||
nodeName: "node1",
|
||||
equivalenceHashForUpdatePredicate: 123,
|
||||
equivalenceHashForCalPredicate: 123,
|
||||
predicateKey: "GeneralPredicates",
|
||||
predicateID: 0,
|
||||
cachedItem: predicateItemType{
|
||||
fit: false,
|
||||
reasons: []algorithm.PredicateFailureReason{predicates.ErrPodNotFitsHostPorts},
|
||||
},
|
||||
expectedInvalidPredicateKey: true,
|
||||
expectedPredicateKeyMiss: true,
|
||||
expectedPredicateItem: predicateItemType{
|
||||
fit: false,
|
||||
reasons: []algorithm.PredicateFailureReason{},
|
||||
},
|
||||
cache: &upToDateCache{},
|
||||
},
|
||||
{
|
||||
name: "test 2",
|
||||
podName: "testPod",
|
||||
nodeName: "node2",
|
||||
name: "test 2",
|
||||
podName: "testPod",
|
||||
nodeName: "node2",
|
||||
equivalenceHashForUpdatePredicate: 123,
|
||||
equivalenceHashForCalPredicate: 123,
|
||||
predicateKey: "GeneralPredicates",
|
||||
predicateID: 0,
|
||||
cachedItem: predicateItemType{
|
||||
fit: true,
|
||||
},
|
||||
expectedInvalidPredicateKey: false,
|
||||
expectedPredicateKeyMiss: false,
|
||||
expectedPredicateItem: predicateItemType{
|
||||
fit: true,
|
||||
reasons: []algorithm.PredicateFailureReason{},
|
||||
},
|
||||
cache: &upToDateCache{},
|
||||
},
|
||||
{
|
||||
name: "test 3",
|
||||
podName: "testPod",
|
||||
nodeName: "node3",
|
||||
name: "test 3",
|
||||
podName: "testPod",
|
||||
nodeName: "node3",
|
||||
equivalenceHashForUpdatePredicate: 123,
|
||||
equivalenceHashForCalPredicate: 123,
|
||||
predicateKey: "GeneralPredicates",
|
||||
predicateID: 0,
|
||||
cachedItem: predicateItemType{
|
||||
fit: false,
|
||||
reasons: []algorithm.PredicateFailureReason{predicates.ErrPodNotFitsHostPorts},
|
||||
},
|
||||
expectedInvalidPredicateKey: false,
|
||||
expectedPredicateKeyMiss: false,
|
||||
expectedPredicateItem: predicateItemType{
|
||||
fit: false,
|
||||
reasons: []algorithm.PredicateFailureReason{predicates.ErrPodNotFitsHostPorts},
|
||||
},
|
||||
cache: &upToDateCache{},
|
||||
},
|
||||
{
|
||||
name: "test 4",
|
||||
podName: "testPod",
|
||||
nodeName: "node4",
|
||||
name: "test 4",
|
||||
podName: "testPod",
|
||||
nodeName: "node4",
|
||||
equivalenceHashForUpdatePredicate: 123,
|
||||
equivalenceHashForCalPredicate: 456,
|
||||
predicateKey: "GeneralPredicates",
|
||||
predicateID: 0,
|
||||
cachedItem: predicateItemType{
|
||||
fit: false,
|
||||
reasons: []algorithm.PredicateFailureReason{predicates.ErrPodNotFitsHostPorts},
|
||||
},
|
||||
expectedInvalidPredicateKey: false,
|
||||
expectedInvalidEquivalenceHash: true,
|
||||
expectedPredicateKeyMiss: false,
|
||||
expectedEquivalenceHashMiss: true,
|
||||
expectedPredicateItem: predicateItemType{
|
||||
fit: false,
|
||||
reasons: []algorithm.PredicateFailureReason{},
|
||||
},
|
||||
cache: &upToDateCache{},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
ecache := NewEquivalenceCache()
|
||||
node := schedulercache.NewNodeInfo()
|
||||
node.SetNode(&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: test.nodeName}})
|
||||
// set cached item to equivalence cache
|
||||
ecache.updateResult(
|
||||
test.podName,
|
||||
test.predicateKey,
|
||||
test.cachedItem.fit,
|
||||
test.cachedItem.reasons,
|
||||
test.equivalenceHashForUpdatePredicate,
|
||||
test.cache,
|
||||
node,
|
||||
)
|
||||
// if we want to do invalid, invalid the cached item
|
||||
if test.expectedInvalidPredicateKey {
|
||||
predicateKeys := sets.NewString()
|
||||
predicateKeys.Insert(test.predicateKey)
|
||||
ecache.InvalidateCachedPredicateItem(test.nodeName, predicateKeys)
|
||||
}
|
||||
// calculate predicate with equivalence cache
|
||||
fit, reasons, invalid := ecache.lookupResult(test.podName,
|
||||
test.nodeName,
|
||||
test.predicateKey,
|
||||
test.equivalenceHashForCalPredicate,
|
||||
)
|
||||
// returned invalid should match expectedInvalidPredicateKey or expectedInvalidEquivalenceHash
|
||||
if test.equivalenceHashForUpdatePredicate != test.equivalenceHashForCalPredicate {
|
||||
if invalid != test.expectedInvalidEquivalenceHash {
|
||||
t.Errorf("Failed: %s, expected invalid: %v, but got: %v",
|
||||
test.name, test.expectedInvalidEquivalenceHash, invalid)
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
testNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: test.nodeName}}
|
||||
|
||||
// Initialize and populate equivalence class cache.
|
||||
ecache := NewCache(predicatesOrdering)
|
||||
nodeCache, _ := ecache.GetNodeCache(testNode.Name)
|
||||
|
||||
node := schedulercache.NewNodeInfo()
|
||||
node.SetNode(testNode)
|
||||
// set cached item to equivalence cache
|
||||
nodeCache.updateResult(
|
||||
test.podName,
|
||||
test.predicateKey,
|
||||
test.predicateID,
|
||||
test.cachedItem.fit,
|
||||
test.cachedItem.reasons,
|
||||
test.equivalenceHashForUpdatePredicate,
|
||||
node,
|
||||
)
|
||||
// if we want to do invalid, invalid the cached item
|
||||
if test.expectedPredicateKeyMiss {
|
||||
predicateKeys := sets.NewString()
|
||||
predicateKeys.Insert(test.predicateKey)
|
||||
ecache.InvalidatePredicatesOnNode(test.nodeName, predicateKeys)
|
||||
}
|
||||
} else {
|
||||
if invalid != test.expectedInvalidPredicateKey {
|
||||
t.Errorf("Failed: %s, expected invalid: %v, but got: %v",
|
||||
test.name, test.expectedInvalidPredicateKey, invalid)
|
||||
// calculate predicate with equivalence cache
|
||||
result, ok := nodeCache.lookupResult(test.podName,
|
||||
test.nodeName,
|
||||
test.predicateKey,
|
||||
test.predicateID,
|
||||
test.equivalenceHashForCalPredicate,
|
||||
)
|
||||
fit, reasons := result.Fit, result.FailReasons
|
||||
// returned invalid should match expectedPredicateKeyMiss or expectedEquivalenceHashMiss
|
||||
if test.equivalenceHashForUpdatePredicate != test.equivalenceHashForCalPredicate {
|
||||
if ok && test.expectedEquivalenceHashMiss {
|
||||
t.Errorf("Failed: %s, expected (equivalence hash) cache miss", test.name)
|
||||
}
|
||||
if !ok && !test.expectedEquivalenceHashMiss {
|
||||
t.Errorf("Failed: %s, expected (equivalence hash) cache hit", test.name)
|
||||
}
|
||||
} else {
|
||||
if ok && test.expectedPredicateKeyMiss {
|
||||
t.Errorf("Failed: %s, expected (predicate key) cache miss", test.name)
|
||||
}
|
||||
if !ok && !test.expectedPredicateKeyMiss {
|
||||
t.Errorf("Failed: %s, expected (predicate key) cache hit", test.name)
|
||||
}
|
||||
}
|
||||
}
|
||||
// returned predicate result should match expected predicate item
|
||||
if fit != test.expectedPredicateItem.fit {
|
||||
t.Errorf("Failed: %s, expected fit: %v, but got: %v", test.name, test.cachedItem.fit, fit)
|
||||
}
|
||||
if !slicesEqual(reasons, test.expectedPredicateItem.reasons) {
|
||||
t.Errorf("Failed: %s, expected reasons: %v, but got: %v",
|
||||
test.name, test.expectedPredicateItem.reasons, reasons)
|
||||
}
|
||||
// returned predicate result should match expected predicate item
|
||||
if fit != test.expectedPredicateItem.fit {
|
||||
t.Errorf("Failed: %s, expected fit: %v, but got: %v", test.name, test.cachedItem.fit, fit)
|
||||
}
|
||||
if !slicesEqual(reasons, test.expectedPredicateItem.reasons) {
|
||||
t.Errorf("Failed: %s, expected reasons: %v, but got: %v",
|
||||
test.name, test.expectedPredicateItem.reasons, reasons)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetEquivalenceHash(t *testing.T) {
|
||||
|
||||
ecache := NewEquivalenceCache()
|
||||
|
||||
pod1 := makeBasicPod("pod1")
|
||||
pod2 := makeBasicPod("pod2")
|
||||
|
||||
@ -618,7 +614,7 @@ func TestGetEquivalenceHash(t *testing.T) {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
for i, podInfo := range test.podInfoList {
|
||||
testPod := podInfo.pod
|
||||
eclassInfo := ecache.getEquivalenceClassInfo(testPod)
|
||||
eclassInfo := NewClass(testPod)
|
||||
if eclassInfo == nil && podInfo.hashIsValid {
|
||||
t.Errorf("Failed: pod %v is expected to have valid hash", testPod)
|
||||
}
|
||||
@ -644,17 +640,20 @@ func TestGetEquivalenceHash(t *testing.T) {
|
||||
|
||||
func TestInvalidateCachedPredicateItemOfAllNodes(t *testing.T) {
|
||||
testPredicate := "GeneralPredicates"
|
||||
testPredicateID := 0
|
||||
predicatesOrdering := []string{testPredicate}
|
||||
// tests is used to initialize all nodes
|
||||
tests := []struct {
|
||||
name string
|
||||
podName string
|
||||
nodeName string
|
||||
equivalenceHashForUpdatePredicate uint64
|
||||
cachedItem predicateItemType
|
||||
cache schedulercache.Cache
|
||||
}{
|
||||
{
|
||||
podName: "testPod",
|
||||
nodeName: "node1",
|
||||
name: "hash predicate 123 not fits host ports",
|
||||
podName: "testPod",
|
||||
nodeName: "node1",
|
||||
equivalenceHashForUpdatePredicate: 123,
|
||||
cachedItem: predicateItemType{
|
||||
fit: false,
|
||||
@ -662,11 +661,11 @@ func TestInvalidateCachedPredicateItemOfAllNodes(t *testing.T) {
|
||||
predicates.ErrPodNotFitsHostPorts,
|
||||
},
|
||||
},
|
||||
cache: &upToDateCache{},
|
||||
},
|
||||
{
|
||||
podName: "testPod",
|
||||
nodeName: "node2",
|
||||
name: "hash predicate 456 not fits host ports",
|
||||
podName: "testPod",
|
||||
nodeName: "node2",
|
||||
equivalenceHashForUpdatePredicate: 456,
|
||||
cachedItem: predicateItemType{
|
||||
fit: false,
|
||||
@ -674,217 +673,131 @@ func TestInvalidateCachedPredicateItemOfAllNodes(t *testing.T) {
|
||||
predicates.ErrPodNotFitsHostPorts,
|
||||
},
|
||||
},
|
||||
cache: &upToDateCache{},
|
||||
},
|
||||
{
|
||||
podName: "testPod",
|
||||
nodeName: "node3",
|
||||
name: "hash predicate 123 fits",
|
||||
podName: "testPod",
|
||||
nodeName: "node3",
|
||||
equivalenceHashForUpdatePredicate: 123,
|
||||
cachedItem: predicateItemType{
|
||||
fit: true,
|
||||
},
|
||||
cache: &upToDateCache{},
|
||||
},
|
||||
}
|
||||
ecache := NewEquivalenceCache()
|
||||
ecache := NewCache(predicatesOrdering)
|
||||
|
||||
for _, test := range tests {
|
||||
node := schedulercache.NewNodeInfo()
|
||||
node.SetNode(&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: test.nodeName}})
|
||||
testNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: test.nodeName}}
|
||||
node.SetNode(testNode)
|
||||
|
||||
nodeCache, _ := ecache.GetNodeCache(testNode.Name)
|
||||
// set cached item to equivalence cache
|
||||
ecache.updateResult(
|
||||
nodeCache.updateResult(
|
||||
test.podName,
|
||||
testPredicate,
|
||||
testPredicateID,
|
||||
test.cachedItem.fit,
|
||||
test.cachedItem.reasons,
|
||||
test.equivalenceHashForUpdatePredicate,
|
||||
test.cache,
|
||||
node,
|
||||
)
|
||||
}
|
||||
|
||||
// invalidate cached predicate for all nodes
|
||||
ecache.InvalidateCachedPredicateItemOfAllNodes(sets.NewString(testPredicate))
|
||||
ecache.InvalidatePredicates(sets.NewString(testPredicate))
|
||||
|
||||
// there should be no cached predicate any more
|
||||
for _, test := range tests {
|
||||
if algorithmCache, exist := ecache.algorithmCache[test.nodeName]; exist {
|
||||
if _, exist := algorithmCache[testPredicate]; exist {
|
||||
t.Errorf("Failed: cached item for predicate key: %v on node: %v should be invalidated",
|
||||
testPredicate, test.nodeName)
|
||||
break
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
if nodeCache, exist := ecache.nodeToCache[test.nodeName]; exist {
|
||||
if cache := nodeCache.cache[testPredicateID]; cache != nil {
|
||||
t.Errorf("Failed: cached item for predicate key: %v on node: %v should be invalidated",
|
||||
testPredicate, test.nodeName)
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestInvalidateAllCachedPredicateItemOfNode(t *testing.T) {
|
||||
testPredicate := "GeneralPredicates"
|
||||
testPredicateID := 0
|
||||
predicatesOrdering := []string{testPredicate}
|
||||
// tests is used to initialize all nodes
|
||||
tests := []struct {
|
||||
name string
|
||||
podName string
|
||||
nodeName string
|
||||
equivalenceHashForUpdatePredicate uint64
|
||||
cachedItem predicateItemType
|
||||
cache schedulercache.Cache
|
||||
}{
|
||||
{
|
||||
podName: "testPod",
|
||||
nodeName: "node1",
|
||||
name: "hash predicate 123 not fits host ports",
|
||||
podName: "testPod",
|
||||
nodeName: "node1",
|
||||
equivalenceHashForUpdatePredicate: 123,
|
||||
cachedItem: predicateItemType{
|
||||
fit: false,
|
||||
reasons: []algorithm.PredicateFailureReason{predicates.ErrPodNotFitsHostPorts},
|
||||
},
|
||||
cache: &upToDateCache{},
|
||||
},
|
||||
{
|
||||
podName: "testPod",
|
||||
nodeName: "node2",
|
||||
name: "hash predicate 456 not fits host ports",
|
||||
podName: "testPod",
|
||||
nodeName: "node2",
|
||||
equivalenceHashForUpdatePredicate: 456,
|
||||
cachedItem: predicateItemType{
|
||||
fit: false,
|
||||
reasons: []algorithm.PredicateFailureReason{predicates.ErrPodNotFitsHostPorts},
|
||||
},
|
||||
cache: &upToDateCache{},
|
||||
},
|
||||
{
|
||||
podName: "testPod",
|
||||
nodeName: "node3",
|
||||
name: "hash predicate 123 fits host ports",
|
||||
podName: "testPod",
|
||||
nodeName: "node3",
|
||||
equivalenceHashForUpdatePredicate: 123,
|
||||
cachedItem: predicateItemType{
|
||||
fit: true,
|
||||
},
|
||||
cache: &upToDateCache{},
|
||||
},
|
||||
}
|
||||
ecache := NewEquivalenceCache()
|
||||
ecache := NewCache(predicatesOrdering)
|
||||
|
||||
for _, test := range tests {
|
||||
node := schedulercache.NewNodeInfo()
|
||||
node.SetNode(&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: test.nodeName}})
|
||||
testNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: test.nodeName}}
|
||||
node.SetNode(testNode)
|
||||
|
||||
nodeCache, _ := ecache.GetNodeCache(testNode.Name)
|
||||
// set cached item to equivalence cache
|
||||
ecache.updateResult(
|
||||
nodeCache.updateResult(
|
||||
test.podName,
|
||||
testPredicate,
|
||||
testPredicateID,
|
||||
test.cachedItem.fit,
|
||||
test.cachedItem.reasons,
|
||||
test.equivalenceHashForUpdatePredicate,
|
||||
test.cache,
|
||||
node,
|
||||
)
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
// invalidate cached predicate for all nodes
|
||||
ecache.InvalidateAllCachedPredicateItemOfNode(test.nodeName)
|
||||
if _, exist := ecache.algorithmCache[test.nodeName]; exist {
|
||||
t.Errorf("Failed: cached item for node: %v should be invalidated", test.nodeName)
|
||||
break
|
||||
}
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
oldNodeCache, _ := ecache.GetNodeCache(test.nodeName)
|
||||
oldGeneration := oldNodeCache.generation
|
||||
// invalidate cached predicate for all nodes
|
||||
ecache.InvalidateAllPredicatesOnNode(test.nodeName)
|
||||
if n, _ := ecache.GetNodeCache(test.nodeName); oldGeneration == n.generation {
|
||||
t.Errorf("Failed: cached item for node: %v should be invalidated", test.nodeName)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkEquivalenceHash(b *testing.B) {
|
||||
pod := makeBasicPod("test")
|
||||
for i := 0; i < b.N; i++ {
|
||||
getEquivalenceHash(pod)
|
||||
}
|
||||
}
|
||||
|
||||
// syncingMockCache delegates method calls to an actual Cache,
|
||||
// but calls to UpdateNodeNameToInfoMap synchronize with the test.
|
||||
type syncingMockCache struct {
|
||||
schedulercache.Cache
|
||||
cycleStart, cacheInvalidated chan struct{}
|
||||
once sync.Once
|
||||
}
|
||||
|
||||
// UpdateNodeNameToInfoMap delegates to the real implementation, but on the first call, it
|
||||
// synchronizes with the test.
|
||||
//
|
||||
// Since UpdateNodeNameToInfoMap is one of the first steps of (*genericScheduler).Schedule, we use
|
||||
// this point to signal to the test that a scheduling cycle has started.
|
||||
func (c *syncingMockCache) UpdateNodeNameToInfoMap(infoMap map[string]*schedulercache.NodeInfo) error {
|
||||
err := c.Cache.UpdateNodeNameToInfoMap(infoMap)
|
||||
c.once.Do(func() {
|
||||
c.cycleStart <- struct{}{}
|
||||
<-c.cacheInvalidated
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// TestEquivalenceCacheInvalidationRace tests that equivalence cache invalidation is correctly
|
||||
// handled when an invalidation event happens early in a scheduling cycle. Specifically, the event
|
||||
// occurs after schedulercache is snapshotted and before equivalence cache lock is acquired.
|
||||
func TestEquivalenceCacheInvalidationRace(t *testing.T) {
|
||||
// Create a predicate that returns false the first time and true on subsequent calls.
|
||||
podWillFit := false
|
||||
var callCount int
|
||||
testPredicate := func(pod *v1.Pod,
|
||||
meta algorithm.PredicateMetadata,
|
||||
nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
|
||||
callCount++
|
||||
if !podWillFit {
|
||||
podWillFit = true
|
||||
return false, []algorithm.PredicateFailureReason{predicates.ErrFakePredicate}, nil
|
||||
}
|
||||
return true, nil, nil
|
||||
}
|
||||
|
||||
// Set up the mock cache.
|
||||
cache := schedulercache.New(time.Duration(0), wait.NeverStop)
|
||||
cache.AddNode(&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "machine1"}})
|
||||
mockCache := &syncingMockCache{
|
||||
Cache: cache,
|
||||
cycleStart: make(chan struct{}),
|
||||
cacheInvalidated: make(chan struct{}),
|
||||
}
|
||||
|
||||
eCache := NewEquivalenceCache()
|
||||
// Ensure that equivalence cache invalidation happens after the scheduling cycle starts, but before
|
||||
// the equivalence cache would be updated.
|
||||
go func() {
|
||||
<-mockCache.cycleStart
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "new-pod", UID: "new-pod"},
|
||||
Spec: v1.PodSpec{NodeName: "machine1"}}
|
||||
if err := cache.AddPod(pod); err != nil {
|
||||
t.Errorf("Could not add pod to cache: %v", err)
|
||||
}
|
||||
eCache.InvalidateAllCachedPredicateItemOfNode("machine1")
|
||||
mockCache.cacheInvalidated <- struct{}{}
|
||||
}()
|
||||
|
||||
// Set up the scheduler.
|
||||
ps := map[string]algorithm.FitPredicate{"testPredicate": testPredicate}
|
||||
predicates.SetPredicatesOrdering([]string{"testPredicate"})
|
||||
prioritizers := []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}}
|
||||
pvcLister := schedulertesting.FakePersistentVolumeClaimLister([]*v1.PersistentVolumeClaim{})
|
||||
scheduler := NewGenericScheduler(
|
||||
mockCache,
|
||||
eCache,
|
||||
NewSchedulingQueue(),
|
||||
ps,
|
||||
algorithm.EmptyPredicateMetadataProducer,
|
||||
prioritizers,
|
||||
algorithm.EmptyPriorityMetadataProducer,
|
||||
nil, nil, pvcLister, true, false)
|
||||
|
||||
// First scheduling attempt should fail.
|
||||
nodeLister := schedulertesting.FakeNodeLister(makeNodeList([]string{"machine1"}))
|
||||
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "test-pod"}}
|
||||
machine, err := scheduler.Schedule(pod, nodeLister)
|
||||
if machine != "" || err == nil {
|
||||
t.Error("First scheduling attempt did not fail")
|
||||
}
|
||||
|
||||
// Second scheduling attempt should succeed because cache was invalidated.
|
||||
_, err = scheduler.Schedule(pod, nodeLister)
|
||||
if err != nil {
|
||||
t.Errorf("Second scheduling attempt failed: %v", err)
|
||||
}
|
||||
if callCount != 2 {
|
||||
t.Errorf("Predicate should have been called twice. Was called %d times.", callCount)
|
||||
getEquivalencePod(pod)
|
||||
}
|
||||
}
|
307
vendor/k8s.io/kubernetes/pkg/scheduler/core/equivalence_cache.go
generated
vendored
307
vendor/k8s.io/kubernetes/pkg/scheduler/core/equivalence_cache.go
generated
vendored
@ -1,307 +0,0 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package core
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"sync"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
|
||||
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
|
||||
hashutil "k8s.io/kubernetes/pkg/util/hash"
|
||||
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
// EquivalenceCache holds:
|
||||
// 1. a map of AlgorithmCache with node name as key
|
||||
// 2. function to get equivalence pod
|
||||
type EquivalenceCache struct {
|
||||
mu sync.RWMutex
|
||||
algorithmCache map[string]AlgorithmCache
|
||||
}
|
||||
|
||||
// The AlgorithmCache stores PredicateMap with predicate name as key, PredicateMap as value.
|
||||
type AlgorithmCache map[string]PredicateMap
|
||||
|
||||
// PredicateMap stores HostPrediacte with equivalence hash as key
|
||||
type PredicateMap map[uint64]HostPredicate
|
||||
|
||||
// HostPredicate is the cached predicate result
|
||||
type HostPredicate struct {
|
||||
Fit bool
|
||||
FailReasons []algorithm.PredicateFailureReason
|
||||
}
|
||||
|
||||
// NewEquivalenceCache returns EquivalenceCache to speed up predicates by caching
|
||||
// result from previous scheduling.
|
||||
func NewEquivalenceCache() *EquivalenceCache {
|
||||
return &EquivalenceCache{
|
||||
algorithmCache: make(map[string]AlgorithmCache),
|
||||
}
|
||||
}
|
||||
|
||||
// RunPredicate will return a cached predicate result. In case of a cache miss, the predicate will
|
||||
// be run and its results cached for the next call.
|
||||
//
|
||||
// NOTE: RunPredicate will not update the equivalence cache if the given NodeInfo is stale.
|
||||
func (ec *EquivalenceCache) RunPredicate(
|
||||
pred algorithm.FitPredicate,
|
||||
predicateKey string,
|
||||
pod *v1.Pod,
|
||||
meta algorithm.PredicateMetadata,
|
||||
nodeInfo *schedulercache.NodeInfo,
|
||||
equivClassInfo *equivalenceClassInfo,
|
||||
cache schedulercache.Cache,
|
||||
) (bool, []algorithm.PredicateFailureReason, error) {
|
||||
if nodeInfo == nil || nodeInfo.Node() == nil {
|
||||
// This may happen during tests.
|
||||
return false, []algorithm.PredicateFailureReason{}, fmt.Errorf("nodeInfo is nil or node is invalid")
|
||||
}
|
||||
|
||||
fit, reasons, invalid := ec.lookupResult(pod.GetName(), nodeInfo.Node().GetName(), predicateKey, equivClassInfo.hash)
|
||||
if !invalid {
|
||||
return fit, reasons, nil
|
||||
}
|
||||
fit, reasons, err := pred(pod, meta, nodeInfo)
|
||||
if err != nil {
|
||||
return fit, reasons, err
|
||||
}
|
||||
if cache != nil {
|
||||
ec.updateResult(pod.GetName(), predicateKey, fit, reasons, equivClassInfo.hash, cache, nodeInfo)
|
||||
}
|
||||
return fit, reasons, nil
|
||||
}
|
||||
|
||||
// updateResult updates the cached result of a predicate.
|
||||
func (ec *EquivalenceCache) updateResult(
|
||||
podName, predicateKey string,
|
||||
fit bool,
|
||||
reasons []algorithm.PredicateFailureReason,
|
||||
equivalenceHash uint64,
|
||||
cache schedulercache.Cache,
|
||||
nodeInfo *schedulercache.NodeInfo,
|
||||
) {
|
||||
ec.mu.Lock()
|
||||
defer ec.mu.Unlock()
|
||||
if nodeInfo == nil || nodeInfo.Node() == nil {
|
||||
// This may happen during tests.
|
||||
return
|
||||
}
|
||||
// Skip update if NodeInfo is stale.
|
||||
if !cache.IsUpToDate(nodeInfo) {
|
||||
return
|
||||
}
|
||||
nodeName := nodeInfo.Node().GetName()
|
||||
if _, exist := ec.algorithmCache[nodeName]; !exist {
|
||||
ec.algorithmCache[nodeName] = AlgorithmCache{}
|
||||
}
|
||||
predicateItem := HostPredicate{
|
||||
Fit: fit,
|
||||
FailReasons: reasons,
|
||||
}
|
||||
// if cached predicate map already exists, just update the predicate by key
|
||||
if predicateMap, ok := ec.algorithmCache[nodeName][predicateKey]; ok {
|
||||
// maps in golang are references, no need to add them back
|
||||
predicateMap[equivalenceHash] = predicateItem
|
||||
} else {
|
||||
ec.algorithmCache[nodeName][predicateKey] =
|
||||
PredicateMap{
|
||||
equivalenceHash: predicateItem,
|
||||
}
|
||||
}
|
||||
glog.V(5).Infof("Updated cached predicate: %v for pod: %v on node: %s, with item %v", predicateKey, podName, nodeName, predicateItem)
|
||||
}
|
||||
|
||||
// lookupResult returns cached predicate results:
|
||||
// 1. if pod fit
|
||||
// 2. reasons if pod did not fit
|
||||
// 3. if cache item is not found
|
||||
func (ec *EquivalenceCache) lookupResult(
|
||||
podName, nodeName, predicateKey string,
|
||||
equivalenceHash uint64,
|
||||
) (bool, []algorithm.PredicateFailureReason, bool) {
|
||||
ec.mu.RLock()
|
||||
defer ec.mu.RUnlock()
|
||||
glog.V(5).Infof("Begin to calculate predicate: %v for pod: %s on node: %s based on equivalence cache",
|
||||
predicateKey, podName, nodeName)
|
||||
if hostPredicate, exist := ec.algorithmCache[nodeName][predicateKey][equivalenceHash]; exist {
|
||||
if hostPredicate.Fit {
|
||||
return true, []algorithm.PredicateFailureReason{}, false
|
||||
}
|
||||
return false, hostPredicate.FailReasons, false
|
||||
}
|
||||
// is invalid
|
||||
return false, []algorithm.PredicateFailureReason{}, true
|
||||
}
|
||||
|
||||
// InvalidateCachedPredicateItem marks all items of given predicateKeys, of all pods, on the given node as invalid
|
||||
func (ec *EquivalenceCache) InvalidateCachedPredicateItem(nodeName string, predicateKeys sets.String) {
|
||||
if len(predicateKeys) == 0 {
|
||||
return
|
||||
}
|
||||
ec.mu.Lock()
|
||||
defer ec.mu.Unlock()
|
||||
for predicateKey := range predicateKeys {
|
||||
delete(ec.algorithmCache[nodeName], predicateKey)
|
||||
}
|
||||
glog.V(5).Infof("Done invalidating cached predicates: %v on node: %s", predicateKeys, nodeName)
|
||||
}
|
||||
|
||||
// InvalidateCachedPredicateItemOfAllNodes marks all items of the given predicateKeys, of all pods, on all nodes as invalid
|
||||
func (ec *EquivalenceCache) InvalidateCachedPredicateItemOfAllNodes(predicateKeys sets.String) {
|
||||
if len(predicateKeys) == 0 {
|
||||
return
|
||||
}
|
||||
ec.mu.Lock()
|
||||
defer ec.mu.Unlock()
|
||||
// algorithmCache uses nodeName as its key, so we just iterate over it and invalidate the given predicates
|
||||
for _, algorithmCache := range ec.algorithmCache {
|
||||
for predicateKey := range predicateKeys {
|
||||
delete(algorithmCache, predicateKey)
|
||||
}
|
||||
}
|
||||
glog.V(5).Infof("Done invalidating cached predicates: %v on all node", predicateKeys)
|
||||
}
|
||||
|
||||
// InvalidateAllCachedPredicateItemOfNode marks all cached items on given node as invalid
|
||||
func (ec *EquivalenceCache) InvalidateAllCachedPredicateItemOfNode(nodeName string) {
|
||||
ec.mu.Lock()
|
||||
defer ec.mu.Unlock()
|
||||
delete(ec.algorithmCache, nodeName)
|
||||
glog.V(5).Infof("Done invalidating all cached predicates on node: %s", nodeName)
|
||||
}
|
||||
|
||||
// InvalidateCachedPredicateItemForPodAdd is a wrapper of InvalidateCachedPredicateItem for pod add case
|
||||
func (ec *EquivalenceCache) InvalidateCachedPredicateItemForPodAdd(pod *v1.Pod, nodeName string) {
|
||||
// MatchInterPodAffinity: we assume scheduler can make sure newly bound pod
|
||||
// will not break the existing inter pod affinity, so we do not need to invalidate
|
||||
// MatchInterPodAffinity when a pod is added.
|
||||
//
|
||||
// But when a pod is deleted, existing inter pod affinity may become invalid.
|
||||
// (e.g. this pod was preferred by someone else, or vice versa)
|
||||
//
|
||||
// NOTE: the assumptions above will not hold once we implement features like
|
||||
// RequiredDuringSchedulingRequiredDuringExecution.
|
||||
|
||||
// NoDiskConflict: the newly scheduled pod fits with the existing pods on this node,
|
||||
// so it will also fit the equivalence classes of the existing pods.
|
||||
|
||||
// GeneralPredicates: will always be affected by adding a new pod
|
||||
invalidPredicates := sets.NewString(predicates.GeneralPred)
|
||||
|
||||
// MaxPDVolumeCountPredicate: we check the volumes of pod to make decision.
|
||||
for _, vol := range pod.Spec.Volumes {
|
||||
if vol.PersistentVolumeClaim != nil {
|
||||
invalidPredicates.Insert(predicates.MaxEBSVolumeCountPred, predicates.MaxGCEPDVolumeCountPred, predicates.MaxAzureDiskVolumeCountPred)
|
||||
} else {
|
||||
if vol.AWSElasticBlockStore != nil {
|
||||
invalidPredicates.Insert(predicates.MaxEBSVolumeCountPred)
|
||||
}
|
||||
if vol.GCEPersistentDisk != nil {
|
||||
invalidPredicates.Insert(predicates.MaxGCEPDVolumeCountPred)
|
||||
}
|
||||
if vol.AzureDisk != nil {
|
||||
invalidPredicates.Insert(predicates.MaxAzureDiskVolumeCountPred)
|
||||
}
|
||||
}
|
||||
}
|
||||
ec.InvalidateCachedPredicateItem(nodeName, invalidPredicates)
|
||||
}
|
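A condensed sketch of the volume-driven invalidation above, using a plain map in place of sets.String and hypothetical key strings in place of the predicates package constants:

package main

import "fmt"

// podVolumes is an illustrative stand-in for the volume sources inspected above.
type podVolumes struct {
	hasPVC, hasEBS, hasGCEPD, hasAzureDisk bool
}

// invalidatedPredicates mirrors the branching in the pod-add case: a PVC may be
// backed by any cloud disk, so all three max-volume-count predicates are
// invalidated; an inline volume only invalidates its own predicate.
func invalidatedPredicates(v podVolumes) map[string]bool {
	keys := map[string]bool{"GeneralPredicates": true} // always affected by a new pod
	if v.hasPVC {
		keys["MaxEBSVolumeCount"] = true
		keys["MaxGCEPDVolumeCount"] = true
		keys["MaxAzureDiskVolumeCount"] = true
		return keys
	}
	if v.hasEBS {
		keys["MaxEBSVolumeCount"] = true
	}
	if v.hasGCEPD {
		keys["MaxGCEPDVolumeCount"] = true
	}
	if v.hasAzureDisk {
		keys["MaxAzureDiskVolumeCount"] = true
	}
	return keys
}

func main() {
	fmt.Println(invalidatedPredicates(podVolumes{hasGCEPD: true}))
	// map[GeneralPredicates:true MaxGCEPDVolumeCount:true]
}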
||||
|
||||
// equivalenceClassInfo holds the equivalence hash used for checking the equivalence cache.
|
||||
// We pass this to podFitsOnNode so that the equivalence hash is calculated only once per scheduling cycle.
|
||||
type equivalenceClassInfo struct {
|
||||
// Equivalence hash.
|
||||
hash uint64
|
||||
}
|
||||
|
||||
// getEquivalenceClassInfo returns a hash of the given pod.
|
||||
// The hashing function returns the same value for any two pods that are
|
||||
// equivalent from the perspective of scheduling.
|
||||
func (ec *EquivalenceCache) getEquivalenceClassInfo(pod *v1.Pod) *equivalenceClassInfo {
|
||||
equivalencePod := getEquivalenceHash(pod)
|
||||
if equivalencePod != nil {
|
||||
hash := fnv.New32a()
|
||||
hashutil.DeepHashObject(hash, equivalencePod)
|
||||
return &equivalenceClassInfo{
|
||||
hash: uint64(hash.Sum32()),
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// equivalencePod is the set of pod attributes which must match for two pods to
|
||||
// be considered equivalent for scheduling purposes. For correctness, this must
|
||||
// include any Pod field which is used by a FitPredicate.
|
||||
//
|
||||
// NOTE: For equivalence hash to be formally correct, lists and maps in the
|
||||
// equivalencePod should be normalized. (e.g. by sorting them) However, the
|
||||
// vast majority of equivalent pod classes are expected to be created from a
|
||||
// single pod template, so they will all have the same ordering.
|
||||
type equivalencePod struct {
|
||||
Namespace *string
|
||||
Labels map[string]string
|
||||
Affinity *v1.Affinity
|
||||
Containers []v1.Container // See note about ordering
|
||||
InitContainers []v1.Container // See note about ordering
|
||||
NodeName *string
|
||||
NodeSelector map[string]string
|
||||
Tolerations []v1.Toleration
|
||||
Volumes []v1.Volume // See note about ordering
|
||||
}
|
||||
|
||||
// getEquivalenceHash returns the equivalencePod for a Pod.
|
||||
func getEquivalenceHash(pod *v1.Pod) *equivalencePod {
|
||||
ep := &equivalencePod{
|
||||
Namespace: &pod.Namespace,
|
||||
Labels: pod.Labels,
|
||||
Affinity: pod.Spec.Affinity,
|
||||
Containers: pod.Spec.Containers,
|
||||
InitContainers: pod.Spec.InitContainers,
|
||||
NodeName: &pod.Spec.NodeName,
|
||||
NodeSelector: pod.Spec.NodeSelector,
|
||||
Tolerations: pod.Spec.Tolerations,
|
||||
Volumes: pod.Spec.Volumes,
|
||||
}
|
||||
// DeepHashObject considers nil and empty slices to be different. Normalize them.
|
||||
if len(ep.Containers) == 0 {
|
||||
ep.Containers = nil
|
||||
}
|
||||
if len(ep.InitContainers) == 0 {
|
||||
ep.InitContainers = nil
|
||||
}
|
||||
if len(ep.Tolerations) == 0 {
|
||||
ep.Tolerations = nil
|
||||
}
|
||||
if len(ep.Volumes) == 0 {
|
||||
ep.Volumes = nil
|
||||
}
|
||||
// Normalize empty maps also.
|
||||
if len(ep.Labels) == 0 {
|
||||
ep.Labels = nil
|
||||
}
|
||||
if len(ep.NodeSelector) == 0 {
|
||||
ep.NodeSelector = nil
|
||||
}
|
||||
// TODO(misterikkit): Also normalize nested maps and slices.
|
||||
return ep
|
||||
}
|
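As a rough illustration of why the normalization in getEquivalenceHash matters, here is a standalone sketch that hashes a reduced pod descriptor with FNV-32a; fmt's %#v formatting is a simplified stand-in for hashutil.DeepHashObject, and the struct below is illustrative, not the real equivalencePod.

package main

import (
	"fmt"
	"hash/fnv"
)

// miniEquivalencePod is an illustrative subset of the fields hashed above.
type miniEquivalencePod struct {
	Namespace    string
	Labels       map[string]string
	NodeSelector map[string]string
}

// equivalenceHash mimics the shape of getEquivalenceClassInfo: normalize, dump
// the struct deterministically, feed it to FNV-32a, and widen the sum to uint64.
func equivalenceHash(ep miniEquivalencePod) uint64 {
	// Normalize empty maps to nil so "empty" and "missing" hash the same way.
	if len(ep.Labels) == 0 {
		ep.Labels = nil
	}
	if len(ep.NodeSelector) == 0 {
		ep.NodeSelector = nil
	}
	h := fnv.New32a()
	fmt.Fprintf(h, "%#v", ep) // simplified stand-in for hashutil.DeepHashObject
	return uint64(h.Sum32())
}

func main() {
	a := miniEquivalencePod{Namespace: "default", Labels: map[string]string{}}
	b := miniEquivalencePod{Namespace: "default"} // no labels at all
	fmt.Println(equivalenceHash(a) == equivalenceHash(b)) // true, thanks to normalization
}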
7
vendor/k8s.io/kubernetes/pkg/scheduler/core/extender.go
generated
vendored
@ -107,6 +107,11 @@ func NewHTTPExtender(config *schedulerapi.ExtenderConfig) (algorithm.SchedulerEx
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Name returns extenderURL to identify the extender.
|
||||
func (h *HTTPExtender) Name() string {
|
||||
return h.extenderURL
|
||||
}
|
||||
|
||||
// IsIgnorable returns true to indicate that scheduling should not fail when this extender
|
||||
// is unavailable
|
||||
func (h *HTTPExtender) IsIgnorable() bool {
|
||||
@ -138,7 +143,7 @@ func (h *HTTPExtender) ProcessPreemption(
|
||||
// If extender has cached node info, pass NodeNameToMetaVictims in args.
|
||||
nodeNameToMetaVictims := convertToNodeNameToMetaVictims(nodeToVictims)
|
||||
args = &schedulerapi.ExtenderPreemptionArgs{
|
||||
Pod: pod,
|
||||
Pod: pod,
|
||||
NodeNameToMetaVictims: nodeNameToMetaVictims,
|
||||
}
|
||||
} else {
|
||||
|
80
vendor/k8s.io/kubernetes/pkg/scheduler/core/extender_test.go
generated
vendored
@ -27,6 +27,8 @@ import (
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm"
|
||||
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
|
||||
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
|
||||
schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
|
||||
internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
|
||||
schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing"
|
||||
"k8s.io/kubernetes/pkg/scheduler/util"
|
||||
)
|
||||
@ -118,6 +120,10 @@ type FakeExtender struct {
|
||||
cachedNodeNameToInfo map[string]*schedulercache.NodeInfo
|
||||
}
|
||||
|
||||
func (f *FakeExtender) Name() string {
|
||||
return "FakeExtender"
|
||||
}
|
||||
|
||||
func (f *FakeExtender) IsIgnorable() bool {
|
||||
return f.ignorable
|
||||
}
|
||||
@ -454,7 +460,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
|
||||
// because of the errors from errorPredicateExtender and/or
|
||||
// errorPrioritizerExtender.
|
||||
predicates: map[string]algorithm.FitPredicate{"true": truePredicate},
|
||||
prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}},
|
||||
prioritizers: []algorithm.PriorityConfig{{Function: machine2Prioritizer, Weight: 1}},
|
||||
extenders: []FakeExtender{
|
||||
{
|
||||
predicates: []fitPredicate{errorPredicateExtender},
|
||||
@ -492,43 +498,47 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
extenders := []algorithm.SchedulerExtender{}
|
||||
for ii := range test.extenders {
|
||||
extenders = append(extenders, &test.extenders[ii])
|
||||
}
|
||||
cache := schedulercache.New(time.Duration(0), wait.NeverStop)
|
||||
for _, name := range test.nodes {
|
||||
cache.AddNode(createNode(name))
|
||||
}
|
||||
queue := NewSchedulingQueue()
|
||||
scheduler := NewGenericScheduler(
|
||||
cache,
|
||||
nil,
|
||||
queue,
|
||||
test.predicates,
|
||||
algorithm.EmptyPredicateMetadataProducer,
|
||||
test.prioritizers,
|
||||
algorithm.EmptyPriorityMetadataProducer,
|
||||
extenders,
|
||||
nil,
|
||||
schedulertesting.FakePersistentVolumeClaimLister{},
|
||||
false,
|
||||
false)
|
||||
podIgnored := &v1.Pod{}
|
||||
machine, err := scheduler.Schedule(podIgnored, schedulertesting.FakeNodeLister(makeNodeList(test.nodes)))
|
||||
if test.expectsErr {
|
||||
if err == nil {
|
||||
t.Errorf("Unexpected non-error for %s, machine %s", test.name, machine)
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
extenders := []algorithm.SchedulerExtender{}
|
||||
for ii := range test.extenders {
|
||||
extenders = append(extenders, &test.extenders[ii])
|
||||
}
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
continue
|
||||
cache := schedulerinternalcache.New(time.Duration(0), wait.NeverStop)
|
||||
for _, name := range test.nodes {
|
||||
cache.AddNode(createNode(name))
|
||||
}
|
||||
if test.expectedHost != machine {
|
||||
t.Errorf("Failed : %s, Expected: %s, Saw: %s", test.name, test.expectedHost, machine)
|
||||
queue := internalqueue.NewSchedulingQueue()
|
||||
scheduler := NewGenericScheduler(
|
||||
cache,
|
||||
nil,
|
||||
queue,
|
||||
test.predicates,
|
||||
algorithm.EmptyPredicateMetadataProducer,
|
||||
test.prioritizers,
|
||||
algorithm.EmptyPriorityMetadataProducer,
|
||||
extenders,
|
||||
nil,
|
||||
schedulertesting.FakePersistentVolumeClaimLister{},
|
||||
schedulertesting.FakePDBLister{},
|
||||
false,
|
||||
false,
|
||||
schedulerapi.DefaultPercentageOfNodesToScore)
|
||||
podIgnored := &v1.Pod{}
|
||||
machine, err := scheduler.Schedule(podIgnored, schedulertesting.FakeNodeLister(makeNodeList(test.nodes)))
|
||||
if test.expectsErr {
|
||||
if err == nil {
|
||||
t.Errorf("Unexpected non-error, machine %s", machine)
|
||||
}
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
return
|
||||
}
|
||||
if test.expectedHost != machine {
|
||||
t.Errorf("Expected: %s, Saw: %s", test.expectedHost, machine)
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
266
vendor/k8s.io/kubernetes/pkg/scheduler/core/generic_scheduler.go
generated
vendored
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package core
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
@ -25,7 +26,7 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/klog"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
policy "k8s.io/api/policy/v1beta1"
|
||||
@ -39,11 +40,22 @@ import (
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
|
||||
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
|
||||
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
|
||||
"k8s.io/kubernetes/pkg/scheduler/core/equivalence"
|
||||
schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
|
||||
internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
|
||||
"k8s.io/kubernetes/pkg/scheduler/metrics"
|
||||
"k8s.io/kubernetes/pkg/scheduler/util"
|
||||
"k8s.io/kubernetes/pkg/scheduler/volumebinder"
|
||||
)
|
||||
|
||||
const (
|
||||
// minFeasibleNodesToFind is the minimum number of nodes that would be scored
|
||||
// in each scheduling cycle. This is a semi-arbitrary value to ensure that a
|
||||
// certain minimum of nodes are checked for feasibility. This in turn helps
|
||||
// ensure a minimum level of spreading.
|
||||
minFeasibleNodesToFind = 100
|
||||
)
|
||||
|
||||
// FailedPredicateMap declares a map[string][]algorithm.PredicateFailureReason type.
|
||||
type FailedPredicateMap map[string][]algorithm.PredicateFailureReason
|
||||
|
||||
@ -84,21 +96,41 @@ func (f *FitError) Error() string {
|
||||
}
|
||||
|
||||
type genericScheduler struct {
|
||||
cache schedulercache.Cache
|
||||
equivalenceCache *EquivalenceCache
|
||||
schedulingQueue SchedulingQueue
|
||||
cache schedulerinternalcache.Cache
|
||||
equivalenceCache *equivalence.Cache
|
||||
schedulingQueue internalqueue.SchedulingQueue
|
||||
predicates map[string]algorithm.FitPredicate
|
||||
priorityMetaProducer algorithm.PriorityMetadataProducer
|
||||
predicateMetaProducer algorithm.PredicateMetadataProducer
|
||||
prioritizers []algorithm.PriorityConfig
|
||||
extenders []algorithm.SchedulerExtender
|
||||
lastNodeIndexLock sync.Mutex
|
||||
lastNodeIndex uint64
|
||||
alwaysCheckAllPredicates bool
|
||||
cachedNodeInfoMap map[string]*schedulercache.NodeInfo
|
||||
volumeBinder *volumebinder.VolumeBinder
|
||||
pvcLister corelisters.PersistentVolumeClaimLister
|
||||
pdbLister algorithm.PDBLister
|
||||
disablePreemption bool
|
||||
percentageOfNodesToScore int32
|
||||
}
|
||||
|
||||
// snapshot snapshots the equivalence cache and node infos for all fit and priority
|
||||
// functions.
|
||||
func (g *genericScheduler) snapshot() error {
|
||||
// IMPORTANT NOTE: We must snapshot equivalence cache before snapshotting
|
||||
// scheduler cache, otherwise stale data may be written into equivalence
|
||||
// cache, e.g.
|
||||
// 1. snapshot cache
|
||||
// 2. event arrives, updating cache and invalidating predicates or whole node cache
|
||||
// 3. snapshot ecache
|
||||
// 4. evaluate predicates
|
||||
// 5. stale result will be written to ecache
|
||||
if g.equivalenceCache != nil {
|
||||
g.equivalenceCache.Snapshot()
|
||||
}
|
||||
|
||||
// Used for all fit and priority funcs.
|
||||
return g.cache.UpdateNodeNameToInfoMap(g.cachedNodeInfoMap)
|
||||
}
|
||||
|
||||
// Schedule tries to schedule the given pod to one of the nodes in the node list.
|
||||
@ -120,8 +152,7 @@ func (g *genericScheduler) Schedule(pod *v1.Pod, nodeLister algorithm.NodeLister
|
||||
return "", ErrNoNodesAvailable
|
||||
}
|
||||
|
||||
// Used for all fit and priority funcs.
|
||||
err = g.cache.UpdateNodeNameToInfoMap(g.cachedNodeInfoMap)
|
||||
err = g.snapshot()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@ -175,6 +206,22 @@ func (g *genericScheduler) Predicates() map[string]algorithm.FitPredicate {
|
||||
return g.predicates
|
||||
}
|
||||
|
||||
// findMaxScores returns the indexes of nodes in the "priorityList" that have the highest "Score".
|
||||
func findMaxScores(priorityList schedulerapi.HostPriorityList) []int {
|
||||
maxScoreIndexes := make([]int, 0, len(priorityList)/2)
|
||||
maxScore := priorityList[0].Score
|
||||
for i, hp := range priorityList {
|
||||
if hp.Score > maxScore {
|
||||
maxScore = hp.Score
|
||||
maxScoreIndexes = maxScoreIndexes[:0]
|
||||
maxScoreIndexes = append(maxScoreIndexes, i)
|
||||
} else if hp.Score == maxScore {
|
||||
maxScoreIndexes = append(maxScoreIndexes, i)
|
||||
}
|
||||
}
|
||||
return maxScoreIndexes
|
||||
}
|
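A self-contained sketch of the tie-breaking introduced above: collect the indexes of the top score, then pick among them round-robin with an increasing counter (plain ints stand in for schedulerapi.HostPriorityList):

package main

import "fmt"

// maxScoreIndexes returns the positions of every entry that holds the highest score.
func maxScoreIndexes(scores []int) []int {
	idx := make([]int, 0, len(scores)/2)
	max := scores[0]
	for i, s := range scores {
		if s > max {
			max = s
			idx = idx[:0]
			idx = append(idx, i)
		} else if s == max {
			idx = append(idx, i)
		}
	}
	return idx
}

func main() {
	scores := []int{7, 9, 3, 9, 9} // three hosts tie at 9
	var lastIndex uint64
	for round := 0; round < 4; round++ {
		top := maxScoreIndexes(scores)
		pick := top[int(lastIndex%uint64(len(top)))]
		lastIndex++
		fmt.Println("round", round, "picks host index", pick) // cycles 1, 3, 4, 1, ...
	}
}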
||||
|
||||
// selectHost takes a prioritized list of nodes and then picks one
|
||||
// in a round-robin manner from the nodes that had the highest score.
|
||||
func (g *genericScheduler) selectHost(priorityList schedulerapi.HostPriorityList) (string, error) {
|
||||
@ -182,16 +229,11 @@ func (g *genericScheduler) selectHost(priorityList schedulerapi.HostPriorityList
|
||||
return "", fmt.Errorf("empty priorityList")
|
||||
}
|
||||
|
||||
sort.Sort(sort.Reverse(priorityList))
|
||||
maxScore := priorityList[0].Score
|
||||
firstAfterMaxScore := sort.Search(len(priorityList), func(i int) bool { return priorityList[i].Score < maxScore })
|
||||
|
||||
g.lastNodeIndexLock.Lock()
|
||||
ix := int(g.lastNodeIndex % uint64(firstAfterMaxScore))
|
||||
maxScores := findMaxScores(priorityList)
|
||||
ix := int(g.lastNodeIndex % uint64(len(maxScores)))
|
||||
g.lastNodeIndex++
|
||||
g.lastNodeIndexLock.Unlock()
|
||||
|
||||
return priorityList[ix].Host, nil
|
||||
return priorityList[maxScores[ix]].Host, nil
|
||||
}
|
||||
|
||||
// preempt finds nodes with pods that can be preempted to make room for "pod" to
|
||||
@ -206,12 +248,12 @@ func (g *genericScheduler) Preempt(pod *v1.Pod, nodeLister algorithm.NodeLister,
|
||||
if !ok || fitError == nil {
|
||||
return nil, nil, nil, nil
|
||||
}
|
||||
err := g.cache.UpdateNodeNameToInfoMap(g.cachedNodeInfoMap)
|
||||
err := g.snapshot()
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
if !podEligibleToPreemptOthers(pod, g.cachedNodeInfoMap) {
|
||||
glog.V(5).Infof("Pod %v is not eligible for more preemption.", pod.Name)
|
||||
klog.V(5).Infof("Pod %v/%v is not eligible for more preemption.", pod.Namespace, pod.Name)
|
||||
return nil, nil, nil, nil
|
||||
}
|
||||
allNodes, err := nodeLister.List()
|
||||
@ -221,13 +263,13 @@ func (g *genericScheduler) Preempt(pod *v1.Pod, nodeLister algorithm.NodeLister,
|
||||
if len(allNodes) == 0 {
|
||||
return nil, nil, nil, ErrNoNodesAvailable
|
||||
}
|
||||
potentialNodes := nodesWherePreemptionMightHelp(pod, allNodes, fitError.FailedPredicates)
|
||||
potentialNodes := nodesWherePreemptionMightHelp(allNodes, fitError.FailedPredicates)
|
||||
if len(potentialNodes) == 0 {
|
||||
glog.V(3).Infof("Preemption will not help schedule pod %v on any node.", pod.Name)
|
||||
klog.V(3).Infof("Preemption will not help schedule pod %v/%v on any node.", pod.Namespace, pod.Name)
|
||||
// In this case, we should clean-up any existing nominated node name of the pod.
|
||||
return nil, nil, []*v1.Pod{pod}, nil
|
||||
}
|
||||
pdbs, err := g.cache.ListPDBs(labels.Everything())
|
||||
pdbs, err := g.pdbLister.List(labels.Everything())
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
@ -271,7 +313,7 @@ func (g *genericScheduler) processPreemptionWithExtenders(
|
||||
) (map[*v1.Node]*schedulerapi.Victims, error) {
|
||||
if len(nodeToVictims) > 0 {
|
||||
for _, extender := range g.extenders {
|
||||
if extender.SupportsPreemption() {
|
||||
if extender.SupportsPreemption() && extender.IsInterested(pod) {
|
||||
newNodeToVictims, err := extender.ProcessPreemption(
|
||||
pod,
|
||||
nodeToVictims,
|
||||
@ -279,7 +321,7 @@ func (g *genericScheduler) processPreemptionWithExtenders(
|
||||
)
|
||||
if err != nil {
|
||||
if extender.IsIgnorable() {
|
||||
glog.Warningf("Skipping extender %v as it returned error %v and has ignorable flag set",
|
||||
klog.Warningf("Skipping extender %v as it returned error %v and has ignorable flag set",
|
||||
extender, err)
|
||||
continue
|
||||
}
|
||||
@ -325,6 +367,20 @@ func (g *genericScheduler) getLowerPriorityNominatedPods(pod *v1.Pod, nodeName s
|
||||
return lowerPriorityPods
|
||||
}
|
||||
|
||||
// numFeasibleNodesToFind returns the number of feasible nodes that, once found, makes the scheduler stop
|
||||
// searching for more feasible nodes.
|
||||
func (g *genericScheduler) numFeasibleNodesToFind(numAllNodes int32) int32 {
|
||||
if numAllNodes < minFeasibleNodesToFind || g.percentageOfNodesToScore <= 0 ||
|
||||
g.percentageOfNodesToScore >= 100 {
|
||||
return numAllNodes
|
||||
}
|
||||
numNodes := numAllNodes * g.percentageOfNodesToScore / 100
|
||||
if numNodes < minFeasibleNodesToFind {
|
||||
return minFeasibleNodesToFind
|
||||
}
|
||||
return numNodes
|
||||
}
|
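For intuition, a standalone sketch that reproduces the cut-off arithmetic above for a few sample cluster sizes; the 100-node floor mirrors minFeasibleNodesToFind.

package main

import "fmt"

const minFeasibleNodesToFind = 100

// feasibleNodesToFind reproduces the formula above for a given cluster size and
// percentageOfNodesToScore setting.
func feasibleNodesToFind(numAllNodes, percentage int32) int32 {
	if numAllNodes < minFeasibleNodesToFind || percentage <= 0 || percentage >= 100 {
		return numAllNodes
	}
	numNodes := numAllNodes * percentage / 100
	if numNodes < minFeasibleNodesToFind {
		return minFeasibleNodesToFind
	}
	return numNodes
}

func main() {
	fmt.Println(feasibleNodesToFind(5000, 50)) // 2500: half of a large cluster
	fmt.Println(feasibleNodesToFind(150, 50))  // 100: clamped to the minimum
	fmt.Println(feasibleNodesToFind(80, 50))   // 80: small clusters are checked fully
}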
||||
|
||||
// Filters the nodes to find the ones that fit based on the given predicate functions
|
||||
// Each node is passed through the predicate functions to determine if it is a fit
|
||||
func (g *genericScheduler) findNodesThatFit(pod *v1.Pod, nodes []*v1.Node) ([]*v1.Node, FailedPredicateMap, error) {
|
||||
@ -334,34 +390,44 @@ func (g *genericScheduler) findNodesThatFit(pod *v1.Pod, nodes []*v1.Node) ([]*v
|
||||
if len(g.predicates) == 0 {
|
||||
filtered = nodes
|
||||
} else {
|
||||
allNodes := int32(g.cache.NodeTree().NumNodes)
|
||||
numNodesToFind := g.numFeasibleNodesToFind(allNodes)
|
||||
|
||||
// Create filtered list with enough space to avoid growing it
|
||||
// and allow assigning.
|
||||
filtered = make([]*v1.Node, len(nodes))
|
||||
filtered = make([]*v1.Node, numNodesToFind)
|
||||
errs := errors.MessageCountMap{}
|
||||
var predicateResultLock sync.Mutex
|
||||
var filteredLen int32
|
||||
var (
|
||||
predicateResultLock sync.Mutex
|
||||
filteredLen int32
|
||||
equivClass *equivalence.Class
|
||||
)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
// We can use the same metadata producer for all nodes.
|
||||
meta := g.predicateMetaProducer(pod, g.cachedNodeInfoMap)
|
||||
|
||||
var equivCacheInfo *equivalenceClassInfo
|
||||
if g.equivalenceCache != nil {
|
||||
// getEquivalenceClassInfo will return immediately if no equivalence pod found
|
||||
equivCacheInfo = g.equivalenceCache.getEquivalenceClassInfo(pod)
|
||||
equivClass = equivalence.NewClass(pod)
|
||||
}
|
||||
|
||||
checkNode := func(i int) {
|
||||
nodeName := nodes[i].Name
|
||||
var nodeCache *equivalence.NodeCache
|
||||
nodeName := g.cache.NodeTree().Next()
|
||||
if g.equivalenceCache != nil {
|
||||
nodeCache = g.equivalenceCache.LoadNodeCache(nodeName)
|
||||
}
|
||||
fits, failedPredicates, err := podFitsOnNode(
|
||||
pod,
|
||||
meta,
|
||||
g.cachedNodeInfoMap[nodeName],
|
||||
g.predicates,
|
||||
g.cache,
|
||||
g.equivalenceCache,
|
||||
nodeCache,
|
||||
g.schedulingQueue,
|
||||
g.alwaysCheckAllPredicates,
|
||||
equivCacheInfo,
|
||||
equivClass,
|
||||
)
|
||||
if err != nil {
|
||||
predicateResultLock.Lock()
|
||||
@ -370,14 +436,24 @@ func (g *genericScheduler) findNodesThatFit(pod *v1.Pod, nodes []*v1.Node) ([]*v
|
||||
return
|
||||
}
|
||||
if fits {
|
||||
filtered[atomic.AddInt32(&filteredLen, 1)-1] = nodes[i]
|
||||
length := atomic.AddInt32(&filteredLen, 1)
|
||||
if length > numNodesToFind {
|
||||
cancel()
|
||||
atomic.AddInt32(&filteredLen, -1)
|
||||
} else {
|
||||
filtered[length-1] = g.cachedNodeInfoMap[nodeName].Node()
|
||||
}
|
||||
} else {
|
||||
predicateResultLock.Lock()
|
||||
failedPredicateMap[nodeName] = failedPredicates
|
||||
predicateResultLock.Unlock()
|
||||
}
|
||||
}
|
||||
workqueue.Parallelize(16, len(nodes), checkNode)
|
||||
|
||||
// Stops searching for more nodes once the configured number of feasible nodes
|
||||
// is found.
|
||||
workqueue.ParallelizeUntil(ctx, 16, int(allNodes), checkNode)
|
||||
|
||||
filtered = filtered[:filteredLen]
|
||||
if len(errs) > 0 {
|
||||
return []*v1.Node{}, FailedPredicateMap{}, errors.CreateAggregateFromMessageCountMap(errs)
|
||||
@ -392,7 +468,7 @@ func (g *genericScheduler) findNodesThatFit(pod *v1.Pod, nodes []*v1.Node) ([]*v
|
||||
filteredList, failedMap, err := extender.Filter(pod, filtered, g.cachedNodeInfoMap)
|
||||
if err != nil {
|
||||
if extender.IsIgnorable() {
|
||||
glog.Warningf("Skipping extender %v as it returned error %v and has ignorable flag set",
|
||||
klog.Warningf("Skipping extender %v as it returned error %v and has ignorable flag set",
|
||||
extender, err)
|
||||
continue
|
||||
} else {
|
||||
@ -418,8 +494,8 @@ func (g *genericScheduler) findNodesThatFit(pod *v1.Pod, nodes []*v1.Node) ([]*v
|
||||
// addNominatedPods adds pods with equal or greater priority which are nominated
|
||||
// to run on the node given in nodeInfo to meta and nodeInfo. It returns 1) whether
|
||||
// any pod was found, 2) augmented meta data, 3) augmented nodeInfo.
|
||||
func addNominatedPods(podPriority int32, meta algorithm.PredicateMetadata,
|
||||
nodeInfo *schedulercache.NodeInfo, queue SchedulingQueue) (bool, algorithm.PredicateMetadata,
|
||||
func addNominatedPods(pod *v1.Pod, meta algorithm.PredicateMetadata,
|
||||
nodeInfo *schedulercache.NodeInfo, queue internalqueue.SchedulingQueue) (bool, algorithm.PredicateMetadata,
|
||||
*schedulercache.NodeInfo) {
|
||||
if queue == nil || nodeInfo == nil || nodeInfo.Node() == nil {
|
||||
// This may happen only in tests.
|
||||
@ -435,7 +511,7 @@ func addNominatedPods(podPriority int32, meta algorithm.PredicateMetadata,
|
||||
}
|
||||
nodeInfoOut := nodeInfo.Clone()
|
||||
for _, p := range nominatedPods {
|
||||
if util.GetPodPriority(p) >= podPriority {
|
||||
if util.GetPodPriority(p) >= util.GetPodPriority(pod) && p.UID != pod.UID {
|
||||
nodeInfoOut.AddPod(p)
|
||||
if metaOut != nil {
|
||||
metaOut.AddPod(p, nodeInfoOut)
|
||||
@ -460,11 +536,10 @@ func podFitsOnNode(
|
||||
meta algorithm.PredicateMetadata,
|
||||
info *schedulercache.NodeInfo,
|
||||
predicateFuncs map[string]algorithm.FitPredicate,
|
||||
cache schedulercache.Cache,
|
||||
ecache *EquivalenceCache,
|
||||
queue SchedulingQueue,
|
||||
nodeCache *equivalence.NodeCache,
|
||||
queue internalqueue.SchedulingQueue,
|
||||
alwaysCheckAllPredicates bool,
|
||||
equivCacheInfo *equivalenceClassInfo,
|
||||
equivClass *equivalence.Class,
|
||||
) (bool, []algorithm.PredicateFailureReason, error) {
|
||||
var (
|
||||
eCacheAvailable bool
|
||||
@ -494,15 +569,15 @@ func podFitsOnNode(
|
||||
metaToUse := meta
|
||||
nodeInfoToUse := info
|
||||
if i == 0 {
|
||||
podsAdded, metaToUse, nodeInfoToUse = addNominatedPods(util.GetPodPriority(pod), meta, info, queue)
|
||||
podsAdded, metaToUse, nodeInfoToUse = addNominatedPods(pod, meta, info, queue)
|
||||
} else if !podsAdded || len(failedPredicates) != 0 {
|
||||
break
|
||||
}
|
||||
// Bypass eCache if node has any nominated pods.
|
||||
// TODO(bsalamat): consider using eCache and adding proper eCache invalidations
|
||||
// when pods are nominated or their nominations change.
|
||||
eCacheAvailable = equivCacheInfo != nil && !podsAdded
|
||||
for _, predicateKey := range predicates.Ordering() {
|
||||
eCacheAvailable = equivClass != nil && nodeCache != nil && !podsAdded
|
||||
for predicateID, predicateKey := range predicates.Ordering() {
|
||||
var (
|
||||
fit bool
|
||||
reasons []algorithm.PredicateFailureReason
|
||||
@ -511,7 +586,7 @@ func podFitsOnNode(
|
||||
//TODO (yastij) : compute average predicate restrictiveness to export it as Prometheus metric
|
||||
if predicate, exist := predicateFuncs[predicateKey]; exist {
|
||||
if eCacheAvailable {
|
||||
fit, reasons, err = ecache.RunPredicate(predicate, predicateKey, pod, metaToUse, nodeInfoToUse, equivCacheInfo, cache)
|
||||
fit, reasons, err = nodeCache.RunPredicate(predicate, predicateKey, predicateID, pod, metaToUse, nodeInfoToUse, equivClass)
|
||||
} else {
|
||||
fit, reasons, err = predicate(pod, metaToUse, nodeInfoToUse)
|
||||
}
|
||||
@ -524,8 +599,8 @@ func podFitsOnNode(
|
||||
failedPredicates = append(failedPredicates, reasons...)
|
||||
// if alwaysCheckAllPredicates is false, short circuit all predicates when one predicate fails.
|
||||
if !alwaysCheckAllPredicates {
|
||||
glog.V(5).Infoln("since alwaysCheckAllPredicates has not been set, the predicate" +
|
||||
"evaluation is short circuited and there are chances" +
|
||||
klog.V(5).Infoln("since alwaysCheckAllPredicates has not been set, the predicate " +
|
||||
"evaluation is short circuited and there are chances " +
|
||||
"of other predicates failing as well.")
|
||||
break
|
||||
}
|
||||
@ -578,37 +653,38 @@ func PrioritizeNodes(
|
||||
|
||||
results := make([]schedulerapi.HostPriorityList, len(priorityConfigs), len(priorityConfigs))
|
||||
|
||||
for i, priorityConfig := range priorityConfigs {
|
||||
if priorityConfig.Function != nil {
|
||||
// DEPRECATED
|
||||
wg.Add(1)
|
||||
go func(index int, config algorithm.PriorityConfig) {
|
||||
defer wg.Done()
|
||||
var err error
|
||||
results[index], err = config.Function(pod, nodeNameToInfo, nodes)
|
||||
if err != nil {
|
||||
appendError(err)
|
||||
}
|
||||
}(i, priorityConfig)
|
||||
} else {
|
||||
// DEPRECATED: we can remove this when all priorityConfigs implement the
|
||||
// Map-Reduce pattern.
|
||||
workqueue.ParallelizeUntil(context.TODO(), 16, len(priorityConfigs), func(i int) {
|
||||
priorityConfig := priorityConfigs[i]
|
||||
if priorityConfig.Function == nil {
|
||||
results[i] = make(schedulerapi.HostPriorityList, len(nodes))
|
||||
return
|
||||
}
|
||||
}
|
||||
processNode := func(index int) {
|
||||
nodeInfo := nodeNameToInfo[nodes[index].Name]
|
||||
|
||||
var err error
|
||||
for i := range priorityConfigs {
|
||||
if priorityConfigs[i].Function != nil {
|
||||
results[i], err = priorityConfig.Function(pod, nodeNameToInfo, nodes)
|
||||
if err != nil {
|
||||
appendError(err)
|
||||
}
|
||||
})
|
||||
|
||||
workqueue.ParallelizeUntil(context.TODO(), 16, len(nodes), func(index int) {
|
||||
nodeInfo := nodeNameToInfo[nodes[index].Name]
|
||||
for i, priorityConfig := range priorityConfigs {
|
||||
if priorityConfig.Function != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
var err error
|
||||
results[i][index], err = priorityConfigs[i].Map(pod, meta, nodeInfo)
|
||||
if err != nil {
|
||||
appendError(err)
|
||||
return
|
||||
results[i][index].Host = nodes[index].Name
|
||||
}
|
||||
}
|
||||
}
|
||||
workqueue.Parallelize(16, len(nodes), processNode)
|
||||
})
|
||||
|
||||
for i, priorityConfig := range priorityConfigs {
|
||||
if priorityConfig.Reduce == nil {
|
||||
continue
|
||||
@ -619,9 +695,9 @@ func PrioritizeNodes(
|
||||
if err := config.Reduce(pod, meta, nodeNameToInfo, results[index]); err != nil {
|
||||
appendError(err)
|
||||
}
|
||||
if glog.V(10) {
|
||||
if klog.V(10) {
|
||||
for _, hostPriority := range results[index] {
|
||||
glog.Infof("%v -> %v: %v, Score: (%d)", pod.Name, hostPriority.Host, config.Name, hostPriority.Score)
|
||||
klog.Infof("%v -> %v: %v, Score: (%d)", util.GetPodFullName(pod), hostPriority.Host, config.Name, hostPriority.Score)
|
||||
}
|
||||
}
|
||||
}(i, priorityConfig)
|
||||
@ -659,6 +735,9 @@ func PrioritizeNodes(
|
||||
mu.Lock()
|
||||
for i := range *prioritizedList {
|
||||
host, score := (*prioritizedList)[i].Host, (*prioritizedList)[i].Score
|
||||
if klog.V(10) {
|
||||
klog.Infof("%v -> %v: %v, Score: (%d)", util.GetPodFullName(pod), host, ext.Name(), score)
|
||||
}
|
||||
combinedScores[host] += score * weight
|
||||
}
|
||||
mu.Unlock()
|
||||
@ -671,9 +750,9 @@ func PrioritizeNodes(
|
||||
}
|
||||
}
|
||||
|
||||
if glog.V(10) {
|
||||
if klog.V(10) {
|
||||
for i := range result {
|
||||
glog.V(10).Infof("Host %s => Score %d", result[i].Host, result[i].Score)
|
||||
klog.Infof("Host %s => Score %d", result[i].Host, result[i].Score)
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
@ -802,7 +881,7 @@ func pickOneNodeForPreemption(nodesToVictims map[*v1.Node]*schedulerapi.Victims)
|
||||
if lenNodes2 > 0 {
|
||||
return minNodes2[0]
|
||||
}
|
||||
glog.Errorf("Error in logic of node scoring for preemption. We should never reach here!")
|
||||
klog.Errorf("Error in logic of node scoring for preemption. We should never reach here!")
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -813,10 +892,9 @@ func selectNodesForPreemption(pod *v1.Pod,
|
||||
potentialNodes []*v1.Node,
|
||||
predicates map[string]algorithm.FitPredicate,
|
||||
metadataProducer algorithm.PredicateMetadataProducer,
|
||||
queue SchedulingQueue,
|
||||
queue internalqueue.SchedulingQueue,
|
||||
pdbs []*policy.PodDisruptionBudget,
|
||||
) (map[*v1.Node]*schedulerapi.Victims, error) {
|
||||
|
||||
nodeToVictims := map[*v1.Node]*schedulerapi.Victims{}
|
||||
var resultLock sync.Mutex
|
||||
|
||||
@ -839,7 +917,7 @@ func selectNodesForPreemption(pod *v1.Pod,
|
||||
resultLock.Unlock()
|
||||
}
|
||||
}
|
||||
workqueue.Parallelize(16, len(potentialNodes), checkNode)
|
||||
workqueue.ParallelizeUntil(context.TODO(), 16, len(potentialNodes), checkNode)
|
||||
return nodeToVictims, nil
|
||||
}
|
||||
|
||||
@ -902,9 +980,12 @@ func selectVictimsOnNode(
|
||||
meta algorithm.PredicateMetadata,
|
||||
nodeInfo *schedulercache.NodeInfo,
|
||||
fitPredicates map[string]algorithm.FitPredicate,
|
||||
queue SchedulingQueue,
|
||||
queue internalqueue.SchedulingQueue,
|
||||
pdbs []*policy.PodDisruptionBudget,
|
||||
) ([]*v1.Pod, int, bool) {
|
||||
if nodeInfo == nil {
|
||||
return nil, 0, false
|
||||
}
|
||||
potentialVictims := util.SortableList{CompFunc: util.HigherPriorityPod}
|
||||
nodeInfoCopy := nodeInfo.Clone()
|
||||
|
||||
@ -935,9 +1016,9 @@ func selectVictimsOnNode(
|
||||
// that we should check is if the "pod" is failing to schedule due to pod affinity
|
||||
// failure.
|
||||
// TODO(bsalamat): Consider checking affinity to lower priority pods if feasible with reasonable performance.
|
||||
if fits, _, err := podFitsOnNode(pod, meta, nodeInfoCopy, fitPredicates, nil, nil, queue, false, nil); !fits {
|
||||
if fits, _, err := podFitsOnNode(pod, meta, nodeInfoCopy, fitPredicates, nil, queue, false, nil); !fits {
|
||||
if err != nil {
|
||||
glog.Warningf("Encountered error while selecting victims on node %v: %v", nodeInfo.Node().Name, err)
|
||||
klog.Warningf("Encountered error while selecting victims on node %v: %v", nodeInfo.Node().Name, err)
|
||||
}
|
||||
return nil, 0, false
|
||||
}
|
||||
@ -949,11 +1030,11 @@ func selectVictimsOnNode(
|
||||
violatingVictims, nonViolatingVictims := filterPodsWithPDBViolation(potentialVictims.Items, pdbs)
|
||||
reprievePod := func(p *v1.Pod) bool {
|
||||
addPod(p)
|
||||
fits, _, _ := podFitsOnNode(pod, meta, nodeInfoCopy, fitPredicates, nil, nil, queue, false, nil)
|
||||
fits, _, _ := podFitsOnNode(pod, meta, nodeInfoCopy, fitPredicates, nil, queue, false, nil)
|
||||
if !fits {
|
||||
removePod(p)
|
||||
victims = append(victims, p)
|
||||
glog.V(5).Infof("Pod %v is a potential preemption victim on node %v.", p.Name, nodeInfo.Node().Name)
|
||||
klog.V(5).Infof("Pod %v is a potential preemption victim on node %v.", p.Name, nodeInfo.Node().Name)
|
||||
}
|
||||
return fits
|
||||
}
|
||||
@ -971,7 +1052,7 @@ func selectVictimsOnNode(
|
||||
|
||||
// nodesWherePreemptionMightHelp returns a list of nodes with failed predicates
|
||||
// that may be satisfied by removing pods from the node.
|
||||
func nodesWherePreemptionMightHelp(pod *v1.Pod, nodes []*v1.Node, failedPredicatesMap FailedPredicateMap) []*v1.Node {
|
||||
func nodesWherePreemptionMightHelp(nodes []*v1.Node, failedPredicatesMap FailedPredicateMap) []*v1.Node {
|
||||
potentialNodes := []*v1.Node{}
|
||||
for _, node := range nodes {
|
||||
unresolvableReasonExist := false
|
||||
@ -985,11 +1066,19 @@ func nodesWherePreemptionMightHelp(pod *v1.Pod, nodes []*v1.Node, failedPredicat
|
||||
switch failedPredicate {
|
||||
case
|
||||
predicates.ErrNodeSelectorNotMatch,
|
||||
predicates.ErrPodAffinityRulesNotMatch,
|
||||
predicates.ErrPodNotMatchHostName,
|
||||
predicates.ErrTaintsTolerationsNotMatch,
|
||||
predicates.ErrNodeLabelPresenceViolated,
|
||||
// Node conditions won't change when scheduler simulates removal of preemption victims.
|
||||
// So, it is pointless to try nodes that have not been able to host the pod due to node
|
||||
// conditions. These include ErrNodeNotReady, ErrNodeUnderPIDPressure, ErrNodeUnderMemoryPressure, ....
|
||||
predicates.ErrNodeNotReady,
|
||||
predicates.ErrNodeNetworkUnavailable,
|
||||
predicates.ErrNodeUnderDiskPressure,
|
||||
predicates.ErrNodeUnderPIDPressure,
|
||||
predicates.ErrNodeUnderMemoryPressure,
|
||||
predicates.ErrNodeOutOfDisk,
|
||||
predicates.ErrNodeUnschedulable,
|
||||
predicates.ErrNodeUnknownCondition,
|
||||
predicates.ErrVolumeZoneConflict,
|
||||
@ -997,11 +1086,10 @@ func nodesWherePreemptionMightHelp(pod *v1.Pod, nodes []*v1.Node, failedPredicat
|
||||
predicates.ErrVolumeBindConflict:
|
||||
unresolvableReasonExist = true
|
||||
break
|
||||
// TODO(bsalamat): Please add affinity failure cases once we have specific affinity failure errors.
|
||||
}
|
||||
}
|
||||
if !found || !unresolvableReasonExist {
|
||||
glog.V(3).Infof("Node %v is a potential node for preemption.", node.Name)
|
||||
klog.V(3).Infof("Node %v is a potential node for preemption.", node.Name)
|
||||
potentialNodes = append(potentialNodes, node)
|
||||
}
|
||||
}
|
||||
@ -1057,9 +1145,9 @@ func podPassesBasicChecks(pod *v1.Pod, pvcLister corelisters.PersistentVolumeCla
|
||||
|
||||
// NewGenericScheduler creates a genericScheduler object.
|
||||
func NewGenericScheduler(
|
||||
cache schedulercache.Cache,
|
||||
eCache *EquivalenceCache,
|
||||
podQueue SchedulingQueue,
|
||||
cache schedulerinternalcache.Cache,
|
||||
eCache *equivalence.Cache,
|
||||
podQueue internalqueue.SchedulingQueue,
|
||||
predicates map[string]algorithm.FitPredicate,
|
||||
predicateMetaProducer algorithm.PredicateMetadataProducer,
|
||||
prioritizers []algorithm.PriorityConfig,
|
||||
@ -1067,8 +1155,10 @@ func NewGenericScheduler(
|
||||
extenders []algorithm.SchedulerExtender,
|
||||
volumeBinder *volumebinder.VolumeBinder,
|
||||
pvcLister corelisters.PersistentVolumeClaimLister,
|
||||
pdbLister algorithm.PDBLister,
|
||||
alwaysCheckAllPredicates bool,
|
||||
disablePreemption bool,
|
||||
percentageOfNodesToScore int32,
|
||||
) algorithm.ScheduleAlgorithm {
|
||||
return &genericScheduler{
|
||||
cache: cache,
|
||||
@ -1082,7 +1172,9 @@ func NewGenericScheduler(
|
||||
cachedNodeInfoMap: make(map[string]*schedulercache.NodeInfo),
|
||||
volumeBinder: volumeBinder,
|
||||
pvcLister: pvcLister,
|
||||
pdbLister: pdbLister,
|
||||
alwaysCheckAllPredicates: alwaysCheckAllPredicates,
|
||||
disablePreemption: disablePreemption,
|
||||
percentageOfNodesToScore: percentageOfNodesToScore,
|
||||
}
|
||||
}
|
||||
|
816
vendor/k8s.io/kubernetes/pkg/scheduler/core/generic_scheduler_test.go
generated
vendored
File diff suppressed because it is too large
131
vendor/k8s.io/kubernetes/pkg/scheduler/factory/BUILD
generated
vendored
@ -1,128 +1,88 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"cache_comparer.go",
|
||||
"factory.go",
|
||||
"plugins.go",
|
||||
] + select({
|
||||
"@io_bazel_rules_go//go/platform:android": [
|
||||
"signal.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:darwin": [
|
||||
"signal.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:dragonfly": [
|
||||
"signal.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:freebsd": [
|
||||
"signal.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:linux": [
|
||||
"signal.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:nacl": [
|
||||
"signal.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:netbsd": [
|
||||
"signal.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:openbsd": [
|
||||
"signal.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:plan9": [
|
||||
"signal.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:solaris": [
|
||||
"signal.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:windows": [
|
||||
"signal_windows.go",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
"signal.go",
|
||||
"signal_windows.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/pkg/scheduler/factory",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//pkg/api/v1/pod:go_default_library",
|
||||
"//pkg/apis/core/helper:go_default_library",
|
||||
"//pkg/features:go_default_library",
|
||||
"//pkg/kubelet/apis:go_default_library",
|
||||
"//pkg/scheduler:go_default_library",
|
||||
"//pkg/scheduler/algorithm:go_default_library",
|
||||
"//pkg/scheduler/algorithm/predicates:go_default_library",
|
||||
"//pkg/scheduler/algorithm/priorities:go_default_library",
|
||||
"//pkg/scheduler/api:go_default_library",
|
||||
"//pkg/scheduler/api/validation:go_default_library",
|
||||
"//pkg/scheduler/cache:go_default_library",
|
||||
"//pkg/scheduler/core:go_default_library",
|
||||
"//pkg/scheduler/core/equivalence:go_default_library",
|
||||
"//pkg/scheduler/internal/cache:go_default_library",
|
||||
"//pkg/scheduler/internal/cache/debugger:go_default_library",
|
||||
"//pkg/scheduler/internal/queue:go_default_library",
|
||||
"//pkg/scheduler/util:go_default_library",
|
||||
"//pkg/scheduler/volumebinder:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
"//vendor/k8s.io/client-go/informers/apps/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/informers/extensions/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/informers/policy/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/informers/storage/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//vendor/k8s.io/client-go/listers/apps/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/listers/extensions/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/listers/policy/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/listers/storage/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/api/storage/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/informers/apps/v1:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/informers/policy/v1beta1:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/informers/storage/v1:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/listers/apps/v1:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/listers/policy/v1beta1:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/listers/storage/v1:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
|
||||
"//vendor/k8s.io/klog:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"cache_comparer_test.go",
|
||||
"factory_test.go",
|
||||
"plugins_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//pkg/api/testing:go_default_library",
|
||||
"//pkg/scheduler:go_default_library",
|
||||
"//pkg/scheduler/algorithm:go_default_library",
|
||||
"//pkg/scheduler/algorithm/priorities:go_default_library",
|
||||
"//pkg/scheduler/api:go_default_library",
|
||||
"//pkg/scheduler/api/latest:go_default_library",
|
||||
"//pkg/scheduler/cache:go_default_library",
|
||||
"//pkg/scheduler/core:go_default_library",
|
||||
"//pkg/scheduler/internal/cache/fake:go_default_library",
|
||||
"//pkg/scheduler/internal/queue:go_default_library",
|
||||
"//pkg/scheduler/testing:go_default_library",
|
||||
"//pkg/scheduler/util:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/informers:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/testing:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/assert:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//vendor/k8s.io/client-go/informers:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//vendor/k8s.io/client-go/rest:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
|
||||
"//vendor/k8s.io/client-go/util/testing:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@ -137,4 +97,5 @@ filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
228
vendor/k8s.io/kubernetes/pkg/scheduler/factory/cache_comparer_test.go
generated
vendored
@ -1,228 +0,0 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package factory
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
policy "k8s.io/api/policy/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
|
||||
)
|
||||
|
||||
func TestCompareNodes(t *testing.T) {
|
||||
compare := compareStrategy{}
|
||||
|
||||
tests := []struct {
|
||||
actual []string
|
||||
cached []string
|
||||
missing []string
|
||||
redundant []string
|
||||
}{
|
||||
{
|
||||
actual: []string{"foo", "bar"},
|
||||
cached: []string{"bar", "foo", "foobar"},
|
||||
missing: []string{},
|
||||
redundant: []string{"foobar"},
|
||||
},
|
||||
{
|
||||
actual: []string{"foo", "bar", "foobar"},
|
||||
cached: []string{"bar", "foo"},
|
||||
missing: []string{"foobar"},
|
||||
redundant: []string{},
|
||||
},
|
||||
{
|
||||
actual: []string{"foo", "bar", "foobar"},
|
||||
cached: []string{"bar", "foobar", "foo"},
|
||||
missing: []string{},
|
||||
redundant: []string{},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
nodes := []*v1.Node{}
|
||||
for _, nodeName := range test.actual {
|
||||
node := &v1.Node{}
|
||||
node.Name = nodeName
|
||||
nodes = append(nodes, node)
|
||||
}
|
||||
|
||||
nodeInfo := make(map[string]*schedulercache.NodeInfo)
|
||||
for _, nodeName := range test.cached {
|
||||
nodeInfo[nodeName] = &schedulercache.NodeInfo{}
|
||||
}
|
||||
|
||||
m, r := compare.CompareNodes(nodes, nodeInfo)
|
||||
|
||||
if !reflect.DeepEqual(m, test.missing) {
|
||||
t.Errorf("missing expected to be %s; got %s", test.missing, m)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(r, test.redundant) {
|
||||
t.Errorf("redundant expected to be %s; got %s", test.redundant, r)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestComparePods(t *testing.T) {
|
||||
compare := compareStrategy{}
|
||||
|
||||
tests := []struct {
|
||||
actual []string
|
||||
cached []string
|
||||
queued []string
|
||||
missing []string
|
||||
redundant []string
|
||||
}{
|
||||
{
|
||||
actual: []string{"foo", "bar"},
|
||||
cached: []string{"bar", "foo", "foobar"},
|
||||
queued: []string{},
|
||||
missing: []string{},
|
||||
redundant: []string{"foobar"},
|
||||
},
|
||||
{
|
||||
actual: []string{"foo", "bar"},
|
||||
cached: []string{"foo", "foobar"},
|
||||
queued: []string{"bar"},
|
||||
missing: []string{},
|
||||
redundant: []string{"foobar"},
|
||||
},
|
||||
{
|
||||
actual: []string{"foo", "bar", "foobar"},
|
||||
cached: []string{"bar", "foo"},
|
||||
queued: []string{},
|
||||
missing: []string{"foobar"},
|
||||
redundant: []string{},
|
||||
},
|
||||
{
|
||||
actual: []string{"foo", "bar", "foobar"},
|
||||
cached: []string{"foo"},
|
||||
queued: []string{"bar"},
|
||||
missing: []string{"foobar"},
|
||||
redundant: []string{},
|
||||
},
|
||||
{
|
||||
actual: []string{"foo", "bar", "foobar"},
|
||||
cached: []string{"bar", "foobar", "foo"},
|
||||
queued: []string{},
|
||||
missing: []string{},
|
||||
redundant: []string{},
|
||||
},
|
||||
{
|
||||
actual: []string{"foo", "bar", "foobar"},
|
||||
cached: []string{"foobar", "foo"},
|
||||
queued: []string{"bar"},
|
||||
missing: []string{},
|
||||
redundant: []string{},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
pods := []*v1.Pod{}
|
||||
for _, uid := range test.actual {
|
||||
pod := &v1.Pod{}
|
||||
pod.UID = types.UID(uid)
|
||||
pods = append(pods, pod)
|
||||
}
|
||||
|
||||
queuedPods := []*v1.Pod{}
|
||||
for _, uid := range test.queued {
|
||||
pod := &v1.Pod{}
|
||||
pod.UID = types.UID(uid)
|
||||
queuedPods = append(queuedPods, pod)
|
||||
}
|
||||
|
||||
nodeInfo := make(map[string]*schedulercache.NodeInfo)
|
||||
for _, uid := range test.cached {
|
||||
pod := &v1.Pod{}
|
||||
pod.UID = types.UID(uid)
|
||||
pod.Namespace = "ns"
|
||||
pod.Name = uid
|
||||
|
||||
nodeInfo[uid] = schedulercache.NewNodeInfo(pod)
|
||||
}
|
||||
|
||||
m, r := compare.ComparePods(pods, queuedPods, nodeInfo)
|
||||
|
||||
if !reflect.DeepEqual(m, test.missing) {
|
||||
t.Errorf("missing expected to be %s; got %s", test.missing, m)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(r, test.redundant) {
|
||||
t.Errorf("redundant expected to be %s; got %s", test.redundant, r)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestComparePdbs(t *testing.T) {
|
||||
compare := compareStrategy{}
|
||||
|
||||
tests := []struct {
|
||||
actual []string
|
||||
cached []string
|
||||
missing []string
|
||||
redundant []string
|
||||
}{
|
||||
{
|
||||
actual: []string{"foo", "bar"},
|
||||
cached: []string{"bar", "foo", "foobar"},
|
||||
missing: []string{},
|
||||
redundant: []string{"foobar"},
|
||||
},
|
||||
{
|
||||
actual: []string{"foo", "bar", "foobar"},
|
||||
cached: []string{"bar", "foo"},
|
||||
missing: []string{"foobar"},
|
||||
redundant: []string{},
|
||||
},
|
||||
{
|
||||
actual: []string{"foo", "bar", "foobar"},
|
||||
cached: []string{"bar", "foobar", "foo"},
|
||||
missing: []string{},
|
||||
redundant: []string{},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
pdbs := []*policy.PodDisruptionBudget{}
|
||||
for _, uid := range test.actual {
|
||||
pdb := &policy.PodDisruptionBudget{}
|
||||
pdb.UID = types.UID(uid)
|
||||
pdbs = append(pdbs, pdb)
|
||||
}
|
||||
|
||||
cache := make(map[string]*policy.PodDisruptionBudget)
|
||||
for _, uid := range test.cached {
|
||||
pdb := &policy.PodDisruptionBudget{}
|
||||
pdb.UID = types.UID(uid)
|
||||
cache[uid] = pdb
|
||||
}
|
||||
|
||||
m, r := compare.ComparePdbs(pdbs, cache)
|
||||
|
||||
if !reflect.DeepEqual(m, test.missing) {
|
||||
t.Errorf("missing expected to be %s; got %s", test.missing, m)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(r, test.redundant) {
|
||||
t.Errorf("redundant expected to be %s; got %s", test.redundant, r)
|
||||
}
|
||||
}
|
||||
}
|
634
vendor/k8s.io/kubernetes/pkg/scheduler/factory/factory.go
generated
vendored
File diff suppressed because it is too large
366
vendor/k8s.io/kubernetes/pkg/scheduler/factory/factory_test.go
generated
vendored
@ -19,8 +19,6 @@ package factory
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
@ -28,20 +26,20 @@ import (
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/client-go/informers"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
fakeV1 "k8s.io/client-go/kubernetes/typed/core/v1/fake"
|
||||
clienttesting "k8s.io/client-go/testing"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
utiltesting "k8s.io/client-go/util/testing"
|
||||
apitesting "k8s.io/kubernetes/pkg/api/testing"
|
||||
"k8s.io/kubernetes/pkg/scheduler"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm"
|
||||
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
|
||||
latestschedulerapi "k8s.io/kubernetes/pkg/scheduler/api/latest"
|
||||
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
|
||||
"k8s.io/kubernetes/pkg/scheduler/core"
|
||||
fakecache "k8s.io/kubernetes/pkg/scheduler/internal/cache/fake"
|
||||
internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
|
||||
schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing"
|
||||
"k8s.io/kubernetes/pkg/scheduler/util"
|
||||
)
|
||||
@ -49,18 +47,14 @@ import (
|
||||
const (
|
||||
enableEquivalenceCache = true
|
||||
disablePodPreemption = false
|
||||
bindTimeoutSeconds = 600
|
||||
)
|
||||
|
||||
func TestCreate(t *testing.T) {
|
||||
handler := utiltesting.FakeHandler{
|
||||
StatusCode: 500,
|
||||
ResponseBody: "",
|
||||
T: t,
|
||||
}
|
||||
server := httptest.NewServer(&handler)
|
||||
defer server.Close()
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
|
||||
factory := newConfigFactory(client, v1.DefaultHardPodAffinitySymmetricWeight)
|
||||
client := fake.NewSimpleClientset()
|
||||
stopCh := make(chan struct{})
|
||||
defer close(stopCh)
|
||||
factory := newConfigFactory(client, v1.DefaultHardPodAffinitySymmetricWeight, stopCh)
|
||||
factory.Create()
|
||||
}
|
||||
|
||||
@ -70,15 +64,10 @@ func TestCreateFromConfig(t *testing.T) {
|
||||
var configData []byte
|
||||
var policy schedulerapi.Policy
|
||||
|
||||
handler := utiltesting.FakeHandler{
|
||||
StatusCode: 500,
|
||||
ResponseBody: "",
|
||||
T: t,
|
||||
}
|
||||
server := httptest.NewServer(&handler)
|
||||
defer server.Close()
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
|
||||
factory := newConfigFactory(client, v1.DefaultHardPodAffinitySymmetricWeight)
|
||||
client := fake.NewSimpleClientset()
|
||||
stopCh := make(chan struct{})
|
||||
defer close(stopCh)
|
||||
factory := newConfigFactory(client, v1.DefaultHardPodAffinitySymmetricWeight, stopCh)
|
||||
|
||||
// Pre-register some predicate and priority functions
|
||||
RegisterFitPredicate("PredicateOne", PredicateOne)
|
||||
@ -115,15 +104,10 @@ func TestCreateFromConfigWithHardPodAffinitySymmetricWeight(t *testing.T) {
|
||||
var configData []byte
|
||||
var policy schedulerapi.Policy
|
||||
|
||||
handler := utiltesting.FakeHandler{
|
||||
StatusCode: 500,
|
||||
ResponseBody: "",
|
||||
T: t,
|
||||
}
|
||||
server := httptest.NewServer(&handler)
|
||||
defer server.Close()
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
|
||||
factory := newConfigFactory(client, v1.DefaultHardPodAffinitySymmetricWeight)
|
||||
client := fake.NewSimpleClientset()
|
||||
stopCh := make(chan struct{})
|
||||
defer close(stopCh)
|
||||
factory := newConfigFactory(client, v1.DefaultHardPodAffinitySymmetricWeight, stopCh)
|
||||
|
||||
// Pre-register some predicate and priority functions
|
||||
RegisterFitPredicate("PredicateOne", PredicateOne)
|
||||
@ -161,15 +145,10 @@ func TestCreateFromEmptyConfig(t *testing.T) {
|
||||
var configData []byte
|
||||
var policy schedulerapi.Policy
|
||||
|
||||
handler := utiltesting.FakeHandler{
|
||||
StatusCode: 500,
|
||||
ResponseBody: "",
|
||||
T: t,
|
||||
}
|
||||
server := httptest.NewServer(&handler)
|
||||
defer server.Close()
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
|
||||
factory := newConfigFactory(client, v1.DefaultHardPodAffinitySymmetricWeight)
|
||||
client := fake.NewSimpleClientset()
|
||||
stopCh := make(chan struct{})
|
||||
defer close(stopCh)
|
||||
factory := newConfigFactory(client, v1.DefaultHardPodAffinitySymmetricWeight, stopCh)
|
||||
|
||||
configData = []byte(`{}`)
|
||||
if err := runtime.DecodeInto(latestschedulerapi.Codec, configData, &policy); err != nil {
|
||||
@ -183,15 +162,10 @@ func TestCreateFromEmptyConfig(t *testing.T) {
|
||||
// predicate/priority.
|
||||
// The predicate/priority from DefaultProvider will be used.
|
||||
func TestCreateFromConfigWithUnspecifiedPredicatesOrPriorities(t *testing.T) {
|
||||
handler := utiltesting.FakeHandler{
|
||||
StatusCode: 500,
|
||||
ResponseBody: "",
|
||||
T: t,
|
||||
}
|
||||
server := httptest.NewServer(&handler)
|
||||
defer server.Close()
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
|
||||
factory := newConfigFactory(client, v1.DefaultHardPodAffinitySymmetricWeight)
|
||||
client := fake.NewSimpleClientset()
|
||||
stopCh := make(chan struct{})
|
||||
defer close(stopCh)
|
||||
factory := newConfigFactory(client, v1.DefaultHardPodAffinitySymmetricWeight, stopCh)
|
||||
|
||||
RegisterFitPredicate("PredicateOne", PredicateOne)
|
||||
RegisterPriorityFunction("PriorityOne", PriorityOne, 1)
|
||||
@ -223,15 +197,10 @@ func TestCreateFromConfigWithUnspecifiedPredicatesOrPriorities(t *testing.T) {
|
||||
// predicate/priority.
|
||||
// Empty predicate/priority sets will be used.
|
||||
func TestCreateFromConfigWithEmptyPredicatesOrPriorities(t *testing.T) {
|
||||
handler := utiltesting.FakeHandler{
|
||||
StatusCode: 500,
|
||||
ResponseBody: "",
|
||||
T: t,
|
||||
}
|
||||
server := httptest.NewServer(&handler)
|
||||
defer server.Close()
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
|
||||
factory := newConfigFactory(client, v1.DefaultHardPodAffinitySymmetricWeight)
|
||||
client := fake.NewSimpleClientset()
|
||||
stopCh := make(chan struct{})
|
||||
defer close(stopCh)
|
||||
factory := newConfigFactory(client, v1.DefaultHardPodAffinitySymmetricWeight, stopCh)
|
||||
|
||||
RegisterFitPredicate("PredicateOne", PredicateOne)
|
||||
RegisterPriorityFunction("PriorityOne", PriorityOne, 1)
|
||||
@ -282,24 +251,16 @@ func TestDefaultErrorFunc(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "bar"},
|
||||
Spec: apitesting.V1DeepEqualSafePodSpec(),
|
||||
}
|
||||
handler := utiltesting.FakeHandler{
|
||||
StatusCode: 200,
|
||||
ResponseBody: runtime.EncodeOrDie(schedulertesting.Test.Codec(), testPod),
|
||||
T: t,
|
||||
}
|
||||
mux := http.NewServeMux()
|
||||
|
||||
// FakeHandler mustn't be sent requests other than the one you want to test.
|
||||
mux.Handle(schedulertesting.Test.ResourcePath(string(v1.ResourcePods), "bar", "foo"), &handler)
|
||||
server := httptest.NewServer(mux)
|
||||
defer server.Close()
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
|
||||
factory := newConfigFactory(client, v1.DefaultHardPodAffinitySymmetricWeight)
|
||||
queue := &core.FIFO{FIFO: cache.NewFIFO(cache.MetaNamespaceKeyFunc)}
|
||||
client := fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testPod}})
|
||||
stopCh := make(chan struct{})
|
||||
defer close(stopCh)
|
||||
factory := newConfigFactory(client, v1.DefaultHardPodAffinitySymmetricWeight, stopCh)
|
||||
queue := &internalqueue.FIFO{FIFO: cache.NewFIFO(cache.MetaNamespaceKeyFunc)}
|
||||
podBackoff := util.CreatePodBackoff(1*time.Millisecond, 1*time.Second)
|
||||
errFunc := factory.MakeDefaultErrorFunc(podBackoff, queue)
|
||||
|
||||
errFunc(testPod, nil)
|
||||
|
||||
for {
|
||||
// This is a terrible way to do this but I plan on replacing this
|
||||
// whole error handling system in the future. The test will time
|
||||
@ -309,7 +270,27 @@ func TestDefaultErrorFunc(t *testing.T) {
|
||||
if !exists {
|
||||
continue
|
||||
}
|
||||
handler.ValidateRequest(t, schedulertesting.Test.ResourcePath(string(v1.ResourcePods), "bar", "foo"), "GET", nil)
|
||||
requestReceived := false
|
||||
actions := client.Actions()
|
||||
for _, a := range actions {
|
||||
if a.GetVerb() == "get" {
|
||||
getAction, ok := a.(clienttesting.GetAction)
|
||||
if !ok {
|
||||
t.Errorf("Can't cast action object to GetAction interface")
|
||||
break
|
||||
}
|
||||
name := getAction.GetName()
|
||||
ns := a.GetNamespace()
|
||||
if name != "foo" || ns != "bar" {
|
||||
t.Errorf("Expected name %s namespace %s, got %s %s",
|
||||
"foo", "bar", name, ns)
|
||||
}
|
||||
requestReceived = true
|
||||
}
|
||||
}
|
||||
if !requestReceived {
|
||||
t.Errorf("Get pod request not received")
|
||||
}
|
||||
if e, a := testPod, got; !reflect.DeepEqual(e, a) {
|
||||
t.Errorf("Expected %v, got %v", e, a)
|
||||
}
|
||||
@ -331,64 +312,81 @@ func TestNodeEnumerator(t *testing.T) {
|
||||
t.Fatalf("expected %v, got %v", e, a)
|
||||
}
|
||||
for i := range testList.Items {
|
||||
gotObj := me.Get(i)
|
||||
if e, a := testList.Items[i].Name, gotObj.(*v1.Node).Name; e != a {
|
||||
t.Errorf("Expected %v, got %v", e, a)
|
||||
}
|
||||
if e, a := &testList.Items[i], gotObj; !reflect.DeepEqual(e, a) {
|
||||
t.Errorf("Expected %#v, got %v#", e, a)
|
||||
}
|
||||
t.Run(fmt.Sprintf("node enumerator/%v", i), func(t *testing.T) {
|
||||
gotObj := me.Get(i)
|
||||
if e, a := testList.Items[i].Name, gotObj.(*v1.Node).Name; e != a {
|
||||
t.Errorf("Expected %v, got %v", e, a)
|
||||
}
|
||||
if e, a := &testList.Items[i], gotObj; !reflect.DeepEqual(e, a) {
|
||||
t.Errorf("Expected %#v, got %v#", e, a)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBind(t *testing.T) {
|
||||
table := []struct {
|
||||
name string
|
||||
binding *v1.Binding
|
||||
}{
|
||||
{binding: &v1.Binding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: metav1.NamespaceDefault,
|
||||
Name: "foo",
|
||||
{
|
||||
name: "binding can bind and validate request",
|
||||
binding: &v1.Binding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: metav1.NamespaceDefault,
|
||||
Name: "foo",
|
||||
},
|
||||
Target: v1.ObjectReference{
|
||||
Name: "foohost.kubernetes.mydomain.com",
|
||||
},
|
||||
},
|
||||
Target: v1.ObjectReference{
|
||||
Name: "foohost.kubernetes.mydomain.com",
|
||||
},
|
||||
}},
|
||||
},
|
||||
}
|
||||
|
||||
for _, item := range table {
|
||||
handler := utiltesting.FakeHandler{
|
||||
StatusCode: 200,
|
||||
ResponseBody: "",
|
||||
T: t,
|
||||
}
|
||||
server := httptest.NewServer(&handler)
|
||||
defer server.Close()
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
|
||||
b := binder{client}
|
||||
for _, test := range table {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
testBind(test.binding, t)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
if err := b.Bind(item.binding); err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
continue
|
||||
}
|
||||
expectedBody := runtime.EncodeOrDie(schedulertesting.Test.Codec(), item.binding)
|
||||
handler.ValidateRequest(t,
|
||||
schedulertesting.Test.SubResourcePath(string(v1.ResourcePods), metav1.NamespaceDefault, "foo", "binding"),
|
||||
"POST", &expectedBody)
|
||||
func testBind(binding *v1.Binding, t *testing.T) {
|
||||
testPod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: binding.GetName(), Namespace: metav1.NamespaceDefault},
|
||||
Spec: apitesting.V1DeepEqualSafePodSpec(),
|
||||
}
|
||||
client := fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testPod}})
|
||||
|
||||
b := binder{client}
|
||||
|
||||
if err := b.Bind(binding); err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
pod := client.CoreV1().Pods(metav1.NamespaceDefault).(*fakeV1.FakePods)
|
||||
|
||||
bind, err := pod.GetBinding(binding.GetName())
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
expectedBody := runtime.EncodeOrDie(schedulertesting.Test.Codec(), binding)
|
||||
bind.APIVersion = ""
|
||||
bind.Kind = ""
|
||||
body := runtime.EncodeOrDie(schedulertesting.Test.Codec(), bind)
|
||||
if expectedBody != body {
|
||||
t.Errorf("Expected body %s, Got %s", expectedBody, body)
|
||||
}
|
||||
}
|
||||
|
||||
func TestInvalidHardPodAffinitySymmetricWeight(t *testing.T) {
|
||||
handler := utiltesting.FakeHandler{
|
||||
StatusCode: 500,
|
||||
ResponseBody: "",
|
||||
T: t,
|
||||
}
|
||||
server := httptest.NewServer(&handler)
|
||||
defer server.Close()
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
|
||||
client := fake.NewSimpleClientset()
|
||||
// factory of "default-scheduler"
|
||||
factory := newConfigFactory(client, -1)
|
||||
stopCh := make(chan struct{})
|
||||
factory := newConfigFactory(client, -1, stopCh)
|
||||
defer close(stopCh)
|
||||
_, err := factory.Create()
|
||||
if err == nil {
|
||||
t.Errorf("expected err: invalid hardPodAffinitySymmetricWeight, got nothing")
|
||||
@ -396,48 +394,49 @@ func TestInvalidHardPodAffinitySymmetricWeight(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestInvalidFactoryArgs(t *testing.T) {
|
||||
handler := utiltesting.FakeHandler{
|
||||
StatusCode: 500,
|
||||
ResponseBody: "",
|
||||
T: t,
|
||||
}
|
||||
server := httptest.NewServer(&handler)
|
||||
defer server.Close()
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
|
||||
client := fake.NewSimpleClientset()
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
hardPodAffinitySymmetricWeight int32
|
||||
expectErr string
|
||||
}{
|
||||
{
|
||||
name: "symmetric weight below range",
|
||||
hardPodAffinitySymmetricWeight: -1,
|
||||
expectErr: "invalid hardPodAffinitySymmetricWeight: -1, must be in the range 0-100",
|
||||
},
|
||||
{
|
||||
name: "symmetric weight above range",
|
||||
hardPodAffinitySymmetricWeight: 101,
|
||||
expectErr: "invalid hardPodAffinitySymmetricWeight: 101, must be in the range 0-100",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
factory := newConfigFactory(client, test.hardPodAffinitySymmetricWeight)
|
||||
_, err := factory.Create()
|
||||
if err == nil {
|
||||
t.Errorf("expected err: %s, got nothing", test.expectErr)
|
||||
}
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
stopCh := make(chan struct{})
|
||||
factory := newConfigFactory(client, test.hardPodAffinitySymmetricWeight, stopCh)
|
||||
defer close(stopCh)
|
||||
_, err := factory.Create()
|
||||
if err == nil {
|
||||
t.Errorf("expected err: %s, got nothing", test.expectErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestSkipPodUpdate(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
table := []struct {
|
||||
pod *v1.Pod
|
||||
isAssumedPodFunc func(*v1.Pod) bool
|
||||
getPodFunc func(*v1.Pod) *v1.Pod
|
||||
expected bool
|
||||
name string
|
||||
}{
|
||||
// Non-assumed pod should not be skipped.
|
||||
{
|
||||
name: "Non-assumed pod",
|
||||
pod: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod-0",
|
||||
@ -453,9 +452,8 @@ func TestSkipPodUpdate(t *testing.T) {
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
// Pod update (with changes on ResourceVersion, Spec.NodeName and/or
|
||||
// Annotations) for an already assumed pod should be skipped.
|
||||
{
|
||||
name: "with changes on ResourceVersion, Spec.NodeName and/or Annotations",
|
||||
pod: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod-0",
|
||||
@ -483,9 +481,8 @@ func TestSkipPodUpdate(t *testing.T) {
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
// Pod update (with changes on Labels) for an already assumed pod
|
||||
// should not be skipped.
|
||||
{
|
||||
name: "with changes on Labels",
|
||||
pod: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod-0",
|
||||
@ -505,23 +502,26 @@ func TestSkipPodUpdate(t *testing.T) {
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
} {
|
||||
c := &configFactory{
|
||||
schedulerCache: &schedulertesting.FakeCache{
|
||||
IsAssumedPodFunc: test.isAssumedPodFunc,
|
||||
GetPodFunc: test.getPodFunc,
|
||||
},
|
||||
}
|
||||
got := c.skipPodUpdate(test.pod)
|
||||
if got != test.expected {
|
||||
t.Errorf("skipPodUpdate() = %t, expected = %t", got, test.expected)
|
||||
}
|
||||
}
|
||||
for _, test := range table {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
c := &configFactory{
|
||||
schedulerCache: &fakecache.Cache{
|
||||
IsAssumedPodFunc: test.isAssumedPodFunc,
|
||||
GetPodFunc: test.getPodFunc,
|
||||
},
|
||||
}
|
||||
got := c.skipPodUpdate(test.pod)
|
||||
if got != test.expected {
|
||||
t.Errorf("skipPodUpdate() = %t, expected = %t", got, test.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func newConfigFactory(client *clientset.Clientset, hardPodAffinitySymmetricWeight int32) scheduler.Configurator {
|
||||
func newConfigFactory(client clientset.Interface, hardPodAffinitySymmetricWeight int32, stopCh <-chan struct{}) Configurator {
|
||||
informerFactory := informers.NewSharedInformerFactory(client, 0)
|
||||
return NewConfigFactory(
|
||||
return NewConfigFactory(&ConfigFactoryArgs{
|
||||
v1.DefaultSchedulerName,
|
||||
client,
|
||||
informerFactory.Core().V1().Nodes(),
|
||||
@ -529,15 +529,18 @@ func newConfigFactory(client *clientset.Clientset, hardPodAffinitySymmetricWeigh
|
||||
informerFactory.Core().V1().PersistentVolumes(),
|
||||
informerFactory.Core().V1().PersistentVolumeClaims(),
|
||||
informerFactory.Core().V1().ReplicationControllers(),
|
||||
informerFactory.Extensions().V1beta1().ReplicaSets(),
|
||||
informerFactory.Apps().V1beta1().StatefulSets(),
|
||||
informerFactory.Apps().V1().ReplicaSets(),
|
||||
informerFactory.Apps().V1().StatefulSets(),
|
||||
informerFactory.Core().V1().Services(),
|
||||
informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
|
||||
informerFactory.Storage().V1().StorageClasses(),
|
||||
hardPodAffinitySymmetricWeight,
|
||||
enableEquivalenceCache,
|
||||
disablePodPreemption,
|
||||
)
|
||||
schedulerapi.DefaultPercentageOfNodesToScore,
|
||||
bindTimeoutSeconds,
|
||||
stopCh,
|
||||
})
|
||||
}
|
||||
|
||||
type fakeExtender struct {
|
||||
@ -546,6 +549,10 @@ type fakeExtender struct {
|
||||
ignorable bool
|
||||
}
|
||||
|
||||
func (f *fakeExtender) Name() string {
|
||||
return "fakeExtender"
|
||||
}
|
||||
|
||||
func (f *fakeExtender) IsIgnorable() bool {
|
||||
return f.ignorable
|
||||
}
|
||||
@ -593,24 +600,22 @@ func (f *fakeExtender) IsInterested(pod *v1.Pod) bool {
|
||||
}
|
||||
|
||||
func TestGetBinderFunc(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
podName string
|
||||
extenders []algorithm.SchedulerExtender
|
||||
|
||||
table := []struct {
|
||||
podName string
|
||||
extenders []algorithm.SchedulerExtender
|
||||
expectedBinderType string
|
||||
name string
|
||||
}{
|
||||
// Expect to return the default binder because the extender is not a
|
||||
// binder, even though it's interested in the pod.
|
||||
{
|
||||
name: "the extender is not a binder",
|
||||
podName: "pod0",
|
||||
extenders: []algorithm.SchedulerExtender{
|
||||
&fakeExtender{isBinder: false, interestedPodName: "pod0"},
|
||||
},
|
||||
expectedBinderType: "*factory.binder",
|
||||
},
|
||||
// Expect to return the fake binder because one of the extenders is a
|
||||
// binder and it's interested in the pod.
|
||||
{
|
||||
name: "one of the extenders is a binder and interested in pod",
|
||||
podName: "pod0",
|
||||
extenders: []algorithm.SchedulerExtender{
|
||||
&fakeExtender{isBinder: false, interestedPodName: "pod0"},
|
||||
@ -618,9 +623,8 @@ func TestGetBinderFunc(t *testing.T) {
|
||||
},
|
||||
expectedBinderType: "*factory.fakeExtender",
|
||||
},
|
||||
// Expect to return the default binder because one of the extenders is
|
||||
// a binder but the binder is not interested in the pod.
|
||||
{
|
||||
name: "one of the extenders is a binder, but not interested in pod",
|
||||
podName: "pod1",
|
||||
extenders: []algorithm.SchedulerExtender{
|
||||
&fakeExtender{isBinder: false, interestedPodName: "pod1"},
|
||||
@ -628,20 +632,28 @@ func TestGetBinderFunc(t *testing.T) {
|
||||
},
|
||||
expectedBinderType: "*factory.binder",
|
||||
},
|
||||
} {
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: test.podName,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
f := &configFactory{}
|
||||
binderFunc := f.getBinderFunc(test.extenders)
|
||||
binder := binderFunc(pod)
|
||||
|
||||
binderType := fmt.Sprintf("%s", reflect.TypeOf(binder))
|
||||
if binderType != test.expectedBinderType {
|
||||
t.Errorf("Expected binder %q but got %q", test.expectedBinderType, binderType)
|
||||
}
|
||||
for _, test := range table {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
testGetBinderFunc(test.expectedBinderType, test.podName, test.extenders, t)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testGetBinderFunc(expectedBinderType, podName string, extenders []algorithm.SchedulerExtender, t *testing.T) {
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
},
|
||||
}
|
||||
|
||||
f := &configFactory{}
|
||||
binderFunc := f.getBinderFunc(extenders)
|
||||
binder := binderFunc(pod)
|
||||
|
||||
binderType := fmt.Sprintf("%s", reflect.TypeOf(binder))
|
||||
if binderType != expectedBinderType {
|
||||
t.Errorf("Expected binder %q but got %q", expectedBinderType, binderType)
|
||||
}
|
||||
}
|
||||
|
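A recurring change in the factory_test.go diff above is the move from bare loops over test tables to named subtests via t.Run, which gives each case its own name in failure output and lets cases be filtered and run individually. A standalone sketch of the pattern (function and test names here are illustrative, not from the vendored code; save as e.g. double_test.go):

package sketch

import "testing"

func double(x int) int { return 2 * x }

func TestDouble(t *testing.T) {
    table := []struct {
        name string
        in   int
        want int
    }{
        {name: "zero", in: 0, want: 0},
        {name: "positive", in: 3, want: 6},
    }
    for _, test := range table {
        // Each case becomes a named subtest, e.g. TestDouble/positive.
        t.Run(test.name, func(t *testing.T) {
            if got := double(test.in); got != test.want {
                t.Errorf("double(%d) = %d, want %d", test.in, got, test.want)
            }
        })
    }
}

With subtests, go test -run 'TestDouble/positive' selects a single case by name, which is the main benefit the migrated tests above gain.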
30  vendor/k8s.io/kubernetes/pkg/scheduler/factory/plugins.go  generated  vendored
@ -30,7 +30,7 @@ import (
    schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
    "k8s.io/kubernetes/pkg/scheduler/volumebinder"

    "github.com/golang/glog"
    "k8s.io/klog"
)

// PluginFactoryArgs are passed to all plugin factory functions.
@ -41,6 +41,7 @@ type PluginFactoryArgs struct {
    ReplicaSetLister  algorithm.ReplicaSetLister
    StatefulSetLister algorithm.StatefulSetLister
    NodeLister        algorithm.NodeLister
    PDBLister         algorithm.PDBLister
    NodeInfo          predicates.NodeInfo
    PVInfo            predicates.PersistentVolumeInfo
    PVCInfo           predicates.PersistentVolumeClaimInfo
@ -166,6 +167,17 @@ func InsertPredicateKeyToAlgorithmProviderMap(key string) {
    return
}

// InsertPriorityKeyToAlgorithmProviderMap inserts a priority function to all algorithmProviders which are in algorithmProviderMap.
func InsertPriorityKeyToAlgorithmProviderMap(key string) {
    schedulerFactoryMutex.Lock()
    defer schedulerFactoryMutex.Unlock()

    for _, provider := range algorithmProviderMap {
        provider.PriorityFunctionKeys.Insert(key)
    }
    return
}

// RegisterMandatoryFitPredicate registers a fit predicate with the algorithm registry, the predicate is used by
// kubelet, DaemonSet; it is always included in configuration. Returns the name with which the predicate was
// registered.
@ -221,12 +233,12 @@ func RegisterCustomFitPredicate(policy schedulerapi.PredicatePolicy) string {
        }
    } else if predicateFactory, ok = fitPredicateMap[policy.Name]; ok {
        // checking to see if a pre-defined predicate is requested
        glog.V(2).Infof("Predicate type %s already registered, reusing.", policy.Name)
        klog.V(2).Infof("Predicate type %s already registered, reusing.", policy.Name)
        return policy.Name
    }

    if predicateFactory == nil {
        glog.Fatalf("Invalid configuration: Predicate type not found for %s", policy.Name)
        klog.Fatalf("Invalid configuration: Predicate type not found for %s", policy.Name)
    }

    return RegisterFitPredicateFactory(policy.Name, predicateFactory)
@ -333,7 +345,7 @@ func RegisterCustomPriorityFunction(policy schedulerapi.PriorityPolicy) string {
            }
        }
    } else if existingPcf, ok := priorityFunctionMap[policy.Name]; ok {
        glog.V(2).Infof("Priority type %s already registered, reusing.", policy.Name)
        klog.V(2).Infof("Priority type %s already registered, reusing.", policy.Name)
        // set/update the weight based on the policy
        pcf = &PriorityConfigFactory{
            Function: existingPcf.Function,
@ -343,7 +355,7 @@ func RegisterCustomPriorityFunction(policy schedulerapi.PriorityPolicy) string {
    }

    if pcf == nil {
        glog.Fatalf("Invalid configuration: Priority type not found for %s", policy.Name)
        klog.Fatalf("Invalid configuration: Priority type not found for %s", policy.Name)
    }

    return RegisterPriorityConfigFactory(policy.Name, *pcf)
@ -357,7 +369,7 @@ func buildScoringFunctionShapeFromRequestedToCapacityRatioArguments(arguments *s
    }
    shape, err := priorities.NewFunctionShape(points)
    if err != nil {
        glog.Fatalf("invalid RequestedToCapacityRatioPriority arguments: %s", err.Error())
        klog.Fatalf("invalid RequestedToCapacityRatioPriority arguments: %s", err.Error())
    }
    return shape
}
@ -488,7 +500,7 @@ var validName = regexp.MustCompile("^[a-zA-Z0-9]([-a-zA-Z0-9]*[a-zA-Z0-9])$")

func validateAlgorithmNameOrDie(name string) {
    if !validName.MatchString(name) {
        glog.Fatalf("Algorithm name %v does not match the name validation regexp \"%v\".", name, validName)
        klog.Fatalf("Algorithm name %v does not match the name validation regexp \"%v\".", name, validName)
    }
}

@ -502,7 +514,7 @@ func validatePredicateOrDie(predicate schedulerapi.PredicatePolicy) {
            numArgs++
        }
        if numArgs != 1 {
            glog.Fatalf("Exactly 1 predicate argument is required, numArgs: %v, Predicate: %s", numArgs, predicate.Name)
            klog.Fatalf("Exactly 1 predicate argument is required, numArgs: %v, Predicate: %s", numArgs, predicate.Name)
        }
    }
}
@ -520,7 +532,7 @@ func validatePriorityOrDie(priority schedulerapi.PriorityPolicy) {
            numArgs++
        }
        if numArgs != 1 {
            glog.Fatalf("Exactly 1 priority argument is required, numArgs: %v, Priority: %s", numArgs, priority.Name)
            klog.Fatalf("Exactly 1 priority argument is required, numArgs: %v, Priority: %s", numArgs, priority.Name)
        }
    }
}
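The plugins.go hunks above are a mechanical migration from github.com/golang/glog to k8s.io/klog with the call sites otherwise unchanged (V(n).Infof, Fatalf). For context, a minimal sketch of how a standalone binary wires up klog; this is illustrative and not part of the vendored scheduler code:

package main

import (
    "flag"

    "k8s.io/klog"
)

func main() {
    klog.InitFlags(nil) // registers -v, -logtostderr, etc. on flag.CommandLine
    flag.Set("v", "2")  // enable V(2) messages, mirroring the verbosity used above
    flag.Parse()
    defer klog.Flush()

    klog.V(2).Infof("Predicate type %s already registered, reusing.", "ExamplePredicate")
}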
36  vendor/k8s.io/kubernetes/pkg/scheduler/factory/plugins_test.go  generated  vendored
@ -36,14 +36,18 @@ func TestAlgorithmNameValidation(t *testing.T) {
        "Some,Alg:orithm",
    }
    for _, name := range algorithmNamesShouldValidate {
        if !validName.MatchString(name) {
            t.Errorf("%v should be a valid algorithm name but is not valid.", name)
        }
        t.Run(name, func(t *testing.T) {
            if !validName.MatchString(name) {
                t.Errorf("should be a valid algorithm name but is not valid.")
            }
        })
    }
    for _, name := range algorithmNamesShouldNotValidate {
        if validName.MatchString(name) {
            t.Errorf("%v should be an invalid algorithm name but is valid.", name)
        }
        t.Run(name, func(t *testing.T) {
            if validName.MatchString(name) {
                t.Errorf("should be an invalid algorithm name but is valid.")
            }
        })
    }
}

@ -70,16 +74,18 @@ func TestValidatePriorityConfigOverFlow(t *testing.T) {
        },
    }
    for _, test := range tests {
        err := validateSelectedConfigs(test.configs)
        if test.expected {
            if err == nil {
                t.Errorf("Expected Overflow for %s", test.description)
        t.Run(test.description, func(t *testing.T) {
            err := validateSelectedConfigs(test.configs)
            if test.expected {
                if err == nil {
                    t.Errorf("Expected Overflow")
                }
            } else {
                if err != nil {
                    t.Errorf("Did not expect an overflow")
                }
            }
        } else {
            if err != nil {
                t.Errorf("Did not expect an overflow for %s", test.description)
            }
        }
        })
    }
}

64  vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/BUILD  generated  vendored  Normal file
@ -0,0 +1,64 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "cache.go",
        "interface.go",
        "node_tree.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/scheduler/internal/cache",
    visibility = ["//visibility:public"],
    deps = [
        "//pkg/features:go_default_library",
        "//pkg/scheduler/cache:go_default_library",
        "//pkg/util/node:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/klog:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "cache_test.go",
        "main_test.go",
        "node_tree_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//pkg/features:go_default_library",
        "//pkg/kubelet/apis:go_default_library",
        "//pkg/scheduler/algorithm/priorities/util:go_default_library",
        "//pkg/scheduler/cache:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//pkg/scheduler/internal/cache/debugger:all-srcs",
        "//pkg/scheduler/internal/cache/fake:all-srcs",
    ],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
@ -23,12 +23,13 @@ import (
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
|
||||
|
||||
"github.com/golang/glog"
|
||||
policy "k8s.io/api/policy/v1beta1"
|
||||
"k8s.io/klog"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -51,14 +52,16 @@ type schedulerCache struct {
|
||||
period time.Duration
|
||||
|
||||
// This mutex guards all fields within this cache struct.
|
||||
mu sync.Mutex
|
||||
mu sync.RWMutex
|
||||
// a set of assumed pod keys.
|
||||
// The key could further be used to get an entry in podStates.
|
||||
assumedPods map[string]bool
|
||||
// a map from pod key to podState.
|
||||
podStates map[string]*podState
|
||||
nodes map[string]*NodeInfo
|
||||
pdbs map[string]*policy.PodDisruptionBudget
|
||||
nodes map[string]*schedulercache.NodeInfo
|
||||
nodeTree *NodeTree
|
||||
// A map from image name to its imageState.
|
||||
imageStates map[string]*imageState
|
||||
}
|
||||
|
||||
type podState struct {
|
||||
@ -69,26 +72,42 @@ type podState struct {
|
||||
bindingFinished bool
|
||||
}
|
||||
|
||||
type imageState struct {
|
||||
// Size of the image
|
||||
size int64
|
||||
// A set of node names for nodes having this image present
|
||||
nodes sets.String
|
||||
}
|
||||
|
||||
// createImageStateSummary returns a summarizing snapshot of the given image's state.
|
||||
func (cache *schedulerCache) createImageStateSummary(state *imageState) *schedulercache.ImageStateSummary {
|
||||
return &schedulercache.ImageStateSummary{
|
||||
Size: state.size,
|
||||
NumNodes: len(state.nodes),
|
||||
}
|
||||
}
|
||||
|
||||
func newSchedulerCache(ttl, period time.Duration, stop <-chan struct{}) *schedulerCache {
|
||||
return &schedulerCache{
|
||||
ttl: ttl,
|
||||
period: period,
|
||||
stop: stop,
|
||||
|
||||
nodes: make(map[string]*NodeInfo),
|
||||
nodes: make(map[string]*schedulercache.NodeInfo),
|
||||
nodeTree: newNodeTree(nil),
|
||||
assumedPods: make(map[string]bool),
|
||||
podStates: make(map[string]*podState),
|
||||
pdbs: make(map[string]*policy.PodDisruptionBudget),
|
||||
imageStates: make(map[string]*imageState),
|
||||
}
|
||||
}
|
||||
|
||||
// Snapshot takes a snapshot of the current schedulerCache. The method has performance impact,
|
||||
// Snapshot takes a snapshot of the current schedulerinternalcache. The method has performance impact,
|
||||
// and should be only used in non-critical path.
|
||||
func (cache *schedulerCache) Snapshot() *Snapshot {
|
||||
cache.mu.Lock()
|
||||
defer cache.mu.Unlock()
|
||||
cache.mu.RLock()
|
||||
defer cache.mu.RUnlock()
|
||||
|
||||
nodes := make(map[string]*NodeInfo)
|
||||
nodes := make(map[string]*schedulercache.NodeInfo)
|
||||
for k, v := range cache.nodes {
|
||||
nodes[k] = v.Clone()
|
||||
}
|
||||
@ -98,27 +117,22 @@ func (cache *schedulerCache) Snapshot() *Snapshot {
|
||||
assumedPods[k] = v
|
||||
}
|
||||
|
||||
pdbs := make(map[string]*policy.PodDisruptionBudget)
|
||||
for k, v := range cache.pdbs {
|
||||
pdbs[k] = v.DeepCopy()
|
||||
}
|
||||
|
||||
return &Snapshot{
|
||||
Nodes: nodes,
|
||||
AssumedPods: assumedPods,
|
||||
Pdbs: pdbs,
|
||||
}
|
||||
}
|
||||
|
||||
func (cache *schedulerCache) UpdateNodeNameToInfoMap(nodeNameToInfo map[string]*NodeInfo) error {
|
||||
func (cache *schedulerCache) UpdateNodeNameToInfoMap(nodeNameToInfo map[string]*schedulercache.NodeInfo) error {
|
||||
cache.mu.Lock()
|
||||
defer cache.mu.Unlock()
|
||||
|
||||
for name, info := range cache.nodes {
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes) && info.TransientInfo != nil {
|
||||
// Transient scheduler info is reset here.
|
||||
info.TransientInfo.resetTransientSchedulerInfo()
|
||||
info.TransientInfo.ResetTransientSchedulerInfo()
|
||||
}
|
||||
if current, ok := nodeNameToInfo[name]; !ok || current.generation != info.generation {
|
||||
if current, ok := nodeNameToInfo[name]; !ok || current.GetGeneration() != info.GetGeneration() {
|
||||
nodeNameToInfo[name] = info.Clone()
|
||||
}
|
||||
}
|
||||
@ -136,18 +150,18 @@ func (cache *schedulerCache) List(selector labels.Selector) ([]*v1.Pod, error) {
|
||||
}
|
||||
|
||||
func (cache *schedulerCache) FilteredList(podFilter PodFilter, selector labels.Selector) ([]*v1.Pod, error) {
|
||||
cache.mu.Lock()
|
||||
defer cache.mu.Unlock()
|
||||
cache.mu.RLock()
|
||||
defer cache.mu.RUnlock()
|
||||
// podFilter is expected to return true for most or all of the pods. We
|
||||
// can avoid expensive array growth without wasting too much memory by
|
||||
// pre-allocating capacity.
|
||||
maxSize := 0
|
||||
for _, info := range cache.nodes {
|
||||
maxSize += len(info.pods)
|
||||
maxSize += len(info.Pods())
|
||||
}
|
||||
pods := make([]*v1.Pod, 0, maxSize)
|
||||
for _, info := range cache.nodes {
|
||||
for _, pod := range info.pods {
|
||||
for _, pod := range info.Pods() {
|
||||
if podFilter(pod) && selector.Matches(labels.Set(pod.Labels)) {
|
||||
pods = append(pods, pod)
|
||||
}
|
||||
@ -157,7 +171,7 @@ func (cache *schedulerCache) FilteredList(podFilter PodFilter, selector labels.S
|
||||
}
|
||||
|
||||
func (cache *schedulerCache) AssumePod(pod *v1.Pod) error {
|
||||
key, err := getPodKey(pod)
|
||||
key, err := schedulercache.GetPodKey(pod)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -183,15 +197,15 @@ func (cache *schedulerCache) FinishBinding(pod *v1.Pod) error {
|
||||
|
||||
// finishBinding exists to make tests determinitistic by injecting now as an argument
|
||||
func (cache *schedulerCache) finishBinding(pod *v1.Pod, now time.Time) error {
|
||||
key, err := getPodKey(pod)
|
||||
key, err := schedulercache.GetPodKey(pod)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cache.mu.Lock()
|
||||
defer cache.mu.Unlock()
|
||||
cache.mu.RLock()
|
||||
defer cache.mu.RUnlock()
|
||||
|
||||
glog.V(5).Infof("Finished binding for pod %v. Can be expired.", key)
|
||||
klog.V(5).Infof("Finished binding for pod %v. Can be expired.", key)
|
||||
currState, ok := cache.podStates[key]
|
||||
if ok && cache.assumedPods[key] {
|
||||
dl := now.Add(cache.ttl)
|
||||
@ -202,7 +216,7 @@ func (cache *schedulerCache) finishBinding(pod *v1.Pod, now time.Time) error {
|
||||
}
|
||||
|
||||
func (cache *schedulerCache) ForgetPod(pod *v1.Pod) error {
|
||||
key, err := getPodKey(pod)
|
||||
key, err := schedulercache.GetPodKey(pod)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -234,7 +248,7 @@ func (cache *schedulerCache) ForgetPod(pod *v1.Pod) error {
|
||||
func (cache *schedulerCache) addPod(pod *v1.Pod) {
|
||||
n, ok := cache.nodes[pod.Spec.NodeName]
|
||||
if !ok {
|
||||
n = NewNodeInfo()
|
||||
n = schedulercache.NewNodeInfo()
|
||||
cache.nodes[pod.Spec.NodeName] = n
|
||||
}
|
||||
n.AddPod(pod)
|
||||
@ -255,14 +269,14 @@ func (cache *schedulerCache) removePod(pod *v1.Pod) error {
|
||||
if err := n.RemovePod(pod); err != nil {
|
||||
return err
|
||||
}
|
||||
if len(n.pods) == 0 && n.node == nil {
|
||||
if len(n.Pods()) == 0 && n.Node() == nil {
|
||||
delete(cache.nodes, pod.Spec.NodeName)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cache *schedulerCache) AddPod(pod *v1.Pod) error {
|
||||
key, err := getPodKey(pod)
|
||||
key, err := schedulercache.GetPodKey(pod)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -275,7 +289,7 @@ func (cache *schedulerCache) AddPod(pod *v1.Pod) error {
|
||||
case ok && cache.assumedPods[key]:
|
||||
if currState.pod.Spec.NodeName != pod.Spec.NodeName {
|
||||
// The pod was added to a different node than it was assumed to.
|
||||
glog.Warningf("Pod %v was assumed to be on %v but got added to %v", key, pod.Spec.NodeName, currState.pod.Spec.NodeName)
|
||||
klog.Warningf("Pod %v was assumed to be on %v but got added to %v", key, pod.Spec.NodeName, currState.pod.Spec.NodeName)
|
||||
// Clean this up.
|
||||
cache.removePod(currState.pod)
|
||||
cache.addPod(pod)
|
||||
@ -297,7 +311,7 @@ func (cache *schedulerCache) AddPod(pod *v1.Pod) error {
|
||||
}
|
||||
|
||||
func (cache *schedulerCache) UpdatePod(oldPod, newPod *v1.Pod) error {
|
||||
key, err := getPodKey(oldPod)
|
||||
key, err := schedulercache.GetPodKey(oldPod)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -311,12 +325,13 @@ func (cache *schedulerCache) UpdatePod(oldPod, newPod *v1.Pod) error {
|
||||
// before Update event, in which case the state would change from Assumed to Added.
|
||||
case ok && !cache.assumedPods[key]:
|
||||
if currState.pod.Spec.NodeName != newPod.Spec.NodeName {
|
||||
glog.Errorf("Pod %v updated on a different node than previously added to.", key)
|
||||
glog.Fatalf("Schedulercache is corrupted and can badly affect scheduling decisions")
|
||||
klog.Errorf("Pod %v updated on a different node than previously added to.", key)
|
||||
klog.Fatalf("Schedulercache is corrupted and can badly affect scheduling decisions")
|
||||
}
|
||||
if err := cache.updatePod(oldPod, newPod); err != nil {
|
||||
return err
|
||||
}
|
||||
currState.pod = newPod
|
||||
default:
|
||||
return fmt.Errorf("pod %v is not added to scheduler cache, so cannot be updated", key)
|
||||
}
|
||||
@ -324,7 +339,7 @@ func (cache *schedulerCache) UpdatePod(oldPod, newPod *v1.Pod) error {
|
||||
}
|
||||
|
||||
func (cache *schedulerCache) RemovePod(pod *v1.Pod) error {
|
||||
key, err := getPodKey(pod)
|
||||
key, err := schedulercache.GetPodKey(pod)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -338,8 +353,8 @@ func (cache *schedulerCache) RemovePod(pod *v1.Pod) error {
|
||||
// before Remove event, in which case the state would change from Assumed to Added.
|
||||
case ok && !cache.assumedPods[key]:
|
||||
if currState.pod.Spec.NodeName != pod.Spec.NodeName {
|
||||
glog.Errorf("Pod %v was assumed to be on %v but got added to %v", key, pod.Spec.NodeName, currState.pod.Spec.NodeName)
|
||||
glog.Fatalf("Schedulercache is corrupted and can badly affect scheduling decisions")
|
||||
klog.Errorf("Pod %v was assumed to be on %v but got added to %v", key, pod.Spec.NodeName, currState.pod.Spec.NodeName)
|
||||
klog.Fatalf("Schedulercache is corrupted and can badly affect scheduling decisions")
|
||||
}
|
||||
err := cache.removePod(currState.pod)
|
||||
if err != nil {
|
||||
@ -353,13 +368,13 @@ func (cache *schedulerCache) RemovePod(pod *v1.Pod) error {
|
||||
}
|
||||
|
||||
func (cache *schedulerCache) IsAssumedPod(pod *v1.Pod) (bool, error) {
|
||||
key, err := getPodKey(pod)
|
||||
key, err := schedulercache.GetPodKey(pod)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
cache.mu.Lock()
|
||||
defer cache.mu.Unlock()
|
||||
cache.mu.RLock()
|
||||
defer cache.mu.RUnlock()
|
||||
|
||||
b, found := cache.assumedPods[key]
|
||||
if !found {
|
||||
@ -369,13 +384,13 @@ func (cache *schedulerCache) IsAssumedPod(pod *v1.Pod) (bool, error) {
|
||||
}
|
||||
|
||||
func (cache *schedulerCache) GetPod(pod *v1.Pod) (*v1.Pod, error) {
|
||||
key, err := getPodKey(pod)
|
||||
key, err := schedulercache.GetPodKey(pod)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cache.mu.Lock()
|
||||
defer cache.mu.Unlock()
|
||||
cache.mu.RLock()
|
||||
defer cache.mu.RUnlock()
|
||||
|
||||
podState, ok := cache.podStates[key]
|
||||
if !ok {
|
||||
@ -391,9 +406,14 @@ func (cache *schedulerCache) AddNode(node *v1.Node) error {
|
||||
|
||||
n, ok := cache.nodes[node.Name]
|
||||
if !ok {
|
||||
n = NewNodeInfo()
|
||||
n = schedulercache.NewNodeInfo()
|
||||
cache.nodes[node.Name] = n
|
||||
} else {
|
||||
cache.removeNodeImageStates(n.Node())
|
||||
}
|
||||
|
||||
cache.nodeTree.AddNode(node)
|
||||
cache.addNodeImageStates(node, n)
|
||||
return n.SetNode(node)
|
||||
}
|
||||
|
||||
@ -403,9 +423,14 @@ func (cache *schedulerCache) UpdateNode(oldNode, newNode *v1.Node) error {
|
||||
|
||||
n, ok := cache.nodes[newNode.Name]
|
||||
if !ok {
|
||||
n = NewNodeInfo()
|
||||
n = schedulercache.NewNodeInfo()
|
||||
cache.nodes[newNode.Name] = n
|
||||
} else {
|
||||
cache.removeNodeImageStates(n.Node())
|
||||
}
|
||||
|
||||
cache.nodeTree.UpdateNode(oldNode, newNode)
|
||||
cache.addNodeImageStates(newNode, n)
|
||||
return n.SetNode(newNode)
|
||||
}
|
||||
|
||||
@ -421,50 +446,64 @@ func (cache *schedulerCache) RemoveNode(node *v1.Node) error {
|
||||
// We can't do it unconditionally, because notifications about pods are delivered
|
||||
// in a different watch, and thus can potentially be observed later, even though
|
||||
// they happened before node removal.
|
||||
if len(n.pods) == 0 && n.node == nil {
|
||||
if len(n.Pods()) == 0 && n.Node() == nil {
|
||||
delete(cache.nodes, node.Name)
|
||||
}
|
||||
|
||||
cache.nodeTree.RemoveNode(node)
|
||||
cache.removeNodeImageStates(node)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cache *schedulerCache) AddPDB(pdb *policy.PodDisruptionBudget) error {
|
||||
cache.mu.Lock()
|
||||
defer cache.mu.Unlock()
|
||||
// addNodeImageStates adds states of the images on given node to the given nodeInfo and update the imageStates in
|
||||
// scheduler cache. This function assumes the lock to scheduler cache has been acquired.
|
||||
func (cache *schedulerCache) addNodeImageStates(node *v1.Node, nodeInfo *schedulercache.NodeInfo) {
|
||||
newSum := make(map[string]*schedulercache.ImageStateSummary)
|
||||
|
||||
// Unconditionally update cache.
|
||||
cache.pdbs[string(pdb.UID)] = pdb
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cache *schedulerCache) UpdatePDB(oldPDB, newPDB *policy.PodDisruptionBudget) error {
|
||||
return cache.AddPDB(newPDB)
|
||||
}
|
||||
|
||||
func (cache *schedulerCache) RemovePDB(pdb *policy.PodDisruptionBudget) error {
|
||||
cache.mu.Lock()
|
||||
defer cache.mu.Unlock()
|
||||
|
||||
delete(cache.pdbs, string(pdb.UID))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cache *schedulerCache) ListPDBs(selector labels.Selector) ([]*policy.PodDisruptionBudget, error) {
|
||||
cache.mu.Lock()
|
||||
defer cache.mu.Unlock()
|
||||
var pdbs []*policy.PodDisruptionBudget
|
||||
for _, pdb := range cache.pdbs {
|
||||
if selector.Matches(labels.Set(pdb.Labels)) {
|
||||
pdbs = append(pdbs, pdb)
|
||||
for _, image := range node.Status.Images {
|
||||
for _, name := range image.Names {
|
||||
// update the entry in imageStates
|
||||
state, ok := cache.imageStates[name]
|
||||
if !ok {
|
||||
state = &imageState{
|
||||
size: image.SizeBytes,
|
||||
nodes: sets.NewString(node.Name),
|
||||
}
|
||||
cache.imageStates[name] = state
|
||||
} else {
|
||||
state.nodes.Insert(node.Name)
|
||||
}
|
||||
// create the imageStateSummary for this image
|
||||
if _, ok := newSum[name]; !ok {
|
||||
newSum[name] = cache.createImageStateSummary(state)
|
||||
}
|
||||
}
|
||||
}
|
||||
return pdbs, nil
|
||||
nodeInfo.SetImageStates(newSum)
|
||||
}
|
||||
|
||||
func (cache *schedulerCache) IsUpToDate(n *NodeInfo) bool {
|
||||
cache.mu.Lock()
|
||||
defer cache.mu.Unlock()
|
||||
node, ok := cache.nodes[n.Node().Name]
|
||||
return ok && n.generation == node.generation
|
||||
// removeNodeImageStates removes the given node record from image entries having the node
|
||||
// in imageStates cache. After the removal, if any image becomes free, i.e., the image
|
||||
// is no longer available on any node, the image entry will be removed from imageStates.
|
||||
func (cache *schedulerCache) removeNodeImageStates(node *v1.Node) {
|
||||
if node == nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, image := range node.Status.Images {
|
||||
for _, name := range image.Names {
|
||||
state, ok := cache.imageStates[name]
|
||||
if ok {
|
||||
state.nodes.Delete(node.Name)
|
||||
if len(state.nodes) == 0 {
|
||||
// Remove the unused image to make sure the length of
|
||||
// imageStates represents the total number of different
|
||||
// images on all nodes
|
||||
delete(cache.imageStates, name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (cache *schedulerCache) run() {
|
||||
@ -487,14 +526,14 @@ func (cache *schedulerCache) cleanupAssumedPods(now time.Time) {
|
||||
panic("Key found in assumed set but not in podStates. Potentially a logical error.")
|
||||
}
|
||||
if !ps.bindingFinished {
|
||||
glog.V(3).Infof("Couldn't expire cache for pod %v/%v. Binding is still in progress.",
|
||||
klog.V(3).Infof("Couldn't expire cache for pod %v/%v. Binding is still in progress.",
|
||||
ps.pod.Namespace, ps.pod.Name)
|
||||
continue
|
||||
}
|
||||
if now.After(*ps.deadline) {
|
||||
glog.Warningf("Pod %s/%s expired", ps.pod.Namespace, ps.pod.Name)
|
||||
klog.Warningf("Pod %s/%s expired", ps.pod.Namespace, ps.pod.Name)
|
||||
if err := cache.expirePod(key, ps); err != nil {
|
||||
glog.Errorf("ExpirePod failed for %s: %v", key, err)
|
||||
klog.Errorf("ExpirePod failed for %s: %v", key, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -508,3 +547,7 @@ func (cache *schedulerCache) expirePod(key string, ps *podState) error {
|
||||
delete(cache.podStates, key)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cache *schedulerCache) NodeTree() *NodeTree {
|
||||
return cache.nodeTree
|
||||
}
|
@ -24,23 +24,24 @@ import (
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/api/policy/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
|
||||
schedutil "k8s.io/kubernetes/pkg/scheduler/util"
|
||||
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
|
||||
)
|
||||
|
||||
func deepEqualWithoutGeneration(t *testing.T, testcase int, actual, expected *NodeInfo) {
|
||||
func deepEqualWithoutGeneration(t *testing.T, testcase int, actual, expected *schedulercache.NodeInfo) {
|
||||
// Ignore generation field.
|
||||
if actual != nil {
|
||||
actual.generation = 0
|
||||
actual.SetGeneration(0)
|
||||
}
|
||||
if expected != nil {
|
||||
expected.SetGeneration(0)
|
||||
}
|
||||
if !reflect.DeepEqual(actual, expected) {
|
||||
t.Errorf("#%d: node info get=%s, want=%s", testcase, actual, expected)
|
||||
@ -65,19 +66,34 @@ func (b *hostPortInfoBuilder) add(protocol, ip string, port int32) *hostPortInfo
|
||||
return b
|
||||
}
|
||||
|
||||
func (b *hostPortInfoBuilder) build() schedutil.HostPortInfo {
|
||||
res := make(schedutil.HostPortInfo)
|
||||
func (b *hostPortInfoBuilder) build() schedulercache.HostPortInfo {
|
||||
res := make(schedulercache.HostPortInfo)
|
||||
for _, param := range b.inputs {
|
||||
res.Add(param.ip, param.protocol, param.port)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func newNodeInfo(requestedResource *schedulercache.Resource,
|
||||
nonzeroRequest *schedulercache.Resource,
|
||||
pods []*v1.Pod,
|
||||
usedPorts schedulercache.HostPortInfo,
|
||||
imageStates map[string]*schedulercache.ImageStateSummary,
|
||||
) *schedulercache.NodeInfo {
|
||||
nodeInfo := schedulercache.NewNodeInfo(pods...)
|
||||
nodeInfo.SetRequestedResource(requestedResource)
|
||||
nodeInfo.SetNonZeroRequest(nonzeroRequest)
|
||||
nodeInfo.SetUsedPorts(usedPorts)
|
||||
nodeInfo.SetImageStates(imageStates)
|
||||
|
||||
return nodeInfo
|
||||
}
|
||||
|
||||
// TestAssumePodScheduled tests that after a pod is assumed, its information is aggregated
|
||||
// on node level.
|
||||
func TestAssumePodScheduled(t *testing.T) {
|
||||
// Enable volumesOnNodeForBalancing to do balanced resource allocation
|
||||
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.BalanceAttachedNodeVolumes))
|
||||
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BalanceAttachedNodeVolumes, true)()
|
||||
nodeName := "node"
|
||||
testPods := []*v1.Pod{
|
||||
makeBasePod(t, nodeName, "test", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}),
|
||||
@ -92,111 +108,99 @@ func TestAssumePodScheduled(t *testing.T) {
|
||||
tests := []struct {
|
||||
pods []*v1.Pod
|
||||
|
||||
wNodeInfo *NodeInfo
|
||||
wNodeInfo *schedulercache.NodeInfo
|
||||
}{{
|
||||
pods: []*v1.Pod{testPods[0]},
|
||||
wNodeInfo: &NodeInfo{
|
||||
requestedResource: &Resource{
|
||||
wNodeInfo: newNodeInfo(
|
||||
&schedulercache.Resource{
|
||||
MilliCPU: 100,
|
||||
Memory: 500,
|
||||
},
|
||||
nonzeroRequest: &Resource{
|
||||
&schedulercache.Resource{
|
||||
MilliCPU: 100,
|
||||
Memory: 500,
|
||||
},
|
||||
TransientInfo: newTransientSchedulerInfo(),
|
||||
allocatableResource: &Resource{},
|
||||
pods: []*v1.Pod{testPods[0]},
|
||||
usedPorts: newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
|
||||
imageSizes: map[string]int64{},
|
||||
},
|
||||
[]*v1.Pod{testPods[0]},
|
||||
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
|
||||
make(map[string]*schedulercache.ImageStateSummary),
|
||||
),
|
||||
}, {
|
||||
pods: []*v1.Pod{testPods[1], testPods[2]},
|
||||
wNodeInfo: &NodeInfo{
|
||||
requestedResource: &Resource{
|
||||
wNodeInfo: newNodeInfo(
|
||||
&schedulercache.Resource{
|
||||
MilliCPU: 300,
|
||||
Memory: 1524,
|
||||
},
|
||||
nonzeroRequest: &Resource{
|
||||
&schedulercache.Resource{
|
||||
MilliCPU: 300,
|
||||
Memory: 1524,
|
||||
},
|
||||
TransientInfo: newTransientSchedulerInfo(),
|
||||
allocatableResource: &Resource{},
|
||||
pods: []*v1.Pod{testPods[1], testPods[2]},
|
||||
usedPorts: newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).add("TCP", "127.0.0.1", 8080).build(),
|
||||
imageSizes: map[string]int64{},
|
||||
},
|
||||
[]*v1.Pod{testPods[1], testPods[2]},
|
||||
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).add("TCP", "127.0.0.1", 8080).build(),
|
||||
make(map[string]*schedulercache.ImageStateSummary),
|
||||
),
|
||||
}, { // test non-zero request
|
||||
pods: []*v1.Pod{testPods[3]},
|
||||
wNodeInfo: &NodeInfo{
|
||||
requestedResource: &Resource{
|
||||
wNodeInfo: newNodeInfo(
|
||||
&schedulercache.Resource{
|
||||
MilliCPU: 0,
|
||||
Memory: 0,
|
||||
},
|
||||
nonzeroRequest: &Resource{
|
||||
&schedulercache.Resource{
|
||||
MilliCPU: priorityutil.DefaultMilliCPURequest,
|
||||
Memory: priorityutil.DefaultMemoryRequest,
|
||||
},
|
||||
TransientInfo: newTransientSchedulerInfo(),
|
||||
allocatableResource: &Resource{},
|
||||
pods: []*v1.Pod{testPods[3]},
|
||||
usedPorts: newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
|
||||
imageSizes: map[string]int64{},
|
||||
},
|
||||
[]*v1.Pod{testPods[3]},
|
||||
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
|
||||
make(map[string]*schedulercache.ImageStateSummary),
|
||||
),
|
||||
}, {
|
||||
pods: []*v1.Pod{testPods[4]},
|
||||
wNodeInfo: &NodeInfo{
|
||||
requestedResource: &Resource{
|
||||
wNodeInfo: newNodeInfo(
|
||||
&schedulercache.Resource{
|
||||
MilliCPU: 100,
|
||||
Memory: 500,
|
||||
ScalarResources: map[v1.ResourceName]int64{"example.com/foo": 3},
|
||||
},
|
||||
nonzeroRequest: &Resource{
|
||||
&schedulercache.Resource{
|
||||
MilliCPU: 100,
|
||||
Memory: 500,
|
||||
},
|
||||
TransientInfo: newTransientSchedulerInfo(),
|
||||
allocatableResource: &Resource{},
|
||||
pods: []*v1.Pod{testPods[4]},
|
||||
usedPorts: newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
|
||||
imageSizes: map[string]int64{},
|
||||
},
|
||||
[]*v1.Pod{testPods[4]},
|
||||
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
|
||||
make(map[string]*schedulercache.ImageStateSummary),
|
||||
),
|
||||
}, {
|
||||
pods: []*v1.Pod{testPods[4], testPods[5]},
|
||||
wNodeInfo: &NodeInfo{
|
||||
requestedResource: &Resource{
|
||||
wNodeInfo: newNodeInfo(
|
||||
&schedulercache.Resource{
|
||||
MilliCPU: 300,
|
||||
Memory: 1524,
|
||||
ScalarResources: map[v1.ResourceName]int64{"example.com/foo": 8},
|
||||
},
|
||||
nonzeroRequest: &Resource{
|
||||
&schedulercache.Resource{
|
||||
MilliCPU: 300,
|
||||
Memory: 1524,
|
||||
},
|
||||
TransientInfo: newTransientSchedulerInfo(),
|
||||
allocatableResource: &Resource{},
|
||||
pods: []*v1.Pod{testPods[4], testPods[5]},
|
||||
usedPorts: newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).add("TCP", "127.0.0.1", 8080).build(),
|
||||
imageSizes: map[string]int64{},
|
||||
},
|
||||
[]*v1.Pod{testPods[4], testPods[5]},
|
||||
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).add("TCP", "127.0.0.1", 8080).build(),
|
||||
make(map[string]*schedulercache.ImageStateSummary),
|
||||
),
|
||||
}, {
|
||||
pods: []*v1.Pod{testPods[6]},
|
||||
wNodeInfo: &NodeInfo{
|
||||
requestedResource: &Resource{
|
||||
wNodeInfo: newNodeInfo(
|
||||
&schedulercache.Resource{
|
||||
MilliCPU: 100,
|
||||
Memory: 500,
|
||||
},
|
||||
nonzeroRequest: &Resource{
|
||||
&schedulercache.Resource{
|
||||
MilliCPU: 100,
|
||||
Memory: 500,
|
||||
},
|
||||
TransientInfo: newTransientSchedulerInfo(),
|
||||
allocatableResource: &Resource{},
|
||||
pods: []*v1.Pod{testPods[6]},
|
||||
usedPorts: newHostPortInfoBuilder().build(),
|
||||
imageSizes: map[string]int64{},
|
||||
},
|
||||
[]*v1.Pod{testPods[6]},
|
||||
newHostPortInfoBuilder().build(),
|
||||
make(map[string]*schedulercache.ImageStateSummary),
|
||||
),
|
||||
},
|
||||
}
|
||||
|
||||
@ -237,7 +241,7 @@ func assumeAndFinishBinding(cache *schedulerCache, pod *v1.Pod, assumedTime time
|
||||
// The removal will be reflected in node info.
|
||||
func TestExpirePod(t *testing.T) {
|
||||
// Enable volumesOnNodeForBalancing to do balanced resource allocation
|
||||
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.BalanceAttachedNodeVolumes))
|
||||
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BalanceAttachedNodeVolumes, true)()
|
||||
nodeName := "node"
|
||||
testPods := []*v1.Pod{
|
||||
makeBasePod(t, nodeName, "test-1", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}),
|
||||
@ -249,7 +253,7 @@ func TestExpirePod(t *testing.T) {
|
||||
pods []*testExpirePodStruct
|
||||
cleanupTime time.Time
|
||||
|
||||
wNodeInfo *NodeInfo
|
||||
wNodeInfo *schedulercache.NodeInfo
|
||||
}{{ // assumed pod would expire
|
||||
pods: []*testExpirePodStruct{
|
||||
{pod: testPods[0], assumedTime: now},
|
||||
@ -262,21 +266,19 @@ func TestExpirePod(t *testing.T) {
|
||||
{pod: testPods[1], assumedTime: now.Add(3 * ttl / 2)},
|
||||
},
|
||||
cleanupTime: now.Add(2 * ttl),
|
||||
wNodeInfo: &NodeInfo{
|
||||
requestedResource: &Resource{
|
||||
wNodeInfo: newNodeInfo(
|
||||
&schedulercache.Resource{
|
||||
MilliCPU: 200,
|
||||
Memory: 1024,
|
||||
},
|
||||
nonzeroRequest: &Resource{
|
||||
&schedulercache.Resource{
|
||||
MilliCPU: 200,
|
||||
Memory: 1024,
|
||||
},
|
||||
TransientInfo: newTransientSchedulerInfo(),
|
||||
allocatableResource: &Resource{},
|
||||
pods: []*v1.Pod{testPods[1]},
|
||||
usedPorts: newHostPortInfoBuilder().add("TCP", "127.0.0.1", 8080).build(),
|
||||
imageSizes: map[string]int64{},
|
||||
},
|
||||
[]*v1.Pod{testPods[1]},
|
||||
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 8080).build(),
|
||||
make(map[string]*schedulercache.ImageStateSummary),
|
||||
),
|
||||
}}
|
||||
|
||||
for i, tt := range tests {
|
||||
@ -298,7 +300,7 @@ func TestExpirePod(t *testing.T) {
|
||||
// The pod info should still exist after manually expiring unconfirmed pods.
|
||||
func TestAddPodWillConfirm(t *testing.T) {
|
||||
// Enable volumesOnNodeForBalancing to do balanced resource allocation
|
||||
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.BalanceAttachedNodeVolumes))
|
||||
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BalanceAttachedNodeVolumes, true)()
|
||||
nodeName := "node"
|
||||
now := time.Now()
|
||||
ttl := 10 * time.Second
|
||||
@ -311,25 +313,23 @@ func TestAddPodWillConfirm(t *testing.T) {
|
||||
podsToAssume []*v1.Pod
|
||||
podsToAdd []*v1.Pod
|
||||
|
||||
wNodeInfo *NodeInfo
|
||||
wNodeInfo *schedulercache.NodeInfo
|
||||
}{{ // two pods were assumed at the same time. But the first one is called Add() and gets confirmed.
|
||||
podsToAssume: []*v1.Pod{testPods[0], testPods[1]},
|
||||
podsToAdd: []*v1.Pod{testPods[0]},
|
||||
wNodeInfo: &NodeInfo{
|
||||
requestedResource: &Resource{
|
||||
wNodeInfo: newNodeInfo(
|
||||
&schedulercache.Resource{
|
||||
MilliCPU: 100,
|
||||
Memory: 500,
|
||||
},
|
||||
nonzeroRequest: &Resource{
|
||||
&schedulercache.Resource{
|
||||
MilliCPU: 100,
|
||||
Memory: 500,
|
||||
},
|
||||
TransientInfo: newTransientSchedulerInfo(),
|
||||
allocatableResource: &Resource{},
|
||||
pods: []*v1.Pod{testPods[0]},
|
||||
usedPorts: newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
|
||||
imageSizes: map[string]int64{},
|
||||
},
|
||||
[]*v1.Pod{testPods[0]},
|
||||
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
|
||||
make(map[string]*schedulercache.ImageStateSummary),
|
||||
),
|
||||
}}
|
||||
|
||||
for i, tt := range tests {
|
||||
@ -405,28 +405,26 @@ func TestAddPodWillReplaceAssumed(t *testing.T) {
|
||||
podsToAdd []*v1.Pod
|
||||
podsToUpdate [][]*v1.Pod
|
||||
|
||||
wNodeInfo map[string]*NodeInfo
|
||||
wNodeInfo map[string]*schedulercache.NodeInfo
|
||||
}{{
|
||||
podsToAssume: []*v1.Pod{assumedPod.DeepCopy()},
|
||||
podsToAdd: []*v1.Pod{addedPod.DeepCopy()},
|
||||
podsToUpdate: [][]*v1.Pod{{addedPod.DeepCopy(), updatedPod.DeepCopy()}},
|
||||
wNodeInfo: map[string]*NodeInfo{
|
||||
wNodeInfo: map[string]*schedulercache.NodeInfo{
|
||||
"assumed-node": nil,
|
||||
"actual-node": {
|
||||
requestedResource: &Resource{
|
||||
"actual-node": newNodeInfo(
|
||||
&schedulercache.Resource{
|
||||
MilliCPU: 200,
|
||||
Memory: 500,
|
||||
},
|
||||
nonzeroRequest: &Resource{
|
||||
&schedulercache.Resource{
|
||||
MilliCPU: 200,
|
||||
Memory: 500,
|
||||
},
|
||||
TransientInfo: newTransientSchedulerInfo(),
|
||||
allocatableResource: &Resource{},
|
||||
pods: []*v1.Pod{updatedPod.DeepCopy()},
|
||||
usedPorts: newHostPortInfoBuilder().add("TCP", "0.0.0.0", 90).build(),
|
||||
imageSizes: map[string]int64{},
|
||||
},
|
||||
[]*v1.Pod{updatedPod.DeepCopy()},
|
||||
newHostPortInfoBuilder().add("TCP", "0.0.0.0", 90).build(),
|
||||
make(map[string]*schedulercache.ImageStateSummary),
|
||||
),
|
||||
},
|
||||
}}
|
||||
|
||||
@ -458,31 +456,29 @@ func TestAddPodWillReplaceAssumed(t *testing.T) {
|
||||
// TestAddPodAfterExpiration tests that a pod being Add()ed will be added back if expired.
|
||||
func TestAddPodAfterExpiration(t *testing.T) {
|
||||
// Enable volumesOnNodeForBalancing to do balanced resource allocation
|
||||
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.BalanceAttachedNodeVolumes))
|
||||
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BalanceAttachedNodeVolumes, true)()
|
||||
nodeName := "node"
|
||||
ttl := 10 * time.Second
|
||||
basePod := makeBasePod(t, nodeName, "test", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}})
|
||||
tests := []struct {
|
||||
pod *v1.Pod
|
||||
|
||||
wNodeInfo *NodeInfo
|
||||
wNodeInfo *schedulercache.NodeInfo
|
||||
}{{
|
||||
pod: basePod,
|
||||
wNodeInfo: &NodeInfo{
|
||||
requestedResource: &Resource{
|
||||
wNodeInfo: newNodeInfo(
|
||||
&schedulercache.Resource{
|
||||
MilliCPU: 100,
|
||||
Memory: 500,
|
||||
},
|
||||
nonzeroRequest: &Resource{
|
||||
&schedulercache.Resource{
|
||||
MilliCPU: 100,
|
||||
Memory: 500,
|
||||
},
|
||||
TransientInfo: newTransientSchedulerInfo(),
|
||||
allocatableResource: &Resource{},
|
||||
pods: []*v1.Pod{basePod},
|
||||
usedPorts: newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
|
||||
imageSizes: map[string]int64{},
|
||||
},
|
||||
[]*v1.Pod{basePod},
|
||||
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
|
||||
make(map[string]*schedulercache.ImageStateSummary),
|
||||
),
|
||||
}}
|
||||
|
||||
now := time.Now()
|
||||
@ -509,7 +505,7 @@ func TestAddPodAfterExpiration(t *testing.T) {
|
||||
// TestUpdatePod tests that a pod will be updated if added before.
|
||||
func TestUpdatePod(t *testing.T) {
|
||||
// Enable volumesOnNodeForBalancing to do balanced resource allocation
|
||||
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.BalanceAttachedNodeVolumes))
|
||||
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BalanceAttachedNodeVolumes, true)()
|
||||
nodeName := "node"
|
||||
ttl := 10 * time.Second
|
||||
testPods := []*v1.Pod{
|
||||
@ -520,39 +516,35 @@ func TestUpdatePod(t *testing.T) {
|
||||
podsToAdd []*v1.Pod
|
||||
podsToUpdate []*v1.Pod
|
||||
|
||||
wNodeInfo []*NodeInfo
|
||||
wNodeInfo []*schedulercache.NodeInfo
|
||||
}{{ // add a pod and then update it twice
|
||||
podsToAdd: []*v1.Pod{testPods[0]},
|
||||
podsToUpdate: []*v1.Pod{testPods[0], testPods[1], testPods[0]},
|
||||
wNodeInfo: []*NodeInfo{{
|
||||
requestedResource: &Resource{
|
||||
wNodeInfo: []*schedulercache.NodeInfo{newNodeInfo(
|
||||
&schedulercache.Resource{
|
||||
MilliCPU: 200,
|
||||
Memory: 1024,
|
||||
},
|
||||
nonzeroRequest: &Resource{
|
||||
&schedulercache.Resource{
|
||||
MilliCPU: 200,
|
||||
Memory: 1024,
|
||||
},
|
||||
TransientInfo: newTransientSchedulerInfo(),
|
||||
allocatableResource: &Resource{},
|
||||
pods: []*v1.Pod{testPods[1]},
|
||||
usedPorts: newHostPortInfoBuilder().add("TCP", "127.0.0.1", 8080).build(),
|
||||
imageSizes: map[string]int64{},
|
||||
}, {
|
||||
requestedResource: &Resource{
|
||||
[]*v1.Pod{testPods[1]},
|
||||
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 8080).build(),
|
||||
make(map[string]*schedulercache.ImageStateSummary),
|
||||
), newNodeInfo(
|
||||
&schedulercache.Resource{
|
||||
MilliCPU: 100,
|
||||
Memory: 500,
|
||||
},
|
||||
nonzeroRequest: &Resource{
|
||||
&schedulercache.Resource{
|
||||
MilliCPU: 100,
|
||||
Memory: 500,
|
||||
},
|
||||
TransientInfo: newTransientSchedulerInfo(),
|
||||
allocatableResource: &Resource{},
|
||||
pods: []*v1.Pod{testPods[0]},
|
||||
usedPorts: newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
|
||||
imageSizes: map[string]int64{},
|
||||
}},
|
||||
[]*v1.Pod{testPods[0]},
|
||||
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
|
||||
make(map[string]*schedulercache.ImageStateSummary),
|
||||
)},
|
||||
}}
|
||||
|
||||
for _, tt := range tests {
|
||||
@ -577,10 +569,69 @@ func TestUpdatePod(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// TestUpdatePodAndGet tests that GetPod always returns the latest pod state.
func TestUpdatePodAndGet(t *testing.T) {
nodeName := "node"
ttl := 10 * time.Second
testPods := []*v1.Pod{
makeBasePod(t, nodeName, "test", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}),
makeBasePod(t, nodeName, "test", "200m", "1Ki", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 8080, Protocol: "TCP"}}),
}
tests := []struct {
pod *v1.Pod

podToUpdate *v1.Pod
handler func(cache Cache, pod *v1.Pod) error

assumePod bool
}{
{
pod: testPods[0],

podToUpdate: testPods[0],
handler: func(cache Cache, pod *v1.Pod) error {
return cache.AssumePod(pod)
},
assumePod: true,
},
{
pod: testPods[0],

podToUpdate: testPods[1],
handler: func(cache Cache, pod *v1.Pod) error {
return cache.AddPod(pod)
},
assumePod: false,
},
}

for _, tt := range tests {
cache := newSchedulerCache(ttl, time.Second, nil)

if err := tt.handler(cache, tt.pod); err != nil {
t.Fatalf("unexpected err: %v", err)
}

if !tt.assumePod {
if err := cache.UpdatePod(tt.pod, tt.podToUpdate); err != nil {
t.Fatalf("UpdatePod failed: %v", err)
}
}

cachedPod, err := cache.GetPod(tt.pod)
if err != nil {
t.Fatalf("GetPod failed: %v", err)
}
if !reflect.DeepEqual(tt.podToUpdate, cachedPod) {
t.Fatalf("pod get=%s, want=%s", cachedPod, tt.podToUpdate)
}
}
}

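For illustration, the test above boils down to the following flow against the package's Cache interface; the helper name exampleUpdateAndGet is hypothetical, but AssumePod, AddPod, UpdatePod and GetPod are exactly the methods the test exercises:

func exampleUpdateAndGet(cache Cache, oldPod, newPod *v1.Pod) (*v1.Pod, error) {
	// Add the pod as a confirmed (non-assumed) cache entry.
	if err := cache.AddPod(oldPod); err != nil {
		return nil, err
	}
	// Replace the cached object with the updated pod.
	if err := cache.UpdatePod(oldPod, newPod); err != nil {
		return nil, err
	}
	// Lookup is keyed by the pod's namespace/name, so this now returns newPod.
	return cache.GetPod(oldPod)
}
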
// TestExpireAddUpdatePod tests the sequence in which a pod is expired, added, then updated
|
||||
func TestExpireAddUpdatePod(t *testing.T) {
|
||||
// Enable volumesOnNodeForBalancing to do balanced resource allocation
|
||||
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.BalanceAttachedNodeVolumes))
|
||||
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BalanceAttachedNodeVolumes, true)()
|
||||
nodeName := "node"
|
||||
ttl := 10 * time.Second
|
||||
testPods := []*v1.Pod{
|
||||
@ -592,40 +643,36 @@ func TestExpireAddUpdatePod(t *testing.T) {
|
||||
podsToAdd []*v1.Pod
|
||||
podsToUpdate []*v1.Pod
|
||||
|
||||
wNodeInfo []*NodeInfo
|
||||
wNodeInfo []*schedulercache.NodeInfo
|
||||
}{{ // Pod is assumed, expired, and added. Then it would be updated twice.
|
||||
podsToAssume: []*v1.Pod{testPods[0]},
|
||||
podsToAdd: []*v1.Pod{testPods[0]},
|
||||
podsToUpdate: []*v1.Pod{testPods[0], testPods[1], testPods[0]},
|
||||
wNodeInfo: []*NodeInfo{{
|
||||
requestedResource: &Resource{
|
||||
wNodeInfo: []*schedulercache.NodeInfo{newNodeInfo(
|
||||
&schedulercache.Resource{
|
||||
MilliCPU: 200,
|
||||
Memory: 1024,
|
||||
},
|
||||
nonzeroRequest: &Resource{
|
||||
&schedulercache.Resource{
|
||||
MilliCPU: 200,
|
||||
Memory: 1024,
|
||||
},
|
||||
TransientInfo: newTransientSchedulerInfo(),
|
||||
allocatableResource: &Resource{},
|
||||
pods: []*v1.Pod{testPods[1]},
|
||||
usedPorts: newHostPortInfoBuilder().add("TCP", "127.0.0.1", 8080).build(),
|
||||
imageSizes: map[string]int64{},
|
||||
}, {
|
||||
requestedResource: &Resource{
|
||||
[]*v1.Pod{testPods[1]},
|
||||
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 8080).build(),
|
||||
make(map[string]*schedulercache.ImageStateSummary),
|
||||
), newNodeInfo(
|
||||
&schedulercache.Resource{
|
||||
MilliCPU: 100,
|
||||
Memory: 500,
|
||||
},
|
||||
nonzeroRequest: &Resource{
|
||||
&schedulercache.Resource{
|
||||
MilliCPU: 100,
|
||||
Memory: 500,
|
||||
},
|
||||
TransientInfo: newTransientSchedulerInfo(),
|
||||
allocatableResource: &Resource{},
|
||||
pods: []*v1.Pod{testPods[0]},
|
||||
usedPorts: newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
|
||||
imageSizes: map[string]int64{},
|
||||
}},
|
||||
[]*v1.Pod{testPods[0]},
|
||||
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
|
||||
make(map[string]*schedulercache.ImageStateSummary),
|
||||
)},
|
||||
}}
|
||||
|
||||
now := time.Now()
|
||||
@ -681,29 +728,27 @@ func makePodWithEphemeralStorage(nodeName, ephemeralStorage string) *v1.Pod {
|
||||
|
||||
func TestEphemeralStorageResource(t *testing.T) {
|
||||
// Enable volumesOnNodeForBalancing to do balanced resource allocation
|
||||
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.BalanceAttachedNodeVolumes))
|
||||
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BalanceAttachedNodeVolumes, true)()
|
||||
nodeName := "node"
|
||||
podE := makePodWithEphemeralStorage(nodeName, "500")
|
||||
tests := []struct {
|
||||
pod *v1.Pod
|
||||
wNodeInfo *NodeInfo
|
||||
wNodeInfo *schedulercache.NodeInfo
|
||||
}{
|
||||
{
|
||||
pod: podE,
|
||||
wNodeInfo: &NodeInfo{
|
||||
requestedResource: &Resource{
|
||||
wNodeInfo: newNodeInfo(
|
||||
&schedulercache.Resource{
|
||||
EphemeralStorage: 500,
|
||||
},
|
||||
nonzeroRequest: &Resource{
|
||||
&schedulercache.Resource{
|
||||
MilliCPU: priorityutil.DefaultMilliCPURequest,
|
||||
Memory: priorityutil.DefaultMemoryRequest,
|
||||
},
|
||||
TransientInfo: newTransientSchedulerInfo(),
|
||||
allocatableResource: &Resource{},
|
||||
pods: []*v1.Pod{podE},
|
||||
usedPorts: schedutil.HostPortInfo{},
|
||||
imageSizes: map[string]int64{},
|
||||
},
|
||||
[]*v1.Pod{podE},
|
||||
schedulercache.HostPortInfo{},
|
||||
make(map[string]*schedulercache.ImageStateSummary),
|
||||
),
|
||||
},
|
||||
}
|
||||
for i, tt := range tests {
|
||||
@ -728,29 +773,27 @@ func TestEphemeralStorageResource(t *testing.T) {
|
||||
// TestRemovePod tests after added pod is removed, its information should also be subtracted.
|
||||
func TestRemovePod(t *testing.T) {
|
||||
// Enable volumesOnNodeForBalancing to do balanced resource allocation
|
||||
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.BalanceAttachedNodeVolumes))
|
||||
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BalanceAttachedNodeVolumes, true)()
|
||||
nodeName := "node"
|
||||
basePod := makeBasePod(t, nodeName, "test", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}})
|
||||
tests := []struct {
|
||||
pod *v1.Pod
|
||||
wNodeInfo *NodeInfo
|
||||
wNodeInfo *schedulercache.NodeInfo
|
||||
}{{
|
||||
pod: basePod,
|
||||
wNodeInfo: &NodeInfo{
|
||||
requestedResource: &Resource{
|
||||
wNodeInfo: newNodeInfo(
|
||||
&schedulercache.Resource{
|
||||
MilliCPU: 100,
|
||||
Memory: 500,
|
||||
},
|
||||
nonzeroRequest: &Resource{
|
||||
&schedulercache.Resource{
|
||||
MilliCPU: 100,
|
||||
Memory: 500,
|
||||
},
|
||||
TransientInfo: newTransientSchedulerInfo(),
|
||||
allocatableResource: &Resource{},
|
||||
pods: []*v1.Pod{basePod},
|
||||
usedPorts: newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
|
||||
imageSizes: map[string]int64{},
|
||||
},
|
||||
[]*v1.Pod{basePod},
|
||||
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
|
||||
make(map[string]*schedulercache.ImageStateSummary),
|
||||
),
|
||||
}}
|
||||
|
||||
for i, tt := range tests {
|
||||
@ -829,7 +872,7 @@ func TestForgetPod(t *testing.T) {
|
||||
// getResourceRequest returns the resource request of all containers in Pods;
|
||||
// excluding initContainers.
|
||||
func getResourceRequest(pod *v1.Pod) v1.ResourceList {
|
||||
result := &Resource{}
|
||||
result := &schedulercache.Resource{}
|
||||
for _, container := range pod.Spec.Containers {
|
||||
result.Add(container.Resources.Requests)
|
||||
}
|
||||
@ -838,22 +881,30 @@ func getResourceRequest(pod *v1.Pod) v1.ResourceList {
|
||||
}
|
||||
|
||||
// buildNodeInfo creates a NodeInfo by simulating node operations in cache.
|
||||
func buildNodeInfo(node *v1.Node, pods []*v1.Pod) *NodeInfo {
|
||||
expected := NewNodeInfo()
|
||||
func buildNodeInfo(node *v1.Node, pods []*v1.Pod) *schedulercache.NodeInfo {
|
||||
expected := schedulercache.NewNodeInfo()
|
||||
|
||||
// Simulate SetNode.
|
||||
expected.node = node
|
||||
expected.allocatableResource = NewResource(node.Status.Allocatable)
|
||||
expected.taints = node.Spec.Taints
|
||||
expected.generation++
|
||||
expected.SetNode(node)
|
||||
|
||||
expected.SetAllocatableResource(schedulercache.NewResource(node.Status.Allocatable))
|
||||
expected.SetTaints(node.Spec.Taints)
|
||||
expected.SetGeneration(expected.GetGeneration() + 1)
|
||||
|
||||
for _, pod := range pods {
|
||||
// Simulate AddPod
|
||||
expected.pods = append(expected.pods, pod)
|
||||
expected.requestedResource.Add(getResourceRequest(pod))
|
||||
expected.nonzeroRequest.Add(getResourceRequest(pod))
|
||||
expected.updateUsedPorts(pod, true)
|
||||
expected.generation++
|
||||
pods := append(expected.Pods(), pod)
|
||||
expected.SetPods(pods)
|
||||
requestedResource := expected.RequestedResource()
|
||||
newRequestedResource := &requestedResource
|
||||
newRequestedResource.Add(getResourceRequest(pod))
|
||||
expected.SetRequestedResource(newRequestedResource)
|
||||
nonZeroRequest := expected.NonZeroRequest()
|
||||
newNonZeroRequest := &nonZeroRequest
|
||||
newNonZeroRequest.Add(getResourceRequest(pod))
|
||||
expected.SetNonZeroRequest(newNonZeroRequest)
|
||||
expected.UpdateUsedPorts(pod, true)
|
||||
expected.SetGeneration(expected.GetGeneration() + 1)
|
||||
}
|
||||
|
||||
return expected
|
||||
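The hunk above swaps direct field writes on NodeInfo for the exported accessors, since the test now lives outside the package that owns NodeInfo. The accessors appear to return the Resource by value (the diff takes its address before mutating), so each update is a copy-modify-set; restated in isolation with the same names:

requested := expected.RequestedResource() // copy of the current value
requested.Add(getResourceRequest(pod))    // Add has a pointer receiver; requested is addressable
expected.SetRequestedResource(&requested)
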
@ -1004,49 +1055,65 @@ func TestNodeOperators(t *testing.T) {
|
||||
// Case 1: the node was added into cache successfully.
|
||||
got, found := cache.nodes[node.Name]
|
||||
if !found {
|
||||
t.Errorf("Failed to find node %v in schedulercache.", node.Name)
|
||||
t.Errorf("Failed to find node %v in schedulerinternalcache.", node.Name)
|
||||
}
|
||||
if cache.nodeTree.NumNodes != 1 || cache.nodeTree.Next() != node.Name {
|
||||
t.Errorf("cache.nodeTree is not updated correctly after adding node: %v", node.Name)
|
||||
}
|
||||
|
||||
// Generations are globally unique. We check in our unit tests that they are incremented correctly.
|
||||
expected.generation = got.generation
|
||||
expected.SetGeneration(got.GetGeneration())
|
||||
if !reflect.DeepEqual(got, expected) {
|
||||
t.Errorf("Failed to add node into schedulercache:\n got: %+v \nexpected: %+v", got, expected)
|
||||
}
|
||||
|
||||
// Case 2: dump cached nodes successfully.
|
||||
cachedNodes := map[string]*NodeInfo{}
|
||||
cachedNodes := map[string]*schedulercache.NodeInfo{}
|
||||
cache.UpdateNodeNameToInfoMap(cachedNodes)
|
||||
newNode, found := cachedNodes[node.Name]
|
||||
if !found || len(cachedNodes) != 1 {
|
||||
t.Errorf("failed to dump cached nodes:\n got: %v \nexpected: %v", cachedNodes, cache.nodes)
|
||||
}
|
||||
expected.generation = newNode.generation
|
||||
expected.SetGeneration(newNode.GetGeneration())
|
||||
if !reflect.DeepEqual(newNode, expected) {
|
||||
t.Errorf("Failed to clone node:\n got: %+v, \n expected: %+v", newNode, expected)
|
||||
}
|
||||
|
||||
// Case 3: update node attribute successfully.
|
||||
node.Status.Allocatable[v1.ResourceMemory] = mem50m
|
||||
expected.allocatableResource.Memory = mem50m.Value()
|
||||
allocatableResource := expected.AllocatableResource()
|
||||
newAllocatableResource := &allocatableResource
|
||||
newAllocatableResource.Memory = mem50m.Value()
|
||||
expected.SetAllocatableResource(newAllocatableResource)
|
||||
|
||||
cache.UpdateNode(nil, node)
|
||||
got, found = cache.nodes[node.Name]
|
||||
if !found {
|
||||
t.Errorf("Failed to find node %v in schedulercache after UpdateNode.", node.Name)
|
||||
}
|
||||
if got.generation <= expected.generation {
|
||||
t.Errorf("generation is not incremented. got: %v, expected: %v", got.generation, expected.generation)
|
||||
if got.GetGeneration() <= expected.GetGeneration() {
|
||||
t.Errorf("Generation is not incremented. got: %v, expected: %v", got.GetGeneration(), expected.GetGeneration())
|
||||
}
|
||||
expected.generation = got.generation
|
||||
expected.SetGeneration(got.GetGeneration())
|
||||
|
||||
if !reflect.DeepEqual(got, expected) {
|
||||
t.Errorf("Failed to update node in schedulercache:\n got: %+v \nexpected: %+v", got, expected)
|
||||
}
|
||||
// Check nodeTree after update
|
||||
if cache.nodeTree.NumNodes != 1 || cache.nodeTree.Next() != node.Name {
|
||||
t.Errorf("unexpected cache.nodeTree after updating node: %v", node.Name)
|
||||
}
|
||||
|
||||
// Case 4: the node can not be removed if pods is not empty.
|
||||
cache.RemoveNode(node)
|
||||
if _, found := cache.nodes[node.Name]; !found {
|
||||
t.Errorf("The node %v should not be removed if pods is not empty.", node.Name)
|
||||
}
|
||||
// Check nodeTree after remove. The node should be removed from the nodeTree even if there are
|
||||
// still pods on it.
|
||||
if cache.nodeTree.NumNodes != 0 || cache.nodeTree.Next() != "" {
|
||||
t.Errorf("unexpected cache.nodeTree after removing node: %v", node.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -1060,25 +1127,27 @@ func BenchmarkList1kNodes30kPods(b *testing.B) {

func BenchmarkUpdate1kNodes30kPods(b *testing.B) {
// Enable volumesOnNodeForBalancing to do balanced resource allocation
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.BalanceAttachedNodeVolumes))
defer utilfeaturetesting.SetFeatureGateDuringTest(nil, utilfeature.DefaultFeatureGate, features.BalanceAttachedNodeVolumes, true)()
cache := setupCacheOf1kNodes30kPods(b)
b.ResetTimer()
for n := 0; n < b.N; n++ {
cachedNodes := map[string]*NodeInfo{}
cachedNodes := map[string]*schedulercache.NodeInfo{}
cache.UpdateNodeNameToInfoMap(cachedNodes)
}
}

func BenchmarkExpire100Pods(b *testing.B) {
benchmarkExpire(b, 100)
}

func BenchmarkExpire1kPods(b *testing.B) {
benchmarkExpire(b, 1000)
}

func BenchmarkExpire10kPods(b *testing.B) {
benchmarkExpire(b, 10000)
func BenchmarkExpirePods(b *testing.B) {
podNums := []int{
100,
1000,
10000,
}
for _, podNum := range podNums {
name := fmt.Sprintf("%dPods", podNum)
b.Run(name, func(b *testing.B) {
benchmarkExpire(b, podNum)
})
}
}

func benchmarkExpire(b *testing.B, podNum int) {
|
||||
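The hunk above replaces three fixed-size benchmarks with one table-driven benchmark built on b.Run sub-benchmarks, so each pod count gets its own timing and its own b.N. The general shape of that pattern, using a hypothetical doWork in place of benchmarkExpire:

func BenchmarkWorkload(b *testing.B) {
	for _, size := range []int{100, 1000, 10000} {
		b.Run(fmt.Sprintf("%dPods", size), func(b *testing.B) {
			for n := 0; n < b.N; n++ {
				doWork(size) // hypothetical workload
			}
		})
	}
}

Adding a new size then becomes a one-entry change to the table rather than a new top-level benchmark function.
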
@ -1158,131 +1227,3 @@ func setupCacheWithAssumedPods(b *testing.B, podNum int, assumedTime time.Time)
|
||||
}
|
||||
return cache
|
||||
}
|
||||
|
||||
func makePDB(name, namespace string, uid types.UID, labels map[string]string, minAvailable int) *v1beta1.PodDisruptionBudget {
|
||||
intstrMin := intstr.FromInt(minAvailable)
|
||||
pdb := &v1beta1.PodDisruptionBudget{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: namespace,
|
||||
Name: name,
|
||||
Labels: labels,
|
||||
UID: uid,
|
||||
},
|
||||
Spec: v1beta1.PodDisruptionBudgetSpec{
|
||||
MinAvailable: &intstrMin,
|
||||
Selector: &metav1.LabelSelector{MatchLabels: labels},
|
||||
},
|
||||
}
|
||||
|
||||
return pdb
|
||||
}
|
||||
|
||||
// TestPDBOperations tests that a PDB will be add/updated/deleted correctly.
|
||||
func TestPDBOperations(t *testing.T) {
|
||||
ttl := 10 * time.Second
|
||||
testPDBs := []*v1beta1.PodDisruptionBudget{
|
||||
makePDB("pdb0", "ns1", "uid0", map[string]string{"tkey1": "tval1"}, 3),
|
||||
makePDB("pdb1", "ns1", "uid1", map[string]string{"tkey1": "tval1", "tkey2": "tval2"}, 1),
|
||||
makePDB("pdb2", "ns3", "uid2", map[string]string{"tkey3": "tval3", "tkey2": "tval2"}, 10),
|
||||
}
|
||||
updatedPDBs := []*v1beta1.PodDisruptionBudget{
|
||||
makePDB("pdb0", "ns1", "uid0", map[string]string{"tkey4": "tval4"}, 8),
|
||||
makePDB("pdb1", "ns1", "uid1", map[string]string{"tkey1": "tval1"}, 1),
|
||||
makePDB("pdb2", "ns3", "uid2", map[string]string{"tkey3": "tval3", "tkey1": "tval1", "tkey2": "tval2"}, 10),
|
||||
}
|
||||
tests := []struct {
|
||||
pdbsToAdd []*v1beta1.PodDisruptionBudget
|
||||
pdbsToUpdate []*v1beta1.PodDisruptionBudget
|
||||
pdbsToDelete []*v1beta1.PodDisruptionBudget
|
||||
expectedPDBs []*v1beta1.PodDisruptionBudget // Expected PDBs after all operations
|
||||
}{
|
||||
{
|
||||
pdbsToAdd: []*v1beta1.PodDisruptionBudget{testPDBs[0]},
|
||||
pdbsToUpdate: []*v1beta1.PodDisruptionBudget{testPDBs[0], testPDBs[1], testPDBs[0]},
|
||||
expectedPDBs: []*v1beta1.PodDisruptionBudget{testPDBs[0], testPDBs[1]}, // both will be in the cache as they have different names
|
||||
},
|
||||
{
|
||||
pdbsToAdd: []*v1beta1.PodDisruptionBudget{testPDBs[0]},
|
||||
pdbsToUpdate: []*v1beta1.PodDisruptionBudget{testPDBs[0], updatedPDBs[0]},
|
||||
expectedPDBs: []*v1beta1.PodDisruptionBudget{updatedPDBs[0]},
|
||||
},
|
||||
{
|
||||
pdbsToAdd: []*v1beta1.PodDisruptionBudget{testPDBs[0], testPDBs[2]},
|
||||
pdbsToUpdate: []*v1beta1.PodDisruptionBudget{testPDBs[0], updatedPDBs[0]},
|
||||
pdbsToDelete: []*v1beta1.PodDisruptionBudget{testPDBs[0]},
|
||||
expectedPDBs: []*v1beta1.PodDisruptionBudget{testPDBs[2]},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
cache := newSchedulerCache(ttl, time.Second, nil)
|
||||
for _, pdbToAdd := range test.pdbsToAdd {
|
||||
if err := cache.AddPDB(pdbToAdd); err != nil {
|
||||
t.Fatalf("AddPDB failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
for i := range test.pdbsToUpdate {
|
||||
if i == 0 {
|
||||
continue
|
||||
}
|
||||
if err := cache.UpdatePDB(test.pdbsToUpdate[i-1], test.pdbsToUpdate[i]); err != nil {
|
||||
t.Fatalf("UpdatePDB failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, pdb := range test.pdbsToDelete {
|
||||
if err := cache.RemovePDB(pdb); err != nil {
|
||||
t.Fatalf("RemovePDB failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
cachedPDBs, err := cache.ListPDBs(labels.Everything())
|
||||
if err != nil {
|
||||
t.Fatalf("ListPDBs failed: %v", err)
|
||||
}
|
||||
if len(cachedPDBs) != len(test.expectedPDBs) {
|
||||
t.Errorf("Expected %d PDBs, got %d", len(test.expectedPDBs), len(cachedPDBs))
|
||||
}
|
||||
for _, pdb := range test.expectedPDBs {
|
||||
found := false
|
||||
// find it among the cached ones
|
||||
for _, cpdb := range cachedPDBs {
|
||||
if pdb.UID == cpdb.UID {
|
||||
found = true
|
||||
if !reflect.DeepEqual(pdb, cpdb) {
|
||||
t.Errorf("%v is not equal to %v", pdb, cpdb)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Errorf("PDB with uid '%v' was not found in the cache.", pdb.UID)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsUpToDate(t *testing.T) {
|
||||
cache := New(time.Duration(0), wait.NeverStop)
|
||||
if err := cache.AddNode(&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "n1"}}); err != nil {
|
||||
t.Errorf("Could not add node: %v", err)
|
||||
}
|
||||
s := cache.Snapshot()
|
||||
node := s.Nodes["n1"]
|
||||
if !cache.IsUpToDate(node) {
|
||||
t.Errorf("Node incorrectly marked as stale")
|
||||
}
|
||||
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p1", UID: "p1"}, Spec: v1.PodSpec{NodeName: "n1"}}
|
||||
if err := cache.AddPod(pod); err != nil {
|
||||
t.Errorf("Could not add pod: %v", err)
|
||||
}
|
||||
if cache.IsUpToDate(node) {
|
||||
t.Errorf("Node incorrectly marked as up to date")
|
||||
}
|
||||
badNode := &NodeInfo{node: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "n2"}}}
|
||||
if cache.IsUpToDate(badNode) {
|
||||
t.Errorf("Nonexistant node incorrectly marked as up to date")
|
||||
}
|
||||
}
|
46
vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger/BUILD
generated
vendored
Normal file
@ -0,0 +1,46 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"comparer.go",
|
||||
"debugger.go",
|
||||
"dumper.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger",
|
||||
visibility = ["//pkg/scheduler:__subpackages__"],
|
||||
deps = [
|
||||
"//pkg/scheduler/cache:go_default_library",
|
||||
"//pkg/scheduler/internal/cache:go_default_library",
|
||||
"//pkg/scheduler/internal/queue:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/klog:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["comparer_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//pkg/scheduler/cache:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
@ -14,74 +14,61 @@ See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package factory
|
||||
package debugger
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/api/core/v1"
|
||||
policy "k8s.io/api/policy/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
corelisters "k8s.io/client-go/listers/core/v1"
|
||||
v1beta1 "k8s.io/client-go/listers/policy/v1beta1"
|
||||
"k8s.io/klog"
|
||||
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
|
||||
"k8s.io/kubernetes/pkg/scheduler/core"
|
||||
schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
|
||||
internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
|
||||
)
|
||||
|
||||
type cacheComparer struct {
|
||||
nodeLister corelisters.NodeLister
|
||||
podLister corelisters.PodLister
|
||||
pdbLister v1beta1.PodDisruptionBudgetLister
|
||||
cache schedulercache.Cache
|
||||
podQueue core.SchedulingQueue
|
||||
|
||||
compareStrategy
|
||||
// CacheComparer is an implementation of the Scheduler's cache comparer.
|
||||
type CacheComparer struct {
|
||||
NodeLister corelisters.NodeLister
|
||||
PodLister corelisters.PodLister
|
||||
Cache schedulerinternalcache.Cache
|
||||
PodQueue internalqueue.SchedulingQueue
|
||||
}
|
||||
|
||||
func (c *cacheComparer) Compare() error {
|
||||
glog.V(3).Info("cache comparer started")
|
||||
defer glog.V(3).Info("cache comparer finished")
|
||||
// Compare compares the nodes and pods of NodeLister with Cache.Snapshot.
|
||||
func (c *CacheComparer) Compare() error {
|
||||
klog.V(3).Info("cache comparer started")
|
||||
defer klog.V(3).Info("cache comparer finished")
|
||||
|
||||
nodes, err := c.nodeLister.List(labels.Everything())
|
||||
nodes, err := c.NodeLister.List(labels.Everything())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pods, err := c.podLister.List(labels.Everything())
|
||||
pods, err := c.PodLister.List(labels.Everything())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pdbs, err := c.pdbLister.List(labels.Everything())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
snapshot := c.Cache.Snapshot()
|
||||
|
||||
snapshot := c.cache.Snapshot()
|
||||
|
||||
waitingPods := c.podQueue.WaitingPods()
|
||||
waitingPods := c.PodQueue.WaitingPods()
|
||||
|
||||
if missed, redundant := c.CompareNodes(nodes, snapshot.Nodes); len(missed)+len(redundant) != 0 {
|
||||
glog.Warningf("cache mismatch: missed nodes: %s; redundant nodes: %s", missed, redundant)
|
||||
klog.Warningf("cache mismatch: missed nodes: %s; redundant nodes: %s", missed, redundant)
|
||||
}
|
||||
|
||||
if missed, redundant := c.ComparePods(pods, waitingPods, snapshot.Nodes); len(missed)+len(redundant) != 0 {
|
||||
glog.Warningf("cache mismatch: missed pods: %s; redundant pods: %s", missed, redundant)
|
||||
}
|
||||
|
||||
if missed, redundant := c.ComparePdbs(pdbs, snapshot.Pdbs); len(missed)+len(redundant) != 0 {
|
||||
glog.Warningf("cache mismatch: missed pdbs: %s; redundant pdbs: %s", missed, redundant)
|
||||
klog.Warningf("cache mismatch: missed pods: %s; redundant pods: %s", missed, redundant)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type compareStrategy struct {
|
||||
}
|
||||
|
||||
func (c compareStrategy) CompareNodes(nodes []*v1.Node, nodeinfos map[string]*schedulercache.NodeInfo) (missed, redundant []string) {
|
||||
// CompareNodes compares actual nodes with cached nodes.
|
||||
func (c *CacheComparer) CompareNodes(nodes []*v1.Node, nodeinfos map[string]*schedulercache.NodeInfo) (missed, redundant []string) {
|
||||
actual := []string{}
|
||||
for _, node := range nodes {
|
||||
actual = append(actual, node.Name)
|
||||
@ -95,7 +82,8 @@ func (c compareStrategy) CompareNodes(nodes []*v1.Node, nodeinfos map[string]*sc
|
||||
return compareStrings(actual, cached)
|
||||
}
|
||||
|
||||
func (c compareStrategy) ComparePods(pods, waitingPods []*v1.Pod, nodeinfos map[string]*schedulercache.NodeInfo) (missed, redundant []string) {
|
||||
// ComparePods compares actual pods with cached pods.
|
||||
func (c *CacheComparer) ComparePods(pods, waitingPods []*v1.Pod, nodeinfos map[string]*schedulercache.NodeInfo) (missed, redundant []string) {
|
||||
actual := []string{}
|
||||
for _, pod := range pods {
|
||||
actual = append(actual, string(pod.UID))
|
||||
@ -114,20 +102,6 @@ func (c compareStrategy) ComparePods(pods, waitingPods []*v1.Pod, nodeinfos map[
|
||||
return compareStrings(actual, cached)
|
||||
}
|
||||
|
||||
func (c compareStrategy) ComparePdbs(pdbs []*policy.PodDisruptionBudget, pdbCache map[string]*policy.PodDisruptionBudget) (missed, redundant []string) {
|
||||
actual := []string{}
|
||||
for _, pdb := range pdbs {
|
||||
actual = append(actual, string(pdb.UID))
|
||||
}
|
||||
|
||||
cached := []string{}
|
||||
for pdbUID := range pdbCache {
|
||||
cached = append(cached, pdbUID)
|
||||
}
|
||||
|
||||
return compareStrings(actual, cached)
|
||||
}
|
||||
|
||||
func compareStrings(actual, cached []string) (missed, redundant []string) {
|
||||
missed, redundant = []string{}, []string{}
|
||||
|
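CompareNodes and ComparePods both collect string keys (node names, pod UIDs) and hand them to compareStrings, whose body is cut off here. A sketch of the contract it has to satisfy, where items the listers report but the cache lacks are "missed" and items only the cache holds are "redundant" (assumed implementation; the vendored function may differ, for example by sorting both slices first):

func diffStrings(actual, cached []string) (missed, redundant []string) {
	missed, redundant = []string{}, []string{}
	inCached := make(map[string]struct{}, len(cached))
	for _, s := range cached {
		inCached[s] = struct{}{}
	}
	inActual := make(map[string]struct{}, len(actual))
	for _, s := range actual {
		inActual[s] = struct{}{}
		if _, ok := inCached[s]; !ok {
			missed = append(missed, s)
		}
	}
	for _, s := range cached {
		if _, ok := inActual[s]; !ok {
			redundant = append(redundant, s)
		}
	}
	return missed, redundant
}
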
192
vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger/comparer_test.go
generated
vendored
Normal file
@ -0,0 +1,192 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package debugger
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
|
||||
)
|
||||
|
||||
func TestCompareNodes(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
actual []string
|
||||
cached []string
|
||||
missing []string
|
||||
redundant []string
|
||||
}{
|
||||
{
|
||||
name: "redundant cached value",
|
||||
actual: []string{"foo", "bar"},
|
||||
cached: []string{"bar", "foo", "foobar"},
|
||||
missing: []string{},
|
||||
redundant: []string{"foobar"},
|
||||
},
|
||||
{
|
||||
name: "missing cached value",
|
||||
actual: []string{"foo", "bar", "foobar"},
|
||||
cached: []string{"bar", "foo"},
|
||||
missing: []string{"foobar"},
|
||||
redundant: []string{},
|
||||
},
|
||||
{
|
||||
name: "proper cache set",
|
||||
actual: []string{"foo", "bar", "foobar"},
|
||||
cached: []string{"bar", "foobar", "foo"},
|
||||
missing: []string{},
|
||||
redundant: []string{},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
testCompareNodes(test.actual, test.cached, test.missing, test.redundant, t)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testCompareNodes(actual, cached, missing, redundant []string, t *testing.T) {
|
||||
compare := CacheComparer{}
|
||||
nodes := []*v1.Node{}
|
||||
for _, nodeName := range actual {
|
||||
node := &v1.Node{}
|
||||
node.Name = nodeName
|
||||
nodes = append(nodes, node)
|
||||
}
|
||||
|
||||
nodeInfo := make(map[string]*schedulercache.NodeInfo)
|
||||
for _, nodeName := range cached {
|
||||
nodeInfo[nodeName] = &schedulercache.NodeInfo{}
|
||||
}
|
||||
|
||||
m, r := compare.CompareNodes(nodes, nodeInfo)
|
||||
|
||||
if !reflect.DeepEqual(m, missing) {
|
||||
t.Errorf("missing expected to be %s; got %s", missing, m)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(r, redundant) {
|
||||
t.Errorf("redundant expected to be %s; got %s", redundant, r)
|
||||
}
|
||||
}
|
||||
|
||||
func TestComparePods(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
actual []string
|
||||
cached []string
|
||||
queued []string
|
||||
missing []string
|
||||
redundant []string
|
||||
}{
|
||||
{
|
||||
name: "redundant cached value",
|
||||
actual: []string{"foo", "bar"},
|
||||
cached: []string{"bar", "foo", "foobar"},
|
||||
queued: []string{},
|
||||
missing: []string{},
|
||||
redundant: []string{"foobar"},
|
||||
},
|
||||
{
|
||||
name: "redundant and queued values",
|
||||
actual: []string{"foo", "bar"},
|
||||
cached: []string{"foo", "foobar"},
|
||||
queued: []string{"bar"},
|
||||
missing: []string{},
|
||||
redundant: []string{"foobar"},
|
||||
},
|
||||
{
|
||||
name: "missing cached value",
|
||||
actual: []string{"foo", "bar", "foobar"},
|
||||
cached: []string{"bar", "foo"},
|
||||
queued: []string{},
|
||||
missing: []string{"foobar"},
|
||||
redundant: []string{},
|
||||
},
|
||||
{
|
||||
name: "missing and queued values",
|
||||
actual: []string{"foo", "bar", "foobar"},
|
||||
cached: []string{"foo"},
|
||||
queued: []string{"bar"},
|
||||
missing: []string{"foobar"},
|
||||
redundant: []string{},
|
||||
},
|
||||
{
|
||||
name: "correct cache set",
|
||||
actual: []string{"foo", "bar", "foobar"},
|
||||
cached: []string{"bar", "foobar", "foo"},
|
||||
queued: []string{},
|
||||
missing: []string{},
|
||||
redundant: []string{},
|
||||
},
|
||||
{
|
||||
name: "queued cache value",
|
||||
actual: []string{"foo", "bar", "foobar"},
|
||||
cached: []string{"foobar", "foo"},
|
||||
queued: []string{"bar"},
|
||||
missing: []string{},
|
||||
redundant: []string{},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
testComparePods(test.actual, test.cached, test.queued, test.missing, test.redundant, t)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testComparePods(actual, cached, queued, missing, redundant []string, t *testing.T) {
|
||||
compare := CacheComparer{}
|
||||
pods := []*v1.Pod{}
|
||||
for _, uid := range actual {
|
||||
pod := &v1.Pod{}
|
||||
pod.UID = types.UID(uid)
|
||||
pods = append(pods, pod)
|
||||
}
|
||||
|
||||
queuedPods := []*v1.Pod{}
|
||||
for _, uid := range queued {
|
||||
pod := &v1.Pod{}
|
||||
pod.UID = types.UID(uid)
|
||||
queuedPods = append(queuedPods, pod)
|
||||
}
|
||||
|
||||
nodeInfo := make(map[string]*schedulercache.NodeInfo)
|
||||
for _, uid := range cached {
|
||||
pod := &v1.Pod{}
|
||||
pod.UID = types.UID(uid)
|
||||
pod.Namespace = "ns"
|
||||
pod.Name = uid
|
||||
|
||||
nodeInfo[uid] = schedulercache.NewNodeInfo(pod)
|
||||
}
|
||||
|
||||
m, r := compare.ComparePods(pods, queuedPods, nodeInfo)
|
||||
|
||||
if !reflect.DeepEqual(m, missing) {
|
||||
t.Errorf("missing expected to be %s; got %s", missing, m)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(r, redundant) {
|
||||
t.Errorf("redundant expected to be %s; got %s", redundant, r)
|
||||
}
|
||||
}
|
50
vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger/debugger.go
generated
vendored
Normal file
@ -0,0 +1,50 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package debugger

import (
corelisters "k8s.io/client-go/listers/core/v1"
internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
)

// CacheDebugger provides ways to check and write cache information for debugging.
type CacheDebugger struct {
Comparer CacheComparer
Dumper CacheDumper
}

// New creates a CacheDebugger.
func New(
nodeLister corelisters.NodeLister,
podLister corelisters.PodLister,
cache internalcache.Cache,
podQueue internalqueue.SchedulingQueue,
) *CacheDebugger {
return &CacheDebugger{
Comparer: CacheComparer{
NodeLister: nodeLister,
PodLister: podLister,
Cache: cache,
PodQueue: podQueue,
},
Dumper: CacheDumper{
cache: cache,
podQueue: podQueue,
},
}
}
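New is the only constructor the new debugger package exposes. A hypothetical wiring, assuming the caller already has a client-go informer factory, the internal scheduler cache and the scheduling queue (variable names are illustrative, not taken from the diff):

cacheDebugger := debugger.New(
	informerFactory.Core().V1().Nodes().Lister(),
	informerFactory.Core().V1().Pods().Lister(),
	schedulerCache,
	schedulingQueue,
)
// cacheDebugger.Comparer.Compare() logs mismatches between the listers and the
// cache snapshot; the Dumper presumably writes cached state for troubleshooting.
_ = cacheDebugger
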
Some files were not shown because too many files have changed in this diff.