Fresh dep ensure

Mike Cronce
2018-11-26 13:23:56 -05:00
parent 93cb8a04d7
commit 407478ab9a
9016 changed files with 551394 additions and 279685 deletions

View File

@@ -3,8 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"cache.go",
"interface.go",
"host_ports.go",
"node_info.go",
"util.go",
],
@@ -12,40 +11,28 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/features:go_default_library",
"//pkg/scheduler/algorithm/priorities/util:go_default_library",
"//pkg/scheduler/util:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"cache_test.go",
"host_ports_test.go",
"node_info_test.go",
"util_test.go",
],
embed = [":go_default_library"],
deps = [
"//pkg/features:go_default_library",
"//pkg/scheduler/algorithm/priorities/util:go_default_library",
"//pkg/scheduler/util:go_default_library",
"//pkg/util/parsers:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
],
)

View File

@@ -1,510 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"fmt"
"sync"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features"
"github.com/golang/glog"
policy "k8s.io/api/policy/v1beta1"
)
var (
cleanAssumedPeriod = 1 * time.Second
)
// New returns a Cache implementation.
// It automatically starts a goroutine that manages expiration of assumed pods.
// "ttl" is how long an assumed pod stays in the cache before it expires.
// "stop" is the channel that, when closed, stops the background goroutine.
func New(ttl time.Duration, stop <-chan struct{}) Cache {
cache := newSchedulerCache(ttl, cleanAssumedPeriod, stop)
cache.run()
return cache
}
type schedulerCache struct {
stop <-chan struct{}
ttl time.Duration
period time.Duration
// This mutex guards all fields within this cache struct.
mu sync.Mutex
// a set of assumed pod keys.
// The key could further be used to get an entry in podStates.
assumedPods map[string]bool
// a map from pod key to podState.
podStates map[string]*podState
nodes map[string]*NodeInfo
pdbs map[string]*policy.PodDisruptionBudget
}
type podState struct {
pod *v1.Pod
// Used by assumedPod to determine expiration.
deadline *time.Time
// Used to block cache from expiring assumedPod if binding still runs
bindingFinished bool
}
func newSchedulerCache(ttl, period time.Duration, stop <-chan struct{}) *schedulerCache {
return &schedulerCache{
ttl: ttl,
period: period,
stop: stop,
nodes: make(map[string]*NodeInfo),
assumedPods: make(map[string]bool),
podStates: make(map[string]*podState),
pdbs: make(map[string]*policy.PodDisruptionBudget),
}
}
// Snapshot takes a snapshot of the current schedulerCache. The method has a performance cost,
// and should only be used off the critical path.
func (cache *schedulerCache) Snapshot() *Snapshot {
cache.mu.Lock()
defer cache.mu.Unlock()
nodes := make(map[string]*NodeInfo)
for k, v := range cache.nodes {
nodes[k] = v.Clone()
}
assumedPods := make(map[string]bool)
for k, v := range cache.assumedPods {
assumedPods[k] = v
}
pdbs := make(map[string]*policy.PodDisruptionBudget)
for k, v := range cache.pdbs {
pdbs[k] = v.DeepCopy()
}
return &Snapshot{
Nodes: nodes,
AssumedPods: assumedPods,
Pdbs: pdbs,
}
}
func (cache *schedulerCache) UpdateNodeNameToInfoMap(nodeNameToInfo map[string]*NodeInfo) error {
cache.mu.Lock()
defer cache.mu.Unlock()
for name, info := range cache.nodes {
if utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes) && info.TransientInfo != nil {
// Transient scheduler info is reset here.
info.TransientInfo.resetTransientSchedulerInfo()
}
if current, ok := nodeNameToInfo[name]; !ok || current.generation != info.generation {
nodeNameToInfo[name] = info.Clone()
}
}
for name := range nodeNameToInfo {
if _, ok := cache.nodes[name]; !ok {
delete(nodeNameToInfo, name)
}
}
return nil
}
func (cache *schedulerCache) List(selector labels.Selector) ([]*v1.Pod, error) {
alwaysTrue := func(p *v1.Pod) bool { return true }
return cache.FilteredList(alwaysTrue, selector)
}
func (cache *schedulerCache) FilteredList(podFilter PodFilter, selector labels.Selector) ([]*v1.Pod, error) {
cache.mu.Lock()
defer cache.mu.Unlock()
// podFilter is expected to return true for most or all of the pods. We
// can avoid expensive array growth without wasting too much memory by
// pre-allocating capacity.
maxSize := 0
for _, info := range cache.nodes {
maxSize += len(info.pods)
}
pods := make([]*v1.Pod, 0, maxSize)
for _, info := range cache.nodes {
for _, pod := range info.pods {
if podFilter(pod) && selector.Matches(labels.Set(pod.Labels)) {
pods = append(pods, pod)
}
}
}
return pods, nil
}
func (cache *schedulerCache) AssumePod(pod *v1.Pod) error {
key, err := getPodKey(pod)
if err != nil {
return err
}
cache.mu.Lock()
defer cache.mu.Unlock()
if _, ok := cache.podStates[key]; ok {
return fmt.Errorf("pod %v is in the cache, so can't be assumed", key)
}
cache.addPod(pod)
ps := &podState{
pod: pod,
}
cache.podStates[key] = ps
cache.assumedPods[key] = true
return nil
}
func (cache *schedulerCache) FinishBinding(pod *v1.Pod) error {
return cache.finishBinding(pod, time.Now())
}
// finishBinding exists to make tests deterministic by injecting "now" as an argument
func (cache *schedulerCache) finishBinding(pod *v1.Pod, now time.Time) error {
key, err := getPodKey(pod)
if err != nil {
return err
}
cache.mu.Lock()
defer cache.mu.Unlock()
glog.V(5).Infof("Finished binding for pod %v. Can be expired.", key)
currState, ok := cache.podStates[key]
if ok && cache.assumedPods[key] {
dl := now.Add(cache.ttl)
currState.bindingFinished = true
currState.deadline = &dl
}
return nil
}
func (cache *schedulerCache) ForgetPod(pod *v1.Pod) error {
key, err := getPodKey(pod)
if err != nil {
return err
}
cache.mu.Lock()
defer cache.mu.Unlock()
currState, ok := cache.podStates[key]
if ok && currState.pod.Spec.NodeName != pod.Spec.NodeName {
return fmt.Errorf("pod %v was assumed on %v but assigned to %v", key, pod.Spec.NodeName, currState.pod.Spec.NodeName)
}
switch {
// Only an assumed pod can be forgotten.
case ok && cache.assumedPods[key]:
err := cache.removePod(pod)
if err != nil {
return err
}
delete(cache.assumedPods, key)
delete(cache.podStates, key)
default:
return fmt.Errorf("pod %v wasn't assumed so cannot be forgotten", key)
}
return nil
}
// Assumes that lock is already acquired.
func (cache *schedulerCache) addPod(pod *v1.Pod) {
n, ok := cache.nodes[pod.Spec.NodeName]
if !ok {
n = NewNodeInfo()
cache.nodes[pod.Spec.NodeName] = n
}
n.AddPod(pod)
}
// Assumes that lock is already acquired.
func (cache *schedulerCache) updatePod(oldPod, newPod *v1.Pod) error {
if err := cache.removePod(oldPod); err != nil {
return err
}
cache.addPod(newPod)
return nil
}
// Assumes that lock is already acquired.
func (cache *schedulerCache) removePod(pod *v1.Pod) error {
n := cache.nodes[pod.Spec.NodeName]
if err := n.RemovePod(pod); err != nil {
return err
}
if len(n.pods) == 0 && n.node == nil {
delete(cache.nodes, pod.Spec.NodeName)
}
return nil
}
func (cache *schedulerCache) AddPod(pod *v1.Pod) error {
key, err := getPodKey(pod)
if err != nil {
return err
}
cache.mu.Lock()
defer cache.mu.Unlock()
currState, ok := cache.podStates[key]
switch {
case ok && cache.assumedPods[key]:
if currState.pod.Spec.NodeName != pod.Spec.NodeName {
// The pod was added to a different node than it was assumed to.
glog.Warningf("Pod %v was assumed to be on %v but got added to %v", key, pod.Spec.NodeName, currState.pod.Spec.NodeName)
// Clean this up.
cache.removePod(currState.pod)
cache.addPod(pod)
}
delete(cache.assumedPods, key)
cache.podStates[key].deadline = nil
cache.podStates[key].pod = pod
case !ok:
// Pod was expired. We should add it back.
cache.addPod(pod)
ps := &podState{
pod: pod,
}
cache.podStates[key] = ps
default:
return fmt.Errorf("pod %v was already in added state", key)
}
return nil
}
func (cache *schedulerCache) UpdatePod(oldPod, newPod *v1.Pod) error {
key, err := getPodKey(oldPod)
if err != nil {
return err
}
cache.mu.Lock()
defer cache.mu.Unlock()
currState, ok := cache.podStates[key]
switch {
// An assumed pod won't trigger Update/Remove events. It needs an Add event
// before an Update event, at which point its state changes from Assumed to Added.
case ok && !cache.assumedPods[key]:
if currState.pod.Spec.NodeName != newPod.Spec.NodeName {
glog.Errorf("Pod %v updated on a different node than previously added to.", key)
glog.Fatalf("Schedulercache is corrupted and can badly affect scheduling decisions")
}
if err := cache.updatePod(oldPod, newPod); err != nil {
return err
}
default:
return fmt.Errorf("pod %v is not added to scheduler cache, so cannot be updated", key)
}
return nil
}
func (cache *schedulerCache) RemovePod(pod *v1.Pod) error {
key, err := getPodKey(pod)
if err != nil {
return err
}
cache.mu.Lock()
defer cache.mu.Unlock()
currState, ok := cache.podStates[key]
switch {
// An assumed pod won't trigger Delete/Remove events. It needs an Add event
// before a Remove event, at which point its state changes from Assumed to Added.
case ok && !cache.assumedPods[key]:
if currState.pod.Spec.NodeName != pod.Spec.NodeName {
glog.Errorf("Pod %v was assumed to be on %v but got added to %v", key, pod.Spec.NodeName, currState.pod.Spec.NodeName)
glog.Fatalf("Schedulercache is corrupted and can badly affect scheduling decisions")
}
err := cache.removePod(currState.pod)
if err != nil {
return err
}
delete(cache.podStates, key)
default:
return fmt.Errorf("pod %v is not found in scheduler cache, so cannot be removed from it", key)
}
return nil
}
func (cache *schedulerCache) IsAssumedPod(pod *v1.Pod) (bool, error) {
key, err := getPodKey(pod)
if err != nil {
return false, err
}
cache.mu.Lock()
defer cache.mu.Unlock()
b, found := cache.assumedPods[key]
if !found {
return false, nil
}
return b, nil
}
func (cache *schedulerCache) GetPod(pod *v1.Pod) (*v1.Pod, error) {
key, err := getPodKey(pod)
if err != nil {
return nil, err
}
cache.mu.Lock()
defer cache.mu.Unlock()
podState, ok := cache.podStates[key]
if !ok {
return nil, fmt.Errorf("pod %v does not exist in scheduler cache", key)
}
return podState.pod, nil
}
func (cache *schedulerCache) AddNode(node *v1.Node) error {
cache.mu.Lock()
defer cache.mu.Unlock()
n, ok := cache.nodes[node.Name]
if !ok {
n = NewNodeInfo()
cache.nodes[node.Name] = n
}
return n.SetNode(node)
}
func (cache *schedulerCache) UpdateNode(oldNode, newNode *v1.Node) error {
cache.mu.Lock()
defer cache.mu.Unlock()
n, ok := cache.nodes[newNode.Name]
if !ok {
n = NewNodeInfo()
cache.nodes[newNode.Name] = n
}
return n.SetNode(newNode)
}
func (cache *schedulerCache) RemoveNode(node *v1.Node) error {
cache.mu.Lock()
defer cache.mu.Unlock()
n := cache.nodes[node.Name]
if err := n.RemoveNode(node); err != nil {
return err
}
// We remove NodeInfo for this node only if there aren't any pods on this node.
// We can't do it unconditionally, because notifications about pods are delivered
// in a different watch, and thus can potentially be observed later, even though
// they happened before node removal.
if len(n.pods) == 0 && n.node == nil {
delete(cache.nodes, node.Name)
}
return nil
}
func (cache *schedulerCache) AddPDB(pdb *policy.PodDisruptionBudget) error {
cache.mu.Lock()
defer cache.mu.Unlock()
// Unconditionally update cache.
cache.pdbs[string(pdb.UID)] = pdb
return nil
}
func (cache *schedulerCache) UpdatePDB(oldPDB, newPDB *policy.PodDisruptionBudget) error {
return cache.AddPDB(newPDB)
}
func (cache *schedulerCache) RemovePDB(pdb *policy.PodDisruptionBudget) error {
cache.mu.Lock()
defer cache.mu.Unlock()
delete(cache.pdbs, string(pdb.UID))
return nil
}
func (cache *schedulerCache) ListPDBs(selector labels.Selector) ([]*policy.PodDisruptionBudget, error) {
cache.mu.Lock()
defer cache.mu.Unlock()
var pdbs []*policy.PodDisruptionBudget
for _, pdb := range cache.pdbs {
if selector.Matches(labels.Set(pdb.Labels)) {
pdbs = append(pdbs, pdb)
}
}
return pdbs, nil
}
func (cache *schedulerCache) IsUpToDate(n *NodeInfo) bool {
cache.mu.Lock()
defer cache.mu.Unlock()
node, ok := cache.nodes[n.Node().Name]
return ok && n.generation == node.generation
}
func (cache *schedulerCache) run() {
go wait.Until(cache.cleanupExpiredAssumedPods, cache.period, cache.stop)
}
func (cache *schedulerCache) cleanupExpiredAssumedPods() {
cache.cleanupAssumedPods(time.Now())
}
// cleanupAssumedPods exists to make tests deterministic by taking the current time as an argument.
func (cache *schedulerCache) cleanupAssumedPods(now time.Time) {
cache.mu.Lock()
defer cache.mu.Unlock()
// The size of assumedPods should be small
for key := range cache.assumedPods {
ps, ok := cache.podStates[key]
if !ok {
panic("Key found in assumed set but not in podStates. Potentially a logical error.")
}
if !ps.bindingFinished {
glog.V(3).Infof("Couldn't expire cache for pod %v/%v. Binding is still in progress.",
ps.pod.Namespace, ps.pod.Name)
continue
}
if now.After(*ps.deadline) {
glog.Warningf("Pod %s/%s expired", ps.pod.Namespace, ps.pod.Name)
if err := cache.expirePod(key, ps); err != nil {
glog.Errorf("ExpirePod failed for %s: %v", key, err)
}
}
}
}
func (cache *schedulerCache) expirePod(key string, ps *podState) error {
if err := cache.removePod(ps.pod); err != nil {
return err
}
delete(cache.assumedPods, key)
delete(cache.podStates, key)
return nil
}
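
The assume/bind/confirm flow implemented above is easiest to see end to end. A minimal usage sketch, assuming the vendored import path k8s.io/kubernetes/pkg/scheduler/cache and a hypothetical pod fixture (the real callers are the scheduler's scheduling and binding loops):

package main

import (
    "time"

    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"

    schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
)

func main() {
    stop := make(chan struct{})
    defer close(stop)

    // Assumed pods expire 30s after binding finishes unless confirmed by an Add event.
    cache := schedulercache.New(30*time.Second, stop)

    // Hypothetical pod fixture; a non-empty UID is required for the pod key.
    pod := &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{Name: "p", Namespace: "ns", UID: types.UID("uid-1")},
        Spec:       v1.PodSpec{NodeName: "node-1"},
    }

    // Optimistically assume the pod onto node-1 before binding completes.
    if err := cache.AssumePod(pod); err != nil {
        panic(err)
    }
    // Signal that the bind call returned; the assumed entry may now expire after ttl.
    if err := cache.FinishBinding(pod); err != nil {
        panic(err)
    }
    // The informer's Add event confirms the pod (Assumed -> Added).
    if err := cache.AddPod(pod); err != nil {
        panic(err)
    }
}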

File diff suppressed because it is too large

View File

@@ -0,0 +1,135 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"k8s.io/api/core/v1"
)
// DefaultBindAllHostIP defines the default IP address used to bind on all host interfaces.
const DefaultBindAllHostIP = "0.0.0.0"
// ProtocolPort represents a protocol port pair, e.g. tcp:80.
type ProtocolPort struct {
Protocol string
Port int32
}
// NewProtocolPort creates a ProtocolPort instance.
func NewProtocolPort(protocol string, port int32) *ProtocolPort {
pp := &ProtocolPort{
Protocol: protocol,
Port: port,
}
if len(pp.Protocol) == 0 {
pp.Protocol = string(v1.ProtocolTCP)
}
return pp
}
// HostPortInfo stores a mapping from IP to a set of ProtocolPorts
type HostPortInfo map[string]map[ProtocolPort]struct{}
// Add adds (ip, protocol, port) to HostPortInfo
func (h HostPortInfo) Add(ip, protocol string, port int32) {
if port <= 0 {
return
}
h.sanitize(&ip, &protocol)
pp := NewProtocolPort(protocol, port)
if _, ok := h[ip]; !ok {
h[ip] = map[ProtocolPort]struct{}{
*pp: {},
}
return
}
h[ip][*pp] = struct{}{}
}
// Remove removes (ip, protocol, port) from HostPortInfo
func (h HostPortInfo) Remove(ip, protocol string, port int32) {
if port <= 0 {
return
}
h.sanitize(&ip, &protocol)
pp := NewProtocolPort(protocol, port)
if m, ok := h[ip]; ok {
delete(m, *pp)
if len(h[ip]) == 0 {
delete(h, ip)
}
}
}
// Len returns the total number of (ip, protocol, port) tuples in HostPortInfo
func (h HostPortInfo) Len() int {
length := 0
for _, m := range h {
length += len(m)
}
return length
}
// CheckConflict checks if the input (ip, protocol, port) conflicts with the existing
// ones in HostPortInfo.
func (h HostPortInfo) CheckConflict(ip, protocol string, port int32) bool {
if port <= 0 {
return false
}
h.sanitize(&ip, &protocol)
pp := NewProtocolPort(protocol, port)
// If ip is 0.0.0.0, check the (protocol, port) pair against every registered IP
if ip == DefaultBindAllHostIP {
for _, m := range h {
if _, ok := m[*pp]; ok {
return true
}
}
return false
}
// If ip isn't 0.0.0.0, check only this IP's and 0.0.0.0's (protocol, port) pairs
for _, key := range []string{DefaultBindAllHostIP, ip} {
if m, ok := h[key]; ok {
if _, ok2 := m[*pp]; ok2 {
return true
}
}
}
return false
}
// sanitize the parameters
func (h HostPortInfo) sanitize(ip, protocol *string) {
if len(*ip) == 0 {
*ip = DefaultBindAllHostIP
}
if len(*protocol) == 0 {
*protocol = string(v1.ProtocolTCP)
}
}
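
The wildcard semantics of CheckConflict deserve a concrete illustration. A test-style sketch in the same package, with hypothetical values that mirror the table tests in the next file:

package cache

import "testing"

// TestHostPortWildcardSketch is a hypothetical illustration: a wildcard
// (0.0.0.0) bind conflicts with any IP already holding the same
// (protocol, port) pair, while a different concrete IP or protocol does not.
func TestHostPortWildcardSketch(t *testing.T) {
    hp := make(HostPortInfo)
    hp.Add("127.0.0.1", "TCP", 80)

    if !hp.CheckConflict("0.0.0.0", "TCP", 80) {
        t.Fatal("expected wildcard tcp:80 to conflict with 127.0.0.1 tcp:80")
    }
    if hp.CheckConflict("127.0.0.2", "TCP", 80) {
        t.Fatal("expected tcp:80 on a different concrete IP not to conflict")
    }
    if hp.CheckConflict("0.0.0.0", "UDP", 80) {
        t.Fatal("expected udp:80 not to conflict with tcp:80")
    }
}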

View File

@@ -0,0 +1,231 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"testing"
)
type hostPortInfoParam struct {
protocol, ip string
port int32
}
func TestHostPortInfo_AddRemove(t *testing.T) {
tests := []struct {
desc string
added []hostPortInfoParam
removed []hostPortInfoParam
length int
}{
{
desc: "normal add case",
added: []hostPortInfoParam{
{"TCP", "127.0.0.1", 79},
{"UDP", "127.0.0.1", 80},
{"TCP", "127.0.0.1", 81},
{"TCP", "127.0.0.1", 82},
// this might not make sense in a real case, but the struct doesn't forbid it.
{"TCP", "0.0.0.0", 79},
{"UDP", "0.0.0.0", 80},
{"TCP", "0.0.0.0", 81},
{"TCP", "0.0.0.0", 82},
{"TCP", "0.0.0.0", 0},
{"TCP", "0.0.0.0", -1},
},
length: 8,
},
{
desc: "empty ip and protocol add should work",
added: []hostPortInfoParam{
{"", "127.0.0.1", 79},
{"UDP", "127.0.0.1", 80},
{"", "127.0.0.1", 81},
{"", "127.0.0.1", 82},
{"", "", 79},
{"UDP", "", 80},
{"", "", 81},
{"", "", 82},
{"", "", 0},
{"", "", -1},
},
length: 8,
},
{
desc: "normal remove case",
added: []hostPortInfoParam{
{"TCP", "127.0.0.1", 79},
{"UDP", "127.0.0.1", 80},
{"TCP", "127.0.0.1", 81},
{"TCP", "127.0.0.1", 82},
{"TCP", "0.0.0.0", 79},
{"UDP", "0.0.0.0", 80},
{"TCP", "0.0.0.0", 81},
{"TCP", "0.0.0.0", 82},
},
removed: []hostPortInfoParam{
{"TCP", "127.0.0.1", 79},
{"UDP", "127.0.0.1", 80},
{"TCP", "127.0.0.1", 81},
{"TCP", "127.0.0.1", 82},
{"TCP", "0.0.0.0", 79},
{"UDP", "0.0.0.0", 80},
{"TCP", "0.0.0.0", 81},
{"TCP", "0.0.0.0", 82},
},
length: 0,
},
{
desc: "empty ip and protocol remove should work",
added: []hostPortInfoParam{
{"TCP", "127.0.0.1", 79},
{"UDP", "127.0.0.1", 80},
{"TCP", "127.0.0.1", 81},
{"TCP", "127.0.0.1", 82},
{"TCP", "0.0.0.0", 79},
{"UDP", "0.0.0.0", 80},
{"TCP", "0.0.0.0", 81},
{"TCP", "0.0.0.0", 82},
},
removed: []hostPortInfoParam{
{"", "127.0.0.1", 79},
{"", "127.0.0.1", 81},
{"", "127.0.0.1", 82},
{"UDP", "127.0.0.1", 80},
{"", "", 79},
{"", "", 81},
{"", "", 82},
{"UDP", "", 80},
},
length: 0,
},
}
for _, test := range tests {
hp := make(HostPortInfo)
for _, param := range test.added {
hp.Add(param.ip, param.protocol, param.port)
}
for _, param := range test.removed {
hp.Remove(param.ip, param.protocol, param.port)
}
if hp.Len() != test.length {
t.Errorf("%v failed: expect length %d; got %d", test.desc, test.length, hp.Len())
t.Error(hp)
}
}
}
func TestHostPortInfo_Check(t *testing.T) {
tests := []struct {
desc string
added []hostPortInfoParam
check hostPortInfoParam
expect bool
}{
{
desc: "empty check should check 0.0.0.0 and TCP",
added: []hostPortInfoParam{
{"TCP", "127.0.0.1", 80},
},
check: hostPortInfoParam{"", "", 81},
expect: false,
},
{
desc: "empty check should check 0.0.0.0 and TCP (conflicted)",
added: []hostPortInfoParam{
{"TCP", "127.0.0.1", 80},
},
check: hostPortInfoParam{"", "", 80},
expect: true,
},
{
desc: "empty port check should pass",
added: []hostPortInfoParam{
{"TCP", "127.0.0.1", 80},
},
check: hostPortInfoParam{"", "", 0},
expect: false,
},
{
desc: "0.0.0.0 should check all registered IPs",
added: []hostPortInfoParam{
{"TCP", "127.0.0.1", 80},
},
check: hostPortInfoParam{"TCP", "0.0.0.0", 80},
expect: true,
},
{
desc: "0.0.0.0 with different protocol should be allowed",
added: []hostPortInfoParam{
{"UDP", "127.0.0.1", 80},
},
check: hostPortInfoParam{"TCP", "0.0.0.0", 80},
expect: false,
},
{
desc: "0.0.0.0 with different port should be allowed",
added: []hostPortInfoParam{
{"TCP", "127.0.0.1", 79},
{"TCP", "127.0.0.1", 81},
{"TCP", "127.0.0.1", 82},
},
check: hostPortInfoParam{"TCP", "0.0.0.0", 80},
expect: false,
},
{
desc: "normal ip should check all registered 0.0.0.0",
added: []hostPortInfoParam{
{"TCP", "0.0.0.0", 80},
},
check: hostPortInfoParam{"TCP", "127.0.0.1", 80},
expect: true,
},
{
desc: "normal ip with different port/protocol should be allowed (0.0.0.0)",
added: []hostPortInfoParam{
{"TCP", "0.0.0.0", 79},
{"UDP", "0.0.0.0", 80},
{"TCP", "0.0.0.0", 81},
{"TCP", "0.0.0.0", 82},
},
check: hostPortInfoParam{"TCP", "127.0.0.1", 80},
expect: false,
},
{
desc: "normal ip with different port/protocol should be allowed",
added: []hostPortInfoParam{
{"TCP", "127.0.0.1", 79},
{"UDP", "127.0.0.1", 80},
{"TCP", "127.0.0.1", 81},
{"TCP", "127.0.0.1", 82},
},
check: hostPortInfoParam{"TCP", "127.0.0.1", 80},
expect: false,
},
}
for _, test := range tests {
hp := make(HostPortInfo)
for _, param := range test.added {
hp.Add(param.ip, param.protocol, param.port)
}
if hp.CheckConflict(test.check.ip, test.check.protocol, test.check.port) != test.expect {
t.Errorf("%v failed, expected %t; got %t", test.desc, test.expect, !test.expect)
}
}
}

View File

@@ -1,135 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1beta1"
"k8s.io/apimachinery/pkg/labels"
)
// PodFilter is a function to filter a pod; it returns true if the pod passes the filter and false otherwise.
type PodFilter func(*v1.Pod) bool
// Cache collects pods' information and provides node-level aggregated information.
// It's intended for the generic scheduler to do efficient lookup.
// Cache's operations are pod-centric. It does incremental updates based on pod events.
// Pod events are sent over the network, and we don't have guaranteed delivery of all events:
// we use a Reflector to list and watch from remote.
// The Reflector might be slow and may have to do a relist, which can lead to missed events.
//
// State Machine of a pod's events in scheduler's cache:
//
//
//   +-------------------------------------------+  +----+
//   |                            Add            |  |    |
//   |                                           |  |    | Update
//   +      Assume                Add            v  v    |
// Initial +--------> Assumed +------------+---> Added <--+
//   ^                +   +               |       +
//   |                |   |               |       |
//   |                |   |           Add |       | Remove
//   |                |   |               |       |
//   |                |   |               +       |
//   +----------------+   +-----------> Expired  +----> Deleted
//          Forget             Expire
//
//
// Note that an assumed pod can expire: if we haven't received an Add event for a while,
// something has likely gone wrong and we shouldn't keep the pod in the cache any longer.
//
// Note that "Initial", "Expired", and "Deleted" pods do not actually exist in cache.
// Based on existing use cases, we are making the following assumptions:
// - No pod would be assumed twice
// - A pod could be added without going through scheduler. In this case, we will see Add but not Assume event.
// - If a pod wasn't added, it wouldn't be removed or updated.
// - Both "Expired" and "Deleted" are valid end states. In case of some problems, e.g. network issue,
// a pod might have changed its state (e.g. added and deleted) without delivering notification to the cache.
type Cache interface {
// AssumePod assumes a pod scheduled and aggregates the pod's information into its node.
// The implementation also decides how an assumed pod expires before it is confirmed (i.e., before the Add event is received).
// After expiration, the pod's information is subtracted.
AssumePod(pod *v1.Pod) error
// FinishBinding signals that cache for assumed pod can be expired
FinishBinding(pod *v1.Pod) error
// ForgetPod removes an assumed pod from cache.
ForgetPod(pod *v1.Pod) error
// AddPod either confirms a pod if it's assumed, or adds it back if it's expired.
// If added back, the pod's information would be added again.
AddPod(pod *v1.Pod) error
// UpdatePod removes oldPod's information and adds newPod's information.
UpdatePod(oldPod, newPod *v1.Pod) error
// RemovePod removes a pod. The pod's information would be subtracted from assigned node.
RemovePod(pod *v1.Pod) error
// GetPod returns the pod from the cache with the same namespace and
// name as the specified pod.
GetPod(pod *v1.Pod) (*v1.Pod, error)
// IsAssumedPod returns true if the pod is assumed and not expired.
IsAssumedPod(pod *v1.Pod) (bool, error)
// AddNode adds overall information about node.
AddNode(node *v1.Node) error
// UpdateNode updates overall information about node.
UpdateNode(oldNode, newNode *v1.Node) error
// RemoveNode removes overall information about node.
RemoveNode(node *v1.Node) error
// AddPDB adds a PodDisruptionBudget object to the cache.
AddPDB(pdb *policy.PodDisruptionBudget) error
// UpdatePDB updates a PodDisruptionBudget object in the cache.
UpdatePDB(oldPDB, newPDB *policy.PodDisruptionBudget) error
// RemovePDB removes a PodDisruptionBudget object from the cache.
RemovePDB(pdb *policy.PodDisruptionBudget) error
// List lists all cached PDBs matching the selector.
ListPDBs(selector labels.Selector) ([]*policy.PodDisruptionBudget, error)
// UpdateNodeNameToInfoMap updates the passed infoMap to the current contents of Cache.
// The node info contains aggregated information of pods scheduled (including assumed to be)
// on this node.
UpdateNodeNameToInfoMap(infoMap map[string]*NodeInfo) error
// List lists all cached pods (including assumed ones).
List(labels.Selector) ([]*v1.Pod, error)
// FilteredList returns all cached pods that pass the filter.
FilteredList(filter PodFilter, selector labels.Selector) ([]*v1.Pod, error)
// Snapshot takes a snapshot of the current cache
Snapshot() *Snapshot
// IsUpToDate returns true if the given NodeInfo matches the current data in the cache.
IsUpToDate(n *NodeInfo) bool
}
// Snapshot is a snapshot of cache state
type Snapshot struct {
AssumedPods map[string]bool
Nodes map[string]*NodeInfo
Pdbs map[string]*policy.PodDisruptionBudget
}
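
The Forget edge of the state machine has no sample caller in this file. A hedged sketch, in the same package as the interface above, of how a binding routine might roll back a failed bind (the helper name and the bind callback are hypothetical):

// bindWithRollback is a hypothetical caller-side helper: assume the pod,
// attempt the bind, and forget the assumption if binding fails so the
// node's resources are released immediately.
func bindWithRollback(c Cache, pod *v1.Pod, bind func(*v1.Pod) error) error {
    if err := c.AssumePod(pod); err != nil {
        return err
    }
    if err := bind(pod); err != nil {
        // Assumed -> Initial via Forget.
        if ferr := c.ForgetPod(pod); ferr != nil {
            return ferr
        }
        return err
    }
    // Let the cache start the expiration clock for the assumed pod.
    return c.FinishBinding(pod)
}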

View File

@@ -22,13 +22,12 @@ import (
"sync"
"sync/atomic"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
"k8s.io/kubernetes/pkg/scheduler/util"
)
var (
@@ -36,6 +35,14 @@ var (
generation int64
)
// ImageStateSummary provides summarized information about the state of an image.
type ImageStateSummary struct {
// Size of the image
Size int64
// Used to track how many nodes have this image
NumNodes int
}
// NodeInfo is node level aggregated information.
type NodeInfo struct {
// Overall node information.
@@ -43,7 +50,7 @@ type NodeInfo struct {
pods []*v1.Pod
podsWithAffinity []*v1.Pod
usedPorts util.HostPortInfo
usedPorts HostPortInfo
// Total requested resource of all pods on this node.
// It includes assumed pods which scheduler sends binding to apiserver but
@@ -58,14 +65,15 @@ type NodeInfo struct {
taints []v1.Taint
taintsErr error
// This is a map from image name to image size, also for checking image existence on the node
// Cache it here to avoid rebuilding the map during scheduling, e.g., in image_locality.go
imageSizes map[string]int64
// imageStates holds the entry of an image if and only if this image is on the node. The entry can be used for
// checking an image's existence and advanced usage (e.g., image locality scheduling policy) based on the image
// state information.
imageStates map[string]*ImageStateSummary
// TransientInfo holds the information pertaining to a scheduling cycle. This will be destructed at the end of
// scheduling cycle.
// TODO: @ravig. Remove this once we have a clear approach for message passing across predicates and priorities.
TransientInfo *transientSchedulerInfo
TransientInfo *TransientSchedulerInfo
// Cached conditions of node for faster lookup.
memoryPressureCondition v1.ConditionStatus
@@ -98,28 +106,28 @@ type nodeTransientInfo struct {
RequestedVolumes int
}
// transientSchedulerInfo is a transient structure which is destructed at the end of each scheduling cycle.
// TransientSchedulerInfo is a transient structure which is destructed at the end of each scheduling cycle.
// It consists of items that are valid for a scheduling cycle and is used for message passing across predicates and
// priorities. Some examples which could be used as fields are number of volumes being used on node, current utilization
// on node etc.
// IMPORTANT NOTE: Make sure that each field in this structure is documented along with usage. Expand this structure
// only when absolutely needed as this data structure will be created and destroyed during every scheduling cycle.
type transientSchedulerInfo struct {
type TransientSchedulerInfo struct {
TransientLock sync.Mutex
// NodeTransInfo holds the information related to nodeTransientInformation. NodeName is the key here.
TransNodeInfo nodeTransientInfo
}
// newTransientSchedulerInfo returns a new scheduler transient structure with initialized values.
func newTransientSchedulerInfo() *transientSchedulerInfo {
tsi := &transientSchedulerInfo{
// NewTransientSchedulerInfo returns a new scheduler transient structure with initialized values.
func NewTransientSchedulerInfo() *TransientSchedulerInfo {
tsi := &TransientSchedulerInfo{
TransNodeInfo: initializeNodeTransientInfo(),
}
return tsi
}
// resetTransientSchedulerInfo resets the transientSchedulerInfo.
func (transientSchedInfo *transientSchedulerInfo) resetTransientSchedulerInfo() {
// ResetTransientSchedulerInfo resets the TransientSchedulerInfo.
func (transientSchedInfo *TransientSchedulerInfo) ResetTransientSchedulerInfo() {
transientSchedInfo.TransientLock.Lock()
defer transientSchedInfo.TransientLock.Unlock()
// Reset TransientNodeInfo.
@@ -258,10 +266,10 @@ func NewNodeInfo(pods ...*v1.Pod) *NodeInfo {
requestedResource: &Resource{},
nonzeroRequest: &Resource{},
allocatableResource: &Resource{},
TransientInfo: newTransientSchedulerInfo(),
TransientInfo: NewTransientSchedulerInfo(),
generation: nextGeneration(),
usedPorts: make(util.HostPortInfo),
imageSizes: make(map[string]int64),
usedPorts: make(HostPortInfo),
imageStates: make(map[string]*ImageStateSummary),
}
for _, pod := range pods {
ni.AddPod(pod)
@@ -285,20 +293,35 @@ func (n *NodeInfo) Pods() []*v1.Pod {
return n.pods
}
// SetPods sets all pods scheduled (including assumed to be) on this node.
func (n *NodeInfo) SetPods(pods []*v1.Pod) {
n.pods = pods
}
// UsedPorts returns used ports on this node.
func (n *NodeInfo) UsedPorts() util.HostPortInfo {
func (n *NodeInfo) UsedPorts() HostPortInfo {
if n == nil {
return nil
}
return n.usedPorts
}
// ImageSizes returns the image size information on this node.
func (n *NodeInfo) ImageSizes() map[string]int64 {
// SetUsedPorts sets the used ports on this node.
func (n *NodeInfo) SetUsedPorts(newUsedPorts HostPortInfo) {
n.usedPorts = newUsedPorts
}
// ImageStates returns the state information of all images.
func (n *NodeInfo) ImageStates() map[string]*ImageStateSummary {
if n == nil {
return nil
}
return n.imageSizes
return n.imageStates
}
// SetImageStates sets the state information of all images.
func (n *NodeInfo) SetImageStates(newImageStates map[string]*ImageStateSummary) {
n.imageStates = newImageStates
}
// PodsWithAffinity returns all pods with (anti)affinity constraints on this node.
@@ -325,6 +348,11 @@ func (n *NodeInfo) Taints() ([]v1.Taint, error) {
return n.taints, n.taintsErr
}
// SetTaints sets the taints list on this node.
func (n *NodeInfo) SetTaints(newTaints []v1.Taint) {
n.taints = newTaints
}
// MemoryPressureCondition returns the memory pressure condition status on this node.
func (n *NodeInfo) MemoryPressureCondition() v1.ConditionStatus {
if n == nil {
@@ -357,6 +385,11 @@ func (n *NodeInfo) RequestedResource() Resource {
return *n.requestedResource
}
// SetRequestedResource sets the aggregated resource request of pods on this node.
func (n *NodeInfo) SetRequestedResource(newResource *Resource) {
n.requestedResource = newResource
}
// NonZeroRequest returns aggregated nonzero resource request of pods on this node.
func (n *NodeInfo) NonZeroRequest() Resource {
if n == nil {
@@ -365,6 +398,11 @@ func (n *NodeInfo) NonZeroRequest() Resource {
return *n.nonzeroRequest
}
// SetNonZeroRequest sets the aggregated nonzero resource request of pods on this node.
func (n *NodeInfo) SetNonZeroRequest(newResource *Resource) {
n.nonzeroRequest = newResource
}
// AllocatableResource returns allocatable resources on a given node.
func (n *NodeInfo) AllocatableResource() Resource {
if n == nil {
@@ -379,6 +417,19 @@ func (n *NodeInfo) SetAllocatableResource(allocatableResource *Resource) {
n.generation = nextGeneration()
}
// GetGeneration returns the generation on this node.
func (n *NodeInfo) GetGeneration() int64 {
if n == nil {
return 0
}
return n.generation
}
// SetGeneration sets the generation on this node. This is for testing only.
func (n *NodeInfo) SetGeneration(newGeneration int64) {
n.generation = newGeneration
}
// Clone returns a copy of this node.
func (n *NodeInfo) Clone() *NodeInfo {
clone := &NodeInfo{
@@ -391,16 +442,21 @@ func (n *NodeInfo) Clone() *NodeInfo {
memoryPressureCondition: n.memoryPressureCondition,
diskPressureCondition: n.diskPressureCondition,
pidPressureCondition: n.pidPressureCondition,
usedPorts: make(util.HostPortInfo),
imageSizes: n.imageSizes,
usedPorts: make(HostPortInfo),
imageStates: n.imageStates,
generation: n.generation,
}
if len(n.pods) > 0 {
clone.pods = append([]*v1.Pod(nil), n.pods...)
}
if len(n.usedPorts) > 0 {
for k, v := range n.usedPorts {
clone.usedPorts[k] = v
// HostPortInfo is a map-in-map struct
// make sure it's deep copied
for ip, portMap := range n.usedPorts {
clone.usedPorts[ip] = make(map[ProtocolPort]struct{})
for protocolPort, v := range portMap {
clone.usedPorts[ip][protocolPort] = v
}
}
}
if len(n.podsWithAffinity) > 0 {
@@ -458,22 +514,22 @@ func (n *NodeInfo) AddPod(pod *v1.Pod) {
}
// Consume ports when pods added.
n.updateUsedPorts(pod, true)
n.UpdateUsedPorts(pod, true)
n.generation = nextGeneration()
}
// RemovePod subtracts pod information from this NodeInfo.
func (n *NodeInfo) RemovePod(pod *v1.Pod) error {
k1, err := getPodKey(pod)
k1, err := GetPodKey(pod)
if err != nil {
return err
}
for i := range n.podsWithAffinity {
k2, err := getPodKey(n.podsWithAffinity[i])
k2, err := GetPodKey(n.podsWithAffinity[i])
if err != nil {
glog.Errorf("Cannot get pod key, err: %v", err)
klog.Errorf("Cannot get pod key, err: %v", err)
continue
}
if k1 == k2 {
@@ -484,9 +540,9 @@ func (n *NodeInfo) RemovePod(pod *v1.Pod) error {
}
}
for i := range n.pods {
k2, err := getPodKey(n.pods[i])
k2, err := GetPodKey(n.pods[i])
if err != nil {
glog.Errorf("Cannot get pod key, err: %v", err)
klog.Errorf("Cannot get pod key, err: %v", err)
continue
}
if k1 == k2 {
@@ -509,7 +565,7 @@ func (n *NodeInfo) RemovePod(pod *v1.Pod) error {
n.nonzeroRequest.Memory -= non0Mem
// Release ports when remove Pods.
n.updateUsedPorts(pod, false)
n.UpdateUsedPorts(pod, false)
n.generation = nextGeneration()
@@ -533,7 +589,8 @@ func calculateResource(pod *v1.Pod) (res Resource, non0CPU int64, non0Mem int64)
return
}
func (n *NodeInfo) updateUsedPorts(pod *v1.Pod, add bool) {
// UpdateUsedPorts updates the UsedPorts of NodeInfo.
func (n *NodeInfo) UpdateUsedPorts(pod *v1.Pod, add bool) {
for j := range pod.Spec.Containers {
container := &pod.Spec.Containers[j]
for k := range container.Ports {
@@ -547,17 +604,6 @@ func (n *NodeInfo) updateUsedPorts(pod *v1.Pod, add bool) {
}
}
func (n *NodeInfo) updateImageSizes() {
node := n.Node()
imageSizes := make(map[string]int64)
for _, image := range node.Status.Images {
for _, name := range image.Names {
imageSizes[name] = image.SizeBytes
}
}
n.imageSizes = imageSizes
}
// SetNode sets the overall node information.
func (n *NodeInfo) SetNode(node *v1.Node) error {
n.node = node
@@ -578,8 +624,7 @@ func (n *NodeInfo) SetNode(node *v1.Node) error {
// We ignore other conditions.
}
}
n.TransientInfo = newTransientSchedulerInfo()
n.updateImageSizes()
n.TransientInfo = NewTransientSchedulerInfo()
n.generation = nextGeneration()
return nil
}
@@ -596,6 +641,7 @@ func (n *NodeInfo) RemoveNode(node *v1.Node) error {
n.memoryPressureCondition = v1.ConditionUnknown
n.diskPressureCondition = v1.ConditionUnknown
n.pidPressureCondition = v1.ConditionUnknown
n.imageStates = make(map[string]*ImageStateSummary)
n.generation = nextGeneration()
return nil
}
@@ -619,9 +665,9 @@ func (n *NodeInfo) FilterOutPods(pods []*v1.Pod) []*v1.Pod {
continue
}
// If pod is on the given node, add it to 'filtered' only if it is present in nodeInfo.
podKey, _ := getPodKey(p)
podKey, _ := GetPodKey(p)
for _, np := range n.Pods() {
npodkey, _ := getPodKey(np)
npodkey, _ := GetPodKey(np)
if npodkey == podKey {
filtered = append(filtered, p)
break
@@ -631,8 +677,8 @@ func (n *NodeInfo) FilterOutPods(pods []*v1.Pod) []*v1.Pod {
return filtered
}
// getPodKey returns the string key of a pod.
func getPodKey(pod *v1.Pod) (string, error) {
// GetPodKey returns the string key of a pod.
func GetPodKey(pod *v1.Pod) (string, error) {
uid := string(pod.UID)
if len(uid) == 0 {
return "", errors.New("Cannot get cache key for pod with empty UID")

View File

@@ -19,14 +19,13 @@ package cache
import (
"fmt"
"reflect"
"strings"
"testing"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/scheduler/util"
"k8s.io/kubernetes/pkg/util/parsers"
)
func TestNewResource(t *testing.T) {
@@ -241,44 +240,41 @@ func TestSetMaxResource(t *testing.T) {
}
}
func TestImageSizes(t *testing.T) {
ni := fakeNodeInfo()
ni.node = &v1.Node{
type testingMode interface {
Fatalf(format string, args ...interface{})
}
func makeBasePod(t testingMode, nodeName, objName, cpu, mem, extended string, ports []v1.ContainerPort) *v1.Pod {
req := v1.ResourceList{}
if cpu != "" {
req = v1.ResourceList{
v1.ResourceCPU: resource.MustParse(cpu),
v1.ResourceMemory: resource.MustParse(mem),
}
if extended != "" {
parts := strings.Split(extended, ":")
if len(parts) != 2 {
t.Fatalf("Invalid extended resource string: \"%s\"", extended)
}
req[v1.ResourceName(parts[0])] = resource.MustParse(parts[1])
}
}
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "test-node",
UID: types.UID(objName),
Namespace: "node_info_cache_test",
Name: objName,
},
Status: v1.NodeStatus{
Images: []v1.ContainerImage{
{
Names: []string{
"gcr.io/10:" + parsers.DefaultImageTag,
"gcr.io/10:v1",
},
SizeBytes: int64(10 * 1024 * 1024),
Spec: v1.PodSpec{
Containers: []v1.Container{{
Resources: v1.ResourceRequirements{
Requests: req,
},
{
Names: []string{
"gcr.io/50:" + parsers.DefaultImageTag,
"gcr.io/50:v1",
},
SizeBytes: int64(50 * 1024 * 1024),
},
},
Ports: ports,
}},
NodeName: nodeName,
},
}
ni.updateImageSizes()
expected := map[string]int64{
"gcr.io/10:" + parsers.DefaultImageTag: 10 * 1024 * 1024,
"gcr.io/10:v1": 10 * 1024 * 1024,
"gcr.io/50:" + parsers.DefaultImageTag: 50 * 1024 * 1024,
"gcr.io/50:v1": 50 * 1024 * 1024,
}
imageSizes := ni.ImageSizes()
if !reflect.DeepEqual(expected, imageSizes) {
t.Errorf("expected: %#v, got: %#v", expected, imageSizes)
}
}
func TestNewNodeInfo(t *testing.T) {
@@ -303,16 +299,16 @@ func TestNewNodeInfo(t *testing.T) {
AllowedPodNumber: 0,
ScalarResources: map[v1.ResourceName]int64(nil),
},
TransientInfo: newTransientSchedulerInfo(),
TransientInfo: NewTransientSchedulerInfo(),
allocatableResource: &Resource{},
generation: 2,
usedPorts: util.HostPortInfo{
"127.0.0.1": map[util.ProtocolPort]struct{}{
usedPorts: HostPortInfo{
"127.0.0.1": map[ProtocolPort]struct{}{
{Protocol: "TCP", Port: 80}: {},
{Protocol: "TCP", Port: 8080}: {},
},
},
imageSizes: map[string]int64{},
imageStates: map[string]*ImageStateSummary{},
pods: []*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
@@ -392,18 +388,16 @@ func TestNodeInfoClone(t *testing.T) {
nodeInfo: &NodeInfo{
requestedResource: &Resource{},
nonzeroRequest: &Resource{},
TransientInfo: newTransientSchedulerInfo(),
TransientInfo: NewTransientSchedulerInfo(),
allocatableResource: &Resource{},
generation: 2,
usedPorts: util.HostPortInfo{
"127.0.0.1": map[util.ProtocolPort]struct{}{
usedPorts: HostPortInfo{
"127.0.0.1": map[ProtocolPort]struct{}{
{Protocol: "TCP", Port: 80}: {},
{Protocol: "TCP", Port: 8080}: {},
},
},
imageSizes: map[string]int64{
"gcr.io/10": 10 * 1024 * 1024,
},
imageStates: map[string]*ImageStateSummary{},
pods: []*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
@@ -464,18 +458,16 @@ func TestNodeInfoClone(t *testing.T) {
expected: &NodeInfo{
requestedResource: &Resource{},
nonzeroRequest: &Resource{},
TransientInfo: newTransientSchedulerInfo(),
TransientInfo: NewTransientSchedulerInfo(),
allocatableResource: &Resource{},
generation: 2,
usedPorts: util.HostPortInfo{
"127.0.0.1": map[util.ProtocolPort]struct{}{
usedPorts: HostPortInfo{
"127.0.0.1": map[ProtocolPort]struct{}{
{Protocol: "TCP", Port: 80}: {},
{Protocol: "TCP", Port: 8080}: {},
},
},
imageSizes: map[string]int64{
"gcr.io/10": 10 * 1024 * 1024,
},
imageStates: map[string]*ImageStateSummary{},
pods: []*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
@@ -540,6 +532,7 @@ func TestNodeInfoClone(t *testing.T) {
ni := test.nodeInfo.Clone()
// Modify the field to check if the result is a clone of the origin one.
test.nodeInfo.generation += 10
test.nodeInfo.usedPorts.Remove("127.0.0.1", "TCP", 80)
if !reflect.DeepEqual(test.expected, ni) {
t.Errorf("expected: %#v, got: %#v", test.expected, ni)
}
@@ -624,16 +617,16 @@ func TestNodeInfoAddPod(t *testing.T) {
AllowedPodNumber: 0,
ScalarResources: map[v1.ResourceName]int64(nil),
},
TransientInfo: newTransientSchedulerInfo(),
TransientInfo: NewTransientSchedulerInfo(),
allocatableResource: &Resource{},
generation: 2,
usedPorts: util.HostPortInfo{
"127.0.0.1": map[util.ProtocolPort]struct{}{
usedPorts: HostPortInfo{
"127.0.0.1": map[ProtocolPort]struct{}{
{Protocol: "TCP", Port: 80}: {},
{Protocol: "TCP", Port: 8080}: {},
},
},
imageSizes: map[string]int64{},
imageStates: map[string]*ImageStateSummary{},
pods: []*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
@@ -743,16 +736,16 @@ func TestNodeInfoRemovePod(t *testing.T) {
AllowedPodNumber: 0,
ScalarResources: map[v1.ResourceName]int64(nil),
},
TransientInfo: newTransientSchedulerInfo(),
TransientInfo: NewTransientSchedulerInfo(),
allocatableResource: &Resource{},
generation: 2,
usedPorts: util.HostPortInfo{
"127.0.0.1": map[util.ProtocolPort]struct{}{
usedPorts: HostPortInfo{
"127.0.0.1": map[ProtocolPort]struct{}{
{Protocol: "TCP", Port: 80}: {},
{Protocol: "TCP", Port: 8080}: {},
},
},
imageSizes: map[string]int64{},
imageStates: map[string]*ImageStateSummary{},
pods: []*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
@@ -860,15 +853,15 @@ func TestNodeInfoRemovePod(t *testing.T) {
AllowedPodNumber: 0,
ScalarResources: map[v1.ResourceName]int64(nil),
},
TransientInfo: newTransientSchedulerInfo(),
TransientInfo: NewTransientSchedulerInfo(),
allocatableResource: &Resource{},
generation: 3,
usedPorts: util.HostPortInfo{
"127.0.0.1": map[util.ProtocolPort]struct{}{
usedPorts: HostPortInfo{
"127.0.0.1": map[ProtocolPort]struct{}{
{Protocol: "TCP", Port: 8080}: {},
},
},
imageSizes: map[string]int64{},
imageStates: map[string]*ImageStateSummary{},
pods: []*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
@@ -909,7 +902,7 @@ func TestNodeInfoRemovePod(t *testing.T) {
err := ni.RemovePod(test.pod)
if err != nil {
if test.errExpected {
expectedErrorMsg := fmt.Errorf("no corresponding pod %s in pods of node %s", test.pod.Name, ni.node.Name)
expectedErrorMsg := fmt.Errorf("no corresponding pod %s in pods of node %s", test.pod.Name, ni.Node().Name)
if expectedErrorMsg == err {
t.Errorf("expected error: %v, got: %v", expectedErrorMsg, err)
}
@@ -931,10 +924,10 @@ func TestNodeInfoRemovePod(t *testing.T) {
func fakeNodeInfo(pods ...*v1.Pod) *NodeInfo {
ni := NewNodeInfo(pods...)
ni.node = &v1.Node{
ni.SetNode(&v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "test-node",
},
}
})
return ni
}

View File

@@ -16,7 +16,10 @@ limitations under the License.
package cache
import "k8s.io/api/core/v1"
import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
)
// CreateNodeNameToInfoMap obtains a list of pods and pivots that list into a map where the keys are node names
// and the values are the aggregated information for that node.
@@ -29,11 +32,47 @@ func CreateNodeNameToInfoMap(pods []*v1.Pod, nodes []*v1.Node) map[string]*NodeI
}
nodeNameToInfo[nodeName].AddPod(pod)
}
imageExistenceMap := createImageExistenceMap(nodes)
for _, node := range nodes {
if _, ok := nodeNameToInfo[node.Name]; !ok {
nodeNameToInfo[node.Name] = NewNodeInfo()
}
nodeNameToInfo[node.Name].SetNode(node)
nodeInfo := nodeNameToInfo[node.Name]
nodeInfo.SetNode(node)
nodeInfo.imageStates = getNodeImageStates(node, imageExistenceMap)
}
return nodeNameToInfo
}
// getNodeImageStates returns the given node's image states based on the given imageExistence map.
func getNodeImageStates(node *v1.Node, imageExistenceMap map[string]sets.String) map[string]*ImageStateSummary {
imageStates := make(map[string]*ImageStateSummary)
for _, image := range node.Status.Images {
for _, name := range image.Names {
imageStates[name] = &ImageStateSummary{
Size: image.SizeBytes,
NumNodes: len(imageExistenceMap[name]),
}
}
}
return imageStates
}
// createImageExistenceMap returns a map recording on which nodes the images exist, keyed by the images' names.
func createImageExistenceMap(nodes []*v1.Node) map[string]sets.String {
imageExistenceMap := make(map[string]sets.String)
for _, node := range nodes {
for _, image := range node.Status.Images {
for _, name := range image.Names {
if _, ok := imageExistenceMap[name]; !ok {
imageExistenceMap[name] = sets.NewString(node.Name)
} else {
imageExistenceMap[name].Insert(node.Name)
}
}
}
}
return imageExistenceMap
}
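
The two helpers compose: the image existence map is built once per cluster snapshot and then consulted per node. A small in-package sketch with a hypothetical function name:

// imageStatesForCluster is a hypothetical composition of the helpers
// above: build the image -> node-set existence map once, then derive
// each node's ImageStateSummary entries from it.
func imageStatesForCluster(nodes []*v1.Node) map[string]map[string]*ImageStateSummary {
    imageExistenceMap := createImageExistenceMap(nodes)
    out := make(map[string]map[string]*ImageStateSummary, len(nodes))
    for _, node := range nodes {
        out[node.Name] = getNodeImageStates(node, imageExistenceMap)
    }
    return out
}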

View File

@@ -0,0 +1,134 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"reflect"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
)
const mb int64 = 1024 * 1024
func TestGetNodeImageStates(t *testing.T) {
tests := []struct {
node *v1.Node
imageExistenceMap map[string]sets.String
expected map[string]*ImageStateSummary
}{
{
node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: "node-0"},
Status: v1.NodeStatus{
Images: []v1.ContainerImage{
{
Names: []string{
"gcr.io/10:v1",
},
SizeBytes: int64(10 * mb),
},
{
Names: []string{
"gcr.io/200:v1",
},
SizeBytes: int64(200 * mb),
},
},
},
},
imageExistenceMap: map[string]sets.String{
"gcr.io/10:v1": sets.NewString("node-0", "node-1"),
"gcr.io/200:v1": sets.NewString("node-0"),
},
expected: map[string]*ImageStateSummary{
"gcr.io/10:v1": {
Size: int64(10 * mb),
NumNodes: 2,
},
"gcr.io/200:v1": {
Size: int64(200 * mb),
NumNodes: 1,
},
},
},
}
for _, test := range tests {
imageStates := getNodeImageStates(test.node, test.imageExistenceMap)
if !reflect.DeepEqual(test.expected, imageStates) {
t.Errorf("expected: %#v, got: %#v", test.expected, imageStates)
}
}
}
func TestCreateImageExistenceMap(t *testing.T) {
tests := []struct {
nodes []*v1.Node
expected map[string]sets.String
}{
{
nodes: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{Name: "node-0"},
Status: v1.NodeStatus{
Images: []v1.ContainerImage{
{
Names: []string{
"gcr.io/10:v1",
},
SizeBytes: int64(10 * mb),
},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "node-1"},
Status: v1.NodeStatus{
Images: []v1.ContainerImage{
{
Names: []string{
"gcr.io/10:v1",
},
SizeBytes: int64(10 * mb),
},
{
Names: []string{
"gcr.io/200:v1",
},
SizeBytes: int64(200 * mb),
},
},
},
},
},
expected: map[string]sets.String{
"gcr.io/10:v1": sets.NewString("node-0", "node-1"),
"gcr.io/200:v1": sets.NewString("node-1"),
},
},
}
for _, test := range tests {
imageMap := createImageExistenceMap(test.nodes)
if !reflect.DeepEqual(test.expected, imageMap) {
t.Errorf("expected: %#v, got: %#v", test.expected, imageMap)
}
}
}