vendor files

This commit is contained in:
Serguei Bezverkhi
2018-01-09 13:57:14 -05:00
parent 558bc6c02a
commit 7b24313bd6
16547 changed files with 4527373 additions and 0 deletions

View File

@@ -0,0 +1,71 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"cpu_assignment.go",
"cpu_manager.go",
"fake_cpu_manager.go",
"policy.go",
"policy_none.go",
"policy_static.go",
],
importpath = "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager",
visibility = ["//visibility:public"],
deps = [
"//pkg/apis/core/v1/helper/qos:go_default_library",
"//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library",
"//pkg/kubelet/cm/cpumanager/state:go_default_library",
"//pkg/kubelet/cm/cpumanager/topology:go_default_library",
"//pkg/kubelet/cm/cpuset:go_default_library",
"//pkg/kubelet/container:go_default_library",
"//pkg/kubelet/status:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/google/cadvisor/info/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"cpu_assignment_test.go",
"cpu_manager_test.go",
"policy_none_test.go",
"policy_static_test.go",
"policy_test.go",
],
importpath = "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager",
library = ":go_default_library",
deps = [
"//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library",
"//pkg/kubelet/cm/cpumanager/state:go_default_library",
"//pkg/kubelet/cm/cpumanager/topology:go_default_library",
"//pkg/kubelet/cm/cpuset:go_default_library",
"//vendor/github.com/google/cadvisor/info/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/kubelet/cm/cpumanager/state:all-srcs",
"//pkg/kubelet/cm/cpumanager/topology:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,6 @@
approvers:
- derekwaynecarr
- vishh
- ConnorDoyle
- sjenning
- balajismaniam

View File

@@ -0,0 +1,197 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cpumanager
import (
"fmt"
"sort"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
)
type cpuAccumulator struct {
topo *topology.CPUTopology
details topology.CPUDetails
numCPUsNeeded int
result cpuset.CPUSet
}
func newCPUAccumulator(topo *topology.CPUTopology, availableCPUs cpuset.CPUSet, numCPUs int) *cpuAccumulator {
return &cpuAccumulator{
topo: topo,
details: topo.CPUDetails.KeepOnly(availableCPUs),
numCPUsNeeded: numCPUs,
result: cpuset.NewCPUSet(),
}
}
func (a *cpuAccumulator) take(cpus cpuset.CPUSet) {
a.result = a.result.Union(cpus)
a.details = a.details.KeepOnly(a.details.CPUs().Difference(a.result))
a.numCPUsNeeded -= cpus.Size()
}
// Returns true if the supplied socket is fully available in `a.details`.
func (a *cpuAccumulator) isSocketFree(socketID int) bool {
return a.details.CPUsInSocket(socketID).Size() == a.topo.CPUsPerSocket()
}
// Returns true if the supplied core is fully available in `a.details`.
func (a *cpuAccumulator) isCoreFree(coreID int) bool {
return a.details.CPUsInCore(coreID).Size() == a.topo.CPUsPerCore()
}
// Returns free socket IDs as a slice sorted by:
// - socket ID, ascending.
func (a *cpuAccumulator) freeSockets() []int {
return a.details.Sockets().Filter(a.isSocketFree).ToSlice()
}
// Returns free core IDs as a slice sorted by:
// - the number of whole available cores on the socket, ascending
// - socket ID, ascending
// - core ID, ascending
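//
// For example (illustrative, using the dual-socket hyper-threaded topology
// exercised by the tests in this package): if one whole core on socket 0 has
// already been consumed, socket 0 has fewer free cores remaining than
// socket 1, so socket 0's free cores are returned first, followed by
// socket 1's.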
func (a *cpuAccumulator) freeCores() []int {
socketIDs := a.details.Sockets().ToSlice()
sort.Slice(socketIDs,
func(i, j int) bool {
iCores := a.details.CoresInSocket(socketIDs[i]).Filter(a.isCoreFree)
jCores := a.details.CoresInSocket(socketIDs[j]).Filter(a.isCoreFree)
return iCores.Size() < jCores.Size() || socketIDs[i] < socketIDs[j]
})
coreIDs := []int{}
for _, s := range socketIDs {
coreIDs = append(coreIDs, a.details.CoresInSocket(s).Filter(a.isCoreFree).ToSlice()...)
}
return coreIDs
}
// Returns CPU IDs as a slice sorted by:
// - socket affinity with result
// - number of CPUs available on the same socket
// - number of CPUs available on the same core
// - socket ID.
// - core ID.
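//
// For example (illustrative, using the single-socket hyper-threaded test
// topology with four cores, where CPU i and CPU i+4 are siblings): with CPUs
// 3-7 free, the returned order is [4 5 6 3 7]: the lone free threads of the
// partially consumed cores come first, and the fully free core (3,7) last.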
func (a *cpuAccumulator) freeCPUs() []int {
result := []int{}
cores := a.details.Cores().ToSlice()
sort.Slice(
cores,
func(i, j int) bool {
iCore := cores[i]
jCore := cores[j]
iCPUs := a.topo.CPUDetails.CPUsInCore(iCore).ToSlice()
jCPUs := a.topo.CPUDetails.CPUsInCore(jCore).ToSlice()
iSocket := a.topo.CPUDetails[iCPUs[0]].SocketID
jSocket := a.topo.CPUDetails[jCPUs[0]].SocketID
// Compute the number of CPUs in the result that reside on the same socket
// as each core.
iSocketColoScore := a.topo.CPUDetails.CPUsInSocket(iSocket).Intersection(a.result).Size()
jSocketColoScore := a.topo.CPUDetails.CPUsInSocket(jSocket).Intersection(a.result).Size()
// Compute the number of CPUs available on the same socket
// as each core.
iSocketFreeScore := a.details.CPUsInSocket(iSocket).Size()
jSocketFreeScore := a.details.CPUsInSocket(jSocket).Size()
// Compute the number of available CPUs on each core.
iCoreFreeScore := a.details.CPUsInCore(iCore).Size()
jCoreFreeScore := a.details.CPUsInCore(jCore).Size()
return iSocketColoScore > jSocketColoScore ||
iSocketFreeScore < jSocketFreeScore ||
iCoreFreeScore < jCoreFreeScore ||
iSocket < jSocket ||
iCore < jCore
})
// For each core, append sorted CPU IDs to result.
for _, core := range cores {
result = append(result, a.details.CPUsInCore(core).ToSlice()...)
}
return result
}
func (a *cpuAccumulator) needs(n int) bool {
return a.numCPUsNeeded >= n
}
func (a *cpuAccumulator) isSatisfied() bool {
return a.numCPUsNeeded < 1
}
func (a *cpuAccumulator) isFailed() bool {
return a.numCPUsNeeded > a.details.CPUs().Size()
}
func takeByTopology(topo *topology.CPUTopology, availableCPUs cpuset.CPUSet, numCPUs int) (cpuset.CPUSet, error) {
acc := newCPUAccumulator(topo, availableCPUs, numCPUs)
if acc.isSatisfied() {
return acc.result, nil
}
if acc.isFailed() {
return cpuset.NewCPUSet(), fmt.Errorf("not enough cpus available to satisfy request")
}
// Algorithm: topology-aware best-fit
// 1. Acquire whole sockets, if available and the container requires at
// least a socket's-worth of CPUs.
for _, s := range acc.freeSockets() {
if acc.needs(acc.topo.CPUsPerSocket()) {
glog.V(4).Infof("[cpumanager] takeByTopology: claiming socket [%d]", s)
acc.take(acc.details.CPUsInSocket(s))
if acc.isSatisfied() {
return acc.result, nil
}
}
}
// 2. Acquire whole cores, if available and the container requires at least
// a core's-worth of CPUs.
for _, c := range acc.freeCores() {
if acc.needs(acc.topo.CPUsPerCore()) {
glog.V(4).Infof("[cpumanager] takeByTopology: claiming core [%d]", c)
acc.take(acc.details.CPUsInCore(c))
if acc.isSatisfied() {
return acc.result, nil
}
}
}
// 3. Acquire single threads, preferring to fill partially-allocated cores
// on the same sockets as the whole cores we have already taken in this
// allocation.
for _, c := range acc.freeCPUs() {
glog.V(4).Infof("[cpumanager] takeByTopology: claiming CPU [%d]", c)
if acc.needs(1) {
acc.take(cpuset.NewCPUSet(c))
}
if acc.isSatisfied() {
return acc.result, nil
}
}
return cpuset.NewCPUSet(), fmt.Errorf("failed to allocate cpus")
}
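// Illustrative usage sketch (values are hypothetical): the static policy
// invokes this as takeByTopology(p.topology, p.assignableCPUs(s), numCPUs).
// On a dual-socket hyper-threaded machine with all 12 CPUs assignable, a
// request for 6 CPUs is satisfied by claiming one whole socket (e.g. CPUs
// {0,2,4,6,8,10}), and a request for 2 CPUs by claiming one whole core.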

View File

@@ -0,0 +1,385 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cpumanager
import (
"reflect"
"testing"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
)
func TestCPUAccumulatorFreeSockets(t *testing.T) {
testCases := []struct {
description string
topo *topology.CPUTopology
availableCPUs cpuset.CPUSet
expect []int
}{
{
"single socket HT, 1 socket free",
topoSingleSocketHT,
cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
[]int{0},
},
{
"single socket HT, 0 sockets free",
topoSingleSocketHT,
cpuset.NewCPUSet(1, 2, 3, 4, 5, 6, 7),
[]int{},
},
{
"dual socket HT, 2 sockets free",
topoDualSocketHT,
cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
[]int{0, 1},
},
{
"dual socket HT, 1 socket free",
topoDualSocketHT,
cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11),
[]int{1},
},
{
"dual socket HT, 0 sockets free",
topoDualSocketHT,
cpuset.NewCPUSet(0, 2, 3, 4, 5, 6, 7, 8, 9, 11),
[]int{},
},
}
for _, tc := range testCases {
acc := newCPUAccumulator(tc.topo, tc.availableCPUs, 0)
result := acc.freeSockets()
if !reflect.DeepEqual(result, tc.expect) {
t.Errorf("[%s] expected %v to equal %v", tc.description, result, tc.expect)
}
}
}
func TestCPUAccumulatorFreeCores(t *testing.T) {
testCases := []struct {
description string
topo *topology.CPUTopology
availableCPUs cpuset.CPUSet
expect []int
}{
{
"single socket HT, 4 cores free",
topoSingleSocketHT,
cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
[]int{0, 1, 2, 3},
},
{
"single socket HT, 3 cores free",
topoSingleSocketHT,
cpuset.NewCPUSet(0, 1, 2, 4, 5, 6),
[]int{0, 1, 2},
},
{
"single socket HT, 3 cores free (1 partially consumed)",
topoSingleSocketHT,
cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6),
[]int{0, 1, 2},
},
{
"single socket HT, 0 cores free",
topoSingleSocketHT,
cpuset.NewCPUSet(),
[]int{},
},
{
"single socket HT, 0 cores free (4 partially consumed)",
topoSingleSocketHT,
cpuset.NewCPUSet(0, 1, 2, 3),
[]int{},
},
{
"dual socket HT, 6 cores free",
topoDualSocketHT,
cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
[]int{0, 2, 4, 1, 3, 5},
},
{
"dual socket HT, 5 cores free (1 consumed from socket 0)",
topoDualSocketHT,
cpuset.NewCPUSet(2, 1, 3, 4, 5, 7, 8, 9, 10, 11),
[]int{2, 4, 1, 3, 5},
},
{
"dual socket HT, 4 cores free (1 consumed from each socket)",
topoDualSocketHT,
cpuset.NewCPUSet(2, 3, 4, 5, 8, 9, 10, 11),
[]int{2, 4, 3, 5},
},
}
for _, tc := range testCases {
acc := newCPUAccumulator(tc.topo, tc.availableCPUs, 0)
result := acc.freeCores()
if !reflect.DeepEqual(result, tc.expect) {
t.Errorf("[%s] expected %v to equal %v", tc.description, result, tc.expect)
}
}
}
func TestCPUAccumulatorFreeCPUs(t *testing.T) {
testCases := []struct {
description string
topo *topology.CPUTopology
availableCPUs cpuset.CPUSet
expect []int
}{
{
"single socket HT, 8 cpus free",
topoSingleSocketHT,
cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
[]int{0, 4, 1, 5, 2, 6, 3, 7},
},
{
"single socket HT, 5 cpus free",
topoSingleSocketHT,
cpuset.NewCPUSet(3, 4, 5, 6, 7),
[]int{4, 5, 6, 3, 7},
},
{
"dual socket HT, 12 cpus free",
topoDualSocketHT,
cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
[]int{0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11},
},
{
"dual socket HT, 11 cpus free",
topoDualSocketHT,
cpuset.NewCPUSet(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
[]int{6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11},
},
{
"dual socket HT, 10 cpus free",
topoDualSocketHT,
cpuset.NewCPUSet(1, 2, 3, 4, 5, 7, 8, 9, 10, 11),
[]int{2, 8, 4, 10, 1, 7, 3, 9, 5, 11},
},
}
for _, tc := range testCases {
acc := newCPUAccumulator(tc.topo, tc.availableCPUs, 0)
result := acc.freeCPUs()
if !reflect.DeepEqual(result, tc.expect) {
t.Errorf("[%s] expected %v to equal %v", tc.description, result, tc.expect)
}
}
}
func TestCPUAccumulatorTake(t *testing.T) {
testCases := []struct {
description string
topo *topology.CPUTopology
availableCPUs cpuset.CPUSet
takeCPUs []cpuset.CPUSet
numCPUs int
expectSatisfied bool
expectFailed bool
}{
{
"take 0 cpus from a single socket HT, require 1",
topoSingleSocketHT,
cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
[]cpuset.CPUSet{cpuset.NewCPUSet()},
1,
false,
false,
},
{
"take 0 cpus from a single socket HT, require 1, none available",
topoSingleSocketHT,
cpuset.NewCPUSet(),
[]cpuset.CPUSet{cpuset.NewCPUSet()},
1,
false,
true,
},
{
"take 1 cpu from a single socket HT, require 1",
topoSingleSocketHT,
cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
[]cpuset.CPUSet{cpuset.NewCPUSet(0)},
1,
true,
false,
},
{
"take 1 cpu from a single socket HT, require 2",
topoSingleSocketHT,
cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
[]cpuset.CPUSet{cpuset.NewCPUSet(0)},
2,
false,
false,
},
{
"take 2 cpu from a single socket HT, require 4, expect failed",
topoSingleSocketHT,
cpuset.NewCPUSet(0, 1, 2),
[]cpuset.CPUSet{cpuset.NewCPUSet(0), cpuset.NewCPUSet(1)},
4,
false,
true,
},
{
"take all cpus one at a time from a single socket HT, require 8",
topoSingleSocketHT,
cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
[]cpuset.CPUSet{
cpuset.NewCPUSet(0),
cpuset.NewCPUSet(1),
cpuset.NewCPUSet(2),
cpuset.NewCPUSet(3),
cpuset.NewCPUSet(4),
cpuset.NewCPUSet(5),
cpuset.NewCPUSet(6),
cpuset.NewCPUSet(7),
},
8,
true,
false,
},
}
for _, tc := range testCases {
acc := newCPUAccumulator(tc.topo, tc.availableCPUs, tc.numCPUs)
totalTaken := 0
for _, cpus := range tc.takeCPUs {
acc.take(cpus)
totalTaken += cpus.Size()
}
if tc.expectSatisfied != acc.isSatisfied() {
t.Errorf("[%s] expected acc.isSatisfied() to be %t", tc.description, tc.expectSatisfied)
}
if tc.expectFailed != acc.isFailed() {
t.Errorf("[%s] expected acc.isFailed() to be %t", tc.description, tc.expectFailed)
}
for _, cpus := range tc.takeCPUs {
availableCPUs := acc.details.CPUs()
if cpus.Intersection(availableCPUs).Size() > 0 {
t.Errorf("[%s] expected intersection of taken cpus [%s] and acc.details.CPUs() [%s] to be empty", tc.description, cpus, availableCPUs)
}
if !cpus.IsSubsetOf(acc.result) {
t.Errorf("[%s] expected [%s] to be a subset of acc.result [%s]", tc.description, cpus, acc.result)
}
}
expNumCPUsNeeded := tc.numCPUs - totalTaken
if acc.numCPUsNeeded != expNumCPUsNeeded {
t.Errorf("[%s] expected acc.numCPUsNeeded to be %d (got %d)", tc.description, expNumCPUsNeeded, acc.numCPUsNeeded)
}
}
}
func TestTakeByTopology(t *testing.T) {
testCases := []struct {
description string
topo *topology.CPUTopology
availableCPUs cpuset.CPUSet
numCPUs int
expErr string
expResult cpuset.CPUSet
}{
{
"take more cpus than are available from single socket with HT",
topoSingleSocketHT,
cpuset.NewCPUSet(0, 2, 4, 6),
5,
"not enough cpus available to satisfy request",
cpuset.NewCPUSet(),
},
{
"take zero cpus from single socket with HT",
topoSingleSocketHT,
cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
0,
"",
cpuset.NewCPUSet(),
},
{
"take one cpu from single socket with HT",
topoSingleSocketHT,
cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
1,
"",
cpuset.NewCPUSet(0),
},
{
"take one cpu from single socket with HT, some cpus are taken",
topoSingleSocketHT,
cpuset.NewCPUSet(1, 3, 5, 6, 7),
1,
"",
cpuset.NewCPUSet(6),
},
{
"take two cpus from single socket with HT",
topoSingleSocketHT,
cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
2,
"",
cpuset.NewCPUSet(0, 4),
},
{
"take all cpus from single socket with HT",
topoSingleSocketHT,
cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
8,
"",
cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
},
{
"take two cpus from single socket with HT, only one core totally free",
topoSingleSocketHT,
cpuset.NewCPUSet(0, 1, 2, 3, 6),
2,
"",
cpuset.NewCPUSet(2, 6),
},
{
"take three cpus from dual socket with HT - core from Socket 0",
topoDualSocketHT,
cpuset.NewCPUSet(1, 2, 3, 4, 5, 7, 8, 9, 10, 11),
1,
"",
cpuset.NewCPUSet(2),
},
{
"take a socket of cpus from dual socket with HT",
topoDualSocketHT,
cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
6,
"",
cpuset.NewCPUSet(0, 2, 4, 6, 8, 10),
},
}
for _, tc := range testCases {
result, err := takeByTopology(tc.topo, tc.availableCPUs, tc.numCPUs)
if tc.expErr != "" && err.Error() != tc.expErr {
t.Errorf("expected error to be [%v] but it was [%v] in test \"%s\"", tc.expErr, err, tc.description)
}
if !result.Equals(tc.expResult) {
t.Errorf("expected result [%s] to equal [%s] in test \"%s\"", result, tc.expResult, tc.description)
}
}
}

View File

@@ -0,0 +1,290 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cpumanager
import (
"fmt"
"math"
"sync"
"time"
"github.com/golang/glog"
cadvisorapi "github.com/google/cadvisor/info/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/status"
"path"
)
// ActivePodsFunc is a function that returns a list of pods to reconcile.
type ActivePodsFunc func() []*v1.Pod
type runtimeService interface {
UpdateContainerResources(id string, resources *runtimeapi.LinuxContainerResources) error
}
type policyName string
// CPUManagerStateFileName is the file name where the CPU manager stores its state
const CPUManagerStateFileName = "cpu_manager_state"
// Manager interface provides methods for Kubelet to manage pod cpus.
type Manager interface {
// Start is called during Kubelet initialization.
Start(activePods ActivePodsFunc, podStatusProvider status.PodStatusProvider, containerRuntime runtimeService)
// AddContainer is called between container create and container start
// so that initial CPU affinity settings can be written through to the
// container runtime before the first process begins to execute.
AddContainer(p *v1.Pod, c *v1.Container, containerID string) error
// RemoveContainer is called after Kubelet decides to kill or delete a
// container. After this call, the CPU manager stops trying to reconcile
// that container and any CPUs dedicated to the container are freed.
RemoveContainer(containerID string) error
// State returns a read-only interface to the internal CPU manager state.
State() state.Reader
}
type manager struct {
sync.Mutex
policy Policy
// reconcilePeriod is the duration between calls to reconcileState.
reconcilePeriod time.Duration
// state allows pluggable CPU assignment policies while sharing a common
// representation of state for the system to inspect and reconcile.
state state.State
// containerRuntime is the container runtime service interface needed
// to make UpdateContainerResources() calls against the containers.
containerRuntime runtimeService
// activePods is a method for listing active pods on the node
// so all the containers can be updated in the reconciliation loop.
activePods ActivePodsFunc
// podStatusProvider provides a method for obtaining pod statuses
// and the containerID of their containers
podStatusProvider status.PodStatusProvider
machineInfo *cadvisorapi.MachineInfo
nodeAllocatableReservation v1.ResourceList
}
var _ Manager = &manager{}
// NewManager creates a new CPU manager based on the provided policy
func NewManager(
cpuPolicyName string,
reconcilePeriod time.Duration,
machineInfo *cadvisorapi.MachineInfo,
nodeAllocatableReservation v1.ResourceList,
stateFileDirectory string,
) (Manager, error) {
var policy Policy
switch policyName(cpuPolicyName) {
case PolicyNone:
policy = NewNonePolicy()
case PolicyStatic:
topo, err := topology.Discover(machineInfo)
if err != nil {
return nil, err
}
glog.Infof("[cpumanager] detected CPU topology: %v", topo)
reservedCPUs, ok := nodeAllocatableReservation[v1.ResourceCPU]
if !ok {
// The static policy cannot initialize without this information. Panic!
panic("[cpumanager] unable to determine reserved CPU resources for static policy")
}
if reservedCPUs.IsZero() {
// Panic!
//
// The static policy requires this to be nonzero. Zero CPU reservation
// would allow the shared pool to be completely exhausted. At that point
// either we would violate our guarantee of exclusivity or need to evict
// any pod that has at least one container that requires zero CPUs.
// See the comments in policy_static.go for more details.
panic("[cpumanager] the static policy requires systemreserved.cpu + kubereserved.cpu to be greater than zero")
}
// Take the ceiling of the reservation, since fractional CPUs cannot be
// exclusively allocated.
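// For example, a reservation of 1500m CPU is rounded up to 2 reserved CPUs.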
reservedCPUsFloat := float64(reservedCPUs.MilliValue()) / 1000
numReservedCPUs := int(math.Ceil(reservedCPUsFloat))
policy = NewStaticPolicy(topo, numReservedCPUs)
default:
glog.Errorf("[cpumanager] Unknown policy \"%s\", falling back to default policy \"%s\"", cpuPolicyName, PolicyNone)
policy = NewNonePolicy()
}
stateImpl := state.NewFileState(
path.Join(stateFileDirectory, CPUManagerStateFileName),
policy.Name())
manager := &manager{
policy: policy,
reconcilePeriod: reconcilePeriod,
state: stateImpl,
machineInfo: machineInfo,
nodeAllocatableReservation: nodeAllocatableReservation,
}
return manager, nil
}
func (m *manager) Start(activePods ActivePodsFunc, podStatusProvider status.PodStatusProvider, containerRuntime runtimeService) {
glog.Infof("[cpumanger] starting with %s policy", m.policy.Name())
glog.Infof("[cpumanger] reconciling every %v", m.reconcilePeriod)
m.activePods = activePods
m.podStatusProvider = podStatusProvider
m.containerRuntime = containerRuntime
m.policy.Start(m.state)
if m.policy.Name() == string(PolicyNone) {
return
}
go wait.Until(func() { m.reconcileState() }, m.reconcilePeriod, wait.NeverStop)
}
func (m *manager) AddContainer(p *v1.Pod, c *v1.Container, containerID string) error {
m.Lock()
err := m.policy.AddContainer(m.state, p, c, containerID)
if err != nil {
glog.Errorf("[cpumanager] AddContainer error: %v", err)
m.Unlock()
return err
}
cpus := m.state.GetCPUSetOrDefault(containerID)
m.Unlock()
if !cpus.IsEmpty() {
err = m.updateContainerCPUSet(containerID, cpus)
if err != nil {
glog.Errorf("[cpumanager] AddContainer error: %v", err)
return err
}
} else {
glog.V(5).Infof("[cpumanager] update container resources is skipped due to cpu set is empty")
}
return nil
}
func (m *manager) RemoveContainer(containerID string) error {
m.Lock()
defer m.Unlock()
err := m.policy.RemoveContainer(m.state, containerID)
if err != nil {
glog.Errorf("[cpumanager] RemoveContainer error: %v", err)
return err
}
return nil
}
func (m *manager) State() state.Reader {
return m.state
}
type reconciledContainer struct {
podName string
containerName string
containerID string
}
func (m *manager) reconcileState() (success []reconciledContainer, failure []reconciledContainer) {
success = []reconciledContainer{}
failure = []reconciledContainer{}
for _, pod := range m.activePods() {
allContainers := pod.Spec.InitContainers
allContainers = append(allContainers, pod.Spec.Containers...)
for _, container := range allContainers {
status, ok := m.podStatusProvider.GetPodStatus(pod.UID)
if !ok {
glog.Warningf("[cpumanager] reconcileState: skipping pod; status not found (pod: %s, container: %s)", pod.Name, container.Name)
failure = append(failure, reconciledContainer{pod.Name, container.Name, ""})
break
}
containerID, err := findContainerIDByName(&status, container.Name)
if err != nil {
glog.Warningf("[cpumanager] reconcileState: skipping container; ID not found in status (pod: %s, container: %s, error: %v)", pod.Name, container.Name, err)
failure = append(failure, reconciledContainer{pod.Name, container.Name, ""})
continue
}
cset := m.state.GetCPUSetOrDefault(containerID)
if cset.IsEmpty() {
// NOTE: This should not happen outside of tests.
glog.Infof("[cpumanager] reconcileState: skipping container; assigned cpuset is empty (pod: %s, container: %s)", pod.Name, container.Name)
failure = append(failure, reconciledContainer{pod.Name, container.Name, containerID})
continue
}
glog.V(4).Infof("[cpumanager] reconcileState: updating container (pod: %s, container: %s, container id: %s, cpuset: \"%v\")", pod.Name, container.Name, containerID, cset)
err = m.updateContainerCPUSet(containerID, cset)
if err != nil {
glog.Errorf("[cpumanager] reconcileState: failed to update container (pod: %s, container: %s, container id: %s, cpuset: \"%v\", error: %v)", pod.Name, container.Name, containerID, cset, err)
failure = append(failure, reconciledContainer{pod.Name, container.Name, containerID})
continue
}
success = append(success, reconciledContainer{pod.Name, container.Name, containerID})
}
}
return success, failure
}
func findContainerIDByName(status *v1.PodStatus, name string) (string, error) {
for _, container := range status.ContainerStatuses {
if container.Name == name && container.ContainerID != "" {
cid := &kubecontainer.ContainerID{}
err := cid.ParseString(container.ContainerID)
if err != nil {
return "", err
}
return cid.ID, nil
}
}
return "", fmt.Errorf("unable to find ID for container with name %v in pod status (it may not be running)", name)
}
func (m *manager) updateContainerCPUSet(containerID string, cpus cpuset.CPUSet) error {
// TODO: Consider adding a `ResourceConfigForContainer` helper in
// helpers_linux.go similar to what exists for pods.
// It would be better to pass the full container resources here instead of
// this patch-like partial resources.
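// The assigned cpuset is written through in Linux cpuset list format,
// e.g. an assignment of CPUs {0,1,4} becomes CpusetCpus: "0-1,4".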
return m.containerRuntime.UpdateContainerResources(
containerID,
&runtimeapi.LinuxContainerResources{
CpusetCpus: cpus.String(),
})
}
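// Illustrative lifecycle sketch (hypothetical caller; variable names are
// assumed): the kubelet is expected to drive the Manager roughly as the
// interface comments above describe:
//
//   mgr, _ := NewManager("static", 10*time.Second, machineInfo, reservation, stateDir)
//   mgr.Start(activePods, statusProvider, runtimeService)
//   // between container create and container start:
//   _ = mgr.AddContainer(pod, &pod.Spec.Containers[0], containerID)
//   // after the container is killed or deleted:
//   _ = mgr.RemoveContainer(containerID)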

View File

@@ -0,0 +1,591 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cpumanager
import (
"fmt"
"reflect"
"strings"
"testing"
"time"
cadvisorapi "github.com/google/cadvisor/info/v1"
"io/ioutil"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
"os"
)
type mockState struct {
assignments state.ContainerCPUAssignments
defaultCPUSet cpuset.CPUSet
}
func (s *mockState) GetCPUSet(containerID string) (cpuset.CPUSet, bool) {
res, ok := s.assignments[containerID]
return res.Clone(), ok
}
func (s *mockState) GetDefaultCPUSet() cpuset.CPUSet {
return s.defaultCPUSet.Clone()
}
func (s *mockState) GetCPUSetOrDefault(containerID string) cpuset.CPUSet {
if res, ok := s.GetCPUSet(containerID); ok {
return res
}
return s.GetDefaultCPUSet()
}
func (s *mockState) SetCPUSet(containerID string, cset cpuset.CPUSet) {
s.assignments[containerID] = cset
}
func (s *mockState) SetDefaultCPUSet(cset cpuset.CPUSet) {
s.defaultCPUSet = cset
}
func (s *mockState) Delete(containerID string) {
delete(s.assignments, containerID)
}
func (s *mockState) ClearState() {
s.defaultCPUSet = cpuset.CPUSet{}
s.assignments = make(state.ContainerCPUAssignments)
}
func (s *mockState) SetCPUAssignments(a state.ContainerCPUAssignments) {
s.assignments = a.Clone()
}
func (s *mockState) GetCPUAssignments() state.ContainerCPUAssignments {
return s.assignments.Clone()
}
type mockPolicy struct {
err error
}
func (p *mockPolicy) Name() string {
return "mock"
}
func (p *mockPolicy) Start(s state.State) {
}
func (p *mockPolicy) AddContainer(s state.State, pod *v1.Pod, container *v1.Container, containerID string) error {
return p.err
}
func (p *mockPolicy) RemoveContainer(s state.State, containerID string) error {
return p.err
}
type mockRuntimeService struct {
err error
}
func (rt mockRuntimeService) UpdateContainerResources(id string, resources *runtimeapi.LinuxContainerResources) error {
return rt.err
}
type mockPodStatusProvider struct {
podStatus v1.PodStatus
found bool
}
func (psp mockPodStatusProvider) GetPodStatus(uid types.UID) (v1.PodStatus, bool) {
return psp.podStatus, psp.found
}
type mockPodKiller struct {
killedPods []*v1.Pod
}
func (f *mockPodKiller) killPodNow(pod *v1.Pod, status v1.PodStatus, gracePeriodOverride *int64) error {
f.killedPods = append(f.killedPods, pod)
return nil
}
type mockPodProvider struct {
pods []*v1.Pod
}
func (f *mockPodProvider) getPods() []*v1.Pod {
return f.pods
}
type mockRecorder struct{}
func (r *mockRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) {
}
func makePod(cpuRequest, cpuLimit string) *v1.Pod {
return &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceCPU): resource.MustParse(cpuRequest),
v1.ResourceName(v1.ResourceMemory): resource.MustParse("1G"),
},
Limits: v1.ResourceList{
v1.ResourceName(v1.ResourceCPU): resource.MustParse(cpuLimit),
v1.ResourceName(v1.ResourceMemory): resource.MustParse("1G"),
},
},
},
},
},
}
}
// CPUAllocatable must be <= CPUCapacity
func prepareCPUNodeStatus(CPUCapacity, CPUAllocatable string) v1.NodeStatus {
nodestatus := v1.NodeStatus{
Capacity: make(v1.ResourceList, 1),
Allocatable: make(v1.ResourceList, 1),
}
cpucap, _ := resource.ParseQuantity(CPUCapacity)
cpuall, _ := resource.ParseQuantity(CPUAllocatable)
nodestatus.Capacity[v1.ResourceCPU] = cpucap
nodestatus.Allocatable[v1.ResourceCPU] = cpuall
return nodestatus
}
func TestCPUManagerAdd(t *testing.T) {
testCases := []struct {
description string
regErr error
updateErr error
expErr error
}{
{
description: "cpu manager add - no error",
regErr: nil,
updateErr: nil,
expErr: nil,
},
{
description: "cpu manager add - policy add container error",
regErr: fmt.Errorf("fake reg error"),
updateErr: nil,
expErr: fmt.Errorf("fake reg error"),
},
{
description: "cpu manager add - container update error",
regErr: nil,
updateErr: fmt.Errorf("fake update error"),
expErr: nil,
},
}
for _, testCase := range testCases {
mgr := &manager{
policy: &mockPolicy{
err: testCase.regErr,
},
state: &mockState{
assignments: state.ContainerCPUAssignments{},
defaultCPUSet: cpuset.NewCPUSet(),
},
containerRuntime: mockRuntimeService{
err: testCase.updateErr,
},
activePods: func() []*v1.Pod { return nil },
podStatusProvider: mockPodStatusProvider{},
}
pod := makePod("1000", "1000")
container := &pod.Spec.Containers[0]
err := mgr.AddContainer(pod, container, "fakeID")
if !reflect.DeepEqual(err, testCase.expErr) {
t.Errorf("CPU Manager AddContainer() error (%v). expected error: %v but got: %v",
testCase.description, testCase.expErr, err)
}
}
}
func TestCPUManagerGenerate(t *testing.T) {
testCases := []struct {
description string
cpuPolicyName string
nodeAllocatableReservation v1.ResourceList
isTopologyBroken bool
panicMsg string
expectedPolicy string
expectedError error
skipIfPermissionsError bool
}{
{
description: "set none policy",
cpuPolicyName: "none",
nodeAllocatableReservation: nil,
expectedPolicy: "none",
},
{
description: "invalid policy name",
cpuPolicyName: "invalid",
nodeAllocatableReservation: nil,
expectedPolicy: "none",
},
{
description: "static policy",
cpuPolicyName: "static",
nodeAllocatableReservation: v1.ResourceList{v1.ResourceCPU: *resource.NewQuantity(3, resource.DecimalSI)},
expectedPolicy: "static",
skipIfPermissionsError: true,
},
{
description: "static policy - broken topology",
cpuPolicyName: "static",
nodeAllocatableReservation: v1.ResourceList{},
isTopologyBroken: true,
expectedError: fmt.Errorf("could not detect number of cpus"),
skipIfPermissionsError: true,
},
{
description: "static policy - broken reservation",
cpuPolicyName: "static",
nodeAllocatableReservation: v1.ResourceList{},
panicMsg: "unable to determine reserved CPU resources for static policy",
skipIfPermissionsError: true,
},
{
description: "static policy - no CPU resources",
cpuPolicyName: "static",
nodeAllocatableReservation: v1.ResourceList{v1.ResourceCPU: *resource.NewQuantity(0, resource.DecimalSI)},
panicMsg: "the static policy requires systemreserved.cpu + kubereserved.cpu to be greater than zero",
skipIfPermissionsError: true,
},
}
mockedMachineInfo := cadvisorapi.MachineInfo{
NumCores: 4,
Topology: []cadvisorapi.Node{
{
Cores: []cadvisorapi.Core{
{
Id: 0,
Threads: []int{0},
},
{
Id: 1,
Threads: []int{1},
},
{
Id: 2,
Threads: []int{2},
},
{
Id: 3,
Threads: []int{3},
},
},
},
},
}
for _, testCase := range testCases {
t.Run(testCase.description, func(t *testing.T) {
machineInfo := &mockedMachineInfo
if testCase.isTopologyBroken {
machineInfo = &cadvisorapi.MachineInfo{}
}
sDir, err := ioutil.TempDir("/tmp/", "cpu_manager_test")
if err != nil {
t.Errorf("cannot create state file: %s", err.Error())
}
defer os.RemoveAll(sDir)
defer func() {
if err := recover(); err != nil {
if testCase.panicMsg != "" {
if !strings.Contains(err.(string), testCase.panicMsg) {
t.Errorf("Unexpected panic message. Have: %q wants %q", err, testCase.panicMsg)
}
} else {
t.Errorf("Unexpected panic: %q", err)
}
} else if testCase.panicMsg != "" {
t.Error("Expected panic hasn't been raised")
}
}()
mgr, err := NewManager(testCase.cpuPolicyName, 5*time.Second, machineInfo, testCase.nodeAllocatableReservation, sDir)
if testCase.expectedError != nil {
if !strings.Contains(err.Error(), testCase.expectedError.Error()) {
t.Errorf("Unexpected error message. Have: %s wants %s", err.Error(), testCase.expectedError.Error())
}
} else {
rawMgr := mgr.(*manager)
if rawMgr.policy.Name() != testCase.expectedPolicy {
t.Errorf("Unexpected policy name. Have: %q wants %q", rawMgr.policy.Name(), testCase.expectedPolicy)
}
}
})
}
}
func TestCPUManagerRemove(t *testing.T) {
mgr := &manager{
policy: &mockPolicy{
err: nil,
},
state: &mockState{
assignments: state.ContainerCPUAssignments{},
defaultCPUSet: cpuset.NewCPUSet(),
},
containerRuntime: mockRuntimeService{},
activePods: func() []*v1.Pod { return nil },
podStatusProvider: mockPodStatusProvider{},
}
err := mgr.RemoveContainer("fakeID")
if err != nil {
t.Errorf("CPU Manager RemoveContainer() error. expected error to be nil but got: %v", err)
}
mgr = &manager{
policy: &mockPolicy{
err: fmt.Errorf("fake error"),
},
state: state.NewMemoryState(),
containerRuntime: mockRuntimeService{},
activePods: func() []*v1.Pod { return nil },
podStatusProvider: mockPodStatusProvider{},
}
err = mgr.RemoveContainer("fakeID")
if !reflect.DeepEqual(err, fmt.Errorf("fake error")) {
t.Errorf("CPU Manager RemoveContainer() error. expected error: fake error but got: %v", err)
}
}
func TestReconcileState(t *testing.T) {
testCases := []struct {
description string
activePods []*v1.Pod
pspPS v1.PodStatus
pspFound bool
stAssignments state.ContainerCPUAssignments
stDefaultCPUSet cpuset.CPUSet
updateErr error
expectFailedContainerName string
}{
{
description: "cpu manager reconclie - no error",
activePods: []*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
Name: "fakePodName",
UID: "fakeUID",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "fakeName",
},
},
},
},
},
pspPS: v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
{
Name: "fakeName",
ContainerID: "docker://fakeID",
},
},
},
pspFound: true,
stAssignments: state.ContainerCPUAssignments{
"fakeID": cpuset.NewCPUSet(1, 2),
},
stDefaultCPUSet: cpuset.NewCPUSet(3, 4, 5, 6, 7),
updateErr: nil,
expectFailedContainerName: "",
},
{
description: "cpu manager reconclie - pod status not found",
activePods: []*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
Name: "fakePodName",
UID: "fakeUID",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "fakeName",
},
},
},
},
},
pspPS: v1.PodStatus{},
pspFound: false,
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(),
updateErr: nil,
expectFailedContainerName: "fakeName",
},
{
description: "cpu manager reconclie - container id not found",
activePods: []*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
Name: "fakePodName",
UID: "fakeUID",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "fakeName",
},
},
},
},
},
pspPS: v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
{
Name: "fakeName1",
ContainerID: "docker://fakeID",
},
},
},
pspFound: true,
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(),
updateErr: nil,
expectFailedContainerName: "fakeName",
},
{
description: "cpu manager reconclie - cpuset is empty",
activePods: []*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
Name: "fakePodName",
UID: "fakeUID",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "fakeName",
},
},
},
},
},
pspPS: v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
{
Name: "fakeName",
ContainerID: "docker://fakeID",
},
},
},
pspFound: true,
stAssignments: state.ContainerCPUAssignments{
"fakeID": cpuset.NewCPUSet(),
},
stDefaultCPUSet: cpuset.NewCPUSet(1, 2, 3, 4, 5, 6, 7),
updateErr: nil,
expectFailedContainerName: "fakeName",
},
{
description: "cpu manager reconclie - container update error",
activePods: []*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
Name: "fakePodName",
UID: "fakeUID",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "fakeName",
},
},
},
},
},
pspPS: v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
{
Name: "fakeName",
ContainerID: "docker://fakeID",
},
},
},
pspFound: true,
stAssignments: state.ContainerCPUAssignments{
"fakeID": cpuset.NewCPUSet(1, 2),
},
stDefaultCPUSet: cpuset.NewCPUSet(3, 4, 5, 6, 7),
updateErr: fmt.Errorf("fake container update error"),
expectFailedContainerName: "fakeName",
},
}
for _, testCase := range testCases {
mgr := &manager{
policy: &mockPolicy{
err: nil,
},
state: &mockState{
assignments: testCase.stAssignments,
defaultCPUSet: testCase.stDefaultCPUSet,
},
containerRuntime: mockRuntimeService{
err: testCase.updateErr,
},
activePods: func() []*v1.Pod {
return testCase.activePods
},
podStatusProvider: mockPodStatusProvider{
podStatus: testCase.pspPS,
found: testCase.pspFound,
},
}
_, failure := mgr.reconcileState()
if testCase.expectFailedContainerName != "" {
// Search failed reconciled containers for the supplied name.
foundFailedContainer := false
for _, reconciled := range failure {
if reconciled.containerName == testCase.expectFailedContainerName {
foundFailedContainer = true
break
}
}
if !foundFailedContainer {
t.Errorf("Expected reconciliation failure for container: %s", testCase.expectFailedContainerName)
}
}
}
}

View File

@@ -0,0 +1,58 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cpumanager
import (
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
"k8s.io/kubernetes/pkg/kubelet/status"
)
type fakeManager struct {
state state.State
}
func (m *fakeManager) Start(activePods ActivePodsFunc, podStatusProvider status.PodStatusProvider, containerRuntime runtimeService) {
glog.Info("[fake cpumanager] Start()")
}
func (m *fakeManager) Policy() Policy {
glog.Info("[fake cpumanager] Policy()")
return NewNonePolicy()
}
func (m *fakeManager) AddContainer(pod *v1.Pod, container *v1.Container, containerID string) error {
glog.Infof("[fake cpumanager] AddContainer (pod: %s, container: %s, container id: %s)", pod.Name, container.Name, containerID)
return nil
}
func (m *fakeManager) RemoveContainer(containerID string) error {
glog.Infof("[fake cpumanager] RemoveContainer (container id: %s)", containerID)
return nil
}
func (m *fakeManager) State() state.Reader {
return m.state
}
// NewFakeManager creates an empty/fake CPU manager
func NewFakeManager() Manager {
return &fakeManager{
state: state.NewMemoryState(),
}
}

View File

@@ -0,0 +1,30 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cpumanager
import (
"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
)
// Policy implements the logic for assigning CPUs to pod containers.
type Policy interface {
Name() string
Start(s state.State)
AddContainer(s state.State, pod *v1.Pod, container *v1.Container, containerID string) error
RemoveContainer(s state.State, containerID string) error
}

View File

@@ -0,0 +1,51 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cpumanager
import (
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
)
type nonePolicy struct{}
var _ Policy = &nonePolicy{}
// PolicyNone is the name of the none policy
const PolicyNone policyName = "none"
// NewNonePolicy returns a cpuset manager policy that does nothing
func NewNonePolicy() Policy {
return &nonePolicy{}
}
func (p *nonePolicy) Name() string {
return string(PolicyNone)
}
func (p *nonePolicy) Start(s state.State) {
glog.Info("[cpumanager] none policy: Start")
}
func (p *nonePolicy) AddContainer(s state.State, pod *v1.Pod, container *v1.Container, containerID string) error {
return nil
}
func (p *nonePolicy) RemoveContainer(s state.State, containerID string) error {
return nil
}

View File

@@ -0,0 +1,65 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cpumanager
import (
"testing"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
)
func TestNonePolicyName(t *testing.T) {
policy := &nonePolicy{}
policyName := policy.Name()
if policyName != "none" {
t.Errorf("NonePolicy Name() error. expected: none, returned: %v",
policyName)
}
}
func TestNonePolicyAdd(t *testing.T) {
policy := &nonePolicy{}
st := &mockState{
assignments: state.ContainerCPUAssignments{},
defaultCPUSet: cpuset.NewCPUSet(1, 2, 3, 4, 5, 6, 7),
}
testPod := makePod("1000m", "1000m")
container := &testPod.Spec.Containers[0]
err := policy.AddContainer(st, testPod, container, "fakeID")
if err != nil {
t.Errorf("NonePolicy AddContainer() error. expected no error but got: %v", err)
}
}
func TestNonePolicyRemove(t *testing.T) {
policy := &nonePolicy{}
st := &mockState{
assignments: state.ContainerCPUAssignments{},
defaultCPUSet: cpuset.NewCPUSet(1, 2, 3, 4, 5, 6, 7),
}
err := policy.RemoveContainer(st, "fakeID")
if err != nil {
t.Errorf("NonePolicy RemoveContainer() error. expected no error but got %v", err)
}
}

View File

@@ -0,0 +1,208 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cpumanager
import (
"fmt"
"github.com/golang/glog"
"k8s.io/api/core/v1"
v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
)
// PolicyStatic is the name of the static policy
const PolicyStatic policyName = "static"
var _ Policy = &staticPolicy{}
// staticPolicy is a CPU manager policy that does not change CPU
// assignments for exclusively pinned guaranteed containers after the main
// container process starts.
//
// This policy allocates CPUs exclusively for a container if all the following
// conditions are met:
//
// - The pod QoS class is Guaranteed.
// - The CPU request is a positive integer.
//
// The static policy maintains the following sets of logical CPUs:
//
// - SHARED: Burstable, BestEffort, and non-integral Guaranteed containers
// run here. Initially this contains all CPU IDs on the system. As
// exclusive allocations are created and destroyed, this CPU set shrinks
// and grows, accordingly. This is stored in the state as the default
// CPU set.
//
// - RESERVED: A subset of the shared pool which is not exclusively
// allocatable. The membership of this pool is static for the lifetime of
// the Kubelet. The size of the reserved pool is
// ceil(systemreserved.cpu + kubereserved.cpu).
// Reserved CPUs are taken topologically starting with lowest-indexed
// physical core, as reported by cAdvisor.
//
// - ASSIGNABLE: Equal to SHARED - RESERVED. Exclusive CPUs are allocated
// from this pool.
//
// - EXCLUSIVE ALLOCATIONS: CPU sets assigned exclusively to one container.
// These are stored as explicit assignments in the state.
//
// When an exclusive allocation is made, the static policy also updates the
// default cpuset in the state abstraction. The CPU manager's periodic
// reconcile loop takes care of rewriting the cpuset in cgroupfs for any
// containers that may be running in the shared pool. For this reason,
// applications running within exclusively-allocated containers must tolerate
// potentially sharing their allocated CPUs for up to the CPU manager
// reconcile period.
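//
// For example (illustrative sketch): on an 8-CPU hyper-threaded machine with
// a 1-CPU reservation, the pools start as SHARED = {0..7}, RESERVED = {0}
// and ASSIGNABLE = {1..7}. A Guaranteed container requesting 2 whole CPUs is
// then granted one full core (e.g. CPUs {1,5}), and SHARED shrinks to
// {0,2,3,4,6,7}.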
type staticPolicy struct {
// cpu socket topology
topology *topology.CPUTopology
// set of CPUs that is not available for exclusive assignment
reserved cpuset.CPUSet
}
// Ensure staticPolicy implements Policy interface
var _ Policy = &staticPolicy{}
// NewStaticPolicy returns a CPU manager policy that does not change CPU
// assignments for exclusively pinned guaranteed containers after the main
// container process starts.
func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int) Policy {
allCPUs := topology.CPUDetails.CPUs()
// takeByTopology allocates CPUs associated with low-numbered cores from
// allCPUs.
//
// For example: Given a system with 8 CPUs available and HT enabled,
// if numReservedCPUs=2, then reserved={0,4}
reserved, _ := takeByTopology(topology, allCPUs, numReservedCPUs)
if reserved.Size() != numReservedCPUs {
panic(fmt.Sprintf("[cpumanager] unable to reserve the required amount of CPUs (size of %s did not equal %d)", reserved, numReservedCPUs))
}
glog.Infof("[cpumanager] reserved %d CPUs (\"%s\") not available for exclusive assignment", reserved.Size(), reserved)
return &staticPolicy{
topology: topology,
reserved: reserved,
}
}
func (p *staticPolicy) Name() string {
return string(PolicyStatic)
}
func (p *staticPolicy) Start(s state.State) {
if err := p.validateState(s); err != nil {
glog.Errorf("[cpumanager] static policy invalid state: %s\n", err.Error())
panic("[cpumanager] - please drain node and remove policy state file")
}
}
func (p *staticPolicy) validateState(s state.State) error {
tmpAssignments := s.GetCPUAssignments()
tmpDefaultCPUset := s.GetDefaultCPUSet()
// Default cpuset cannot be empty when assignments exist
if tmpDefaultCPUset.IsEmpty() {
if len(tmpAssignments) != 0 {
return fmt.Errorf("default cpuset cannot be empty")
}
// State is empty; initialize it.
allCPUs := p.topology.CPUDetails.CPUs()
s.SetDefaultCPUSet(allCPUs)
return nil
}
// State has already been initialized from the state file (it is not empty).
// 1. Check that the reserved cpuset is a subset of the default cpuset; it may not be if:
// - the kube/system reservation has changed (increased), which may prevent some containers from starting
// - the user tampered with the state file
if !p.reserved.Intersection(tmpDefaultCPUset).Equals(p.reserved) {
return fmt.Errorf("not all reserved cpus: \"%s\" are present in defaultCpuSet: \"%s\"",
p.reserved.String(), tmpDefaultCPUset.String())
}
// 2. Check that the state for the static policy is consistent.
for cID, cset := range tmpAssignments {
// None of the CPUs in the default cpuset should appear in the assignments.
if !tmpDefaultCPUset.Intersection(cset).IsEmpty() {
return fmt.Errorf("container id: %s cpuset: \"%s\" overlaps with default cpuset \"%s\"",
cID, cset.String(), tmpDefaultCPUset.String())
}
}
return nil
}
// assignableCPUs returns the set of unassigned CPUs minus the reserved set.
func (p *staticPolicy) assignableCPUs(s state.State) cpuset.CPUSet {
return s.GetDefaultCPUSet().Difference(p.reserved)
}
func (p *staticPolicy) AddContainer(s state.State, pod *v1.Pod, container *v1.Container, containerID string) error {
glog.Infof("[cpumanager] static policy: AddContainer (pod: %s, container: %s, container id: %s)", pod.Name, container.Name, containerID)
if numCPUs := guaranteedCPUs(pod, container); numCPUs != 0 {
// container belongs in an exclusively allocated pool
cpuset, err := p.allocateCPUs(s, numCPUs)
if err != nil {
glog.Errorf("[cpumanager] unable to allocate %d CPUs (container id: %s, error: %v)", numCPUs, containerID, err)
return err
}
s.SetCPUSet(containerID, cpuset)
}
// container belongs in the shared pool (nothing to do; use default cpuset)
return nil
}
func (p *staticPolicy) RemoveContainer(s state.State, containerID string) error {
glog.Infof("[cpumanager] static policy: RemoveContainer (container id: %s)", containerID)
if toRelease, ok := s.GetCPUSet(containerID); ok {
s.Delete(containerID)
// Mutate the shared pool, adding released cpus.
s.SetDefaultCPUSet(s.GetDefaultCPUSet().Union(toRelease))
}
return nil
}
func (p *staticPolicy) allocateCPUs(s state.State, numCPUs int) (cpuset.CPUSet, error) {
glog.Infof("[cpumanager] allocateCpus: (numCPUs: %d)", numCPUs)
result, err := takeByTopology(p.topology, p.assignableCPUs(s), numCPUs)
if err != nil {
return cpuset.NewCPUSet(), err
}
// Remove allocated CPUs from the shared CPUSet.
s.SetDefaultCPUSet(s.GetDefaultCPUSet().Difference(result))
glog.Infof("[cpumanager] allocateCPUs: returning \"%v\"", result)
return result, nil
}
func guaranteedCPUs(pod *v1.Pod, container *v1.Container) int {
if v1qos.GetPodQOS(pod) != v1.PodQOSGuaranteed {
return 0
}
cpuQuantity := container.Resources.Requests[v1.ResourceCPU]
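// Non-integral CPU requests (e.g. 1500m) cannot be exclusively allocated;
// such containers run in the shared pool, so report zero guaranteed CPUs.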
if cpuQuantity.Value()*1000 != cpuQuantity.MilliValue() {
return 0
}
// Safe downcast to do for all systems with < 2.1 billion CPUs.
// Per the language spec, `int` is guaranteed to be at least 32 bits wide.
// https://golang.org/ref/spec#Numeric_types
return int(cpuQuantity.Value())
}

View File

@@ -0,0 +1,494 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cpumanager
import (
"fmt"
"reflect"
"testing"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
)
type staticPolicyTest struct {
description string
topo *topology.CPUTopology
numReservedCPUs int
containerID string
stAssignments state.ContainerCPUAssignments
stDefaultCPUSet cpuset.CPUSet
pod *v1.Pod
expErr error
expCPUAlloc bool
expCSet cpuset.CPUSet
expPanic bool
}
func TestStaticPolicyName(t *testing.T) {
policy := NewStaticPolicy(topoSingleSocketHT, 1)
policyName := policy.Name()
if policyName != "static" {
t.Errorf("StaticPolicy Name() error. expected: static, returned: %v",
policyName)
}
}
func TestStaticPolicyStart(t *testing.T) {
testCases := []staticPolicyTest{
{
description: "non-corrupted state",
topo: topoDualSocketHT,
stAssignments: state.ContainerCPUAssignments{
"0": cpuset.NewCPUSet(0),
},
stDefaultCPUSet: cpuset.NewCPUSet(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
expCSet: cpuset.NewCPUSet(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
},
{
description: "empty cpuset",
topo: topoDualSocketHT,
numReservedCPUs: 1,
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(),
expCSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
},
{
description: "reserved cores 0 & 6 are not present in available cpuset",
topo: topoDualSocketHT,
numReservedCPUs: 2,
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1),
expPanic: true,
},
{
description: "assigned core 2 is still present in available cpuset",
topo: topoDualSocketHT,
stAssignments: state.ContainerCPUAssignments{
"0": cpuset.NewCPUSet(0, 1, 2),
},
stDefaultCPUSet: cpuset.NewCPUSet(2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
expPanic: true,
},
}
for _, testCase := range testCases {
t.Run(testCase.description, func(t *testing.T) {
defer func() {
if err := recover(); err != nil {
if !testCase.expPanic {
t.Errorf("unexpected panic occured: %q", err)
}
} else if testCase.expPanic {
t.Error("expected panic doesn't occured")
}
}()
policy := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs).(*staticPolicy)
st := &mockState{
assignments: testCase.stAssignments,
defaultCPUSet: testCase.stDefaultCPUSet,
}
policy.Start(st)
if !testCase.stDefaultCPUSet.IsEmpty() {
for cpuid := 1; cpuid < policy.topology.NumCPUs; cpuid++ {
if !st.defaultCPUSet.Contains(cpuid) {
t.Errorf("StaticPolicy Start() error. expected cpuid %d to be present in defaultCPUSet", cpuid)
}
}
}
if !st.GetDefaultCPUSet().Equals(testCase.expCSet) {
t.Errorf("State CPUSet is different than expected. Have %q wants: %q", st.GetDefaultCPUSet(),
testCase.expCSet)
}
})
}
}
func TestStaticPolicyAdd(t *testing.T) {
largeTopoBuilder := cpuset.NewBuilder()
largeTopoSock0Builder := cpuset.NewBuilder()
largeTopoSock1Builder := cpuset.NewBuilder()
largeTopo := *topoQuadSocketFourWayHT
for cpuid, val := range largeTopo.CPUDetails {
largeTopoBuilder.Add(cpuid)
if val.SocketID == 0 {
largeTopoSock0Builder.Add(cpuid)
} else if val.SocketID == 1 {
largeTopoSock1Builder.Add(cpuid)
}
}
largeTopoCPUSet := largeTopoBuilder.Result()
largeTopoSock0CPUSet := largeTopoSock0Builder.Result()
largeTopoSock1CPUSet := largeTopoSock1Builder.Result()
testCases := []staticPolicyTest{
{
description: "GuPodSingleCore, SingleSocketHT, ExpectError",
topo: topoSingleSocketHT,
numReservedCPUs: 1,
containerID: "fakeID2",
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
pod: makePod("8000m", "8000m"),
expErr: fmt.Errorf("not enough cpus available to satisfy request"),
expCPUAlloc: false,
expCSet: cpuset.NewCPUSet(),
},
{
description: "GuPodSingleCore, SingleSocketHT, ExpectAllocOneCPU",
topo: topoSingleSocketHT,
numReservedCPUs: 1,
containerID: "fakeID2",
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
pod: makePod("1000m", "1000m"),
expErr: nil,
expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(4), // expect sibling of partial core
},
{
description: "GuPodMultipleCores, SingleSocketHT, ExpectAllocOneCore",
topo: topoSingleSocketHT,
numReservedCPUs: 1,
containerID: "fakeID3",
stAssignments: state.ContainerCPUAssignments{
"fakeID100": cpuset.NewCPUSet(2, 3, 6, 7),
},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 4, 5),
pod: makePod("2000m", "2000m"),
expErr: nil,
expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(1, 5),
},
{
description: "GuPodMultipleCores, DualSocketHT, ExpectAllocOneSocket",
topo: topoDualSocketHT,
numReservedCPUs: 1,
containerID: "fakeID3",
stAssignments: state.ContainerCPUAssignments{
"fakeID100": cpuset.NewCPUSet(2),
},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11),
pod: makePod("6000m", "6000m"),
expErr: nil,
expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(1, 3, 5, 7, 9, 11),
},
{
description: "GuPodMultipleCores, DualSocketHT, ExpectAllocThreeCores",
topo: topoDualSocketHT,
numReservedCPUs: 1,
containerID: "fakeID3",
stAssignments: state.ContainerCPUAssignments{
"fakeID100": cpuset.NewCPUSet(1, 5),
},
stDefaultCPUSet: cpuset.NewCPUSet(0, 2, 3, 4, 6, 7, 8, 9, 10, 11),
pod: makePod("6000m", "6000m"),
expErr: nil,
expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(2, 3, 4, 8, 9, 10),
},
{
description: "GuPodMultipleCores, DualSocketNoHT, ExpectAllocOneSocket",
topo: topoDualSocketNoHT,
numReservedCPUs: 1,
containerID: "fakeID1",
stAssignments: state.ContainerCPUAssignments{
"fakeID100": cpuset.NewCPUSet(),
},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 3, 4, 5, 6, 7),
pod: makePod("4000m", "4000m"),
expErr: nil,
expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(4, 5, 6, 7),
},
{
description: "GuPodMultipleCores, DualSocketNoHT, ExpectAllocFourCores",
topo: topoDualSocketNoHT,
numReservedCPUs: 1,
containerID: "fakeID1",
stAssignments: state.ContainerCPUAssignments{
"fakeID100": cpuset.NewCPUSet(4, 5),
},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 3, 6, 7),
pod: makePod("4000m", "4000m"),
expErr: nil,
expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(1, 3, 6, 7),
},
{
description: "GuPodMultipleCores, DualSocketHT, ExpectAllocOneSocketOneCore",
topo: topoDualSocketHT,
numReservedCPUs: 1,
containerID: "fakeID3",
stAssignments: state.ContainerCPUAssignments{
"fakeID100": cpuset.NewCPUSet(2),
},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11),
pod: makePod("8000m", "8000m"),
expErr: nil,
expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(1, 3, 4, 5, 7, 9, 10, 11),
},
{
description: "NonGuPod, SingleSocketHT, NoAlloc",
topo: topoSingleSocketHT,
numReservedCPUs: 1,
containerID: "fakeID1",
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
pod: makePod("1000m", "2000m"),
expErr: nil,
expCPUAlloc: false,
expCSet: cpuset.NewCPUSet(),
},
{
description: "GuPodNonIntegerCore, SingleSocketHT, NoAlloc",
topo: topoSingleSocketHT,
numReservedCPUs: 1,
containerID: "fakeID4",
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
pod: makePod("977m", "977m"),
expErr: nil,
expCPUAlloc: false,
expCSet: cpuset.NewCPUSet(),
},
{
description: "GuPodMultipleCores, SingleSocketHT, NoAllocExpectError",
topo: topoSingleSocketHT,
numReservedCPUs: 1,
containerID: "fakeID5",
stAssignments: state.ContainerCPUAssignments{
"fakeID100": cpuset.NewCPUSet(1, 2, 3, 4, 5, 6),
},
stDefaultCPUSet: cpuset.NewCPUSet(0, 7),
pod: makePod("2000m", "2000m"),
expErr: fmt.Errorf("not enough cpus available to satisfy request"),
expCPUAlloc: false,
expCSet: cpuset.NewCPUSet(),
},
{
description: "GuPodMultipleCores, DualSocketHT, NoAllocExpectError",
topo: topoDualSocketHT,
numReservedCPUs: 1,
containerID: "fakeID5",
stAssignments: state.ContainerCPUAssignments{
"fakeID100": cpuset.NewCPUSet(1, 2, 3),
},
stDefaultCPUSet: cpuset.NewCPUSet(0, 4, 5, 6, 7, 8, 9, 10, 11),
pod: makePod("10000m", "10000m"),
expErr: fmt.Errorf("not enough cpus available to satisfy request"),
expCPUAlloc: false,
expCSet: cpuset.NewCPUSet(),
},
{
// All the CPUs from Socket 0 are available. Some CPUs from each
// Socket have been already assigned.
// Expect all CPUs from Socket 0.
description: "GuPodMultipleCores, topoQuadSocketFourWayHT, ExpectAllocSock0",
topo: topoQuadSocketFourWayHT,
containerID: "fakeID5",
stAssignments: state.ContainerCPUAssignments{
"fakeID100": cpuset.NewCPUSet(3, 11, 4, 5, 6, 7),
},
stDefaultCPUSet: largeTopoCPUSet.Difference(cpuset.NewCPUSet(3, 11, 4, 5, 6, 7)),
pod: makePod("72000m", "72000m"),
expErr: nil,
expCPUAlloc: true,
expCSet: largeTopoSock0CPUSet,
},
{
// Only 3 full cores (one from each of three sockets) and some partial cores are available.
// Expect the CPUs from those 3 full cores.
description: "GuPodMultipleCores, topoQuadSocketFourWayHT, ExpectAllocAllFullCoresFromThreeSockets",
topo: topoQuadSocketFourWayHT,
containerID: "fakeID5",
stAssignments: state.ContainerCPUAssignments{
"fakeID100": largeTopoCPUSet.Difference(cpuset.NewCPUSet(1, 25, 13, 38, 2, 9, 11, 35, 23, 48, 12, 51,
53, 173, 113, 233, 54, 61)),
},
stDefaultCPUSet: cpuset.NewCPUSet(1, 25, 13, 38, 2, 9, 11, 35, 23, 48, 12, 51, 53, 173, 113, 233, 54, 61),
pod: makePod("12000m", "12000m"),
expErr: nil,
expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(1, 25, 13, 38, 11, 35, 23, 48, 53, 173, 113, 233),
},
{
// All CPUs from Socket 1, 1 full core and some partial cores are available.
// Expect all CPUs from Socket 1 and the hyper-threads from the full core.
description: "GuPodMultipleCores, topoQuadSocketFourWayHT, ExpectAllocAllSock1+FullCore",
topo: topoQuadSocketFourWayHT,
containerID: "fakeID5",
stAssignments: state.ContainerCPUAssignments{
"fakeID100": largeTopoCPUSet.Difference(largeTopoSock1CPUSet.Union(cpuset.NewCPUSet(10, 34, 22, 47, 53,
173, 61, 181, 108, 228, 115, 235))),
},
stDefaultCPUSet: largeTopoSock1CPUSet.Union(cpuset.NewCPUSet(10, 34, 22, 47, 53, 173, 61, 181, 108, 228,
115, 235)),
pod: makePod("76000m", "76000m"),
expErr: nil,
expCPUAlloc: true,
expCSet: largeTopoSock1CPUSet.Union(cpuset.NewCPUSet(10, 34, 22, 47)),
},
{
// Only partial cores are available in the entire system.
// Expect allocation of all the CPUs from the partial cores.
description: "GuPodMultipleCores, topoQuadSocketFourWayHT, ExpectAllocCPUs",
topo: topoQuadSocketFourWayHT,
containerID: "fakeID5",
stAssignments: state.ContainerCPUAssignments{
"fakeID100": largeTopoCPUSet.Difference(cpuset.NewCPUSet(10, 11, 53, 37, 55, 67, 52)),
},
stDefaultCPUSet: cpuset.NewCPUSet(10, 11, 53, 67, 52),
pod: makePod("5000m", "5000m"),
expErr: nil,
expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(10, 11, 53, 67, 52),
},
{
// Only 7 CPUs are available.
// Pod requests 76 cores.
// An error is expected since fewer CPUs are available than requested.
description: "GuPodMultipleCores, topoQuadSocketFourWayHT, NoAlloc",
topo: topoQuadSocketFourWayHT,
containerID: "fakeID5",
stAssignments: state.ContainerCPUAssignments{
"fakeID100": largeTopoCPUSet.Difference(cpuset.NewCPUSet(10, 11, 53, 37, 55, 67, 52)),
},
stDefaultCPUSet: cpuset.NewCPUSet(10, 11, 53, 37, 55, 67, 52),
pod: makePod("76000m", "76000m"),
expErr: fmt.Errorf("not enough cpus available to satisfy request"),
expCPUAlloc: false,
expCSet: cpuset.NewCPUSet(),
},
}
for _, testCase := range testCases {
policy := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs)
st := &mockState{
assignments: testCase.stAssignments,
defaultCPUSet: testCase.stDefaultCPUSet,
}
container := &testCase.pod.Spec.Containers[0]
err := policy.AddContainer(st, testCase.pod, container, testCase.containerID)
if !reflect.DeepEqual(err, testCase.expErr) {
t.Errorf("StaticPolicy AddContainer() error (%v). expected add error: %v but got: %v",
testCase.description, testCase.expErr, err)
}
if testCase.expCPUAlloc {
cset, found := st.assignments[testCase.containerID]
if !found {
t.Errorf("StaticPolicy AddContainer() error (%v). expected container id %v to be present in assignments %v",
testCase.description, testCase.containerID, st.assignments)
}
if !reflect.DeepEqual(cset, testCase.expCSet) {
t.Errorf("StaticPolicy AddContainer() error (%v). expected cpuset %v but got %v",
testCase.description, testCase.expCSet, cset)
}
if !cset.Intersection(st.defaultCPUSet).IsEmpty() {
t.Errorf("StaticPolicy AddContainer() error (%v). expected cpuset %v to be disoint from the shared cpuset %v",
testCase.description, cset, st.defaultCPUSet)
}
}
if !testCase.expCPUAlloc {
_, found := st.assignments[testCase.containerID]
if found {
t.Errorf("StaticPolicy AddContainer() error (%v). Did not expect container id %v to be present in assignments %v",
testCase.description, testCase.containerID, st.assignments)
}
}
}
}
func TestStaticPolicyRemove(t *testing.T) {
testCases := []staticPolicyTest{
{
description: "SingleSocketHT, DeAllocOneContainer",
topo: topoSingleSocketHT,
containerID: "fakeID1",
stAssignments: state.ContainerCPUAssignments{
"fakeID1": cpuset.NewCPUSet(1, 2, 3),
},
stDefaultCPUSet: cpuset.NewCPUSet(4, 5, 6, 7),
expCSet: cpuset.NewCPUSet(1, 2, 3, 4, 5, 6, 7),
},
{
description: "SingleSocketHT, DeAllocOneContainer, BeginEmpty",
topo: topoSingleSocketHT,
containerID: "fakeID1",
stAssignments: state.ContainerCPUAssignments{
"fakeID1": cpuset.NewCPUSet(1, 2, 3),
"fakeID2": cpuset.NewCPUSet(4, 5, 6, 7),
},
stDefaultCPUSet: cpuset.NewCPUSet(),
expCSet: cpuset.NewCPUSet(1, 2, 3),
},
{
description: "SingleSocketHT, DeAllocTwoContainer",
topo: topoSingleSocketHT,
containerID: "fakeID1",
stAssignments: state.ContainerCPUAssignments{
"fakeID1": cpuset.NewCPUSet(1, 3, 5),
"fakeID2": cpuset.NewCPUSet(2, 4),
},
stDefaultCPUSet: cpuset.NewCPUSet(6, 7),
expCSet: cpuset.NewCPUSet(1, 3, 5, 6, 7),
},
{
description: "SingleSocketHT, NoDeAlloc",
topo: topoSingleSocketHT,
containerID: "fakeID2",
stAssignments: state.ContainerCPUAssignments{
"fakeID1": cpuset.NewCPUSet(1, 3, 5),
},
stDefaultCPUSet: cpuset.NewCPUSet(2, 4, 6, 7),
expCSet: cpuset.NewCPUSet(2, 4, 6, 7),
},
}
for _, testCase := range testCases {
policy := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs)
st := &mockState{
assignments: testCase.stAssignments,
defaultCPUSet: testCase.stDefaultCPUSet,
}
policy.RemoveContainer(st, testCase.containerID)
if !reflect.DeepEqual(st.defaultCPUSet, testCase.expCSet) {
t.Errorf("StaticPolicy RemoveContainer() error (%v). expected default cpuset %v but got %v",
testCase.description, testCase.expCSet, st.defaultCPUSet)
}
if _, found := st.assignments[testCase.containerID]; found {
t.Errorf("StaticPolicy RemoveContainer() error (%v). expected containerID %v not be in assignments %v",
testCase.description, testCase.containerID, st.assignments)
}
}
}

View File

@ -0,0 +1,390 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cpumanager
import (
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
)
var (
topoSingleSocketHT = &topology.CPUTopology{
NumCPUs: 8,
NumSockets: 1,
NumCores: 4,
CPUDetails: map[int]topology.CPUInfo{
0: {CoreID: 0, SocketID: 0},
1: {CoreID: 1, SocketID: 0},
2: {CoreID: 2, SocketID: 0},
3: {CoreID: 3, SocketID: 0},
4: {CoreID: 0, SocketID: 0},
5: {CoreID: 1, SocketID: 0},
6: {CoreID: 2, SocketID: 0},
7: {CoreID: 3, SocketID: 0},
},
}
topoDualSocketHT = &topology.CPUTopology{
NumCPUs: 12,
NumSockets: 2,
NumCores: 6,
CPUDetails: map[int]topology.CPUInfo{
0: {CoreID: 0, SocketID: 0},
1: {CoreID: 1, SocketID: 1},
2: {CoreID: 2, SocketID: 0},
3: {CoreID: 3, SocketID: 1},
4: {CoreID: 4, SocketID: 0},
5: {CoreID: 5, SocketID: 1},
6: {CoreID: 0, SocketID: 0},
7: {CoreID: 1, SocketID: 1},
8: {CoreID: 2, SocketID: 0},
9: {CoreID: 3, SocketID: 1},
10: {CoreID: 4, SocketID: 0},
11: {CoreID: 5, SocketID: 1},
},
}
topoDualSocketNoHT = &topology.CPUTopology{
NumCPUs: 8,
NumSockets: 2,
NumCores: 8,
CPUDetails: map[int]topology.CPUInfo{
0: {CoreID: 0, SocketID: 0},
1: {CoreID: 1, SocketID: 0},
2: {CoreID: 2, SocketID: 0},
3: {CoreID: 3, SocketID: 0},
4: {CoreID: 4, SocketID: 1},
5: {CoreID: 5, SocketID: 1},
6: {CoreID: 6, SocketID: 1},
7: {CoreID: 7, SocketID: 1},
},
}
/*
Topology from https://www.open-mpi.org/projects/hwloc/lstopo/images/KNL.SNC4.H50.v1.11.png.
Socket0:
0-2,9-10,13-14,21-22,25-26,33-34,38-39,46-47,50,57-58,71-72,79-80,87-88,95-96,103-104,109-110,117-118,
131-132,139-140,147-148,155-156,163-164,169-170,177-178,191-192,199-200,207-208,215-216,223-224,229-230,
237-238,251-252,259-260,267-268,275-276,283-284
Socket1:
3-4,11-12,15-16,23-24,27-28,35-36,40-41,48-49,51-52,59-60,65-66,73-74,81-82,89-90,97-98,111-112,119-120,125-126,
133-134,141-142,149-150,157-158,171-172,179-180,185-186,193-194,201-202,209-210,217-218,231-232,239-240,245-246,
253-254,261-262,269-270,277-278
Socket2:
5-6,17-18,29-30,42-43,53-54,61-62,67-68,75-76,83-84,91-92,99-100,105-106,113-114,121-122,127-128,135-136,
143-144,151-152,159-160,165-166,173-174,181-182,187-188,195-196,203-204,211-212,219-220,225-226,233-234,241-242,
247-248,255-256,263-264,271-272,279-280,285-286
Socket3:
7-8,19-20,31-32,37,44-45,55-56,63-64,69-70,77-78,85-86,93-94,101-102,107-108,115-116,123-124,129-130,137-138,
145-146,153-154,161-162,167-168,175-176,183-184,189-190,197-198,205-206,213-214,221-222,227-228,235-236,243-244,
249-250,257-258,265-266,273-274,281-282,287
*/
topoQuadSocketFourWayHT = &topology.CPUTopology{
NumCPUs: 288,
NumSockets: 4,
NumCores: 72,
CPUDetails: map[int]topology.CPUInfo{
0: {CoreID: 0, SocketID: 0},
169: {CoreID: 0, SocketID: 0},
109: {CoreID: 0, SocketID: 0},
229: {CoreID: 0, SocketID: 0},
50: {CoreID: 1, SocketID: 0},
170: {CoreID: 1, SocketID: 0},
110: {CoreID: 1, SocketID: 0},
230: {CoreID: 1, SocketID: 0},
1: {CoreID: 64, SocketID: 0},
25: {CoreID: 64, SocketID: 0},
13: {CoreID: 64, SocketID: 0},
38: {CoreID: 64, SocketID: 0},
2: {CoreID: 65, SocketID: 0},
26: {CoreID: 65, SocketID: 0},
14: {CoreID: 65, SocketID: 0},
39: {CoreID: 65, SocketID: 0},
9: {CoreID: 72, SocketID: 0},
33: {CoreID: 72, SocketID: 0},
21: {CoreID: 72, SocketID: 0},
46: {CoreID: 72, SocketID: 0},
10: {CoreID: 73, SocketID: 0},
34: {CoreID: 73, SocketID: 0},
22: {CoreID: 73, SocketID: 0},
47: {CoreID: 73, SocketID: 0},
57: {CoreID: 8, SocketID: 0},
177: {CoreID: 8, SocketID: 0},
117: {CoreID: 8, SocketID: 0},
237: {CoreID: 8, SocketID: 0},
58: {CoreID: 9, SocketID: 0},
178: {CoreID: 9, SocketID: 0},
118: {CoreID: 9, SocketID: 0},
238: {CoreID: 9, SocketID: 0},
71: {CoreID: 24, SocketID: 0},
191: {CoreID: 24, SocketID: 0},
131: {CoreID: 24, SocketID: 0},
251: {CoreID: 24, SocketID: 0},
72: {CoreID: 25, SocketID: 0},
192: {CoreID: 25, SocketID: 0},
132: {CoreID: 25, SocketID: 0},
252: {CoreID: 25, SocketID: 0},
79: {CoreID: 32, SocketID: 0},
199: {CoreID: 32, SocketID: 0},
139: {CoreID: 32, SocketID: 0},
259: {CoreID: 32, SocketID: 0},
80: {CoreID: 33, SocketID: 0},
200: {CoreID: 33, SocketID: 0},
140: {CoreID: 33, SocketID: 0},
260: {CoreID: 33, SocketID: 0},
87: {CoreID: 40, SocketID: 0},
207: {CoreID: 40, SocketID: 0},
147: {CoreID: 40, SocketID: 0},
267: {CoreID: 40, SocketID: 0},
88: {CoreID: 41, SocketID: 0},
208: {CoreID: 41, SocketID: 0},
148: {CoreID: 41, SocketID: 0},
268: {CoreID: 41, SocketID: 0},
95: {CoreID: 48, SocketID: 0},
215: {CoreID: 48, SocketID: 0},
155: {CoreID: 48, SocketID: 0},
275: {CoreID: 48, SocketID: 0},
96: {CoreID: 49, SocketID: 0},
216: {CoreID: 49, SocketID: 0},
156: {CoreID: 49, SocketID: 0},
276: {CoreID: 49, SocketID: 0},
103: {CoreID: 56, SocketID: 0},
223: {CoreID: 56, SocketID: 0},
163: {CoreID: 56, SocketID: 0},
283: {CoreID: 56, SocketID: 0},
104: {CoreID: 57, SocketID: 0},
224: {CoreID: 57, SocketID: 0},
164: {CoreID: 57, SocketID: 0},
284: {CoreID: 57, SocketID: 0},
3: {CoreID: 66, SocketID: 1},
27: {CoreID: 66, SocketID: 1},
15: {CoreID: 66, SocketID: 1},
40: {CoreID: 66, SocketID: 1},
4: {CoreID: 67, SocketID: 1},
28: {CoreID: 67, SocketID: 1},
16: {CoreID: 67, SocketID: 1},
41: {CoreID: 67, SocketID: 1},
11: {CoreID: 74, SocketID: 1},
35: {CoreID: 74, SocketID: 1},
23: {CoreID: 74, SocketID: 1},
48: {CoreID: 74, SocketID: 1},
12: {CoreID: 75, SocketID: 1},
36: {CoreID: 75, SocketID: 1},
24: {CoreID: 75, SocketID: 1},
49: {CoreID: 75, SocketID: 1},
51: {CoreID: 2, SocketID: 1},
171: {CoreID: 2, SocketID: 1},
111: {CoreID: 2, SocketID: 1},
231: {CoreID: 2, SocketID: 1},
52: {CoreID: 3, SocketID: 1},
172: {CoreID: 3, SocketID: 1},
112: {CoreID: 3, SocketID: 1},
232: {CoreID: 3, SocketID: 1},
59: {CoreID: 10, SocketID: 1},
179: {CoreID: 10, SocketID: 1},
119: {CoreID: 10, SocketID: 1},
239: {CoreID: 10, SocketID: 1},
60: {CoreID: 11, SocketID: 1},
180: {CoreID: 11, SocketID: 1},
120: {CoreID: 11, SocketID: 1},
240: {CoreID: 11, SocketID: 1},
65: {CoreID: 18, SocketID: 1},
185: {CoreID: 18, SocketID: 1},
125: {CoreID: 18, SocketID: 1},
245: {CoreID: 18, SocketID: 1},
66: {CoreID: 19, SocketID: 1},
186: {CoreID: 19, SocketID: 1},
126: {CoreID: 19, SocketID: 1},
246: {CoreID: 19, SocketID: 1},
73: {CoreID: 26, SocketID: 1},
193: {CoreID: 26, SocketID: 1},
133: {CoreID: 26, SocketID: 1},
253: {CoreID: 26, SocketID: 1},
74: {CoreID: 27, SocketID: 1},
194: {CoreID: 27, SocketID: 1},
134: {CoreID: 27, SocketID: 1},
254: {CoreID: 27, SocketID: 1},
81: {CoreID: 34, SocketID: 1},
201: {CoreID: 34, SocketID: 1},
141: {CoreID: 34, SocketID: 1},
261: {CoreID: 34, SocketID: 1},
82: {CoreID: 35, SocketID: 1},
202: {CoreID: 35, SocketID: 1},
142: {CoreID: 35, SocketID: 1},
262: {CoreID: 35, SocketID: 1},
89: {CoreID: 42, SocketID: 1},
209: {CoreID: 42, SocketID: 1},
149: {CoreID: 42, SocketID: 1},
269: {CoreID: 42, SocketID: 1},
90: {CoreID: 43, SocketID: 1},
210: {CoreID: 43, SocketID: 1},
150: {CoreID: 43, SocketID: 1},
270: {CoreID: 43, SocketID: 1},
97: {CoreID: 50, SocketID: 1},
217: {CoreID: 50, SocketID: 1},
157: {CoreID: 50, SocketID: 1},
277: {CoreID: 50, SocketID: 1},
98: {CoreID: 51, SocketID: 1},
218: {CoreID: 51, SocketID: 1},
158: {CoreID: 51, SocketID: 1},
278: {CoreID: 51, SocketID: 1},
5: {CoreID: 68, SocketID: 2},
29: {CoreID: 68, SocketID: 2},
17: {CoreID: 68, SocketID: 2},
42: {CoreID: 68, SocketID: 2},
6: {CoreID: 69, SocketID: 2},
30: {CoreID: 69, SocketID: 2},
18: {CoreID: 69, SocketID: 2},
43: {CoreID: 69, SocketID: 2},
53: {CoreID: 4, SocketID: 2},
173: {CoreID: 4, SocketID: 2},
113: {CoreID: 4, SocketID: 2},
233: {CoreID: 4, SocketID: 2},
54: {CoreID: 5, SocketID: 2},
174: {CoreID: 5, SocketID: 2},
114: {CoreID: 5, SocketID: 2},
234: {CoreID: 5, SocketID: 2},
61: {CoreID: 12, SocketID: 2},
181: {CoreID: 12, SocketID: 2},
121: {CoreID: 12, SocketID: 2},
241: {CoreID: 12, SocketID: 2},
62: {CoreID: 13, SocketID: 2},
182: {CoreID: 13, SocketID: 2},
122: {CoreID: 13, SocketID: 2},
242: {CoreID: 13, SocketID: 2},
67: {CoreID: 20, SocketID: 2},
187: {CoreID: 20, SocketID: 2},
127: {CoreID: 20, SocketID: 2},
247: {CoreID: 20, SocketID: 2},
68: {CoreID: 21, SocketID: 2},
188: {CoreID: 21, SocketID: 2},
128: {CoreID: 21, SocketID: 2},
248: {CoreID: 21, SocketID: 2},
75: {CoreID: 28, SocketID: 2},
195: {CoreID: 28, SocketID: 2},
135: {CoreID: 28, SocketID: 2},
255: {CoreID: 28, SocketID: 2},
76: {CoreID: 29, SocketID: 2},
196: {CoreID: 29, SocketID: 2},
136: {CoreID: 29, SocketID: 2},
256: {CoreID: 29, SocketID: 2},
83: {CoreID: 36, SocketID: 2},
203: {CoreID: 36, SocketID: 2},
143: {CoreID: 36, SocketID: 2},
263: {CoreID: 36, SocketID: 2},
84: {CoreID: 37, SocketID: 2},
204: {CoreID: 37, SocketID: 2},
144: {CoreID: 37, SocketID: 2},
264: {CoreID: 37, SocketID: 2},
91: {CoreID: 44, SocketID: 2},
211: {CoreID: 44, SocketID: 2},
151: {CoreID: 44, SocketID: 2},
271: {CoreID: 44, SocketID: 2},
92: {CoreID: 45, SocketID: 2},
212: {CoreID: 45, SocketID: 2},
152: {CoreID: 45, SocketID: 2},
272: {CoreID: 45, SocketID: 2},
99: {CoreID: 52, SocketID: 2},
219: {CoreID: 52, SocketID: 2},
159: {CoreID: 52, SocketID: 2},
279: {CoreID: 52, SocketID: 2},
100: {CoreID: 53, SocketID: 2},
220: {CoreID: 53, SocketID: 2},
160: {CoreID: 53, SocketID: 2},
280: {CoreID: 53, SocketID: 2},
105: {CoreID: 60, SocketID: 2},
225: {CoreID: 60, SocketID: 2},
165: {CoreID: 60, SocketID: 2},
285: {CoreID: 60, SocketID: 2},
106: {CoreID: 61, SocketID: 2},
226: {CoreID: 61, SocketID: 2},
166: {CoreID: 61, SocketID: 2},
286: {CoreID: 61, SocketID: 2},
7: {CoreID: 70, SocketID: 3},
31: {CoreID: 70, SocketID: 3},
19: {CoreID: 70, SocketID: 3},
44: {CoreID: 70, SocketID: 3},
8: {CoreID: 71, SocketID: 3},
32: {CoreID: 71, SocketID: 3},
20: {CoreID: 71, SocketID: 3},
45: {CoreID: 71, SocketID: 3},
37: {CoreID: 63, SocketID: 3},
168: {CoreID: 63, SocketID: 3},
108: {CoreID: 63, SocketID: 3},
228: {CoreID: 63, SocketID: 3},
107: {CoreID: 62, SocketID: 3},
227: {CoreID: 62, SocketID: 3},
167: {CoreID: 62, SocketID: 3},
287: {CoreID: 62, SocketID: 3},
55: {CoreID: 6, SocketID: 3},
175: {CoreID: 6, SocketID: 3},
115: {CoreID: 6, SocketID: 3},
235: {CoreID: 6, SocketID: 3},
56: {CoreID: 7, SocketID: 3},
176: {CoreID: 7, SocketID: 3},
116: {CoreID: 7, SocketID: 3},
236: {CoreID: 7, SocketID: 3},
63: {CoreID: 14, SocketID: 3},
183: {CoreID: 14, SocketID: 3},
123: {CoreID: 14, SocketID: 3},
243: {CoreID: 14, SocketID: 3},
64: {CoreID: 15, SocketID: 3},
184: {CoreID: 15, SocketID: 3},
124: {CoreID: 15, SocketID: 3},
244: {CoreID: 15, SocketID: 3},
69: {CoreID: 22, SocketID: 3},
189: {CoreID: 22, SocketID: 3},
129: {CoreID: 22, SocketID: 3},
249: {CoreID: 22, SocketID: 3},
70: {CoreID: 23, SocketID: 3},
190: {CoreID: 23, SocketID: 3},
130: {CoreID: 23, SocketID: 3},
250: {CoreID: 23, SocketID: 3},
77: {CoreID: 30, SocketID: 3},
197: {CoreID: 30, SocketID: 3},
137: {CoreID: 30, SocketID: 3},
257: {CoreID: 30, SocketID: 3},
78: {CoreID: 31, SocketID: 3},
198: {CoreID: 31, SocketID: 3},
138: {CoreID: 31, SocketID: 3},
258: {CoreID: 31, SocketID: 3},
85: {CoreID: 38, SocketID: 3},
205: {CoreID: 38, SocketID: 3},
145: {CoreID: 38, SocketID: 3},
265: {CoreID: 38, SocketID: 3},
86: {CoreID: 39, SocketID: 3},
206: {CoreID: 39, SocketID: 3},
146: {CoreID: 39, SocketID: 3},
266: {CoreID: 39, SocketID: 3},
93: {CoreID: 46, SocketID: 3},
213: {CoreID: 46, SocketID: 3},
153: {CoreID: 46, SocketID: 3},
273: {CoreID: 46, SocketID: 3},
94: {CoreID: 47, SocketID: 3},
214: {CoreID: 47, SocketID: 3},
154: {CoreID: 47, SocketID: 3},
274: {CoreID: 47, SocketID: 3},
101: {CoreID: 54, SocketID: 3},
221: {CoreID: 54, SocketID: 3},
161: {CoreID: 54, SocketID: 3},
281: {CoreID: 54, SocketID: 3},
102: {CoreID: 55, SocketID: 3},
222: {CoreID: 55, SocketID: 3},
162: {CoreID: 55, SocketID: 3},
282: {CoreID: 55, SocketID: 3},
},
}
)

View File

@ -0,0 +1,38 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"state.go",
"state_file.go",
"state_mem.go",
],
importpath = "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state",
visibility = ["//visibility:public"],
deps = [
"//pkg/kubelet/cm/cpuset:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["state_file_test.go"],
importpath = "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state",
library = ":go_default_library",
deps = ["//pkg/kubelet/cm/cpuset:go_default_library"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -0,0 +1,55 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package state
import (
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
)
// ContainerCPUAssignments type used in cpu manager state
type ContainerCPUAssignments map[string]cpuset.CPUSet
// Clone returns a copy of ContainerCPUAssignments
func (as ContainerCPUAssignments) Clone() ContainerCPUAssignments {
ret := make(ContainerCPUAssignments)
for key, val := range as {
ret[key] = val
}
return ret
}
// Reader interface used to read current cpu/pod assignment state
type Reader interface {
GetCPUSet(containerID string) (cpuset.CPUSet, bool)
GetDefaultCPUSet() cpuset.CPUSet
GetCPUSetOrDefault(containerID string) cpuset.CPUSet
GetCPUAssignments() ContainerCPUAssignments
}
type writer interface {
SetCPUSet(containerID string, cpuset cpuset.CPUSet)
SetDefaultCPUSet(cpuset cpuset.CPUSet)
SetCPUAssignments(ContainerCPUAssignments)
Delete(containerID string)
ClearState()
}
// State interface provides methods for tracking and setting cpu/pod assignment
type State interface {
Reader
writer
}
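// The sketch below is illustrative only (it is not referenced by any policy in
// this package); it shows the read-modify-write pattern the Reader and writer
// halves of State are designed for. The container ID and CPU number are
// arbitrary placeholders.
func exampleExclusiveAssign(s State, containerID string) {
	shared := s.GetDefaultCPUSet()
	// CPU 1 stands in for whatever CPU the allocation algorithm would pick.
	if !shared.Contains(1) {
		return
	}
	exclusive := cpuset.NewCPUSet(1)
	// CPUSets are treated as immutable; Difference returns a new set.
	s.SetCPUSet(containerID, exclusive)
	s.SetDefaultCPUSet(shared.Difference(exclusive))
}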

View File

@ -0,0 +1,204 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package state
import (
"encoding/json"
"fmt"
"github.com/golang/glog"
"io/ioutil"
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
"os"
"sync"
)
type stateFileData struct {
PolicyName string `json:"policyName"`
DefaultCPUSet string `json:"defaultCpuSet"`
Entries map[string]string `json:"entries,omitempty"`
}
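// For illustration, a serialized state file produced from this struct looks
// like the following (container IDs and CPU ranges are placeholders; the shape
// matches the fixtures in state_file_test.go):
//
//	{
//	  "policyName": "static",
//	  "defaultCpuSet": "4-7",
//	  "entries": {
//	    "containerID1": "0-1",
//	    "containerID2": "2-3"
//	  }
//	}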
var _ State = &stateFile{}
type stateFile struct {
sync.RWMutex
stateFilePath string
policyName string
cache State
}
// NewFileState creates a new State for keeping track of cpu/pod assignments with a file backend
func NewFileState(filePath string, policyName string) State {
stateFile := &stateFile{
stateFilePath: filePath,
cache: NewMemoryState(),
policyName: policyName,
}
if err := stateFile.tryRestoreState(); err != nil {
// could not restore state, init new state file
glog.Infof("[cpumanager] state file: initializing empty state file - reason: \"%s\"", err)
stateFile.cache.ClearState()
stateFile.storeState()
}
return stateFile
}
// tryRestoreState tries to read the state file; upon any error the
// error message is logged and the state is left clean (un-initialized).
func (sf *stateFile) tryRestoreState() error {
sf.Lock()
defer sf.Unlock()
var err error
// used when all parsing is ok
tmpAssignments := make(ContainerCPUAssignments)
tmpDefaultCPUSet := cpuset.NewCPUSet()
tmpContainerCPUSet := cpuset.NewCPUSet()
var content []byte
if content, err = ioutil.ReadFile(sf.stateFilePath); os.IsNotExist(err) {
// Create file
if _, err = os.Create(sf.stateFilePath); err != nil {
glog.Errorf("[cpumanager] state file: unable to create state file \"%s\":%s", sf.stateFilePath, err.Error())
panic("[cpumanager] state file not created")
}
glog.Infof("[cpumanager] state file: created empty state file \"%s\"", sf.stateFilePath)
} else {
// File exists - try to read
var readState stateFileData
if err = json.Unmarshal(content, &readState); err != nil {
glog.Warningf("[cpumanager] state file: could not unmarshal, corrupted state file - \"%s\"", sf.stateFilePath)
return err
}
if sf.policyName != readState.PolicyName {
return fmt.Errorf("policy configured \"%s\" != policy from state file \"%s\"", sf.policyName, readState.PolicyName)
}
if tmpDefaultCPUSet, err = cpuset.Parse(readState.DefaultCPUSet); err != nil {
glog.Warningf("[cpumanager] state file: could not parse state file - [defaultCpuSet:\"%s\"]", readState.DefaultCPUSet)
return err
}
for containerID, cpuString := range readState.Entries {
if tmpContainerCPUSet, err = cpuset.Parse(cpuString); err != nil {
glog.Warningf("[cpumanager] state file: could not parse state file - container id: %s, cpuset: \"%s\"", containerID, cpuString)
return err
}
tmpAssignments[containerID] = tmpContainerCPUSet
}
sf.cache.SetDefaultCPUSet(tmpDefaultCPUSet)
sf.cache.SetCPUAssignments(tmpAssignments)
glog.V(2).Infof("[cpumanager] state file: restored state from state file \"%s\"", sf.stateFilePath)
glog.V(2).Infof("[cpumanager] state file: defaultCPUSet: %s", tmpDefaultCPUSet.String())
}
return nil
}
// saves state to a file, caller is responsible for locking
func (sf *stateFile) storeState() {
var content []byte
var err error
data := stateFileData{
PolicyName: sf.policyName,
DefaultCPUSet: sf.cache.GetDefaultCPUSet().String(),
Entries: map[string]string{},
}
for containerID, cset := range sf.cache.GetCPUAssignments() {
data.Entries[containerID] = cset.String()
}
if content, err = json.Marshal(data); err != nil {
panic("[cpumanager] state file: could not serialize state to json")
}
if err = ioutil.WriteFile(sf.stateFilePath, content, 0644); err != nil {
panic("[cpumanager] state file not written")
}
return
}
func (sf *stateFile) GetCPUSet(containerID string) (cpuset.CPUSet, bool) {
sf.RLock()
defer sf.RUnlock()
res, ok := sf.cache.GetCPUSet(containerID)
return res, ok
}
func (sf *stateFile) GetDefaultCPUSet() cpuset.CPUSet {
sf.RLock()
defer sf.RUnlock()
return sf.cache.GetDefaultCPUSet()
}
func (sf *stateFile) GetCPUSetOrDefault(containerID string) cpuset.CPUSet {
sf.RLock()
defer sf.RUnlock()
return sf.cache.GetCPUSetOrDefault(containerID)
}
func (sf *stateFile) GetCPUAssignments() ContainerCPUAssignments {
sf.RLock()
defer sf.RUnlock()
return sf.cache.GetCPUAssignments()
}
func (sf *stateFile) SetCPUSet(containerID string, cset cpuset.CPUSet) {
sf.Lock()
defer sf.Unlock()
sf.cache.SetCPUSet(containerID, cset)
sf.storeState()
}
func (sf *stateFile) SetDefaultCPUSet(cset cpuset.CPUSet) {
sf.Lock()
defer sf.Unlock()
sf.cache.SetDefaultCPUSet(cset)
sf.storeState()
}
func (sf *stateFile) SetCPUAssignments(a ContainerCPUAssignments) {
sf.Lock()
defer sf.Unlock()
sf.cache.SetCPUAssignments(a)
sf.storeState()
}
func (sf *stateFile) Delete(containerID string) {
sf.Lock()
defer sf.Unlock()
sf.cache.Delete(containerID)
sf.storeState()
}
func (sf *stateFile) ClearState() {
sf.Lock()
defer sf.Unlock()
sf.cache.ClearState()
sf.storeState()
}
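// exampleFileStateRoundTrip is an illustrative sketch (not called anywhere in
// this package) of the intended lifecycle of the file-backed store: every
// mutation is written through to disk, so a second instance pointed at the
// same path and policy name restores the same state. The path and container
// ID below are placeholders.
func exampleFileStateRoundTrip() {
	st := NewFileState("/var/lib/kubelet/cpu_manager_state", "static")
	st.SetDefaultCPUSet(cpuset.NewCPUSet(0, 1, 2, 3))
	st.SetCPUSet("someContainerID", cpuset.NewCPUSet(2, 3))
	// A fresh instance restores what the first one persisted.
	restored := NewFileState("/var/lib/kubelet/cpu_manager_state", "static")
	glog.Infof("[cpumanager] restored default cpuset: %v", restored.GetDefaultCPUSet())
}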

View File

@ -0,0 +1,481 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package state
import (
"bytes"
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"reflect"
"strings"
"testing"
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
)
func writeToStateFile(statefile string, content string) {
ioutil.WriteFile(statefile, []byte(content), 0644)
}
func stateEqual(t *testing.T, sf State, sm State) {
cpusetSf := sf.GetDefaultCPUSet()
cpusetSm := sm.GetDefaultCPUSet()
if !cpusetSf.Equals(cpusetSm) {
t.Errorf("State CPUSet mismatch. Have %v, want %v", cpusetSf, cpusetSm)
}
cpuassignmentSf := sf.GetCPUAssignments()
cpuassignmentSm := sm.GetCPUAssignments()
if !reflect.DeepEqual(cpuassignmentSf, cpuassignmentSm) {
t.Errorf("State CPU assigments mismatch. Have %s, want %s", cpuassignmentSf, cpuassignmentSm)
}
}
func stderrCapture(t *testing.T, f func() State) (bytes.Buffer, State) {
stderr := os.Stderr
readBuffer, writeBuffer, err := os.Pipe()
if err != nil {
t.Errorf("cannot create pipe: %v", err.Error())
}
os.Stderr = writeBuffer
var outputBuffer bytes.Buffer
state := f()
writeBuffer.Close()
io.Copy(&outputBuffer, readBuffer)
os.Stderr = stderr
return outputBuffer, state
}
func TestFileStateTryRestore(t *testing.T) {
flag.Set("alsologtostderr", "true")
flag.Parse()
testCases := []struct {
description string
stateFileContent string
policyName string
expErr string
expectedState *stateMemory
}{
{
"Invalid JSON - empty file",
"\n",
"none",
"state file: could not unmarshal, corrupted state file",
&stateMemory{
assignments: ContainerCPUAssignments{},
defaultCPUSet: cpuset.NewCPUSet(),
},
},
{
"Invalid JSON - invalid content",
"{",
"none",
"state file: could not unmarshal, corrupted state file",
&stateMemory{
assignments: ContainerCPUAssignments{},
defaultCPUSet: cpuset.NewCPUSet(),
},
},
{
"Try restore defaultCPUSet only",
`{"policyName": "none", "defaultCpuSet": "4-6"}`,
"none",
"",
&stateMemory{
assignments: ContainerCPUAssignments{},
defaultCPUSet: cpuset.NewCPUSet(4, 5, 6),
},
},
{
"Try restore defaultCPUSet only - invalid name",
`{"policyName": "none", "defaultCpuSet" "4-6"}`,
"none",
"",
&stateMemory{
assignments: ContainerCPUAssignments{},
defaultCPUSet: cpuset.NewCPUSet(),
},
},
{
"Try restore assignments only",
`{
"policyName": "none",
"entries": {
"container1": "4-6",
"container2": "1-3"
}
}`,
"none",
"",
&stateMemory{
assignments: ContainerCPUAssignments{
"container1": cpuset.NewCPUSet(4, 5, 6),
"container2": cpuset.NewCPUSet(1, 2, 3),
},
defaultCPUSet: cpuset.NewCPUSet(),
},
},
{
"Try restore invalid policy name",
`{
"policyName": "A",
"defaultCpuSet": "0-7",
"entries": {}
}`,
"B",
"policy configured \"B\" != policy from state file \"A\"",
&stateMemory{
assignments: ContainerCPUAssignments{},
defaultCPUSet: cpuset.NewCPUSet(),
},
},
{
"Try restore invalid assignments",
`{"entries": }`,
"none",
"state file: could not unmarshal, corrupted state file",
&stateMemory{
assignments: ContainerCPUAssignments{},
defaultCPUSet: cpuset.NewCPUSet(),
},
},
{
"Try restore valid file",
`{
"policyName": "none",
"defaultCpuSet": "23-24",
"entries": {
"container1": "4-6",
"container2": "1-3"
}
}`,
"none",
"",
&stateMemory{
assignments: ContainerCPUAssignments{
"container1": cpuset.NewCPUSet(4, 5, 6),
"container2": cpuset.NewCPUSet(1, 2, 3),
},
defaultCPUSet: cpuset.NewCPUSet(23, 24),
},
},
{
"Try restore un-parsable defaultCPUSet ",
`{
"policyName": "none",
"defaultCpuSet": "2-sd"
}`,
"none",
"state file: could not parse state file",
&stateMemory{
assignments: ContainerCPUAssignments{},
defaultCPUSet: cpuset.NewCPUSet(),
},
},
{
"Try restore un-parsable assignments",
`{
"policyName": "none",
"defaultCpuSet": "23-24",
"entries": {
"container1": "p-6",
"container2": "1-3"
}
}`,
"none",
"state file: could not parse state file",
&stateMemory{
assignments: ContainerCPUAssignments{},
defaultCPUSet: cpuset.NewCPUSet(),
},
},
{
"TryRestoreState creates empty state file",
"",
"none",
"",
&stateMemory{
assignments: ContainerCPUAssignments{},
defaultCPUSet: cpuset.NewCPUSet(),
},
},
}
for idx, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
sfilePath, err := ioutil.TempFile("/tmp", fmt.Sprintf("cpumanager_state_file_test_%d", idx))
if err != nil {
t.Errorf("cannot create temporary file: %q", err.Error())
}
// Don't create the state file; let TryRestoreState figure out that it should create one
if tc.stateFileContent != "" {
writeToStateFile(sfilePath.Name(), tc.stateFileContent)
}
// Always remove the file, regardless of who created it
defer os.Remove(sfilePath.Name())
logData, fileState := stderrCapture(t, func() State {
return NewFileState(sfilePath.Name(), tc.policyName)
})
if tc.expErr != "" {
if logData.String() != "" {
if !strings.Contains(logData.String(), tc.expErr) {
t.Errorf("TryRestoreState() error = %v, wantErr %v", logData.String(), tc.expErr)
return
}
} else {
t.Errorf("TryRestoreState() error = nil, wantErr %v", tc.expErr)
return
}
}
stateEqual(t, fileState, tc.expectedState)
})
}
}
func TestFileStateTryRestorePanic(t *testing.T) {
testCase := struct {
description string
wantPanic bool
panicMessage string
}{
"Panic creating file",
true,
"[cpumanager] state file not created",
}
t.Run(testCase.description, func(t *testing.T) {
sfilePath := path.Join("/invalid_path/to_some_dir", "cpumanager_state_file_test")
defer func() {
if err := recover(); err != nil {
if testCase.wantPanic {
if testCase.panicMessage == err {
t.Logf("TryRestoreState() got expected panic = %v", err)
return
}
t.Errorf("TryRestoreState() unexpected panic = %v, wantErr %v", err, testCase.panicMessage)
}
}
}()
NewFileState(sfilePath, "static")
})
}
func TestUpdateStateFile(t *testing.T) {
flag.Set("alsologtostderr", "true")
flag.Parse()
testCases := []struct {
description string
expErr string
expectedState *stateMemory
}{
{
"Save empty state",
"",
&stateMemory{
assignments: ContainerCPUAssignments{},
defaultCPUSet: cpuset.NewCPUSet(),
},
},
{
"Save defaultCPUSet only",
"",
&stateMemory{
assignments: ContainerCPUAssignments{},
defaultCPUSet: cpuset.NewCPUSet(1, 6),
},
},
{
"Save assignments only",
"",
&stateMemory{
assignments: ContainerCPUAssignments{
"container1": cpuset.NewCPUSet(4, 5, 6),
"container2": cpuset.NewCPUSet(1, 2, 3),
},
defaultCPUSet: cpuset.NewCPUSet(),
},
},
}
for idx, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
sfilePath, err := ioutil.TempFile("/tmp", fmt.Sprintf("cpumanager_state_file_test_%d", idx))
defer os.Remove(sfilePath.Name())
if err != nil {
t.Errorf("cannot create temporary file: %q", err.Error())
}
fileState := stateFile{
stateFilePath: sfilePath.Name(),
policyName: "static",
cache: NewMemoryState(),
}
fileState.SetDefaultCPUSet(tc.expectedState.defaultCPUSet)
fileState.SetCPUAssignments(tc.expectedState.assignments)
logData, _ := stderrCapture(t, func() State {
fileState.storeState()
return &stateFile{}
})
errMsg := logData.String()
if tc.expErr != "" {
if errMsg != "" {
if errMsg != tc.expErr {
t.Errorf("UpdateStateFile() error = %v, wantErr %v", errMsg, tc.expErr)
return
}
} else {
t.Errorf("UpdateStateFile() error = nil, wantErr %v", tc.expErr)
return
}
} else {
if errMsg != "" {
t.Errorf("UpdateStateFile() error = %v, wantErr nil", errMsg)
return
}
}
newFileState := NewFileState(sfilePath.Name(), "static")
stateEqual(t, newFileState, tc.expectedState)
})
}
}
func TestHelpersStateFile(t *testing.T) {
testCases := []struct {
description string
defaultCPUset cpuset.CPUSet
containers map[string]cpuset.CPUSet
}{
{
description: "one container",
defaultCPUset: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8),
containers: map[string]cpuset.CPUSet{
"c1": cpuset.NewCPUSet(0, 1),
},
},
{
description: "two containers",
defaultCPUset: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8),
containers: map[string]cpuset.CPUSet{
"c1": cpuset.NewCPUSet(0, 1),
"c2": cpuset.NewCPUSet(2, 3, 4, 5),
},
},
{
description: "container with more cpus than is possible",
defaultCPUset: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8),
containers: map[string]cpuset.CPUSet{
"c1": cpuset.NewCPUSet(0, 10),
},
},
{
description: "container without assigned cpus",
defaultCPUset: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8),
containers: map[string]cpuset.CPUSet{
"c1": cpuset.NewCPUSet(),
},
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
sfFile, err := ioutil.TempFile("/tmp", "testHelpersStateFile")
defer os.Remove(sfFile.Name())
if err != nil {
t.Errorf("cannot create temporary test file: %q", err.Error())
}
state := NewFileState(sfFile.Name(), "static")
state.SetDefaultCPUSet(tc.defaultCPUset)
for containerName, containerCPUs := range tc.containers {
state.SetCPUSet(containerName, containerCPUs)
if cpus, _ := state.GetCPUSet(containerName); !cpus.Equals(containerCPUs) {
t.Errorf("state is inconsistant. Wants = %q Have = %q", containerCPUs, cpus)
}
state.Delete(containerName)
if cpus := state.GetCPUSetOrDefault(containerName); !cpus.Equals(tc.defaultCPUset) {
t.Error("deleted container still existing in state")
}
}
})
}
}
func TestClearStateStateFile(t *testing.T) {
testCases := []struct {
description string
defaultCPUset cpuset.CPUSet
containers map[string]cpuset.CPUSet
}{
{
description: "valid file",
defaultCPUset: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8),
containers: map[string]cpuset.CPUSet{
"c1": cpuset.NewCPUSet(0, 1),
"c2": cpuset.NewCPUSet(2, 3),
"c3": cpuset.NewCPUSet(4, 5),
},
},
}
for _, testCase := range testCases {
t.Run(testCase.description, func(t *testing.T) {
sfFile, err := ioutil.TempFile("/tmp", "testHelpersStateFile")
defer os.Remove(sfFile.Name())
if err != nil {
t.Errorf("cannot create temporary test file: %q", err.Error())
}
state := NewFileState(sfFile.Name(), "static")
state.SetDefaultCPUSet(testCase.defaultCPUset)
for containerName, containerCPUs := range testCase.containers {
state.SetCPUSet(containerName, containerCPUs)
}
state.ClearState()
if !cpuset.NewCPUSet().Equals(state.GetDefaultCPUSet()) {
t.Error("cleared state shoudn't has got information about available cpuset")
}
for containerName := range testCase.containers {
if !cpuset.NewCPUSet().Equals(state.GetCPUSetOrDefault(containerName)) {
t.Error("cleared state shoudn't has got information about containers")
}
}
})
}
}

View File

@ -0,0 +1,113 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package state
import (
"sync"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
)
type stateMemory struct {
sync.RWMutex
assignments ContainerCPUAssignments
defaultCPUSet cpuset.CPUSet
}
var _ State = &stateMemory{}
// NewMemoryState creates a new State for keeping track of cpu/pod assignments
func NewMemoryState() State {
glog.Infof("[cpumanager] initializing new in-memory state store")
return &stateMemory{
assignments: ContainerCPUAssignments{},
defaultCPUSet: cpuset.NewCPUSet(),
}
}
func (s *stateMemory) GetCPUSet(containerID string) (cpuset.CPUSet, bool) {
s.RLock()
defer s.RUnlock()
res, ok := s.assignments[containerID]
return res.Clone(), ok
}
func (s *stateMemory) GetDefaultCPUSet() cpuset.CPUSet {
s.RLock()
defer s.RUnlock()
return s.defaultCPUSet.Clone()
}
func (s *stateMemory) GetCPUSetOrDefault(containerID string) cpuset.CPUSet {
s.RLock()
defer s.RUnlock()
if res, ok := s.GetCPUSet(containerID); ok {
return res
}
return s.GetDefaultCPUSet()
}
func (s *stateMemory) GetCPUAssignments() ContainerCPUAssignments {
s.RLock()
defer s.RUnlock()
return s.assignments.Clone()
}
func (s *stateMemory) SetCPUSet(containerID string, cset cpuset.CPUSet) {
s.Lock()
defer s.Unlock()
s.assignments[containerID] = cset
glog.Infof("[cpumanager] updated desired cpuset (container id: %s, cpuset: \"%s\")", containerID, cset)
}
func (s *stateMemory) SetDefaultCPUSet(cset cpuset.CPUSet) {
s.Lock()
defer s.Unlock()
s.defaultCPUSet = cset
glog.Infof("[cpumanager] updated default cpuset: \"%s\"", cset)
}
func (s *stateMemory) SetCPUAssignments(a ContainerCPUAssignments) {
s.Lock()
defer s.Unlock()
s.assignments = a.Clone()
glog.Infof("[cpumanager] updated cpuset assignments: \"%v\"", a)
}
func (s *stateMemory) Delete(containerID string) {
s.Lock()
defer s.Unlock()
delete(s.assignments, containerID)
glog.V(2).Infof("[cpumanager] deleted cpuset assignment (container id: %s)", containerID)
}
func (s *stateMemory) ClearState() {
s.Lock()
defer s.Unlock()
s.defaultCPUSet = cpuset.CPUSet{}
s.assignments = make(ContainerCPUAssignments)
glog.V(2).Infof("[cpumanager] cleared state")
}
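// exampleDefaultFallback is an illustrative sketch (not used elsewhere) of the
// fallback behaviour of GetCPUSetOrDefault: a container without an exclusive
// assignment simply observes the shared pool. The container names and CPU
// numbers are placeholders.
func exampleDefaultFallback() {
	s := NewMemoryState()
	s.SetDefaultCPUSet(cpuset.NewCPUSet(0, 1, 2, 3))
	s.SetCPUSet("pinned", cpuset.NewCPUSet(2))
	_ = s.GetCPUSetOrDefault("pinned")   // returns CPU 2 only
	_ = s.GetCPUSetOrDefault("unpinned") // falls back to 0-3, the shared pool
}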

View File

@ -0,0 +1,38 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"topology.go",
],
importpath = "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology",
visibility = ["//visibility:public"],
deps = [
"//pkg/kubelet/cm/cpuset:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/google/cadvisor/info/v1:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
go_test(
name = "go_default_test",
srcs = ["topology_test.go"],
importpath = "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology",
library = ":go_default_library",
deps = ["//vendor/github.com/google/cadvisor/info/v1:go_default_library"],
)

View File

@ -0,0 +1,18 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package topology contains helpers for the CPU manager.
package topology // import "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"

View File

@ -0,0 +1,197 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package topology
import (
"fmt"
"sort"
"github.com/golang/glog"
cadvisorapi "github.com/google/cadvisor/info/v1"
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
)
// CPUDetails is a map from CPU ID to Core ID and Socket ID.
type CPUDetails map[int]CPUInfo
// CPUTopology contains details of node cpu, where:
// CPU - logical CPU, cadvisor - thread
// Core - physical CPU, cadvisor - Core
// Socket - socket, cadvisor - Node
type CPUTopology struct {
NumCPUs int
NumCores int
NumSockets int
CPUDetails CPUDetails
}
// CPUsPerCore returns the number of logical CPUs associated with
// each core.
func (topo *CPUTopology) CPUsPerCore() int {
if topo.NumCores == 0 {
return 0
}
return topo.NumCPUs / topo.NumCores
}
// CPUsPerSocket returns the number of logical CPUs associated with
// each socket.
func (topo *CPUTopology) CPUsPerSocket() int {
if topo.NumSockets == 0 {
return 0
}
return topo.NumCPUs / topo.NumSockets
}
// CPUInfo contains the socket and core IDs associated with a CPU.
type CPUInfo struct {
SocketID int
CoreID int
}
// KeepOnly returns a new CPUDetails object with only the supplied cpus.
func (d CPUDetails) KeepOnly(cpus cpuset.CPUSet) CPUDetails {
result := CPUDetails{}
for cpu, info := range d {
if cpus.Contains(cpu) {
result[cpu] = info
}
}
return result
}
// Sockets returns all of the socket IDs associated with the CPUs in this
// CPUDetails.
func (d CPUDetails) Sockets() cpuset.CPUSet {
b := cpuset.NewBuilder()
for _, info := range d {
b.Add(info.SocketID)
}
return b.Result()
}
// CPUsInSocket returns all of the logical CPU IDs associated with the
// given socket ID in this CPUDetails.
func (d CPUDetails) CPUsInSocket(id int) cpuset.CPUSet {
b := cpuset.NewBuilder()
for cpu, info := range d {
if info.SocketID == id {
b.Add(cpu)
}
}
return b.Result()
}
// Cores returns all of the core IDs associated with the CPUs in this
// CPUDetails.
func (d CPUDetails) Cores() cpuset.CPUSet {
b := cpuset.NewBuilder()
for _, info := range d {
b.Add(info.CoreID)
}
return b.Result()
}
// CoresInSocket returns all of the core IDs associated with the given
// socket ID in this CPUDetails.
func (d CPUDetails) CoresInSocket(id int) cpuset.CPUSet {
b := cpuset.NewBuilder()
for _, info := range d {
if info.SocketID == id {
b.Add(info.CoreID)
}
}
return b.Result()
}
// CPUs returns all of the logical CPU IDs in this CPUDetails.
func (d CPUDetails) CPUs() cpuset.CPUSet {
b := cpuset.NewBuilder()
for cpuID := range d {
b.Add(cpuID)
}
return b.Result()
}
// CPUsInCore returns all of the logical CPU IDs associated with the
// given core ID in this CPUDetails.
func (d CPUDetails) CPUsInCore(id int) cpuset.CPUSet {
b := cpuset.NewBuilder()
for cpu, info := range d {
if info.CoreID == id {
b.Add(cpu)
}
}
return b.Result()
}
// Discover returns CPUTopology based on cadvisor node info
func Discover(machineInfo *cadvisorapi.MachineInfo) (*CPUTopology, error) {
if machineInfo.NumCores == 0 {
return nil, fmt.Errorf("could not detect number of cpus")
}
CPUDetails := CPUDetails{}
numCPUs := machineInfo.NumCores
numPhysicalCores := 0
var coreID int
var err error
for _, socket := range machineInfo.Topology {
numPhysicalCores += len(socket.Cores)
for _, core := range socket.Cores {
if coreID, err = getUniqueCoreID(core.Threads); err != nil {
glog.Errorf("could not get unique coreID for socket: %d core %d threads: %v",
socket.Id, core.Id, core.Threads)
return nil, err
}
for _, cpu := range core.Threads {
CPUDetails[cpu] = CPUInfo{
CoreID: coreID,
SocketID: socket.Id,
}
}
}
}
return &CPUTopology{
NumCPUs: numCPUs,
NumSockets: len(machineInfo.Topology),
NumCores: numPhysicalCores,
CPUDetails: CPUDetails,
}, nil
}
// getUniqueCoreID computes coreID as the lowest cpuID
// for a given Threads []int slice. This assures that coreIDs are
// platform-unique (as opposed to what cAdvisor reports, which is only socket-unique).
func getUniqueCoreID(threads []int) (coreID int, err error) {
err = nil
if len(threads) == 0 {
return 0, fmt.Errorf("no cpus provided")
}
if len(threads) != cpuset.NewCPUSet(threads...).Size() {
return 0, fmt.Errorf("cpus provided are not unique")
}
tmpThreads := make([]int, len(threads))
copy(tmpThreads, threads)
sort.Ints(tmpThreads)
return tmpThreads[0], err
}
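// exampleDiscover is an illustrative sketch (not called by the package) of how
// Discover is driven from cadvisor machine info and what the derived ratios
// mean. The machine below is a hypothetical single-socket, hyper-threaded box
// with 4 physical cores and 8 hardware threads, mirroring the fixture used in
// topology_test.go.
func exampleDiscover() {
	machineInfo := &cadvisorapi.MachineInfo{
		NumCores: 8,
		Topology: []cadvisorapi.Node{
			{Id: 0,
				Cores: []cadvisorapi.Core{
					{Id: 0, Threads: []int{0, 4}},
					{Id: 1, Threads: []int{1, 5}},
					{Id: 2, Threads: []int{2, 6}},
					{Id: 3, Threads: []int{3, 7}},
				},
			},
		},
	}
	topo, err := Discover(machineInfo)
	if err != nil {
		glog.Errorf("[cpumanager] example discover failed: %v", err)
		return
	}
	// 8 CPUs / 4 cores = 2 threads per core; 8 CPUs / 1 socket = 8 per socket.
	fmt.Printf("cpus per core: %d, cpus per socket: %d\n",
		topo.CPUsPerCore(), topo.CPUsPerSocket())
}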

View File

@ -0,0 +1,201 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package topology
import (
"reflect"
"testing"
cadvisorapi "github.com/google/cadvisor/info/v1"
)
func Test_Discover(t *testing.T) {
tests := []struct {
name string
args *cadvisorapi.MachineInfo
want *CPUTopology
wantErr bool
}{
{
name: "FailNumCores",
args: &cadvisorapi.MachineInfo{
NumCores: 0,
},
want: &CPUTopology{},
wantErr: true,
},
{
name: "OneSocketHT",
args: &cadvisorapi.MachineInfo{
NumCores: 8,
Topology: []cadvisorapi.Node{
{Id: 0,
Cores: []cadvisorapi.Core{
{Id: 0, Threads: []int{0, 4}},
{Id: 1, Threads: []int{1, 5}},
{Id: 2, Threads: []int{2, 6}},
{Id: 3, Threads: []int{3, 7}},
},
},
},
},
want: &CPUTopology{
NumCPUs: 8,
NumSockets: 1,
NumCores: 4,
CPUDetails: map[int]CPUInfo{
0: {CoreID: 0, SocketID: 0},
1: {CoreID: 1, SocketID: 0},
2: {CoreID: 2, SocketID: 0},
3: {CoreID: 3, SocketID: 0},
4: {CoreID: 0, SocketID: 0},
5: {CoreID: 1, SocketID: 0},
6: {CoreID: 2, SocketID: 0},
7: {CoreID: 3, SocketID: 0},
},
},
wantErr: false,
},
{
name: "DualSocketNoHT",
args: &cadvisorapi.MachineInfo{
NumCores: 4,
Topology: []cadvisorapi.Node{
{Id: 0,
Cores: []cadvisorapi.Core{
{Id: 0, Threads: []int{0}},
{Id: 2, Threads: []int{2}},
},
},
{Id: 1,
Cores: []cadvisorapi.Core{
{Id: 1, Threads: []int{1}},
{Id: 3, Threads: []int{3}},
},
},
},
},
want: &CPUTopology{
NumCPUs: 4,
NumSockets: 2,
NumCores: 4,
CPUDetails: map[int]CPUInfo{
0: {CoreID: 0, SocketID: 0},
1: {CoreID: 1, SocketID: 1},
2: {CoreID: 2, SocketID: 0},
3: {CoreID: 3, SocketID: 1},
},
},
wantErr: false,
},
{
name: "DualSocketHT - non unique Core'ID's",
args: &cadvisorapi.MachineInfo{
NumCores: 12,
Topology: []cadvisorapi.Node{
{Id: 0,
Cores: []cadvisorapi.Core{
{Id: 0, Threads: []int{0, 6}},
{Id: 1, Threads: []int{1, 7}},
{Id: 2, Threads: []int{2, 8}},
},
},
{Id: 1,
Cores: []cadvisorapi.Core{
{Id: 0, Threads: []int{3, 9}},
{Id: 1, Threads: []int{4, 10}},
{Id: 2, Threads: []int{5, 11}},
},
},
},
},
want: &CPUTopology{
NumCPUs: 12,
NumSockets: 2,
NumCores: 6,
CPUDetails: map[int]CPUInfo{
0: {CoreID: 0, SocketID: 0},
1: {CoreID: 1, SocketID: 0},
2: {CoreID: 2, SocketID: 0},
3: {CoreID: 3, SocketID: 1},
4: {CoreID: 4, SocketID: 1},
5: {CoreID: 5, SocketID: 1},
6: {CoreID: 0, SocketID: 0},
7: {CoreID: 1, SocketID: 0},
8: {CoreID: 2, SocketID: 0},
9: {CoreID: 3, SocketID: 1},
10: {CoreID: 4, SocketID: 1},
11: {CoreID: 5, SocketID: 1},
},
},
wantErr: false,
},
{
name: "OneSocketHT fail",
args: &cadvisorapi.MachineInfo{
NumCores: 8,
Topology: []cadvisorapi.Node{
{Id: 0,
Cores: []cadvisorapi.Core{
{Id: 0, Threads: []int{0, 4}},
{Id: 1, Threads: []int{1, 5}},
{Id: 2, Threads: []int{2, 2}}, // Wrong case - should fail here
{Id: 3, Threads: []int{3, 7}},
},
},
},
},
want: &CPUTopology{},
wantErr: true,
},
{
name: "OneSocketHT fail",
args: &cadvisorapi.MachineInfo{
NumCores: 8,
Topology: []cadvisorapi.Node{
{Id: 0,
Cores: []cadvisorapi.Core{
{Id: 0, Threads: []int{0, 4}},
{Id: 1, Threads: []int{1, 5}},
{Id: 2, Threads: []int{2, 6}},
{Id: 3, Threads: []int{}}, // Wrong case - should fail here
},
},
},
},
want: &CPUTopology{},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := Discover(tt.args)
if err != nil {
if tt.wantErr {
t.Logf("Discover() expected error = %v", err)
} else {
t.Errorf("Discover() error = %v, wantErr %v", err, tt.wantErr)
}
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("Discover() = %v, want %v", got, tt.want)
}
})
}
}