Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-14 18:53:35 +00:00)

Commit: vendor updates
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/BUILD | 8 (generated, vendored)
@@ -14,7 +14,7 @@ go_library(
     visibility = ["//visibility:public"],
     deps = [
         "//pkg/apis/core/v1/helper/qos:go_default_library",
-        "//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library",
+        "//pkg/kubelet/apis/cri/runtime/v1alpha2:go_default_library",
         "//pkg/kubelet/cm/cpumanager/state:go_default_library",
         "//pkg/kubelet/cm/cpumanager/topology:go_default_library",
         "//pkg/kubelet/cm/cpuset:go_default_library",
@@ -36,10 +36,9 @@ go_test(
         "policy_static_test.go",
         "policy_test.go",
     ],
-    importpath = "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager",
-    library = ":go_default_library",
+    embed = [":go_default_library"],
     deps = [
-        "//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library",
+        "//pkg/kubelet/apis/cri/runtime/v1alpha2:go_default_library",
         "//pkg/kubelet/cm/cpumanager/state:go_default_library",
         "//pkg/kubelet/cm/cpumanager/topology:go_default_library",
         "//pkg/kubelet/cm/cpuset:go_default_library",
@@ -47,7 +46,6 @@ go_test(
         "//vendor/k8s.io/api/core/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
     ],
 )

vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_assignment.go | 2 (generated, vendored)
@@ -86,7 +86,7 @@ func (a *cpuAccumulator) freeCores() []int {
 
 // Returns CPU IDs as a slice sorted by:
 // - socket affinity with result
-// - number of CPUs available on the same sockett
+// - number of CPUs available on the same socket
 // - number of CPUs available on the same core
 // - socket ID.
 // - core ID.

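Note: the comment fixed above lists the multi-key ordering used when picking CPUs for exclusive assignment. Below is a standalone sketch of how such a tie-breaking cascade looks in Go; the candidate data and the direction of each comparison are illustrative only, not taken from the kubelet.

    package main

    import (
    	"fmt"
    	"sort"
    )

    // cpuCandidate carries hypothetical scoring data for one CPU.
    type cpuCandidate struct {
    	id, coreID, socketID     int
    	freeOnSocket, freeOnCore int
    	affinityWithResult       bool
    }

    func main() {
    	cpus := []cpuCandidate{
    		{id: 7, coreID: 3, socketID: 1, freeOnSocket: 6, freeOnCore: 1, affinityWithResult: false},
    		{id: 3, coreID: 1, socketID: 0, freeOnSocket: 4, freeOnCore: 2, affinityWithResult: true},
    		{id: 2, coreID: 1, socketID: 0, freeOnSocket: 4, freeOnCore: 2, affinityWithResult: true},
    	}
    	sort.SliceStable(cpus, func(i, j int) bool {
    		a, b := cpus[i], cpus[j]
    		// Each criterion only breaks ties left by the previous one.
    		if a.affinityWithResult != b.affinityWithResult {
    			return a.affinityWithResult
    		}
    		if a.freeOnSocket != b.freeOnSocket {
    			return a.freeOnSocket > b.freeOnSocket
    		}
    		if a.freeOnCore != b.freeOnCore {
    			return a.freeOnCore > b.freeOnCore
    		}
    		if a.socketID != b.socketID {
    			return a.socketID < b.socketID
    		}
    		return a.coreID < b.coreID
    	})
    	fmt.Println(cpus) // best candidate first
    }
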
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_manager.go | 41 (generated, vendored)
@@ -27,7 +27,7 @@ import (
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 
-	runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
+	runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
 	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
 	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
 	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
@@ -98,13 +98,7 @@ type manager struct {
 var _ Manager = &manager{}
 
 // NewManager creates new cpu manager based on provided policy
-func NewManager(
-	cpuPolicyName string,
-	reconcilePeriod time.Duration,
-	machineInfo *cadvisorapi.MachineInfo,
-	nodeAllocatableReservation v1.ResourceList,
-	stateFileDirecory string,
-) (Manager, error) {
+func NewManager(cpuPolicyName string, reconcilePeriod time.Duration, machineInfo *cadvisorapi.MachineInfo, nodeAllocatableReservation v1.ResourceList, stateFileDirecory string) (Manager, error) {
 	var policy Policy
 
 	switch policyName(cpuPolicyName) {
@@ -120,18 +114,16 @@ func NewManager(
 		glog.Infof("[cpumanager] detected CPU topology: %v", topo)
 		reservedCPUs, ok := nodeAllocatableReservation[v1.ResourceCPU]
 		if !ok {
-			// The static policy cannot initialize without this information. Panic!
-			panic("[cpumanager] unable to determine reserved CPU resources for static policy")
+			// The static policy cannot initialize without this information.
+			return nil, fmt.Errorf("[cpumanager] unable to determine reserved CPU resources for static policy")
 		}
 		if reservedCPUs.IsZero() {
-			// Panic!
-			//
 			// The static policy requires this to be nonzero. Zero CPU reservation
 			// would allow the shared pool to be completely exhausted. At that point
 			// either we would violate our guarantee of exclusivity or need to evict
 			// any pod that has at least one container that requires zero CPUs.
 			// See the comments in policy_static.go for more details.
-			panic("[cpumanager] the static policy requires systemreserved.cpu + kubereserved.cpu to be greater than zero")
+			return nil, fmt.Errorf("[cpumanager] the static policy requires systemreserved.cpu + kubereserved.cpu to be greater than zero")
 		}
 
 		// Take the ceiling of the reservation, since fractional CPUs cannot be
@@ -160,8 +152,8 @@ func NewManager(
 }
 
 func (m *manager) Start(activePods ActivePodsFunc, podStatusProvider status.PodStatusProvider, containerRuntime runtimeService) {
-	glog.Infof("[cpumanger] starting with %s policy", m.policy.Name())
-	glog.Infof("[cpumanger] reconciling every %v", m.reconcilePeriod)
+	glog.Infof("[cpumanager] starting with %s policy", m.policy.Name())
+	glog.Infof("[cpumanager] reconciling every %v", m.reconcilePeriod)
 
 	m.activePods = activePods
 	m.podStatusProvider = podStatusProvider
@@ -242,6 +234,25 @@ func (m *manager) reconcileState() (success []reconciledContainer, failure []reconciledContainer) {
 				continue
 			}
 
+			// Check whether container is present in state, there may be 3 reasons why it's not present:
+			// - policy does not want to track the container
+			// - kubelet has just been restarted - and there is no previous state file
+			// - container has been removed from state by RemoveContainer call (DeletionTimestamp is set)
+			if _, ok := m.state.GetCPUSet(containerID); !ok {
+				if status.Phase == v1.PodRunning && pod.DeletionTimestamp == nil {
+					glog.V(4).Infof("[cpumanager] reconcileState: container is not present in state - trying to add (pod: %s, container: %s, container id: %s)", pod.Name, container.Name, containerID)
+					err := m.AddContainer(pod, &container, containerID)
+					if err != nil {
+						glog.Errorf("[cpumanager] reconcileState: failed to add container (pod: %s, container: %s, container id: %s, error: %v)", pod.Name, container.Name, containerID, err)
+						failure = append(failure, reconciledContainer{pod.Name, container.Name, containerID})
+					}
+				} else {
+					// if DeletionTimestamp is set, pod has already been removed from state
+					// skip the pod/container since it's not running and will be deleted soon
+					continue
+				}
+			}
+
 			cset := m.state.GetCPUSetOrDefault(containerID)
 			if cset.IsEmpty() {
 				// NOTE: This should not happen outside of tests.

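Note: the substantive change in this file is that NewManager now returns an error for bad reservation configuration instead of panicking, so the caller can fail gracefully. A minimal standalone sketch of the before/after pattern follows; the names are illustrative, not the kubelet's own.

    package main

    import (
    	"errors"
    	"fmt"
    )

    // Before: invalid configuration took down the whole process.
    func newManagerOld(reservedCPUs int) string {
    	if reservedCPUs == 0 {
    		panic("the static policy requires a nonzero CPU reservation")
    	}
    	return "manager"
    }

    // After: the caller receives an error and decides how to degrade.
    func newManager(reservedCPUs int) (string, error) {
    	if reservedCPUs == 0 {
    		return "", errors.New("the static policy requires a nonzero CPU reservation")
    	}
    	return "manager", nil
    }

    func main() {
    	if _, err := newManager(0); err != nil {
    		fmt.Println("graceful failure:", err) // log and continue, or return the error upward
    	}
    }
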
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_manager_test.go | 57 (generated, vendored)
@@ -28,9 +28,8 @@ import (
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
-	runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
+	runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
 	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
 	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
 	"os"
@@ -118,28 +117,6 @@ func (psp mockPodStatusProvider) GetPodStatus(uid types.UID) (v1.PodStatus, bool) {
 	return psp.podStatus, psp.found
 }
 
-type mockPodKiller struct {
-	killedPods []*v1.Pod
-}
-
-func (f *mockPodKiller) killPodNow(pod *v1.Pod, status v1.PodStatus, gracePeriodOverride *int64) error {
-	f.killedPods = append(f.killedPods, pod)
-	return nil
-}
-
-type mockPodProvider struct {
-	pods []*v1.Pod
-}
-
-func (f *mockPodProvider) getPods() []*v1.Pod {
-	return f.pods
-}
-
-type mockRecorder struct{}
-
-func (r *mockRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) {
-}
-
 func makePod(cpuRequest, cpuLimit string) *v1.Pod {
 	return &v1.Pod{
 		Spec: v1.PodSpec{
@@ -161,20 +138,6 @@ func makePod(cpuRequest, cpuLimit string) *v1.Pod {
 	}
 }
 
-// CpuAllocatable must be <= CpuCapacity
-func prepareCPUNodeStatus(CPUCapacity, CPUAllocatable string) v1.NodeStatus {
-	nodestatus := v1.NodeStatus{
-		Capacity:    make(v1.ResourceList, 1),
-		Allocatable: make(v1.ResourceList, 1),
-	}
-	cpucap, _ := resource.ParseQuantity(CPUCapacity)
-	cpuall, _ := resource.ParseQuantity(CPUAllocatable)
-
-	nodestatus.Capacity[v1.ResourceCPU] = cpucap
-	nodestatus.Allocatable[v1.ResourceCPU] = cpuall
-	return nodestatus
-}
-
 func TestCPUManagerAdd(t *testing.T) {
 	testCases := []struct {
 		description string
@@ -234,7 +197,6 @@ func TestCPUManagerGenerate(t *testing.T) {
 		cpuPolicyName              string
 		nodeAllocatableReservation v1.ResourceList
 		isTopologyBroken           bool
-		panicMsg                   string
 		expectedPolicy             string
 		expectedError              error
 		skipIfPermissionsError     bool
@@ -270,14 +232,14 @@ func TestCPUManagerGenerate(t *testing.T) {
 			description:                "static policy - broken reservation",
 			cpuPolicyName:              "static",
 			nodeAllocatableReservation: v1.ResourceList{},
-			panicMsg:                   "unable to determine reserved CPU resources for static policy",
+			expectedError:              fmt.Errorf("unable to determine reserved CPU resources for static policy"),
 			skipIfPermissionsError:     true,
 		},
 		{
 			description:                "static policy - no CPU resources",
 			cpuPolicyName:              "static",
 			nodeAllocatableReservation: v1.ResourceList{v1.ResourceCPU: *resource.NewQuantity(0, resource.DecimalSI)},
-			panicMsg:                   "the static policy requires systemreserved.cpu + kubereserved.cpu to be greater than zero",
+			expectedError:              fmt.Errorf("the static policy requires systemreserved.cpu + kubereserved.cpu to be greater than zero"),
 			skipIfPermissionsError:     true,
 		},
 	}
@@ -319,19 +281,6 @@ func TestCPUManagerGenerate(t *testing.T) {
 			t.Errorf("cannot create state file: %s", err.Error())
 		}
 		defer os.RemoveAll(sDir)
-		defer func() {
-			if err := recover(); err != nil {
-				if testCase.panicMsg != "" {
-					if !strings.Contains(err.(string), testCase.panicMsg) {
-						t.Errorf("Unexpected panic message. Have: %q wants %q", err, testCase.panicMsg)
-					}
-				} else {
-					t.Errorf("Unexpected panic: %q", err)
-				}
-			} else if testCase.panicMsg != "" {
-				t.Error("Expected panic hasn't been raised")
-			}
-		}()
 
 		mgr, err := NewManager(testCase.cpuPolicyName, 5*time.Second, machineInfo, testCase.nodeAllocatableReservation, sDir)
 		if testCase.expectedError != nil {

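Note: with the panic gone, the test table drops panicMsg in favor of expectedError and all the recover() scaffolding disappears. A self-contained sketch of the resulting assertion style, where a hypothetical newManager stands in for cpumanager.NewManager:

    package cpumanager_test

    import (
    	"errors"
    	"strings"
    	"testing"
    )

    // newManager is a stand-in for the refactored constructor: it returns an
    // error for a zero reservation instead of panicking.
    func newManager(reservedCPUs int) (string, error) {
    	if reservedCPUs == 0 {
    		return "", errors.New("reserved CPU resources must be greater than zero")
    	}
    	return "manager", nil
    }

    func TestNewManagerErrors(t *testing.T) {
    	testCases := []struct {
    		description string
    		reserved    int
    		expectedErr string // empty means success is expected
    	}{
    		{"valid reservation", 2, ""},
    		{"zero reservation", 0, "greater than zero"},
    	}
    	for _, tc := range testCases {
    		t.Run(tc.description, func(t *testing.T) {
    			_, err := newManager(tc.reserved)
    			if tc.expectedErr == "" {
    				if err != nil {
    					t.Fatalf("unexpected error: %v", err)
    				}
    				return
    			}
    			// No defer/recover scaffolding: just inspect the returned error.
    			if err == nil || !strings.Contains(err.Error(), tc.expectedErr) {
    				t.Fatalf("expected error containing %q, got %v", tc.expectedErr, err)
    			}
    		})
    	}
    }
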
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy.go | 2 (generated, vendored)
@@ -25,6 +25,8 @@ import (
 type Policy interface {
 	Name() string
 	Start(s state.State)
+	// AddContainer call is idempotent
 	AddContainer(s state.State, pod *v1.Pod, container *v1.Container, containerID string) error
+	// RemoveContainer call is idempotent
 	RemoveContainer(s state.State, containerID string) error
 }

vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_static.go | 8 (generated, vendored)
@@ -156,9 +156,15 @@ func (p *staticPolicy) assignableCPUs(s state.State) cpuset.CPUSet {
 }
 
 func (p *staticPolicy) AddContainer(s state.State, pod *v1.Pod, container *v1.Container, containerID string) error {
-	glog.Infof("[cpumanager] static policy: AddContainer (pod: %s, container: %s, container id: %s)", pod.Name, container.Name, containerID)
 	if numCPUs := guaranteedCPUs(pod, container); numCPUs != 0 {
+		glog.Infof("[cpumanager] static policy: AddContainer (pod: %s, container: %s, container id: %s)", pod.Name, container.Name, containerID)
 		// container belongs in an exclusively allocated pool
+
+		if _, ok := s.GetCPUSet(containerID); ok {
+			glog.Infof("[cpumanager] static policy: container already present in state, skipping (container: %s, container id: %s)", container.Name, containerID)
+			return nil
+		}
+
 		cpuset, err := p.allocateCPUs(s, numCPUs)
 		if err != nil {
 			glog.Errorf("[cpumanager] unable to allocate %d CPUs (container id: %s, error: %v)", numCPUs, containerID, err)

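Note: together with the new "AddContainer call is idempotent" contract in policy.go above, this hunk makes the static policy check state before allocating, so a repeated AddContainer for the same container ID is a no-op. A minimal sketch of that guard; the state type and allocator here are hypothetical, not the kubelet's:

    package main

    import "fmt"

    // fakeState is a hypothetical stand-in for the policy's state.State.
    type fakeState struct {
    	assignments map[string][]int // containerID -> exclusively assigned CPUs
    	next        int              // naive allocator cursor
    }

    // addContainer mirrors the guard added above: if the container already has
    // an assignment, return early so repeated calls do not allocate twice.
    func (s *fakeState) addContainer(containerID string, numCPUs int) error {
    	if _, ok := s.assignments[containerID]; ok {
    		// Already present in state: skip, which makes the call idempotent.
    		return nil
    	}
    	cpus := make([]int, 0, numCPUs)
    	for i := 0; i < numCPUs; i++ {
    		cpus = append(cpus, s.next)
    		s.next++
    	}
    	s.assignments[containerID] = cpus
    	return nil
    }

    func main() {
    	s := &fakeState{assignments: map[string][]int{}}
    	_ = s.addContainer("abc123", 2)
    	_ = s.addContainer("abc123", 2) // second call is a no-op
    	fmt.Println(s.assignments)      // map[abc123:[0 1]]
    }

This matters because the reconcile loop added in cpu_manager.go retries AddContainer for containers missing from state; without the guard, a retry could double-allocate exclusive CPUs.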
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_static_test.go | 4 (generated, vendored)
@@ -93,10 +93,10 @@ func TestStaticPolicyStart(t *testing.T) {
 	defer func() {
 		if err := recover(); err != nil {
 			if !testCase.expPanic {
-				t.Errorf("unexpected panic occured: %q", err)
+				t.Errorf("unexpected panic occurred: %q", err)
 			}
 		} else if testCase.expPanic {
-			t.Error("expected panic doesn't occured")
+			t.Error("expected panic doesn't occurred")
 		}
 	}()
 	policy := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs).(*staticPolicy)

vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/BUILD | 3 (generated, vendored)
@@ -18,8 +18,7 @@ go_library(
 go_test(
     name = "go_default_test",
     srcs = ["state_file_test.go"],
-    importpath = "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state",
-    library = ":go_default_library",
+    embed = [":go_default_library"],
     deps = ["//pkg/kubelet/cm/cpuset:go_default_library"],
 )
 

vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_file.go | 87 (generated, vendored)
@@ -51,9 +51,10 @@ func NewFileState(filePath string, policyName string) State {
 
 	if err := stateFile.tryRestoreState(); err != nil {
-		// could not restore state, init new state file
-		glog.Infof("[cpumanager] state file: initializing empty state file - reason: \"%s\"", err)
-		stateFile.cache.ClearState()
-		stateFile.storeState()
+		msg := fmt.Sprintf("[cpumanager] state file: unable to restore state from disk (%s)\n", err.Error()) +
+			"Panicking because we cannot guarantee sane CPU affinity for existing containers.\n" +
+			fmt.Sprintf("Please drain this node and delete the CPU manager state file \"%s\" before restarting Kubelet.", stateFile.stateFilePath)
+		panic(msg)
 	}
 
 	return stateFile
@@ -73,45 +74,51 @@ func (sf *stateFile) tryRestoreState() error {
 
 	var content []byte
 
-	if content, err = ioutil.ReadFile(sf.stateFilePath); os.IsNotExist(err) {
-		// Create file
-		if _, err = os.Create(sf.stateFilePath); err != nil {
-			glog.Errorf("[cpumanager] state file: unable to create state file \"%s\":%s", sf.stateFilePath, err.Error())
-			panic("[cpumanager] state file not created")
-		}
-		glog.Infof("[cpumanager] state file: created empty state file \"%s\"", sf.stateFilePath)
-	} else {
-		// File exists - try to read
-		var readState stateFileData
-
-		if err = json.Unmarshal(content, &readState); err != nil {
-			glog.Warningf("[cpumanager] state file: could not unmarshal, corrupted state file - \"%s\"", sf.stateFilePath)
-			return err
-		}
-
-		if sf.policyName != readState.PolicyName {
-			return fmt.Errorf("policy configured \"%s\" != policy from state file \"%s\"", sf.policyName, readState.PolicyName)
-		}
-
-		if tmpDefaultCPUSet, err = cpuset.Parse(readState.DefaultCPUSet); err != nil {
-			glog.Warningf("[cpumanager] state file: could not parse state file - [defaultCpuSet:\"%s\"]", readState.DefaultCPUSet)
-			return err
-		}
-
-		for containerID, cpuString := range readState.Entries {
-			if tmpContainerCPUSet, err = cpuset.Parse(cpuString); err != nil {
-				glog.Warningf("[cpumanager] state file: could not parse state file - container id: %s, cpuset: \"%s\"", containerID, cpuString)
-				return err
-			}
-			tmpAssignments[containerID] = tmpContainerCPUSet
-		}
-
-		sf.cache.SetDefaultCPUSet(tmpDefaultCPUSet)
-		sf.cache.SetCPUAssignments(tmpAssignments)
-
-		glog.V(2).Infof("[cpumanager] state file: restored state from state file \"%s\"", sf.stateFilePath)
-		glog.V(2).Infof("[cpumanager] state file: defaultCPUSet: %s", tmpDefaultCPUSet.String())
-	}
+	content, err = ioutil.ReadFile(sf.stateFilePath)
+
+	// If the state file does not exist or has zero length, write a new file.
+	if os.IsNotExist(err) || len(content) == 0 {
+		sf.storeState()
+		glog.Infof("[cpumanager] state file: created new state file \"%s\"", sf.stateFilePath)
+		return nil
+	}
+
+	// Fail on any other file read error.
+	if err != nil {
+		return err
+	}
+
+	// File exists; try to read it.
+	var readState stateFileData
+
+	if err = json.Unmarshal(content, &readState); err != nil {
+		glog.Errorf("[cpumanager] state file: could not unmarshal, corrupted state file - \"%s\"", sf.stateFilePath)
+		return err
+	}
+
+	if sf.policyName != readState.PolicyName {
+		return fmt.Errorf("policy configured \"%s\" != policy from state file \"%s\"", sf.policyName, readState.PolicyName)
+	}
+
+	if tmpDefaultCPUSet, err = cpuset.Parse(readState.DefaultCPUSet); err != nil {
+		glog.Errorf("[cpumanager] state file: could not parse state file - [defaultCpuSet:\"%s\"]", readState.DefaultCPUSet)
+		return err
+	}
+
+	for containerID, cpuString := range readState.Entries {
+		if tmpContainerCPUSet, err = cpuset.Parse(cpuString); err != nil {
+			glog.Errorf("[cpumanager] state file: could not parse state file - container id: %s, cpuset: \"%s\"", containerID, cpuString)
+			return err
+		}
+		tmpAssignments[containerID] = tmpContainerCPUSet
+	}
+
+	sf.cache.SetDefaultCPUSet(tmpDefaultCPUSet)
+	sf.cache.SetCPUAssignments(tmpAssignments)
+
+	glog.V(2).Infof("[cpumanager] state file: restored state from state file \"%s\"", sf.stateFilePath)
+	glog.V(2).Infof("[cpumanager] state file: defaultCPUSet: %s", tmpDefaultCPUSet.String())
 
 	return nil
 }

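Note: the rewritten tryRestoreState flattens the old if/else into early returns with three outcomes: a missing or zero-length file starts fresh, any other read error fails, and unmarshal/parse/policy mismatches fail, with NewFileState now panicking on failure instead of silently clearing state it cannot trust. A simplified standalone sketch of that decision flow, with hypothetical names:

    package main

    import (
    	"encoding/json"
    	"fmt"
    	"io/ioutil"
    	"os"
    )

    // checkpoint mirrors the shape of the on-disk state (field names from the
    // test fixtures in this commit).
    type checkpoint struct {
    	PolicyName    string            `json:"policyName"`
    	DefaultCPUSet string            `json:"defaultCpuSet"`
    	Entries       map[string]string `json:"entries"`
    }

    func tryRestore(path, wantPolicy string) (*checkpoint, error) {
    	content, err := ioutil.ReadFile(path)
    	// Missing or empty file: treat as a fresh start (the real code writes
    	// a new state file here).
    	if os.IsNotExist(err) || len(content) == 0 {
    		return &checkpoint{PolicyName: wantPolicy}, nil
    	}
    	// Any other read error is fatal to restore.
    	if err != nil {
    		return nil, err
    	}
    	var cp checkpoint
    	if err := json.Unmarshal(content, &cp); err != nil {
    		return nil, fmt.Errorf("corrupted state file %q: %v", path, err)
    	}
    	if cp.PolicyName != wantPolicy {
    		return nil, fmt.Errorf("policy configured %q != policy from state file %q", wantPolicy, cp.PolicyName)
    	}
    	return &cp, nil
    }

    func main() {
    	if _, err := tryRestore("/var/lib/kubelet/cpu_manager_state", "none"); err != nil {
    		// The real NewFileState panics here so the operator drains the node
    		// and removes the state file before restarting the kubelet.
    		panic(err)
    	}
    }
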
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_file_test.go | 93 (generated, vendored)
@@ -45,7 +45,7 @@ func stateEqual(t *testing.T, sf State, sm State) {
 	cpuassignmentSf := sf.GetCPUAssignments()
 	cpuassignmentSm := sm.GetCPUAssignments()
 	if !reflect.DeepEqual(cpuassignmentSf, cpuassignmentSm) {
-		t.Errorf("State CPU assigments mismatch. Have %s, want %s", cpuassignmentSf, cpuassignmentSm)
+		t.Errorf("State CPU assignments mismatch. Have %s, want %s", cpuassignmentSf, cpuassignmentSm)
 	}
 }
@@ -77,33 +77,31 @@ func TestFileStateTryRestore(t *testing.T) {
 		stateFileContent string
 		policyName       string
 		expErr           string
+		expPanic         bool
 		expectedState    *stateMemory
 	}{
 		{
-			"Invalid JSON - empty file",
-			"",
+			"Invalid JSON - one byte file",
+			"\n",
 			"none",
-			"state file: could not unmarshal, corrupted state file",
-			&stateMemory{
-				assignments:   ContainerCPUAssignments{},
-				defaultCPUSet: cpuset.NewCPUSet(),
-			},
+			"[cpumanager] state file: unable to restore state from disk (unexpected end of JSON input)",
+			true,
+			&stateMemory{},
 		},
 		{
 			"Invalid JSON - invalid content",
 			"{",
 			"none",
-			"state file: could not unmarshal, corrupted state file",
-			&stateMemory{
-				assignments:   ContainerCPUAssignments{},
-				defaultCPUSet: cpuset.NewCPUSet(),
-			},
+			"[cpumanager] state file: unable to restore state from disk (unexpected end of JSON input)",
+			true,
+			&stateMemory{},
 		},
 		{
 			"Try restore defaultCPUSet only",
 			`{"policyName": "none", "defaultCpuSet": "4-6"}`,
 			"none",
 			"",
+			false,
 			&stateMemory{
 				assignments:   ContainerCPUAssignments{},
 				defaultCPUSet: cpuset.NewCPUSet(4, 5, 6),
@@ -113,11 +111,9 @@ func TestFileStateTryRestore(t *testing.T) {
 			"Try restore defaultCPUSet only - invalid name",
 			`{"policyName": "none", "defaultCpuSet" "4-6"}`,
 			"none",
-			"",
-			&stateMemory{
-				assignments:   ContainerCPUAssignments{},
-				defaultCPUSet: cpuset.NewCPUSet(),
-			},
+			`[cpumanager] state file: unable to restore state from disk (invalid character '"' after object key)`,
+			true,
+			&stateMemory{},
 		},
 		{
 			"Try restore assignments only",
@@ -130,6 +126,7 @@ func TestFileStateTryRestore(t *testing.T) {
 			}`,
 			"none",
 			"",
+			false,
 			&stateMemory{
 				assignments: ContainerCPUAssignments{
 					"container1": cpuset.NewCPUSet(4, 5, 6),
@@ -146,21 +143,17 @@ func TestFileStateTryRestore(t *testing.T) {
 				"entries": {}
 			}`,
 			"B",
-			"policy configured \"B\" != policy from state file \"A\"",
-			&stateMemory{
-				assignments:   ContainerCPUAssignments{},
-				defaultCPUSet: cpuset.NewCPUSet(),
-			},
+			`[cpumanager] state file: unable to restore state from disk (policy configured "B" != policy from state file "A")`,
+			true,
+			&stateMemory{},
 		},
 		{
 			"Try restore invalid assignments",
 			`{"entries": }`,
 			"none",
-			"state file: could not unmarshal, corrupted state file",
-			&stateMemory{
-				assignments:   ContainerCPUAssignments{},
-				defaultCPUSet: cpuset.NewCPUSet(),
-			},
+			"[cpumanager] state file: unable to restore state from disk (invalid character '}' looking for beginning of value)",
+			true,
+			&stateMemory{},
 		},
 		{
 			"Try restore valid file",
@@ -174,6 +167,7 @@ func TestFileStateTryRestore(t *testing.T) {
 			}`,
 			"none",
 			"",
+			false,
 			&stateMemory{
 				assignments: ContainerCPUAssignments{
 					"container1": cpuset.NewCPUSet(4, 5, 6),
@@ -189,11 +183,9 @@ func TestFileStateTryRestore(t *testing.T) {
 				"defaultCpuSet": "2-sd"
 			}`,
 			"none",
-			"state file: could not parse state file",
-			&stateMemory{
-				assignments:   ContainerCPUAssignments{},
-				defaultCPUSet: cpuset.NewCPUSet(),
-			},
+			`[cpumanager] state file: unable to restore state from disk (strconv.Atoi: parsing "sd": invalid syntax)`,
+			true,
+			&stateMemory{},
 		},
 		{
 			"Try restore un-parsable assignments",
@@ -206,17 +198,16 @@ func TestFileStateTryRestore(t *testing.T) {
 				}
 			}`,
 			"none",
-			"state file: could not parse state file",
-			&stateMemory{
-				assignments:   ContainerCPUAssignments{},
-				defaultCPUSet: cpuset.NewCPUSet(),
-			},
+			`[cpumanager] state file: unable to restore state from disk (strconv.Atoi: parsing "p": invalid syntax)`,
+			true,
+			&stateMemory{},
 		},
 		{
-			"TryRestoreState creates empty state file",
+			"tryRestoreState creates empty state file",
 			"",
 			"none",
 			"",
+			false,
 			&stateMemory{
 				assignments:   ContainerCPUAssignments{},
 				defaultCPUSet: cpuset.NewCPUSet(),
@@ -226,11 +217,23 @@ func TestFileStateTryRestore(t *testing.T) {
 
 	for idx, tc := range testCases {
 		t.Run(tc.description, func(t *testing.T) {
+			defer func() {
+				if tc.expPanic {
+					r := recover()
+					panicMsg := r.(string)
+					if !strings.HasPrefix(panicMsg, tc.expErr) {
+						t.Fatalf(`expected panic "%s" but got "%s"`, tc.expErr, panicMsg)
+					} else {
+						t.Logf(`got expected panic "%s"`, panicMsg)
+					}
+				}
+			}()
+
 			sfilePath, err := ioutil.TempFile("/tmp", fmt.Sprintf("cpumanager_state_file_test_%d", idx))
 			if err != nil {
 				t.Errorf("cannot create temporary file: %q", err.Error())
 			}
-			// Don't create state file, let TryRestoreState figure out that is should create
+			// Don't create state file, let tryRestoreState figure out that is should create
 			if tc.stateFileContent != "" {
 				writeToStateFile(sfilePath.Name(), tc.stateFileContent)
 			}
@@ -245,11 +248,11 @@ func TestFileStateTryRestore(t *testing.T) {
 			if tc.expErr != "" {
 				if logData.String() != "" {
 					if !strings.Contains(logData.String(), tc.expErr) {
-						t.Errorf("TryRestoreState() error = %v, wantErr %v", logData.String(), tc.expErr)
+						t.Errorf("tryRestoreState() error = %v, wantErr %v", logData.String(), tc.expErr)
 						return
 					}
 				} else {
-					t.Errorf("TryRestoreState() error = nil, wantErr %v", tc.expErr)
+					t.Errorf("tryRestoreState() error = nil, wantErr %v", tc.expErr)
 					return
 				}
 			}
@@ -268,7 +271,7 @@ func TestFileStateTryRestorePanic(t *testing.T) {
 	}{
 		"Panic creating file",
 		true,
-		"[cpumanager] state file not created",
+		"[cpumanager] state file not written",
 	}
 
 	t.Run(testCase.description, func(t *testing.T) {
@@ -277,10 +280,10 @@ func TestFileStateTryRestorePanic(t *testing.T) {
 			if err := recover(); err != nil {
 				if testCase.wantPanic {
 					if testCase.panicMessage == err {
-						t.Logf("TryRestoreState() got expected panic = %v", err)
+						t.Logf("tryRestoreState() got expected panic = %v", err)
 						return
 					}
-					t.Errorf("TryRestoreState() unexpected panic = %v, wantErr %v", err, testCase.panicMessage)
+					t.Errorf("tryRestoreState() unexpected panic = %v, wantErr %v", err, testCase.panicMessage)
 				}
 			}
 		}()
@@ -423,7 +426,7 @@ func TestHelpersStateFile(t *testing.T) {
 		for containerName, containerCPUs := range tc.containers {
 			state.SetCPUSet(containerName, containerCPUs)
 			if cpus, _ := state.GetCPUSet(containerName); !cpus.Equals(containerCPUs) {
-				t.Errorf("state is inconsistant. Wants = %q Have = %q", containerCPUs, cpus)
+				t.Errorf("state is inconsistent. Wants = %q Have = %q", containerCPUs, cpus)
 			}
 			state.Delete(containerName)
 			if cpus := state.GetCPUSetOrDefault(containerName); !cpus.Equals(tc.defaultCPUset) {

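Note: since restore failures now panic instead of silently reinitializing state, the updated tests assert panics via recover() plus a message-prefix check rather than scanning logs. A standalone sketch of that pattern, with a stand-in for the restoring function:

    package state_test

    import (
    	"strings"
    	"testing"
    )

    // restoreOrPanic is a stand-in for NewFileState, which now panics when the
    // on-disk state cannot be restored.
    func restoreOrPanic(content string) {
    	if content == "\n" {
    		panic("[cpumanager] state file: unable to restore state from disk (unexpected end of JSON input)")
    	}
    }

    func TestRestorePanics(t *testing.T) {
    	expErr := "[cpumanager] state file: unable to restore state from disk"
    	defer func() {
    		r := recover()
    		if r == nil {
    			t.Fatal("expected panic was not raised")
    		}
    		// Match on a prefix so the assertion survives wording after "(...)".
    		if msg, ok := r.(string); !ok || !strings.HasPrefix(msg, expErr) {
    			t.Fatalf("expected panic %q but got %v", expErr, r)
    		}
    	}()
    	restoreOrPanic("\n")
    }
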
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/BUILD | 3 (generated, vendored)
@@ -32,7 +32,6 @@ filegroup(
 go_test(
     name = "go_default_test",
     srcs = ["topology_test.go"],
-    importpath = "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology",
-    library = ":go_default_library",
+    embed = [":go_default_library"],
     deps = ["//vendor/github.com/google/cadvisor/info/v1:go_default_library"],
 )