Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-14 18:53:35 +00:00)

Commit: Fresh dep ensure
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/BUILD (generated, vendored, 14 lines changed)

@@ -20,10 +20,10 @@ go_library(
         "//pkg/kubelet/cm/cpuset:go_default_library",
         "//pkg/kubelet/container:go_default_library",
         "//pkg/kubelet/status:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//vendor/github.com/google/cadvisor/info/v1:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )

@@ -42,11 +42,11 @@ go_test(
         "//pkg/kubelet/cm/cpumanager/state:go_default_library",
         "//pkg/kubelet/cm/cpumanager/topology:go_default_library",
         "//pkg/kubelet/cm/cpuset:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
         "//vendor/github.com/google/cadvisor/info/v1:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
     ],
 )
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_assignment.go (generated, vendored, 8 lines changed)

@@ -20,7 +20,7 @@ import (
     "fmt"
     "sort"
 
-    "github.com/golang/glog"
+    "k8s.io/klog"
 
     "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
     "k8s.io/kubernetes/pkg/kubelet/cm/cpuset"

@@ -160,7 +160,7 @@ func takeByTopology(topo *topology.CPUTopology, availableCPUs cpuset.CPUSet, num
     // least a socket's-worth of CPUs.
     for _, s := range acc.freeSockets() {
         if acc.needs(acc.topo.CPUsPerSocket()) {
-            glog.V(4).Infof("[cpumanager] takeByTopology: claiming socket [%d]", s)
+            klog.V(4).Infof("[cpumanager] takeByTopology: claiming socket [%d]", s)
             acc.take(acc.details.CPUsInSocket(s))
             if acc.isSatisfied() {
                 return acc.result, nil

@@ -172,7 +172,7 @@ func takeByTopology(topo *topology.CPUTopology, availableCPUs cpuset.CPUSet, num
     // a core's-worth of CPUs.
     for _, c := range acc.freeCores() {
         if acc.needs(acc.topo.CPUsPerCore()) {
-            glog.V(4).Infof("[cpumanager] takeByTopology: claiming core [%d]", c)
+            klog.V(4).Infof("[cpumanager] takeByTopology: claiming core [%d]", c)
             acc.take(acc.details.CPUsInCore(c))
             if acc.isSatisfied() {
                 return acc.result, nil

@@ -184,7 +184,7 @@ func takeByTopology(topo *topology.CPUTopology, availableCPUs cpuset.CPUSet, num
     // on the same sockets as the whole cores we have already taken in this
     // allocation.
     for _, c := range acc.freeCPUs() {
-        glog.V(4).Infof("[cpumanager] takeByTopology: claiming CPU [%d]", c)
+        klog.V(4).Infof("[cpumanager] takeByTopology: claiming CPU [%d]", c)
         if acc.needs(1) {
             acc.take(cpuset.NewCPUSet(c))
         }
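
Only the log calls change in these hunks; the allocation order itself is untouched: takeByTopology claims whole free sockets first, then whole free cores, then single CPUs. A minimal standalone sketch of that priority order, with simplified stand-in types for the real accumulator and cpuset machinery (treating the free groups as disjoint is a simplifying assumption here; the real accumulator recomputes free cores and CPUs after every take):

package main

import "fmt"

// topologyInfo is an illustrative stand-in for topology.CPUTopology plus
// the accumulator's view of which units are still completely free.
type topologyInfo struct {
    cpusPerSocket int
    cpusPerCore   int
    freeSockets   [][]int // CPU IDs grouped by fully-free socket
    freeCores     [][]int // CPU IDs grouped by fully-free core
    freeCPUs      []int   // remaining individual CPUs
}

// takeByTopology mirrors the claiming order in the vendored code:
// sockets first, then cores, then single CPUs, until numCPUs are taken.
func takeByTopology(t topologyInfo, numCPUs int) ([]int, error) {
    var result []int
    need := func() int { return numCPUs - len(result) }

    for _, socket := range t.freeSockets {
        if need() >= t.cpusPerSocket {
            result = append(result, socket...)
        }
    }
    for _, core := range t.freeCores {
        if need() >= t.cpusPerCore {
            result = append(result, core...)
        }
    }
    for _, cpu := range t.freeCPUs {
        if need() >= 1 {
            result = append(result, cpu)
        }
    }
    if need() > 0 {
        return nil, fmt.Errorf("not enough free CPUs: still need %d", need())
    }
    return result, nil
}

func main() {
    topo := topologyInfo{
        cpusPerSocket: 4, cpusPerCore: 2,
        freeSockets: [][]int{{4, 5, 6, 7}},
        freeCores:   [][]int{{0, 1}},
        freeCPUs:    []int{2},
    }
    cpus, err := takeByTopology(topo, 6)
    fmt.Println(cpus, err) // [4 5 6 7 0 1] <nil>
}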
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_manager.go (generated, vendored, 57 lines changed)

@@ -22,10 +22,10 @@ import (
     "sync"
     "time"
 
-    "github.com/golang/glog"
     cadvisorapi "github.com/google/cadvisor/info/v1"
     "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/util/wait"
+    "k8s.io/klog"
 
     runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
     "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"

@@ -33,7 +33,6 @@ import (
     "k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
     kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
     "k8s.io/kubernetes/pkg/kubelet/status"
-    "path"
 )
 
 // ActivePodsFunc is a function that returns a list of pods to reconcile.

@@ -45,8 +44,8 @@ type runtimeService interface {
 
 type policyName string
 
-// CPUManagerStateFileName is the name file name where cpu manager stores it's state
-const CPUManagerStateFileName = "cpu_manager_state"
+// cpuManagerStateFileName is the name file name where cpu manager stores it's state
+const cpuManagerStateFileName = "cpu_manager_state"
 
 // Manager interface provides methods for Kubelet to manage pod cpus.
 type Manager interface {

@@ -98,7 +97,7 @@ type manager struct {
 var _ Manager = &manager{}
 
 // NewManager creates new cpu manager based on provided policy
-func NewManager(cpuPolicyName string, reconcilePeriod time.Duration, machineInfo *cadvisorapi.MachineInfo, nodeAllocatableReservation v1.ResourceList, stateFileDirecory string) (Manager, error) {
+func NewManager(cpuPolicyName string, reconcilePeriod time.Duration, machineInfo *cadvisorapi.MachineInfo, nodeAllocatableReservation v1.ResourceList, stateFileDirectory string) (Manager, error) {
     var policy Policy
 
     switch policyName(cpuPolicyName) {

@@ -111,7 +110,7 @@ func NewManager(cpuPolicyName string, reconcilePeriod time.Duration, machineInfo
         if err != nil {
             return nil, err
         }
-        glog.Infof("[cpumanager] detected CPU topology: %v", topo)
+        klog.Infof("[cpumanager] detected CPU topology: %v", topo)
         reservedCPUs, ok := nodeAllocatableReservation[v1.ResourceCPU]
         if !ok {
             // The static policy cannot initialize without this information.

@@ -133,13 +132,14 @@ func NewManager(cpuPolicyName string, reconcilePeriod time.Duration, machineInfo
         policy = NewStaticPolicy(topo, numReservedCPUs)
 
     default:
-        glog.Errorf("[cpumanager] Unknown policy \"%s\", falling back to default policy \"%s\"", cpuPolicyName, PolicyNone)
+        klog.Errorf("[cpumanager] Unknown policy \"%s\", falling back to default policy \"%s\"", cpuPolicyName, PolicyNone)
         policy = NewNonePolicy()
     }
 
-    stateImpl := state.NewFileState(
-        path.Join(stateFileDirecory, CPUManagerStateFileName),
-        policy.Name())
+    stateImpl, err := state.NewCheckpointState(stateFileDirectory, cpuManagerStateFileName, policy.Name())
+    if err != nil {
+        return nil, fmt.Errorf("could not initialize checkpoint manager: %v", err)
+    }
 
     manager := &manager{
         policy: policy,

@@ -152,8 +152,8 @@ func NewManager(cpuPolicyName string, reconcilePeriod time.Duration, machineInfo
 }
 
 func (m *manager) Start(activePods ActivePodsFunc, podStatusProvider status.PodStatusProvider, containerRuntime runtimeService) {
-    glog.Infof("[cpumanager] starting with %s policy", m.policy.Name())
-    glog.Infof("[cpumanager] reconciling every %v", m.reconcilePeriod)
+    klog.Infof("[cpumanager] starting with %s policy", m.policy.Name())
+    klog.Infof("[cpumanager] reconciling every %v", m.reconcilePeriod)
 
     m.activePods = activePods
     m.podStatusProvider = podStatusProvider

@@ -170,7 +170,7 @@ func (m *manager) AddContainer(p *v1.Pod, c *v1.Container, containerID string) e
     m.Lock()
     err := m.policy.AddContainer(m.state, p, c, containerID)
     if err != nil {
-        glog.Errorf("[cpumanager] AddContainer error: %v", err)
+        klog.Errorf("[cpumanager] AddContainer error: %v", err)
         m.Unlock()
         return err
     }

@@ -180,13 +180,17 @@ func (m *manager) AddContainer(p *v1.Pod, c *v1.Container, containerID string) e
     if !cpus.IsEmpty() {
         err = m.updateContainerCPUSet(containerID, cpus)
         if err != nil {
-            glog.Errorf("[cpumanager] AddContainer error: %v", err)
-            return err
+            klog.Errorf("[cpumanager] AddContainer error: %v", err)
+            m.Lock()
+            err := m.policy.RemoveContainer(m.state, containerID)
+            if err != nil {
+                klog.Errorf("[cpumanager] AddContainer rollback state error: %v", err)
+            }
+            m.Unlock()
         }
-    } else {
-        glog.V(5).Infof("[cpumanager] update container resources is skipped due to cpu set is empty")
-    }
-    return nil
+        return err
+    }
+
+    klog.V(5).Infof("[cpumanager] update container resources is skipped due to cpu set is empty")
+    return nil
 }

@@ -196,7 +200,7 @@ func (m *manager) RemoveContainer(containerID string) error {
 
     err := m.policy.RemoveContainer(m.state, containerID)
     if err != nil {
-        glog.Errorf("[cpumanager] RemoveContainer error: %v", err)
+        klog.Errorf("[cpumanager] RemoveContainer error: %v", err)
         return err
     }
     return nil

@@ -222,14 +226,14 @@ func (m *manager) reconcileState() (success []reconciledContainer, failure []rec
     for _, container := range allContainers {
         status, ok := m.podStatusProvider.GetPodStatus(pod.UID)
         if !ok {
-            glog.Warningf("[cpumanager] reconcileState: skipping pod; status not found (pod: %s, container: %s)", pod.Name, container.Name)
+            klog.Warningf("[cpumanager] reconcileState: skipping pod; status not found (pod: %s, container: %s)", pod.Name, container.Name)
             failure = append(failure, reconciledContainer{pod.Name, container.Name, ""})
             break
         }
 
         containerID, err := findContainerIDByName(&status, container.Name)
         if err != nil {
-            glog.Warningf("[cpumanager] reconcileState: skipping container; ID not found in status (pod: %s, container: %s, error: %v)", pod.Name, container.Name, err)
+            klog.Warningf("[cpumanager] reconcileState: skipping container; ID not found in status (pod: %s, container: %s, error: %v)", pod.Name, container.Name, err)
             failure = append(failure, reconciledContainer{pod.Name, container.Name, ""})
             continue
         }

@@ -240,11 +244,12 @@ func (m *manager) reconcileState() (success []reconciledContainer, failure []rec
         // - container has been removed from state by RemoveContainer call (DeletionTimestamp is set)
         if _, ok := m.state.GetCPUSet(containerID); !ok {
             if status.Phase == v1.PodRunning && pod.DeletionTimestamp == nil {
-                glog.V(4).Infof("[cpumanager] reconcileState: container is not present in state - trying to add (pod: %s, container: %s, container id: %s)", pod.Name, container.Name, containerID)
+                klog.V(4).Infof("[cpumanager] reconcileState: container is not present in state - trying to add (pod: %s, container: %s, container id: %s)", pod.Name, container.Name, containerID)
                 err := m.AddContainer(pod, &container, containerID)
                 if err != nil {
-                    glog.Errorf("[cpumanager] reconcileState: failed to add container (pod: %s, container: %s, container id: %s, error: %v)", pod.Name, container.Name, containerID, err)
+                    klog.Errorf("[cpumanager] reconcileState: failed to add container (pod: %s, container: %s, container id: %s, error: %v)", pod.Name, container.Name, containerID, err)
                     failure = append(failure, reconciledContainer{pod.Name, container.Name, containerID})
                     continue
                 }
             } else {
                 // if DeletionTimestamp is set, pod has already been removed from state

@@ -256,15 +261,15 @@ func (m *manager) reconcileState() (success []reconciledContainer, failure []rec
         cset := m.state.GetCPUSetOrDefault(containerID)
         if cset.IsEmpty() {
             // NOTE: This should not happen outside of tests.
-            glog.Infof("[cpumanager] reconcileState: skipping container; assigned cpuset is empty (pod: %s, container: %s)", pod.Name, container.Name)
+            klog.Infof("[cpumanager] reconcileState: skipping container; assigned cpuset is empty (pod: %s, container: %s)", pod.Name, container.Name)
             failure = append(failure, reconciledContainer{pod.Name, container.Name, containerID})
             continue
         }
 
-        glog.V(4).Infof("[cpumanager] reconcileState: updating container (pod: %s, container: %s, container id: %s, cpuset: \"%v\")", pod.Name, container.Name, containerID, cset)
+        klog.V(4).Infof("[cpumanager] reconcileState: updating container (pod: %s, container: %s, container id: %s, cpuset: \"%v\")", pod.Name, container.Name, containerID, cset)
         err = m.updateContainerCPUSet(containerID, cset)
         if err != nil {
-            glog.Errorf("[cpumanager] reconcileState: failed to update container (pod: %s, container: %s, container id: %s, cpuset: \"%v\", error: %v)", pod.Name, container.Name, containerID, cset, err)
+            klog.Errorf("[cpumanager] reconcileState: failed to update container (pod: %s, container: %s, container id: %s, cpuset: \"%v\", error: %v)", pod.Name, container.Name, containerID, cset, err)
             failure = append(failure, reconciledContainer{pod.Name, container.Name, containerID})
             continue
         }
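
The AddContainer hunk above changes more than logging: a failed updateContainerCPUSet now rolls the policy assignment back instead of leaving stale state behind. A minimal, self-contained sketch of that rollback shape, with illustrative names rather than the vendored API:

package main

import (
    "errors"
    "fmt"
    "sync"
)

// store is an illustrative stand-in for the cpu manager's state object.
type store struct {
    mu          sync.Mutex
    assignments map[string][]int
}

func (s *store) set(id string, cpus []int) {
    s.mu.Lock()
    defer s.mu.Unlock()
    s.assignments[id] = cpus
}

func (s *store) remove(id string) {
    s.mu.Lock()
    defer s.mu.Unlock()
    delete(s.assignments, id)
}

// addContainer records an assignment, tries to apply it to the runtime,
// and rolls the record back if the runtime update fails -- the same shape
// as the new AddContainer error path in the hunk above.
func addContainer(s *store, id string, cpus []int, apply func() error) error {
    s.set(id, cpus)
    if err := apply(); err != nil {
        s.remove(id) // roll back so bookkeeping matches the runtime
        return err
    }
    return nil
}

func main() {
    s := &store{assignments: map[string][]int{}}
    err := addContainer(s, "c1", []int{1, 2}, func() error {
        return errors.New("runtime update failed")
    })
    fmt.Println(err, s.assignments) // runtime update failed map[]
}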
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_manager_test.go (generated, vendored, 53 lines changed)

@@ -23,16 +23,18 @@ import (
     "testing"
     "time"
 
-    cadvisorapi "github.com/google/cadvisor/info/v1"
     "io/ioutil"
+    "os"
+
+    cadvisorapi "github.com/google/cadvisor/info/v1"
     "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/types"
     runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
     "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
     "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
     "k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
-    "os"
 )

@@ -139,40 +141,56 @@ func makePod(cpuRequest, cpuLimit string) *v1.Pod {
 }
 
 func TestCPUManagerAdd(t *testing.T) {
+    testPolicy := NewStaticPolicy(
+        &topology.CPUTopology{
+            NumCPUs:    4,
+            NumSockets: 1,
+            NumCores:   4,
+            CPUDetails: map[int]topology.CPUInfo{
+                0: {CoreID: 0, SocketID: 0},
+                1: {CoreID: 1, SocketID: 0},
+                2: {CoreID: 2, SocketID: 0},
+                3: {CoreID: 3, SocketID: 0},
+            },
+        }, 0)
     testCases := []struct {
         description string
-        regErr      error
         updateErr   error
+        policy      Policy
+        expCPUSet   cpuset.CPUSet
         expErr      error
     }{
         {
             description: "cpu manager add - no error",
-            regErr:      nil,
             updateErr:   nil,
+            policy:      testPolicy,
+            expCPUSet:   cpuset.NewCPUSet(3, 4),
             expErr:      nil,
         },
         {
             description: "cpu manager add - policy add container error",
-            regErr:      fmt.Errorf("fake reg error"),
             updateErr:   nil,
-            expErr:      fmt.Errorf("fake reg error"),
+            policy: &mockPolicy{
+                err: fmt.Errorf("fake reg error"),
+            },
+            expCPUSet: cpuset.NewCPUSet(1, 2, 3, 4),
+            expErr:    fmt.Errorf("fake reg error"),
         },
         {
             description: "cpu manager add - container update error",
-            regErr:      nil,
             updateErr:   fmt.Errorf("fake update error"),
-            expErr:      nil,
+            policy:      testPolicy,
+            expCPUSet:   cpuset.NewCPUSet(1, 2, 3, 4),
+            expErr:      fmt.Errorf("fake update error"),
         },
     }
 
     for _, testCase := range testCases {
         mgr := &manager{
-            policy: &mockPolicy{
-                err: testCase.regErr,
-            },
+            policy: testCase.policy,
             state: &mockState{
                 assignments:   state.ContainerCPUAssignments{},
-                defaultCPUSet: cpuset.NewCPUSet(),
+                defaultCPUSet: cpuset.NewCPUSet(1, 2, 3, 4),
             },
             containerRuntime: mockRuntimeService{
                 err: testCase.updateErr,

@@ -181,13 +199,17 @@ func TestCPUManagerAdd(t *testing.T) {
             podStatusProvider: mockPodStatusProvider{},
         }
 
-        pod := makePod("1000", "1000")
+        pod := makePod("2", "2")
         container := &pod.Spec.Containers[0]
         err := mgr.AddContainer(pod, container, "fakeID")
         if !reflect.DeepEqual(err, testCase.expErr) {
             t.Errorf("CPU Manager AddContainer() error (%v). expected error: %v but got: %v",
                 testCase.description, testCase.expErr, err)
         }
+        if !testCase.expCPUSet.Equals(mgr.state.GetDefaultCPUSet()) {
+            t.Errorf("CPU Manager AddContainer() error (%v). expected cpuset: %v but got: %v",
+                testCase.description, testCase.expCPUSet, mgr.state.GetDefaultCPUSet())
+        }
     }
 }

@@ -199,7 +221,6 @@ func TestCPUManagerGenerate(t *testing.T) {
         isTopologyBroken           bool
         expectedPolicy             string
         expectedError              error
-        skipIfPermissionsError     bool
     }{
         {
             description: "set none policy",

@@ -218,7 +239,6 @@ func TestCPUManagerGenerate(t *testing.T) {
             cpuPolicyName:              "static",
             nodeAllocatableReservation: v1.ResourceList{v1.ResourceCPU: *resource.NewQuantity(3, resource.DecimalSI)},
             expectedPolicy:             "static",
-            skipIfPermissionsError:     true,
         },
         {
             description: "static policy - broken topology",

@@ -226,21 +246,18 @@ func TestCPUManagerGenerate(t *testing.T) {
             nodeAllocatableReservation: v1.ResourceList{},
             isTopologyBroken:           true,
             expectedError:              fmt.Errorf("could not detect number of cpus"),
-            skipIfPermissionsError:     true,
         },
         {
             description:                "static policy - broken reservation",
             cpuPolicyName:              "static",
             nodeAllocatableReservation: v1.ResourceList{},
             expectedError:              fmt.Errorf("unable to determine reserved CPU resources for static policy"),
-            skipIfPermissionsError:     true,
         },
         {
             description:                "static policy - no CPU resources",
             cpuPolicyName:              "static",
             nodeAllocatableReservation: v1.ResourceList{v1.ResourceCPU: *resource.NewQuantity(0, resource.DecimalSI)},
             expectedError:              fmt.Errorf("the static policy requires systemreserved.cpu + kubereserved.cpu to be greater than zero"),
-            skipIfPermissionsError:     true,
         },
     }
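
A note on the expected cpusets above: the mock state starts with a default set of {1, 2, 3, 4}, and makePod("2", "2") builds a guaranteed pod requesting two exclusive CPUs, so in the success case the static policy presumably claims CPUs 1 and 2 and the shared default set shrinks to {3, 4}; in both error cases the rollback path leaves the full {1, 2, 3, 4} untouched, which is what the new expCPUSet assertion checks.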
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go (generated, vendored, 10 lines changed)

@@ -17,8 +17,8 @@ limitations under the License.
 package cpumanager
 
 import (
-    "github.com/golang/glog"
     "k8s.io/api/core/v1"
+    "k8s.io/klog"
     "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
     "k8s.io/kubernetes/pkg/kubelet/status"
 )

@@ -28,21 +28,21 @@ type fakeManager struct {
 }
 
 func (m *fakeManager) Start(activePods ActivePodsFunc, podStatusProvider status.PodStatusProvider, containerRuntime runtimeService) {
-    glog.Info("[fake cpumanager] Start()")
+    klog.Info("[fake cpumanager] Start()")
 }
 
 func (m *fakeManager) Policy() Policy {
-    glog.Info("[fake cpumanager] Policy()")
+    klog.Info("[fake cpumanager] Policy()")
     return NewNonePolicy()
 }
 
 func (m *fakeManager) AddContainer(pod *v1.Pod, container *v1.Container, containerID string) error {
-    glog.Infof("[fake cpumanager] AddContainer (pod: %s, container: %s, container id: %s)", pod.Name, container.Name, containerID)
+    klog.Infof("[fake cpumanager] AddContainer (pod: %s, container: %s, container id: %s)", pod.Name, container.Name, containerID)
     return nil
 }
 
 func (m *fakeManager) RemoveContainer(containerID string) error {
-    glog.Infof("[fake cpumanager] RemoveContainer (container id: %s)", containerID)
+    klog.Infof("[fake cpumanager] RemoveContainer (container id: %s)", containerID)
     return nil
 }
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_none.go (generated, vendored, 4 lines changed)

@@ -17,8 +17,8 @@ limitations under the License.
 package cpumanager
 
 import (
-    "github.com/golang/glog"
     "k8s.io/api/core/v1"
+    "k8s.io/klog"
     "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
 )

@@ -39,7 +39,7 @@ func (p *nonePolicy) Name() string {
 }
 
 func (p *nonePolicy) Start(s state.State) {
-    glog.Info("[cpumanager] none policy: Start")
+    klog.Info("[cpumanager] none policy: Start")
 }
 
 func (p *nonePolicy) AddContainer(s state.State, pod *v1.Pod, container *v1.Container, containerID string) error {
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_static.go (generated, vendored, 39 lines changed)

@@ -19,8 +19,8 @@ package cpumanager
 import (
     "fmt"
 
-    "github.com/golang/glog"
     "k8s.io/api/core/v1"
+    "k8s.io/klog"
     v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
     "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
     "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"

@@ -30,8 +30,6 @@ import (
 // PolicyStatic is the name of the static policy
 const PolicyStatic policyName = "static"
 
-var _ Policy = &staticPolicy{}
-
 // staticPolicy is a CPU manager policy that does not change CPU
 // assignments for exclusively pinned guaranteed containers after the main
 // container process starts.

@@ -96,7 +94,7 @@ func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int) Policy
         panic(fmt.Sprintf("[cpumanager] unable to reserve the required amount of CPUs (size of %s did not equal %d)", reserved, numReservedCPUs))
     }
 
-    glog.Infof("[cpumanager] reserved %d CPUs (\"%s\") not available for exclusive assignment", reserved.Size(), reserved)
+    klog.Infof("[cpumanager] reserved %d CPUs (\"%s\") not available for exclusive assignment", reserved.Size(), reserved)
 
     return &staticPolicy{
         topology: topology,

@@ -110,7 +108,7 @@ func (p *staticPolicy) Name() string {
 
 func (p *staticPolicy) Start(s state.State) {
     if err := p.validateState(s); err != nil {
-        glog.Errorf("[cpumanager] static policy invalid state: %s\n", err.Error())
+        klog.Errorf("[cpumanager] static policy invalid state: %s\n", err.Error())
         panic("[cpumanager] - please drain node and remove policy state file")
     }
 }

@@ -131,7 +129,7 @@ func (p *staticPolicy) validateState(s state.State) error {
     }
 
     // State has already been initialized from file (is not empty)
-    // 1 Check if the reserved cpuset is not part of default cpuset because:
+    // 1. Check if the reserved cpuset is not part of default cpuset because:
     // - kube/system reserved have changed (increased) - may lead to some containers not being able to start
     // - user tampered with file
     if !p.reserved.Intersection(tmpDefaultCPUset).Equals(p.reserved) {

@@ -147,6 +145,23 @@ func (p *staticPolicy) validateState(s state.State) error {
                 cID, cset.String(), tmpDefaultCPUset.String())
         }
     }
+
+    // 3. It's possible that the set of available CPUs has changed since
+    // the state was written. This can be due to for example
+    // offlining a CPU when kubelet is not running. If this happens,
+    // CPU manager will run into trouble when later it tries to
+    // assign non-existent CPUs to containers. Validate that the
+    // topology that was received during CPU manager startup matches with
+    // the set of CPUs stored in the state.
+    totalKnownCPUs := tmpDefaultCPUset.Clone()
+    for _, cset := range tmpAssignments {
+        totalKnownCPUs = totalKnownCPUs.Union(cset)
+    }
+    if !totalKnownCPUs.Equals(p.topology.CPUDetails.CPUs()) {
+        return fmt.Errorf("current set of available CPUs \"%s\" doesn't match with CPUs in state \"%s\"",
+            p.topology.CPUDetails.CPUs().String(), totalKnownCPUs.String())
+    }
+
     return nil
 }

@@ -157,17 +172,17 @@ func (p *staticPolicy) assignableCPUs(s state.State) cpuset.CPUSet {
 
 func (p *staticPolicy) AddContainer(s state.State, pod *v1.Pod, container *v1.Container, containerID string) error {
     if numCPUs := guaranteedCPUs(pod, container); numCPUs != 0 {
-        glog.Infof("[cpumanager] static policy: AddContainer (pod: %s, container: %s, container id: %s)", pod.Name, container.Name, containerID)
+        klog.Infof("[cpumanager] static policy: AddContainer (pod: %s, container: %s, container id: %s)", pod.Name, container.Name, containerID)
         // container belongs in an exclusively allocated pool
 
         if _, ok := s.GetCPUSet(containerID); ok {
-            glog.Infof("[cpumanager] static policy: container already present in state, skipping (container: %s, container id: %s)", container.Name, containerID)
+            klog.Infof("[cpumanager] static policy: container already present in state, skipping (container: %s, container id: %s)", container.Name, containerID)
             return nil
         }
 
         cpuset, err := p.allocateCPUs(s, numCPUs)
         if err != nil {
-            glog.Errorf("[cpumanager] unable to allocate %d CPUs (container id: %s, error: %v)", numCPUs, containerID, err)
+            klog.Errorf("[cpumanager] unable to allocate %d CPUs (container id: %s, error: %v)", numCPUs, containerID, err)
             return err
         }
         s.SetCPUSet(containerID, cpuset)

@@ -177,7 +192,7 @@ func (p *staticPolicy) AddContainer(s state.State, pod *v1.Pod, container *v1.Co
 }
 
 func (p *staticPolicy) RemoveContainer(s state.State, containerID string) error {
-    glog.Infof("[cpumanager] static policy: RemoveContainer (container id: %s)", containerID)
+    klog.Infof("[cpumanager] static policy: RemoveContainer (container id: %s)", containerID)
     if toRelease, ok := s.GetCPUSet(containerID); ok {
         s.Delete(containerID)
         // Mutate the shared pool, adding released cpus.

@@ -187,7 +202,7 @@ func (p *staticPolicy) RemoveContainer(s state.State, containerID string) error
 }
 
 func (p *staticPolicy) allocateCPUs(s state.State, numCPUs int) (cpuset.CPUSet, error) {
-    glog.Infof("[cpumanager] allocateCpus: (numCPUs: %d)", numCPUs)
+    klog.Infof("[cpumanager] allocateCpus: (numCPUs: %d)", numCPUs)
     result, err := takeByTopology(p.topology, p.assignableCPUs(s), numCPUs)
     if err != nil {
         return cpuset.NewCPUSet(), err

@@ -195,7 +210,7 @@ func (p *staticPolicy) allocateCPUs(s state.State, numCPUs int) (cpuset.CPUSet,
     // Remove allocated CPUs from the shared CPUSet.
     s.SetDefaultCPUSet(s.GetDefaultCPUSet().Difference(result))
 
-    glog.Infof("[cpumanager] allocateCPUs: returning \"%v\"", result)
+    klog.Infof("[cpumanager] allocateCPUs: returning \"%v\"", result)
     return result, nil
 }
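
The new validateState step 3 unions the default set with every per-container assignment and requires the result to equal the detected topology. A standalone illustration of the same check with plain integer sets (the real code uses cpuset.CPUSet; this does not):

package main

import "fmt"

// stateMatchesTopology reproduces the union-and-compare check from
// validateState step 3 with simplified types.
func stateMatchesTopology(defaultSet []int, assignments map[string][]int, topoCPUs []int) bool {
    known := map[int]bool{}
    for _, c := range defaultSet {
        known[c] = true
    }
    for _, cpus := range assignments {
        for _, c := range cpus {
            known[c] = true
        }
    }
    if len(known) != len(topoCPUs) {
        return false
    }
    for _, c := range topoCPUs {
        if !known[c] {
            return false
        }
    }
    return true
}

func main() {
    // CPU 12 appears in the saved state but not in the detected topology,
    // e.g. a CPU was taken offline while the kubelet was down.
    ok := stateMatchesTopology(
        []int{5, 6, 7, 8, 9, 10, 11, 12},
        map[string][]int{"0": {0, 1, 2}, "1": {3, 4}},
        []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
    )
    fmt.Println(ok) // false -> validateState errors and Start panics
}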
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_static_test.go (generated, vendored, 20 lines changed)

@@ -87,6 +87,26 @@ func TestStaticPolicyStart(t *testing.T) {
             stDefaultCPUSet: cpuset.NewCPUSet(2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
             expPanic:        true,
         },
+        {
+            description: "core 12 is not present in topology but is in state cpuset",
+            topo:        topoDualSocketHT,
+            stAssignments: state.ContainerCPUAssignments{
+                "0": cpuset.NewCPUSet(0, 1, 2),
+                "1": cpuset.NewCPUSet(3, 4),
+            },
+            stDefaultCPUSet: cpuset.NewCPUSet(5, 6, 7, 8, 9, 10, 11, 12),
+            expPanic:        true,
+        },
+        {
+            description: "core 11 is present in topology but is not in state cpuset",
+            topo:        topoDualSocketHT,
+            stAssignments: state.ContainerCPUAssignments{
+                "0": cpuset.NewCPUSet(0, 1, 2),
+                "1": cpuset.NewCPUSet(3, 4),
+            },
+            stDefaultCPUSet: cpuset.NewCPUSet(5, 6, 7, 8, 9, 10),
+            expPanic:        true,
+        },
     }
     for _, testCase := range testCases {
         t.Run(testCase.description, func(t *testing.T) {
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/BUILD (generated, vendored, 24 lines changed)

@@ -3,23 +3,36 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
 go_library(
     name = "go_default_library",
     srcs = [
+        "checkpoint.go",
         "state.go",
+        "state_checkpoint.go",
         "state_file.go",
         "state_mem.go",
     ],
     importpath = "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state",
     visibility = ["//visibility:public"],
     deps = [
+        "//pkg/kubelet/checkpointmanager:go_default_library",
+        "//pkg/kubelet/checkpointmanager/checksum:go_default_library",
+        "//pkg/kubelet/checkpointmanager/errors:go_default_library",
         "//pkg/kubelet/cm/cpuset:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
 
 go_test(
     name = "go_default_test",
-    srcs = ["state_file_test.go"],
+    srcs = [
+        "state_checkpoint_test.go",
+        "state_compatibility_test.go",
+        "state_file_test.go",
+    ],
     embed = [":go_default_library"],
-    deps = ["//pkg/kubelet/cm/cpuset:go_default_library"],
+    deps = [
+        "//pkg/kubelet/checkpointmanager:go_default_library",
+        "//pkg/kubelet/cm/cpumanager/state/testing:go_default_library",
+        "//pkg/kubelet/cm/cpuset:go_default_library",
+    ],
 )

@@ -31,7 +44,10 @@ filegroup(
 
 filegroup(
     name = "all-srcs",
-    srcs = [":package-srcs"],
+    srcs = [
+        ":package-srcs",
+        "//pkg/kubelet/cm/cpumanager/state/testing:all-srcs",
+    ],
     tags = ["automanaged"],
     visibility = ["//visibility:public"],
 )
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/checkpoint.go (generated, vendored, new file, 67 lines)

@@ -0,0 +1,67 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package state

import (
    "encoding/json"

    "k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
    "k8s.io/kubernetes/pkg/kubelet/checkpointmanager/checksum"
)

var _ checkpointmanager.Checkpoint = &CPUManagerCheckpoint{}

// CPUManagerCheckpoint struct is used to store cpu/pod assignments in a checkpoint
type CPUManagerCheckpoint struct {
    PolicyName    string            `json:"policyName"`
    DefaultCPUSet string            `json:"defaultCpuSet"`
    Entries       map[string]string `json:"entries,omitempty"`
    Checksum      checksum.Checksum `json:"checksum"`
}

// NewCPUManagerCheckpoint returns an instance of Checkpoint
func NewCPUManagerCheckpoint() *CPUManagerCheckpoint {
    return &CPUManagerCheckpoint{
        Entries: make(map[string]string),
    }
}

// MarshalCheckpoint returns marshalled checkpoint
func (cp *CPUManagerCheckpoint) MarshalCheckpoint() ([]byte, error) {
    // make sure checksum wasn't set before so it doesn't affect output checksum
    cp.Checksum = 0
    cp.Checksum = checksum.New(cp)
    return json.Marshal(*cp)
}

// UnmarshalCheckpoint tries to unmarshal passed bytes to checkpoint
func (cp *CPUManagerCheckpoint) UnmarshalCheckpoint(blob []byte) error {
    return json.Unmarshal(blob, cp)
}

// VerifyChecksum verifies that current checksum of checkpoint is valid
func (cp *CPUManagerCheckpoint) VerifyChecksum() error {
    if cp.Checksum == 0 {
        // accept empty checksum for compatibility with old file backend
        return nil
    }
    ck := cp.Checksum
    cp.Checksum = 0
    err := ck.Verify(cp)
    cp.Checksum = ck
    return err
}
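
MarshalCheckpoint and VerifyChecksum both zero the Checksum field before hashing, so the checksum never covers itself, and a zero checksum is accepted for files written by the old backend. A self-contained sketch of the same round-trip, substituting an FNV hash over the JSON encoding for the kubelet's checksum package (an illustration, not the actual checksum algorithm):

package main

import (
    "encoding/json"
    "fmt"
    "hash/fnv"
)

type checkpoint struct {
    PolicyName    string            `json:"policyName"`
    DefaultCPUSet string            `json:"defaultCpuSet"`
    Entries       map[string]string `json:"entries,omitempty"`
    Checksum      uint64            `json:"checksum"`
}

// sum hashes the checkpoint with its Checksum field zeroed, mirroring how
// MarshalCheckpoint/VerifyChecksum avoid hashing the hash itself.
func sum(cp checkpoint) uint64 {
    cp.Checksum = 0 // cp is a copy, so the caller's field is untouched
    b, _ := json.Marshal(cp)
    h := fnv.New64a()
    h.Write(b)
    return h.Sum64()
}

func main() {
    cp := checkpoint{
        PolicyName:    "none",
        DefaultCPUSet: "4-6",
        Entries:       map[string]string{},
    }
    cp.Checksum = sum(cp)

    fmt.Println("valid:", sum(cp) == cp.Checksum) // true

    cp.DefaultCPUSet = "0-3" // tamper with the stored data
    fmt.Println("valid after tamper:", sum(cp) == cp.Checksum) // false
}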
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_checkpoint.go (generated, vendored, new file, 194 lines)

@@ -0,0 +1,194 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package state

import (
    "fmt"
    "path"
    "sync"

    "k8s.io/klog"
    "k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
    "k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors"
    "k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
)

var _ State = &stateCheckpoint{}

type stateCheckpoint struct {
    mux               sync.RWMutex
    policyName        string
    cache             State
    checkpointManager checkpointmanager.CheckpointManager
    checkpointName    string
}

// NewCheckpointState creates new State for keeping track of cpu/pod assignment with checkpoint backend
func NewCheckpointState(stateDir, checkpointName, policyName string) (State, error) {
    checkpointManager, err := checkpointmanager.NewCheckpointManager(stateDir)
    if err != nil {
        return nil, fmt.Errorf("failed to initialize checkpoint manager: %v", err)
    }
    stateCheckpoint := &stateCheckpoint{
        cache:             NewMemoryState(),
        policyName:        policyName,
        checkpointManager: checkpointManager,
        checkpointName:    checkpointName,
    }

    if err := stateCheckpoint.restoreState(); err != nil {
        return nil, fmt.Errorf("could not restore state from checkpoint: %v\n"+
            "Please drain this node and delete the CPU manager checkpoint file %q before restarting Kubelet.",
            err, path.Join(stateDir, checkpointName))
    }

    return stateCheckpoint, nil
}

// restores state from a checkpoint and creates it if it doesn't exist
func (sc *stateCheckpoint) restoreState() error {
    sc.mux.Lock()
    defer sc.mux.Unlock()
    var err error

    // used when all parsing is ok
    tmpAssignments := make(ContainerCPUAssignments)
    tmpDefaultCPUSet := cpuset.NewCPUSet()
    tmpContainerCPUSet := cpuset.NewCPUSet()

    checkpoint := NewCPUManagerCheckpoint()
    if err = sc.checkpointManager.GetCheckpoint(sc.checkpointName, checkpoint); err != nil {
        if err == errors.ErrCheckpointNotFound {
            sc.storeState()
            return nil
        }
        return err
    }

    if sc.policyName != checkpoint.PolicyName {
        return fmt.Errorf("configured policy %q differs from state checkpoint policy %q", sc.policyName, checkpoint.PolicyName)
    }

    if tmpDefaultCPUSet, err = cpuset.Parse(checkpoint.DefaultCPUSet); err != nil {
        return fmt.Errorf("could not parse default cpu set %q: %v", checkpoint.DefaultCPUSet, err)
    }

    for containerID, cpuString := range checkpoint.Entries {
        if tmpContainerCPUSet, err = cpuset.Parse(cpuString); err != nil {
            return fmt.Errorf("could not parse cpuset %q for container id %q: %v", cpuString, containerID, err)
        }
        tmpAssignments[containerID] = tmpContainerCPUSet
    }

    sc.cache.SetDefaultCPUSet(tmpDefaultCPUSet)
    sc.cache.SetCPUAssignments(tmpAssignments)

    klog.V(2).Info("[cpumanager] state checkpoint: restored state from checkpoint")
    klog.V(2).Infof("[cpumanager] state checkpoint: defaultCPUSet: %s", tmpDefaultCPUSet.String())

    return nil
}

// saves state to a checkpoint, caller is responsible for locking
func (sc *stateCheckpoint) storeState() {
    checkpoint := NewCPUManagerCheckpoint()
    checkpoint.PolicyName = sc.policyName
    checkpoint.DefaultCPUSet = sc.cache.GetDefaultCPUSet().String()

    for containerID, cset := range sc.cache.GetCPUAssignments() {
        checkpoint.Entries[containerID] = cset.String()
    }

    err := sc.checkpointManager.CreateCheckpoint(sc.checkpointName, checkpoint)

    if err != nil {
        panic("[cpumanager] could not save checkpoint: " + err.Error())
    }
}

// GetCPUSet returns current CPU set
func (sc *stateCheckpoint) GetCPUSet(containerID string) (cpuset.CPUSet, bool) {
    sc.mux.RLock()
    defer sc.mux.RUnlock()

    res, ok := sc.cache.GetCPUSet(containerID)
    return res, ok
}

// GetDefaultCPUSet returns default CPU set
func (sc *stateCheckpoint) GetDefaultCPUSet() cpuset.CPUSet {
    sc.mux.RLock()
    defer sc.mux.RUnlock()

    return sc.cache.GetDefaultCPUSet()
}

// GetCPUSetOrDefault returns current CPU set, or default one if it wasn't changed
func (sc *stateCheckpoint) GetCPUSetOrDefault(containerID string) cpuset.CPUSet {
    sc.mux.RLock()
    defer sc.mux.RUnlock()

    return sc.cache.GetCPUSetOrDefault(containerID)
}

// GetCPUAssignments returns current CPU to pod assignments
func (sc *stateCheckpoint) GetCPUAssignments() ContainerCPUAssignments {
    sc.mux.RLock()
    defer sc.mux.RUnlock()

    return sc.cache.GetCPUAssignments()
}

// SetCPUSet sets CPU set
func (sc *stateCheckpoint) SetCPUSet(containerID string, cset cpuset.CPUSet) {
    sc.mux.Lock()
    defer sc.mux.Unlock()
    sc.cache.SetCPUSet(containerID, cset)
    sc.storeState()
}

// SetDefaultCPUSet sets default CPU set
func (sc *stateCheckpoint) SetDefaultCPUSet(cset cpuset.CPUSet) {
    sc.mux.Lock()
    defer sc.mux.Unlock()
    sc.cache.SetDefaultCPUSet(cset)
    sc.storeState()
}

// SetCPUAssignments sets CPU to pod assignments
func (sc *stateCheckpoint) SetCPUAssignments(a ContainerCPUAssignments) {
    sc.mux.Lock()
    defer sc.mux.Unlock()
    sc.cache.SetCPUAssignments(a)
    sc.storeState()
}

// Delete deletes assignment for specified pod
func (sc *stateCheckpoint) Delete(containerID string) {
    sc.mux.Lock()
    defer sc.mux.Unlock()
    sc.cache.Delete(containerID)
    sc.storeState()
}

// ClearState clears the state and saves it in a checkpoint
func (sc *stateCheckpoint) ClearState() {
    sc.mux.Lock()
    defer sc.mux.Unlock()
    sc.cache.ClearState()
    sc.storeState()
}
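
How the rest of the diff consumes this type, sketched in isolation. This only builds inside the Kubernetes source tree since it imports internal kubelet packages, and restoreOrCreate is a hypothetical helper name, not part of the vendored API:

// Illustrative wiring, in the same shape as NewManager above.
package cpumanagerexample

import (
    "fmt"

    "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
    "k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
)

func restoreOrCreate(stateDir string) (state.State, error) {
    // The checkpoint is created on first use and restored afterwards;
    // a policy mismatch or a corrupted checksum surfaces as an error here.
    s, err := state.NewCheckpointState(stateDir, "cpu_manager_state", "static")
    if err != nil {
        return nil, fmt.Errorf("could not initialize checkpoint manager: %v", err)
    }
    // Every mutation below is persisted immediately by storeState().
    s.SetDefaultCPUSet(cpuset.NewCPUSet(0, 1, 2, 3))
    return s, nil
}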
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_checkpoint_test.go (generated, vendored, new file, 326 lines)

@@ -0,0 +1,326 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package state

import (
    "os"
    "strings"
    "testing"

    "k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
    testutil "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/testing"
    "k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
)

const testingCheckpoint = "cpumanager_checkpoint_test"

var testingDir = os.TempDir()

func TestCheckpointStateRestore(t *testing.T) {
    testCases := []struct {
        description       string
        checkpointContent string
        policyName        string
        expectedError     string
        expectedState     *stateMemory
    }{
        {
            "Restore non-existing checkpoint",
            "",
            "none",
            "",
            &stateMemory{},
        },
        {
            "Restore default cpu set",
            `{
                "policyName": "none",
                "defaultCPUSet": "4-6",
                "entries": {},
                "checksum": 2912033808
            }`,
            "none",
            "",
            &stateMemory{
                defaultCPUSet: cpuset.NewCPUSet(4, 5, 6),
            },
        },
        {
            "Restore valid checkpoint",
            `{
                "policyName": "none",
                "defaultCPUSet": "1-3",
                "entries": {
                    "container1": "4-6",
                    "container2": "1-3"
                },
                "checksum": 1535905563
            }`,
            "none",
            "",
            &stateMemory{
                assignments: ContainerCPUAssignments{
                    "container1": cpuset.NewCPUSet(4, 5, 6),
                    "container2": cpuset.NewCPUSet(1, 2, 3),
                },
                defaultCPUSet: cpuset.NewCPUSet(1, 2, 3),
            },
        },
        {
            "Restore checkpoint with invalid checksum",
            `{
                "policyName": "none",
                "defaultCPUSet": "4-6",
                "entries": {},
                "checksum": 1337
            }`,
            "none",
            "checkpoint is corrupted",
            &stateMemory{},
        },
        {
            "Restore checkpoint with invalid JSON",
            `{`,
            "none",
            "unexpected end of JSON input",
            &stateMemory{},
        },
        {
            "Restore checkpoint with invalid policy name",
            `{
                "policyName": "other",
                "defaultCPUSet": "1-3",
                "entries": {},
                "checksum": 4195836012
            }`,
            "none",
            `configured policy "none" differs from state checkpoint policy "other"`,
            &stateMemory{},
        },
        {
            "Restore checkpoint with unparsable default cpu set",
            `{
                "policyName": "none",
                "defaultCPUSet": "1.3",
                "entries": {},
                "checksum": 1025273327
            }`,
            "none",
            `could not parse default cpu set "1.3": strconv.Atoi: parsing "1.3": invalid syntax`,
            &stateMemory{},
        },
        {
            "Restore checkpoint with unparsable assignment entry",
            `{
                "policyName": "none",
                "defaultCPUSet": "1-3",
                "entries": {
                    "container1": "4-6",
                    "container2": "asd"
                },
                "checksum": 2764213924
            }`,
            "none",
            `could not parse cpuset "asd" for container id "container2": strconv.Atoi: parsing "asd": invalid syntax`,
            &stateMemory{},
        },
    }

    // create checkpoint manager for testing
    cpm, err := checkpointmanager.NewCheckpointManager(testingDir)
    if err != nil {
        t.Fatalf("could not create testing checkpoint manager: %v", err)
    }

    for _, tc := range testCases {
        t.Run(tc.description, func(t *testing.T) {
            // ensure there is no previous checkpoint
            cpm.RemoveCheckpoint(testingCheckpoint)

            // prepare checkpoint for testing
            if strings.TrimSpace(tc.checkpointContent) != "" {
                checkpoint := &testutil.MockCheckpoint{Content: tc.checkpointContent}
                if err := cpm.CreateCheckpoint(testingCheckpoint, checkpoint); err != nil {
                    t.Fatalf("could not create testing checkpoint: %v", err)
                }
            }

            restoredState, err := NewCheckpointState(testingDir, testingCheckpoint, tc.policyName)
            if err != nil {
                if strings.TrimSpace(tc.expectedError) != "" {
                    tc.expectedError = "could not restore state from checkpoint: " + tc.expectedError
                    if strings.HasPrefix(err.Error(), tc.expectedError) {
                        t.Logf("got expected error: %v", err)
                        return
                    }
                }
                t.Fatalf("unexpected error while creatng checkpointState: %v", err)
            }

            // compare state after restoration with the one expected
            AssertStateEqual(t, restoredState, tc.expectedState)
        })
    }
}

func TestCheckpointStateStore(t *testing.T) {
    testCases := []struct {
        description   string
        expectedState *stateMemory
    }{
        {
            "Store default cpu set",
            &stateMemory{defaultCPUSet: cpuset.NewCPUSet(1, 2, 3)},
        },
        {
            "Store assignments",
            &stateMemory{
                assignments: map[string]cpuset.CPUSet{
                    "container1": cpuset.NewCPUSet(1, 5, 8),
                },
            },
        },
    }

    cpm, err := checkpointmanager.NewCheckpointManager(testingDir)
    if err != nil {
        t.Fatalf("could not create testing checkpoint manager: %v", err)
    }

    for _, tc := range testCases {
        t.Run(tc.description, func(t *testing.T) {
            // ensure there is no previous checkpoint
            cpm.RemoveCheckpoint(testingCheckpoint)

            cs1, err := NewCheckpointState(testingDir, testingCheckpoint, "none")
            if err != nil {
                t.Fatalf("could not create testing checkpointState instance: %v", err)
            }

            // set values of cs1 instance so they are stored in checkpoint and can be read by cs2
            cs1.SetDefaultCPUSet(tc.expectedState.defaultCPUSet)
            cs1.SetCPUAssignments(tc.expectedState.assignments)

            // restore checkpoint with previously stored values
            cs2, err := NewCheckpointState(testingDir, testingCheckpoint, "none")
            if err != nil {
                t.Fatalf("could not create testing checkpointState instance: %v", err)
            }

            AssertStateEqual(t, cs2, tc.expectedState)
        })
    }
}

func TestCheckpointStateHelpers(t *testing.T) {
    testCases := []struct {
        description   string
        defaultCPUset cpuset.CPUSet
        containers    map[string]cpuset.CPUSet
    }{
        {
            description:   "One container",
            defaultCPUset: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8),
            containers: map[string]cpuset.CPUSet{
                "c1": cpuset.NewCPUSet(0, 1),
            },
        },
        {
            description:   "Two containers",
            defaultCPUset: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8),
            containers: map[string]cpuset.CPUSet{
                "c1": cpuset.NewCPUSet(0, 1),
                "c2": cpuset.NewCPUSet(2, 3, 4, 5),
            },
        },
        {
            description:   "Container without assigned cpus",
            defaultCPUset: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8),
            containers: map[string]cpuset.CPUSet{
                "c1": cpuset.NewCPUSet(),
            },
        },
    }

    cpm, err := checkpointmanager.NewCheckpointManager(testingDir)
    if err != nil {
        t.Fatalf("could not create testing checkpoint manager: %v", err)
    }

    for _, tc := range testCases {
        t.Run(tc.description, func(t *testing.T) {
            // ensure there is no previous checkpoint
            cpm.RemoveCheckpoint(testingCheckpoint)

            state, err := NewCheckpointState(testingDir, testingCheckpoint, "none")
            if err != nil {
                t.Fatalf("could not create testing checkpointState instance: %v", err)
            }
            state.SetDefaultCPUSet(tc.defaultCPUset)

            for container, set := range tc.containers {
                state.SetCPUSet(container, set)
                if cpus, _ := state.GetCPUSet(container); !cpus.Equals(set) {
                    t.Fatalf("state inconsistent, got %q instead of %q", set, cpus)
                }

                state.Delete(container)
                if _, ok := state.GetCPUSet(container); ok {
                    t.Fatal("deleted container still existing in state")
                }
            }
        })
    }
}

func TestCheckpointStateClear(t *testing.T) {
    testCases := []struct {
        description   string
        defaultCPUset cpuset.CPUSet
        containers    map[string]cpuset.CPUSet
    }{
        {
            "Valid state",
            cpuset.NewCPUSet(1, 5, 10),
            map[string]cpuset.CPUSet{
                "container1": cpuset.NewCPUSet(1, 4),
            },
        },
    }

    for _, tc := range testCases {
        t.Run(tc.description, func(t *testing.T) {
            state, err := NewCheckpointState(testingDir, testingCheckpoint, "none")
            if err != nil {
                t.Fatalf("could not create testing checkpointState instance: %v", err)
            }

            state.SetDefaultCPUSet(tc.defaultCPUset)
            state.SetCPUAssignments(tc.containers)

            state.ClearState()
            if !cpuset.NewCPUSet().Equals(state.GetDefaultCPUSet()) {
                t.Fatal("cleared state with non-empty default cpu set")
            }
            for container := range tc.containers {
                if _, ok := state.GetCPUSet(container); ok {
                    t.Fatalf("container %q with non-default cpu set in cleared state", container)
                }
            }
        })
    }
}
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_compatibility_test.go (generated, vendored, new file, 78 lines)

@@ -0,0 +1,78 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package state

import (
    "os"
    "path"
    "testing"

    "k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
    "k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
)

const compatibilityTestingCheckpoint = "cpumanager_state_compatibility_test"

var state = &stateMemory{
    assignments: ContainerCPUAssignments{
        "container1": cpuset.NewCPUSet(4, 5, 6),
        "container2": cpuset.NewCPUSet(1, 2, 3),
    },
    defaultCPUSet: cpuset.NewCPUSet(1, 2, 3),
}

func TestFileToCheckpointCompatibility(t *testing.T) {
    statePath := path.Join(testingDir, compatibilityTestingCheckpoint)

    // ensure there is no previous state saved at testing path
    os.Remove(statePath)
    // ensure testing state is removed after testing
    defer os.Remove(statePath)

    fileState := NewFileState(statePath, "none")

    fileState.SetDefaultCPUSet(state.defaultCPUSet)
    fileState.SetCPUAssignments(state.assignments)

    restoredState, err := NewCheckpointState(testingDir, compatibilityTestingCheckpoint, "none")
    if err != nil {
        t.Fatalf("could not restore file state: %v", err)
    }

    AssertStateEqual(t, restoredState, state)
}

func TestCheckpointToFileCompatibility(t *testing.T) {
    cpm, err := checkpointmanager.NewCheckpointManager(testingDir)
    if err != nil {
        t.Fatalf("could not create testing checkpoint manager: %v", err)
    }

    // ensure there is no previous checkpoint
    cpm.RemoveCheckpoint(compatibilityTestingCheckpoint)
    // ensure testing checkpoint is removed after testing
    defer cpm.RemoveCheckpoint(compatibilityTestingCheckpoint)

    checkpointState, err := NewCheckpointState(testingDir, compatibilityTestingCheckpoint, "none")

    checkpointState.SetDefaultCPUSet(state.defaultCPUSet)
    checkpointState.SetCPUAssignments(state.assignments)

    restoredState := NewFileState(path.Join(testingDir, compatibilityTestingCheckpoint), "none")

    AssertStateEqual(t, restoredState, state)
}
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_file.go (generated, vendored, 14 lines changed)

@@ -19,8 +19,8 @@ package state
 import (
     "encoding/json"
     "fmt"
-    "github.com/golang/glog"
     "io/ioutil"
+    "k8s.io/klog"
     "k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
     "os"
     "sync"

@@ -79,7 +79,7 @@ func (sf *stateFile) tryRestoreState() error {
     // If the state file does not exist or has zero length, write a new file.
     if os.IsNotExist(err) || len(content) == 0 {
         sf.storeState()
-        glog.Infof("[cpumanager] state file: created new state file \"%s\"", sf.stateFilePath)
+        klog.Infof("[cpumanager] state file: created new state file \"%s\"", sf.stateFilePath)
         return nil
     }

@@ -92,7 +92,7 @@ func (sf *stateFile) tryRestoreState() error {
     var readState stateFileData
 
     if err = json.Unmarshal(content, &readState); err != nil {
-        glog.Errorf("[cpumanager] state file: could not unmarshal, corrupted state file - \"%s\"", sf.stateFilePath)
+        klog.Errorf("[cpumanager] state file: could not unmarshal, corrupted state file - \"%s\"", sf.stateFilePath)
         return err
     }

@@ -101,13 +101,13 @@ func (sf *stateFile) tryRestoreState() error {
     }
 
     if tmpDefaultCPUSet, err = cpuset.Parse(readState.DefaultCPUSet); err != nil {
-        glog.Errorf("[cpumanager] state file: could not parse state file - [defaultCpuSet:\"%s\"]", readState.DefaultCPUSet)
+        klog.Errorf("[cpumanager] state file: could not parse state file - [defaultCpuSet:\"%s\"]", readState.DefaultCPUSet)
         return err
     }
 
     for containerID, cpuString := range readState.Entries {
         if tmpContainerCPUSet, err = cpuset.Parse(cpuString); err != nil {
-            glog.Errorf("[cpumanager] state file: could not parse state file - container id: %s, cpuset: \"%s\"", containerID, cpuString)
+            klog.Errorf("[cpumanager] state file: could not parse state file - container id: %s, cpuset: \"%s\"", containerID, cpuString)
             return err
         }
         tmpAssignments[containerID] = tmpContainerCPUSet

@@ -116,8 +116,8 @@ func (sf *stateFile) tryRestoreState() error {
     sf.cache.SetDefaultCPUSet(tmpDefaultCPUSet)
     sf.cache.SetCPUAssignments(tmpAssignments)
 
-    glog.V(2).Infof("[cpumanager] state file: restored state from state file \"%s\"", sf.stateFilePath)
-    glog.V(2).Infof("[cpumanager] state file: defaultCPUSet: %s", tmpDefaultCPUSet.String())
+    klog.V(2).Infof("[cpumanager] state file: restored state from state file \"%s\"", sf.stateFilePath)
+    klog.V(2).Infof("[cpumanager] state file: defaultCPUSet: %s", tmpDefaultCPUSet.String())
 
     return nil
 }
8
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_file_test.go
generated
vendored
8
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_file_test.go
generated
vendored
@ -34,7 +34,8 @@ func writeToStateFile(statefile string, content string) {
	ioutil.WriteFile(statefile, []byte(content), 0644)
}

func stateEqual(t *testing.T, sf State, sm State) {
// AssertStateEqual marks the provided test as failed if the provided states differ
func AssertStateEqual(t *testing.T, sf State, sm State) {
	cpusetSf := sf.GetDefaultCPUSet()
	cpusetSm := sm.GetDefaultCPUSet()
	if !cpusetSf.Equals(cpusetSm) {
@ -253,7 +254,7 @@ func TestFileStateTryRestore(t *testing.T) {
			}
		}

		stateEqual(t, fileState, tc.expectedState)
		AssertStateEqual(t, fileState, tc.expectedState)
		})
	}
}
@ -363,7 +364,7 @@ func TestUpdateStateFile(t *testing.T) {
			}
		}
		newFileState := NewFileState(sfilePath.Name(), "static")
		stateEqual(t, newFileState, tc.expectedState)
		AssertStateEqual(t, newFileState, tc.expectedState)
		})
	}
}
@ -471,7 +472,6 @@ func TestClearStateStateFile(t *testing.T) {
			t.Error("cleared state shouldn't have any information about containers")
			}
		}

		})
	}
}
14
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_mem.go
generated
vendored
@ -19,7 +19,7 @@ package state
import (
	"sync"

	"github.com/golang/glog"
	"k8s.io/klog"
	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
)

@ -33,7 +33,7 @@ var _ State = &stateMemory{}

// NewMemoryState creates new State for keeping track of cpu/pod assignment
func NewMemoryState() State {
	glog.Infof("[cpumanager] initializing new in-memory state store")
	klog.Infof("[cpumanager] initializing new in-memory state store")
	return &stateMemory{
		assignments:   ContainerCPUAssignments{},
		defaultCPUSet: cpuset.NewCPUSet(),
@ -73,7 +73,7 @@ func (s *stateMemory) SetCPUSet(containerID string, cset cpuset.CPUSet) {
	defer s.Unlock()

	s.assignments[containerID] = cset
	glog.Infof("[cpumanager] updated desired cpuset (container id: %s, cpuset: \"%s\")", containerID, cset)
	klog.Infof("[cpumanager] updated desired cpuset (container id: %s, cpuset: \"%s\")", containerID, cset)
}

func (s *stateMemory) SetDefaultCPUSet(cset cpuset.CPUSet) {
@ -81,7 +81,7 @@ func (s *stateMemory) SetDefaultCPUSet(cset cpuset.CPUSet) {
	defer s.Unlock()

	s.defaultCPUSet = cset
	glog.Infof("[cpumanager] updated default cpuset: \"%s\"", cset)
	klog.Infof("[cpumanager] updated default cpuset: \"%s\"", cset)
}

func (s *stateMemory) SetCPUAssignments(a ContainerCPUAssignments) {
@ -89,7 +89,7 @@ func (s *stateMemory) SetCPUAssignments(a ContainerCPUAssignments) {
	defer s.Unlock()

	s.assignments = a.Clone()
	glog.Infof("[cpumanager] updated cpuset assignments: \"%v\"", a)
	klog.Infof("[cpumanager] updated cpuset assignments: \"%v\"", a)
}

func (s *stateMemory) Delete(containerID string) {
@ -97,7 +97,7 @@ func (s *stateMemory) Delete(containerID string) {
	defer s.Unlock()

	delete(s.assignments, containerID)
	glog.V(2).Infof("[cpumanager] deleted cpuset assignment (container id: %s)", containerID)
	klog.V(2).Infof("[cpumanager] deleted cpuset assignment (container id: %s)", containerID)
}

func (s *stateMemory) ClearState() {
@ -106,5 +106,5 @@ func (s *stateMemory) ClearState() {

	s.defaultCPUSet = cpuset.CPUSet{}
	s.assignments = make(ContainerCPUAssignments)
	glog.V(2).Infof("[cpumanager] cleared state")
	klog.V(2).Infof("[cpumanager] cleared state")
}
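Taken together, the methods above form a small mutex-guarded key/value store keyed by container ID. A usage sketch, assuming the vendored import paths shown in this diff (the container name and CPU numbers are invented):

    package main

    import (
    	"fmt"

    	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
    	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
    )

    func main() {
    	s := state.NewMemoryState()

    	// CPUs 0-3 form the shared pool; container1 gets exclusive CPUs 4-5.
    	s.SetDefaultCPUSet(cpuset.NewCPUSet(0, 1, 2, 3))
    	s.SetCPUSet("container1", cpuset.NewCPUSet(4, 5))

    	// Pinned containers see their own set; everyone else falls back to the default.
    	fmt.Println(s.GetCPUSetOrDefault("container1")) // 4-5
    	fmt.Println(s.GetCPUSetOrDefault("other"))      // 0-3

    	// Delete drops one assignment; ClearState empties the whole store.
    	s.Delete("container1")
    	s.ClearState()
    }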
23
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/testing/BUILD
generated
vendored
Normal file
@ -0,0 +1,23 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
	name = "go_default_library",
	srcs = ["util.go"],
	importpath = "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/testing",
	visibility = ["//visibility:public"],
	deps = ["//pkg/kubelet/checkpointmanager:go_default_library"],
)

filegroup(
	name = "package-srcs",
	srcs = glob(["**"]),
	tags = ["automanaged"],
	visibility = ["//visibility:private"],
)

filegroup(
	name = "all-srcs",
	srcs = [":package-srcs"],
	tags = ["automanaged"],
	visibility = ["//visibility:public"],
)
41
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/testing/util.go
generated
vendored
Normal file
@ -0,0 +1,41 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package testing

import "k8s.io/kubernetes/pkg/kubelet/checkpointmanager"

var _ checkpointmanager.Checkpoint = &MockCheckpoint{}

// MockCheckpoint struct is used for mocking checkpoint values in testing
type MockCheckpoint struct {
	Content string
}

// MarshalCheckpoint returns fake content
func (mc *MockCheckpoint) MarshalCheckpoint() ([]byte, error) {
	return []byte(mc.Content), nil
}

// UnmarshalCheckpoint fakes unmarshaling
func (mc *MockCheckpoint) UnmarshalCheckpoint(blob []byte) error {
	return nil
}

// VerifyChecksum fakes verifying checksum
func (mc *MockCheckpoint) VerifyChecksum() error {
	return nil
}
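Because the package is named testing, callers must alias the import to avoid shadowing the stdlib testing package. A brief sketch of driving the mock through the checkpointmanager.Checkpoint interface (the alias and sample content below are invented for illustration):

    package main

    import (
    	"fmt"

    	"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
    	statetesting "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/testing"
    )

    func main() {
    	// MockCheckpoint satisfies checkpointmanager.Checkpoint but skips real
    	// (un)marshaling and checksum verification, so tests can inject raw bytes.
    	var cp checkpointmanager.Checkpoint = &statetesting.MockCheckpoint{
    		Content: `{"policyName":"none","defaultCPUSet":"0-1"}`,
    	}

    	blob, err := cp.MarshalCheckpoint() // returns Content verbatim
    	fmt.Println(string(blob), err)
    	fmt.Println(cp.VerifyChecksum()) // always nil for the mock
    }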
2
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/BUILD
generated
vendored
@ -10,8 +10,8 @@ go_library(
	visibility = ["//visibility:public"],
	deps = [
		"//pkg/kubelet/cm/cpuset:go_default_library",
		"//vendor/github.com/golang/glog:go_default_library",
		"//vendor/github.com/google/cadvisor/info/v1:go_default_library",
		"//vendor/k8s.io/klog:go_default_library",
	],
)
4
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/topology.go
generated
vendored
@ -20,8 +20,8 @@ import (
	"fmt"
	"sort"

	"github.com/golang/glog"
	cadvisorapi "github.com/google/cadvisor/info/v1"
	"k8s.io/klog"
	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
)

@ -156,7 +156,7 @@ func Discover(machineInfo *cadvisorapi.MachineInfo) (*CPUTopology, error) {
	numPhysicalCores += len(socket.Cores)
	for _, core := range socket.Cores {
		if coreID, err = getUniqueCoreID(core.Threads); err != nil {
			glog.Errorf("could not get unique coreID for socket: %d core %d threads: %v",
			klog.Errorf("could not get unique coreID for socket: %d core %d threads: %v",
				socket.Id, core.Id, core.Threads)
			return nil, err
		}
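To close, a hedged sketch of calling Discover with a hand-built cadvisor MachineInfo. The topology (one socket, two hyperthreaded cores) is invented, and the cadvisor v1 field names and CPUTopology result fields are recalled from this release rather than quoted from the diff:

    package main

    import (
    	"fmt"

    	cadvisorapi "github.com/google/cadvisor/info/v1"
    	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
    )

    func main() {
    	// One socket, two physical cores, two hyperthreads each (threads 0-3).
    	machineInfo := &cadvisorapi.MachineInfo{
    		NumCores: 4,
    		Topology: []cadvisorapi.Node{
    			{Id: 0, Cores: []cadvisorapi.Core{
    				{Id: 0, Threads: []int{0, 2}},
    				{Id: 1, Threads: []int{1, 3}},
    			}},
    		},
    	}

    	topo, err := topology.Discover(machineInfo)
    	if err != nil {
    		fmt.Println("discovery failed:", err) // e.g. duplicate thread IDs across cores
    		return
    	}
    	fmt.Printf("CPUs=%d cores=%d sockets=%d\n", topo.NumCPUs, topo.NumCores, topo.NumSockets)
    }

getUniqueCoreID is the helper that trips the error path shown above when a core's thread list is empty or its threads collide with another core's.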