Fresh dep ensure
154
vendor/k8s.io/kubernetes/pkg/kubelet/cm/BUILD
generated
vendored
@ -3,121 +3,65 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"cgroup_manager_linux.go",
|
||||
"cgroup_manager_unsupported.go",
|
||||
"container_manager.go",
|
||||
"container_manager_linux.go",
|
||||
"container_manager_stub.go",
|
||||
"container_manager_unsupported.go",
|
||||
"container_manager_windows.go",
|
||||
"fake_internal_container_lifecycle.go",
|
||||
"helpers.go",
|
||||
"helpers_linux.go",
|
||||
"helpers_unsupported.go",
|
||||
"internal_container_lifecycle.go",
|
||||
"node_container_manager_linux.go",
|
||||
"pod_container_manager_linux.go",
|
||||
"pod_container_manager_stub.go",
|
||||
"pod_container_manager_unsupported.go",
|
||||
"qos_container_manager_linux.go",
|
||||
"types.go",
|
||||
] + select({
|
||||
"@io_bazel_rules_go//go/platform:android": [
|
||||
"cgroup_manager_unsupported.go",
|
||||
"container_manager_unsupported.go",
|
||||
"helpers_unsupported.go",
|
||||
"pod_container_manager_unsupported.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:darwin": [
|
||||
"cgroup_manager_unsupported.go",
|
||||
"container_manager_unsupported.go",
|
||||
"helpers_unsupported.go",
|
||||
"pod_container_manager_unsupported.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:dragonfly": [
|
||||
"cgroup_manager_unsupported.go",
|
||||
"container_manager_unsupported.go",
|
||||
"helpers_unsupported.go",
|
||||
"pod_container_manager_unsupported.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:freebsd": [
|
||||
"cgroup_manager_unsupported.go",
|
||||
"container_manager_unsupported.go",
|
||||
"helpers_unsupported.go",
|
||||
"pod_container_manager_unsupported.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:linux": [
|
||||
"cgroup_manager_linux.go",
|
||||
"container_manager_linux.go",
|
||||
"helpers_linux.go",
|
||||
"node_container_manager.go",
|
||||
"pod_container_manager_linux.go",
|
||||
"qos_container_manager_linux.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:nacl": [
|
||||
"cgroup_manager_unsupported.go",
|
||||
"container_manager_unsupported.go",
|
||||
"helpers_unsupported.go",
|
||||
"pod_container_manager_unsupported.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:netbsd": [
|
||||
"cgroup_manager_unsupported.go",
|
||||
"container_manager_unsupported.go",
|
||||
"helpers_unsupported.go",
|
||||
"pod_container_manager_unsupported.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:openbsd": [
|
||||
"cgroup_manager_unsupported.go",
|
||||
"container_manager_unsupported.go",
|
||||
"helpers_unsupported.go",
|
||||
"pod_container_manager_unsupported.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:plan9": [
|
||||
"cgroup_manager_unsupported.go",
|
||||
"container_manager_unsupported.go",
|
||||
"helpers_unsupported.go",
|
||||
"pod_container_manager_unsupported.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:solaris": [
|
||||
"cgroup_manager_unsupported.go",
|
||||
"container_manager_unsupported.go",
|
||||
"helpers_unsupported.go",
|
||||
"pod_container_manager_unsupported.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:windows": [
|
||||
"cgroup_manager_unsupported.go",
|
||||
"container_manager_windows.go",
|
||||
"helpers_unsupported.go",
|
||||
"pod_container_manager_unsupported.go",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/pkg/kubelet/cm",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//pkg/features:go_default_library",
|
||||
"//pkg/kubelet/apis/cri:go_default_library",
|
||||
"//pkg/kubelet/apis/podresources/v1alpha1:go_default_library",
|
||||
"//pkg/kubelet/cm/cpumanager:go_default_library",
|
||||
"//pkg/kubelet/config:go_default_library",
|
||||
"//pkg/kubelet/container:go_default_library",
|
||||
"//pkg/kubelet/eviction/api:go_default_library",
|
||||
"//pkg/kubelet/lifecycle:go_default_library",
|
||||
"//pkg/kubelet/status:go_default_library",
|
||||
"//pkg/kubelet/util/pluginwatcher:go_default_library",
|
||||
"//pkg/scheduler/cache:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
"//vendor/k8s.io/klog:go_default_library",
|
||||
] + select({
|
||||
"@io_bazel_rules_go//go/platform:android": [
|
||||
"//pkg/kubelet/cadvisor:go_default_library",
|
||||
"//pkg/util/mount:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/record:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:darwin": [
|
||||
"//pkg/kubelet/cadvisor:go_default_library",
|
||||
"//pkg/util/mount:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/record:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:dragonfly": [
|
||||
"//pkg/kubelet/cadvisor:go_default_library",
|
||||
"//pkg/util/mount:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/record:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:freebsd": [
|
||||
"//pkg/kubelet/cadvisor:go_default_library",
|
||||
"//pkg/util/mount:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/record:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:linux": [
|
||||
"//pkg/api/v1/resource:go_default_library",
|
||||
@ -135,45 +79,45 @@ go_library(
|
||||
"//pkg/util/oom:go_default_library",
|
||||
"//pkg/util/procfs:go_default_library",
|
||||
"//pkg/util/sysctl:go_default_library",
|
||||
"//pkg/util/version:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
|
||||
"//vendor/github.com/docker/go-units:go_default_library",
|
||||
"//vendor/github.com/opencontainers/runc/libcontainer/cgroups:go_default_library",
|
||||
"//vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs:go_default_library",
|
||||
"//vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd:go_default_library",
|
||||
"//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/record:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:nacl": [
|
||||
"//pkg/kubelet/cadvisor:go_default_library",
|
||||
"//pkg/util/mount:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/record:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:netbsd": [
|
||||
"//pkg/kubelet/cadvisor:go_default_library",
|
||||
"//pkg/util/mount:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/record:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:openbsd": [
|
||||
"//pkg/kubelet/cadvisor:go_default_library",
|
||||
"//pkg/util/mount:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/record:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:plan9": [
|
||||
"//pkg/kubelet/cadvisor:go_default_library",
|
||||
"//pkg/util/mount:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/record:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:solaris": [
|
||||
"//pkg/kubelet/cadvisor:go_default_library",
|
||||
"//pkg/util/mount:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/record:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:windows": [
|
||||
"//pkg/kubelet/cadvisor:go_default_library",
|
||||
"//pkg/util/mount:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/record:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
@ -181,27 +125,27 @@ go_library(
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = select({
|
||||
"@io_bazel_rules_go//go/platform:linux": [
|
||||
"cgroup_manager_linux_test.go",
|
||||
"cgroup_manager_test.go",
|
||||
"container_manager_linux_test.go",
|
||||
"helpers_linux_test.go",
|
||||
"node_container_manager_test.go",
|
||||
"pod_container_manager_linux_test.go",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
srcs = [
|
||||
"cgroup_manager_linux_test.go",
|
||||
"cgroup_manager_test.go",
|
||||
"container_manager_linux_test.go",
|
||||
"helpers_linux_test.go",
|
||||
"node_container_manager_linux_test.go",
|
||||
"pod_container_manager_linux_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = select({
|
||||
"@io_bazel_rules_go//go/platform:linux": [
|
||||
"//pkg/features:go_default_library",
|
||||
"//pkg/kubelet/eviction/api:go_default_library",
|
||||
"//pkg/util/mount:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
"//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/assert:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/require:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
|
31
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cgroup_manager_linux.go
generated
vendored
@ -25,11 +25,11 @@ import (
|
||||
"time"
|
||||
|
||||
units "github.com/docker/go-units"
|
||||
"github.com/golang/glog"
|
||||
libcontainercgroups "github.com/opencontainers/runc/libcontainer/cgroups"
|
||||
cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs"
|
||||
cgroupsystemd "github.com/opencontainers/runc/libcontainer/cgroups/systemd"
|
||||
libcontainerconfigs "github.com/opencontainers/runc/libcontainer/configs"
|
||||
"k8s.io/klog"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
@ -67,7 +67,10 @@ func NewCgroupName(base CgroupName, components ...string) CgroupName {
			panic(fmt.Errorf("invalid character in component [%q] of CgroupName", component))
		}
	}
	return CgroupName(append(base, components...))
	// copy data from the base cgroup to eliminate cases where CgroupNames share underlying slices. See #68416
	baseCopy := make([]string, len(base))
	copy(baseCopy, base)
	return CgroupName(append(baseCopy, components...))
}
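For context, a minimal standalone sketch of the slice-aliasing bug that the baseCopy above eliminates (the CgroupName type is mirrored from the kubelet; the values are illustrative and not from this diff):

package main

import "fmt"

// CgroupName mirrors the kubelet type: a cgroup path split into components.
type CgroupName []string

// newCgroupNameUnsafe reproduces the pre-fix behaviour of NewCgroupName:
// appending straight onto base, so two results can share one backing array.
func newCgroupNameUnsafe(base CgroupName, components ...string) CgroupName {
	return CgroupName(append(base, components...))
}

func main() {
	base := make(CgroupName, 1, 3) // spare capacity makes the aliasing visible
	base[0] = "kubepods"

	a := newCgroupNameUnsafe(base, "burstable")
	b := newCgroupNameUnsafe(base, "besteffort") // reuses base's array and overwrites a's last component

	fmt.Println(a, b) // both print [kubepods besteffort]
}

Copying base into a fresh slice, as the patched NewCgroupName does above, gives every derived name its own backing array, which is what the TestNewCgroupName case added in cgroup_manager_linux_test.go below verifies.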
|
||||
func escapeSystemdCgroupName(part string) string {
|
||||
@ -204,18 +207,16 @@ func NewCgroupManager(cs *CgroupSubsystems, cgroupDriver string) CgroupManager {
|
||||
func (m *cgroupManagerImpl) Name(name CgroupName) string {
|
||||
if m.adapter.cgroupManagerType == libcontainerSystemd {
|
||||
return name.ToSystemd()
|
||||
} else {
|
||||
return name.ToCgroupfs()
|
||||
}
|
||||
return name.ToCgroupfs()
|
||||
}
|
||||
|
||||
// CgroupName converts the literal cgroupfs name on the host to an internal identifier.
|
||||
func (m *cgroupManagerImpl) CgroupName(name string) CgroupName {
|
||||
if m.adapter.cgroupManagerType == libcontainerSystemd {
|
||||
return ParseSystemdToCgroupName(name)
|
||||
} else {
|
||||
return ParseCgroupfsToCgroupName(name)
|
||||
}
|
||||
return ParseCgroupfsToCgroupName(name)
|
||||
}
|
||||
|
||||
// buildCgroupPaths builds a path to each cgroup subsystem for the specified name.
|
||||
@ -257,6 +258,7 @@ func (m *cgroupManagerImpl) Exists(name CgroupName) bool {
|
||||
// once resolved, we can remove this code.
|
||||
whitelistControllers := sets.NewString("cpu", "cpuacct", "cpuset", "memory", "systemd")
|
||||
|
||||
var missingPaths []string
|
||||
// If even one cgroup path doesn't exist, then the cgroup doesn't exist.
|
||||
for controller, path := range cgroupPaths {
|
||||
// ignore mounts we don't care about
|
||||
@ -264,10 +266,15 @@ func (m *cgroupManagerImpl) Exists(name CgroupName) bool {
|
||||
continue
|
||||
}
|
||||
if !libcontainercgroups.PathExists(path) {
|
||||
return false
|
||||
missingPaths = append(missingPaths, path)
|
||||
}
|
||||
}
|
||||
|
||||
if len(missingPaths) > 0 {
|
||||
klog.V(4).Infof("The Cgroup %v has some missing paths: %v", name, missingPaths)
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
@ -343,7 +350,7 @@ func setSupportedSubsystems(cgroupConfig *libcontainerconfigs.Cgroup) error {
|
||||
return fmt.Errorf("Failed to find subsystem mount for required subsystem: %v", sys.Name())
|
||||
}
|
||||
// the cgroup is not mounted, but its not required so continue...
|
||||
glog.V(6).Infof("Unable to find subsystem mount for optional subsystem: %v", sys.Name())
|
||||
klog.V(6).Infof("Unable to find subsystem mount for optional subsystem: %v", sys.Name())
|
||||
continue
|
||||
}
|
||||
if err := sys.Set(cgroupConfig.Paths[sys.Name()], cgroupConfig); err != nil {
|
||||
@ -505,7 +512,7 @@ func (m *cgroupManagerImpl) Pids(name CgroupName) []int {
|
||||
// WalkFunc which is called for each file and directory in the pod cgroup dir
|
||||
visitor := func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
glog.V(4).Infof("cgroup manager encountered error scanning cgroup path %q: %v", path, err)
|
||||
klog.V(4).Infof("cgroup manager encountered error scanning cgroup path %q: %v", path, err)
|
||||
return filepath.SkipDir
|
||||
}
|
||||
if !info.IsDir() {
|
||||
@ -513,7 +520,7 @@ func (m *cgroupManagerImpl) Pids(name CgroupName) []int {
|
||||
}
|
||||
pids, err = getCgroupProcs(path)
|
||||
if err != nil {
|
||||
glog.V(4).Infof("cgroup manager encountered error getting procs for cgroup path %q: %v", path, err)
|
||||
klog.V(4).Infof("cgroup manager encountered error getting procs for cgroup path %q: %v", path, err)
|
||||
return filepath.SkipDir
|
||||
}
|
||||
pidsToKill.Insert(pids...)
|
||||
@ -523,7 +530,7 @@ func (m *cgroupManagerImpl) Pids(name CgroupName) []int {
|
||||
// container cgroups haven't been GCed yet. Get attached processes to
|
||||
// all such unwanted containers under the pod cgroup
|
||||
if err = filepath.Walk(dir, visitor); err != nil {
|
||||
glog.V(4).Infof("cgroup manager encountered error scanning pids for directory: %q: %v", dir, err)
|
||||
klog.V(4).Infof("cgroup manager encountered error scanning pids for directory: %q: %v", dir, err)
|
||||
}
|
||||
}
|
||||
return pidsToKill.List()
|
||||
@ -551,7 +558,7 @@ func getStatsSupportedSubsystems(cgroupPaths map[string]string) (*libcontainercg
|
||||
return nil, fmt.Errorf("Failed to find subsystem mount for required subsystem: %v", sys.Name())
|
||||
}
|
||||
// the cgroup is not mounted, but its not required so continue...
|
||||
glog.V(6).Infof("Unable to find subsystem mount for optional subsystem: %v", sys.Name())
|
||||
klog.V(6).Infof("Unable to find subsystem mount for optional subsystem: %v", sys.Name())
|
||||
continue
|
||||
}
|
||||
if err := sys.GetStats(cgroupPaths[sys.Name()], stats); err != nil {
|
||||
|
25
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cgroup_manager_linux_test.go
generated
vendored
@ -20,9 +20,34 @@ package cm

import (
	"path"
	"reflect"
	"testing"
)

// TestNewCgroupName tests confirms that #68416 is fixed
func TestNewCgroupName(t *testing.T) {
	a := ParseCgroupfsToCgroupName("/a/")
	ab := NewCgroupName(a, "b")

	expectedAB := CgroupName([]string{"a", "", "b"})
	if !reflect.DeepEqual(ab, expectedAB) {
		t.Errorf("Expected %d%+v; got %d%+v", len(expectedAB), expectedAB, len(ab), ab)
	}

	abc := NewCgroupName(ab, "c")

	expectedABC := CgroupName([]string{"a", "", "b", "c"})
	if !reflect.DeepEqual(abc, expectedABC) {
		t.Errorf("Expected %d%+v; got %d%+v", len(expectedABC), expectedABC, len(abc), abc)
	}

	_ = NewCgroupName(ab, "d")

	if !reflect.DeepEqual(abc, expectedABC) {
		t.Errorf("Expected %d%+v; got %d%+v", len(expectedABC), expectedABC, len(abc), abc)
	}
}

func TestCgroupNameToSystemdBasename(t *testing.T) {
	testCases := []struct {
		input CgroupName
11
vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager.go
generated
vendored
@ -23,11 +23,13 @@ import (
|
||||
// TODO: Migrate kubelet to either use its own internal objects or client library.
|
||||
"k8s.io/api/core/v1"
|
||||
internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri"
|
||||
podresourcesapi "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1"
|
||||
"k8s.io/kubernetes/pkg/kubelet/config"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
|
||||
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
|
||||
"k8s.io/kubernetes/pkg/kubelet/status"
|
||||
"k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher"
|
||||
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
|
||||
|
||||
"fmt"
|
||||
@ -94,6 +96,14 @@ type ContainerManager interface {

	// GetPodCgroupRoot returns the cgroup which contains all pods.
	GetPodCgroupRoot() string

	// GetPluginRegistrationHandler returns a plugin registration handler
	// The pluginwatcher's Handlers allow to have a single module for handling
	// registration.
	GetPluginRegistrationHandler() pluginwatcher.PluginHandler

	// GetDevices returns information about the devices assigned to pods and containers
	GetDevices(podUID, containerName string) []*podresourcesapi.ContainerDevices
}
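A hedged sketch of how a caller such as the pod resources API might consume the new GetDevices method; the helper, pod UID, and container name are hypothetical, the ContainerDevices field names are assumed to match the podresources v1alpha1 API, and the snippet presumes package cm context with an fmt import.

// listAssignedDevices is a hypothetical helper, not part of this diff.
func listAssignedDevices(cm ContainerManager) {
	for _, dev := range cm.GetDevices("pod-uid-1234", "app-container") {
		// Each entry is assumed to carry the resource name and the device IDs
		// assigned to that container.
		fmt.Printf("resource=%s devices=%v\n", dev.ResourceName, dev.DeviceIds)
	}
}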
|
||||
type NodeConfig struct {
|
||||
@ -112,6 +122,7 @@ type NodeConfig struct {
|
||||
ExperimentalCPUManagerReconcilePeriod time.Duration
|
||||
ExperimentalPodPidsLimit int64
|
||||
EnforceCPULimits bool
|
||||
CPUCFSQuotaPeriod time.Duration
|
||||
}
|
||||
|
||||
type NodeAllocatableConfig struct {
|
||||
|
102
vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux.go
generated
vendored
@ -29,20 +29,22 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups"
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups/fs"
|
||||
"github.com/opencontainers/runc/libcontainer/configs"
|
||||
"k8s.io/klog"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
utilversion "k8s.io/apimachinery/pkg/util/version"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/client-go/tools/record"
|
||||
kubefeatures "k8s.io/kubernetes/pkg/features"
|
||||
internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri"
|
||||
podresourcesapi "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
|
||||
@ -52,13 +54,13 @@ import (
|
||||
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
|
||||
"k8s.io/kubernetes/pkg/kubelet/qos"
|
||||
"k8s.io/kubernetes/pkg/kubelet/status"
|
||||
"k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher"
|
||||
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
|
||||
utilfile "k8s.io/kubernetes/pkg/util/file"
|
||||
"k8s.io/kubernetes/pkg/util/mount"
|
||||
"k8s.io/kubernetes/pkg/util/oom"
|
||||
"k8s.io/kubernetes/pkg/util/procfs"
|
||||
utilsysctl "k8s.io/kubernetes/pkg/util/sysctl"
|
||||
utilversion "k8s.io/kubernetes/pkg/util/version"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -179,11 +181,11 @@ func validateSystemRequirements(mountUtil mount.Interface) (features, error) {
|
||||
// CPU cgroup is required and so it expected to be mounted at this point.
|
||||
periodExists, err := utilfile.FileExists(path.Join(cpuMountPoint, "cpu.cfs_period_us"))
|
||||
if err != nil {
|
||||
glog.Errorf("failed to detect if CPU cgroup cpu.cfs_period_us is available - %v", err)
|
||||
klog.Errorf("failed to detect if CPU cgroup cpu.cfs_period_us is available - %v", err)
|
||||
}
|
||||
quotaExists, err := utilfile.FileExists(path.Join(cpuMountPoint, "cpu.cfs_quota_us"))
|
||||
if err != nil {
|
||||
glog.Errorf("failed to detect if CPU cgroup cpu.cfs_quota_us is available - %v", err)
|
||||
klog.Errorf("failed to detect if CPU cgroup cpu.cfs_quota_us is available - %v", err)
|
||||
}
|
||||
if quotaExists && periodExists {
|
||||
f.cpuHardcapping = true
|
||||
@ -200,19 +202,22 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I
		return nil, fmt.Errorf("failed to get mounted cgroup subsystems: %v", err)
	}

	// Check whether swap is enabled. The Kubelet does not support running with swap enabled.
	swapData, err := ioutil.ReadFile("/proc/swaps")
	if err != nil {
		return nil, err
	}
	swapData = bytes.TrimSpace(swapData) // extra trailing \n
	swapLines := strings.Split(string(swapData), "\n")
	if failSwapOn {
		// Check whether swap is enabled. The Kubelet does not support running with swap enabled.
		swapData, err := ioutil.ReadFile("/proc/swaps")
		if err != nil {
			return nil, err
		}
		swapData = bytes.TrimSpace(swapData) // extra trailing \n
		swapLines := strings.Split(string(swapData), "\n")

	// If there is more than one line (table headers) in /proc/swaps, swap is enabled and we should
	// error out unless --fail-swap-on is set to false.
	if failSwapOn && len(swapLines) > 1 {
		return nil, fmt.Errorf("Running with swap on is not supported, please disable swap! or set --fail-swap-on flag to false. /proc/swaps contained: %v", swapLines)
		// If there is more than one line (table headers) in /proc/swaps, swap is enabled and we should
		// error out unless --fail-swap-on is set to false.
		if len(swapLines) > 1 {
			return nil, fmt.Errorf("Running with swap on is not supported, please disable swap! or set --fail-swap-on flag to false. /proc/swaps contained: %v", swapLines)
		}
	}
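As a standalone illustration of the check above (a sketch, not code from this commit): /proc/swaps always contains a header line, so anything beyond one line means at least one active swap device.

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"strings"
)

// swapEnabled mirrors the logic now gated by failSwapOn above.
func swapEnabled() (bool, error) {
	data, err := ioutil.ReadFile("/proc/swaps")
	if err != nil {
		return false, err
	}
	lines := strings.Split(string(bytes.TrimSpace(data)), "\n") // drop the trailing newline first
	return len(lines) > 1, nil
}

func main() {
	on, err := swapEnabled()
	if err != nil {
		fmt.Println("could not read /proc/swaps:", err)
		return
	}
	fmt.Println("swap enabled:", on)
}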
|
||||
var capacity = v1.ResourceList{}
|
||||
// It is safe to invoke `MachineInfo` on cAdvisor before logically initializing cAdvisor here because
|
||||
// machine info is computed and cached once as part of cAdvisor object creation.
|
||||
@ -240,12 +245,12 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I
|
||||
if !cgroupManager.Exists(cgroupRoot) {
|
||||
return nil, fmt.Errorf("invalid configuration: cgroup-root %q doesn't exist: %v", cgroupRoot, err)
|
||||
}
|
||||
glog.Infof("container manager verified user specified cgroup-root exists: %v", cgroupRoot)
|
||||
klog.Infof("container manager verified user specified cgroup-root exists: %v", cgroupRoot)
|
||||
// Include the top level cgroup for enforcing node allocatable into cgroup-root.
|
||||
// This way, all sub modules can avoid having to understand the concept of node allocatable.
|
||||
cgroupRoot = NewCgroupName(cgroupRoot, defaultNodeAllocatableCgroupName)
|
||||
}
|
||||
glog.Infof("Creating Container Manager object based on Node Config: %+v", nodeConfig)
|
||||
klog.Infof("Creating Container Manager object based on Node Config: %+v", nodeConfig)
|
||||
|
||||
qosContainerManager, err := NewQOSContainerManager(subsystems, cgroupRoot, nodeConfig, cgroupManager)
|
||||
if err != nil {
|
||||
@ -264,7 +269,7 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I
|
||||
qosContainerManager: qosContainerManager,
|
||||
}
|
||||
|
||||
glog.Infof("Creating device plugin manager: %t", devicePluginEnabled)
|
||||
klog.Infof("Creating device plugin manager: %t", devicePluginEnabled)
|
||||
if devicePluginEnabled {
|
||||
cm.deviceManager, err = devicemanager.NewManagerImpl()
|
||||
} else {
|
||||
@ -284,7 +289,7 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I
|
||||
nodeConfig.KubeletRootDir,
|
||||
)
|
||||
if err != nil {
|
||||
glog.Errorf("failed to initialize cpu manager: %v", err)
|
||||
klog.Errorf("failed to initialize cpu manager: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
@ -303,6 +308,7 @@ func (cm *containerManagerImpl) NewPodContainerManager() PodContainerManager {
|
||||
cgroupManager: cm.cgroupManager,
|
||||
podPidsLimit: cm.ExperimentalPodPidsLimit,
|
||||
enforceCPULimits: cm.EnforceCPULimits,
|
||||
cpuCFSQuotaPeriod: uint64(cm.CPUCFSQuotaPeriod / time.Microsecond),
|
||||
}
|
||||
}
|
||||
return &podContainerManagerNoop{
|
||||
@ -365,9 +371,9 @@ func setupKernelTunables(option KernelTunableBehavior) error {
|
||||
case KernelTunableError:
|
||||
errList = append(errList, fmt.Errorf("Invalid kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val))
|
||||
case KernelTunableWarn:
|
||||
glog.V(2).Infof("Invalid kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val)
|
||||
klog.V(2).Infof("Invalid kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val)
|
||||
case KernelTunableModify:
|
||||
glog.V(2).Infof("Updating kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val)
|
||||
klog.V(2).Infof("Updating kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val)
|
||||
err = sysctl.SetSysctl(flag, expectedValue)
|
||||
if err != nil {
|
||||
errList = append(errList, err)
|
||||
@ -419,13 +425,13 @@ func (cm *containerManagerImpl) setupNode(activePods ActivePodsFunc) error {
|
||||
// the cgroup for docker and serve stats for the runtime.
|
||||
// TODO(#27097): Fix this after NodeSpec is clearly defined.
|
||||
cm.periodicTasks = append(cm.periodicTasks, func() {
|
||||
glog.V(4).Infof("[ContainerManager]: Adding periodic tasks for docker CRI integration")
|
||||
klog.V(4).Infof("[ContainerManager]: Adding periodic tasks for docker CRI integration")
|
||||
cont, err := getContainerNameForProcess(dockerProcessName, dockerPidFile)
|
||||
if err != nil {
|
||||
glog.Error(err)
|
||||
klog.Error(err)
|
||||
return
|
||||
}
|
||||
glog.V(2).Infof("[ContainerManager]: Discovered runtime cgroups name: %s", cont)
|
||||
klog.V(2).Infof("[ContainerManager]: Discovered runtime cgroups name: %s", cont)
|
||||
cm.Lock()
|
||||
defer cm.Unlock()
|
||||
cm.RuntimeCgroupsName = cont
|
||||
@ -462,12 +468,12 @@ func (cm *containerManagerImpl) setupNode(activePods ActivePodsFunc) error {
|
||||
} else {
|
||||
cm.periodicTasks = append(cm.periodicTasks, func() {
|
||||
if err := ensureProcessInContainerWithOOMScore(os.Getpid(), qos.KubeletOOMScoreAdj, nil); err != nil {
|
||||
glog.Error(err)
|
||||
klog.Error(err)
|
||||
return
|
||||
}
|
||||
cont, err := getContainer(os.Getpid())
|
||||
if err != nil {
|
||||
glog.Errorf("failed to find cgroups of kubelet - %v", err)
|
||||
klog.Errorf("failed to find cgroups of kubelet - %v", err)
|
||||
return
|
||||
}
|
||||
cm.Lock()
|
||||
@ -540,12 +546,14 @@ func (cm *containerManagerImpl) Start(node *v1.Node,
|
||||
// allocatable of the node
|
||||
cm.nodeInfo = node
|
||||
|
||||
rootfs, err := cm.cadvisorInterface.RootFsInfo()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get rootfs info: %v", err)
|
||||
}
|
||||
for rName, rCap := range cadvisor.EphemeralStorageCapacityFromFsInfo(rootfs) {
|
||||
cm.capacity[rName] = rCap
|
||||
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.LocalStorageCapacityIsolation) {
|
||||
rootfs, err := cm.cadvisorInterface.RootFsInfo()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get rootfs info: %v", err)
|
||||
}
|
||||
for rName, rCap := range cadvisor.EphemeralStorageCapacityFromFsInfo(rootfs) {
|
||||
cm.capacity[rName] = rCap
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that node allocatable configuration is valid.
|
||||
@ -572,7 +580,7 @@ func (cm *containerManagerImpl) Start(node *v1.Node,
|
||||
for _, cont := range cm.systemContainers {
|
||||
if cont.ensureStateFunc != nil {
|
||||
if err := cont.ensureStateFunc(cont.manager); err != nil {
|
||||
glog.Warningf("[ContainerManager] Failed to ensure state of %q: %v", cont.name, err)
|
||||
klog.Warningf("[ContainerManager] Failed to ensure state of %q: %v", cont.name, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -598,6 +606,10 @@ func (cm *containerManagerImpl) Start(node *v1.Node,
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cm *containerManagerImpl) GetPluginRegistrationHandler() pluginwatcher.PluginHandler {
|
||||
return cm.deviceManager.GetWatcherHandler()
|
||||
}
|
||||
|
||||
// TODO: move the GetResources logic to PodContainerManager.
|
||||
func (cm *containerManagerImpl) GetResources(pod *v1.Pod, container *v1.Container) (*kubecontainer.RunContainerOptions, error) {
|
||||
opts := &kubecontainer.RunContainerOptions{}
|
||||
@ -641,12 +653,12 @@ func isProcessRunningInHost(pid int) (bool, error) {
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to find pid namespace of init process")
|
||||
}
|
||||
glog.V(10).Infof("init pid ns is %q", initPidNs)
|
||||
klog.V(10).Infof("init pid ns is %q", initPidNs)
|
||||
processPidNs, err := os.Readlink(fmt.Sprintf("/proc/%d/ns/pid", pid))
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to find pid namespace of process %q", pid)
|
||||
}
|
||||
glog.V(10).Infof("Pid %d pid ns is %q", pid, processPidNs)
|
||||
klog.V(10).Infof("Pid %d pid ns is %q", pid, processPidNs)
|
||||
return initPidNs == processPidNs, nil
|
||||
}
|
||||
|
||||
@ -688,7 +700,7 @@ func getPidsForProcess(name, pidFile string) ([]int, error) {
|
||||
|
||||
// Return error from getPidFromPidFile since that should have worked
|
||||
// and is the real source of the problem.
|
||||
glog.V(4).Infof("unable to get pid from %s: %v", pidFile, err)
|
||||
klog.V(4).Infof("unable to get pid from %s: %v", pidFile, err)
|
||||
return []int{}, err
|
||||
}
|
||||
|
||||
@ -726,7 +738,7 @@ func ensureProcessInContainerWithOOMScore(pid int, oomScoreAdj int, manager *fs.
|
||||
return err
|
||||
} else if !runningInHost {
|
||||
// Process is running inside a container. Don't touch that.
|
||||
glog.V(2).Infof("pid %d is not running in the host namespaces", pid)
|
||||
klog.V(2).Infof("pid %d is not running in the host namespaces", pid)
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -747,9 +759,9 @@ func ensureProcessInContainerWithOOMScore(pid int, oomScoreAdj int, manager *fs.
|
||||
|
||||
// Also apply oom-score-adj to processes
|
||||
oomAdjuster := oom.NewOOMAdjuster()
|
||||
glog.V(5).Infof("attempting to apply oom_score_adj of %d to pid %d", oomScoreAdj, pid)
|
||||
klog.V(5).Infof("attempting to apply oom_score_adj of %d to pid %d", oomScoreAdj, pid)
|
||||
if err := oomAdjuster.ApplyOOMScoreAdj(pid, oomScoreAdj); err != nil {
|
||||
glog.V(3).Infof("Failed to apply oom_score_adj %d for pid %d: %v", oomScoreAdj, pid, err)
|
||||
klog.V(3).Infof("Failed to apply oom_score_adj %d for pid %d: %v", oomScoreAdj, pid, err)
|
||||
errs = append(errs, fmt.Errorf("failed to apply oom score %d to PID %d: %v", oomScoreAdj, pid, err))
|
||||
}
|
||||
return utilerrors.NewAggregate(errs)
|
||||
@ -789,10 +801,10 @@ func getContainer(pid int) (string, error) {
|
||||
// in addition, you would not get memory or cpu accounting for the runtime unless accounting was enabled on its unit (or globally).
|
||||
if systemd, found := cgs["name=systemd"]; found {
|
||||
if systemd != cpu {
|
||||
glog.Warningf("CPUAccounting not enabled for pid: %d", pid)
|
||||
klog.Warningf("CPUAccounting not enabled for pid: %d", pid)
|
||||
}
|
||||
if systemd != memory {
|
||||
glog.Warningf("MemoryAccounting not enabled for pid: %d", pid)
|
||||
klog.Warningf("MemoryAccounting not enabled for pid: %d", pid)
|
||||
}
|
||||
return systemd, nil
|
||||
}
|
||||
@ -830,14 +842,14 @@ func ensureSystemCgroups(rootCgroupPath string, manager *fs.Manager) error {
|
||||
|
||||
pids = append(pids, pid)
|
||||
}
|
||||
glog.Infof("Found %d PIDs in root, %d of them are not to be moved", len(allPids), len(allPids)-len(pids))
|
||||
klog.Infof("Found %d PIDs in root, %d of them are not to be moved", len(allPids), len(allPids)-len(pids))
|
||||
|
||||
// Check if we have moved all the non-kernel PIDs.
|
||||
if len(pids) == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
glog.Infof("Moving non-kernel processes: %v", pids)
|
||||
klog.Infof("Moving non-kernel processes: %v", pids)
|
||||
for _, pid := range pids {
|
||||
err := manager.Apply(pid)
|
||||
if err != nil {
|
||||
@ -867,3 +879,7 @@ func (cm *containerManagerImpl) GetCapacity() v1.ResourceList {
|
||||
func (cm *containerManagerImpl) GetDevicePluginResourceCapacity() (v1.ResourceList, v1.ResourceList, []string) {
|
||||
return cm.deviceManager.GetCapacity()
|
||||
}
|
||||
|
||||
func (cm *containerManagerImpl) GetDevices(podUID, containerName string) []*podresourcesapi.ContainerDevices {
|
||||
return cm.deviceManager.GetDevices(podUID, containerName)
|
||||
}
|
||||
|
110
vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux_test.go
generated
vendored
@ -19,8 +19,6 @@ limitations under the License.
|
||||
package cm
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
@ -32,101 +30,9 @@ import (
|
||||
"k8s.io/kubernetes/pkg/util/mount"
|
||||
)
|
||||
|
||||
type fakeMountInterface struct {
|
||||
mountPoints []mount.MountPoint
|
||||
}
|
||||
|
||||
func (mi *fakeMountInterface) Mount(source string, target string, fstype string, options []string) error {
|
||||
return fmt.Errorf("unsupported")
|
||||
}
|
||||
|
||||
func (mi *fakeMountInterface) Unmount(target string) error {
|
||||
return fmt.Errorf("unsupported")
|
||||
}
|
||||
|
||||
func (mi *fakeMountInterface) List() ([]mount.MountPoint, error) {
|
||||
return mi.mountPoints, nil
|
||||
}
|
||||
|
||||
func (mi *fakeMountInterface) IsMountPointMatch(mp mount.MountPoint, dir string) bool {
|
||||
return (mp.Path == dir)
|
||||
}
|
||||
|
||||
func (mi *fakeMountInterface) IsNotMountPoint(dir string) (bool, error) {
|
||||
return false, fmt.Errorf("unsupported")
|
||||
}
|
||||
|
||||
func (mi *fakeMountInterface) IsLikelyNotMountPoint(file string) (bool, error) {
|
||||
return false, fmt.Errorf("unsupported")
|
||||
}
|
||||
func (mi *fakeMountInterface) GetDeviceNameFromMount(mountPath, pluginDir string) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (mi *fakeMountInterface) DeviceOpened(pathname string) (bool, error) {
|
||||
for _, mp := range mi.mountPoints {
|
||||
if mp.Device == pathname {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (mi *fakeMountInterface) PathIsDevice(pathname string) (bool, error) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (mi *fakeMountInterface) MakeRShared(path string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mi *fakeMountInterface) GetFileType(pathname string) (mount.FileType, error) {
|
||||
return mount.FileType("fake"), nil
|
||||
}
|
||||
|
||||
func (mi *fakeMountInterface) MakeDir(pathname string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mi *fakeMountInterface) MakeFile(pathname string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mi *fakeMountInterface) ExistsPath(pathname string) (bool, error) {
|
||||
return true, errors.New("not implemented")
|
||||
}
|
||||
|
||||
func (mi *fakeMountInterface) PrepareSafeSubpath(subPath mount.Subpath) (newHostPath string, cleanupAction func(), err error) {
|
||||
return "", nil, nil
|
||||
}
|
||||
|
||||
func (mi *fakeMountInterface) CleanSubPaths(_, _ string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mi *fakeMountInterface) SafeMakeDir(_, _ string, _ os.FileMode) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mi *fakeMountInterface) GetMountRefs(pathname string) ([]string, error) {
|
||||
return nil, errors.New("not implemented")
|
||||
}
|
||||
|
||||
func (mi *fakeMountInterface) GetFSGroup(pathname string) (int64, error) {
|
||||
return -1, errors.New("not implemented")
|
||||
}
|
||||
|
||||
func (mi *fakeMountInterface) GetSELinuxSupport(pathname string) (bool, error) {
|
||||
return false, errors.New("not implemented")
|
||||
}
|
||||
|
||||
func (mi *fakeMountInterface) GetMode(pathname string) (os.FileMode, error) {
|
||||
return 0, errors.New("not implemented")
|
||||
}
|
||||
|
||||
func fakeContainerMgrMountInt() mount.Interface {
|
||||
return &fakeMountInterface{
|
||||
[]mount.MountPoint{
|
||||
return &mount.FakeMounter{
|
||||
MountPoints: []mount.MountPoint{
|
||||
{
|
||||
Device: "cgroup",
|
||||
Type: "cgroup",
|
||||
@ -158,8 +64,8 @@ func TestCgroupMountValidationSuccess(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCgroupMountValidationMemoryMissing(t *testing.T) {
|
||||
mountInt := &fakeMountInterface{
|
||||
[]mount.MountPoint{
|
||||
mountInt := &mount.FakeMounter{
|
||||
MountPoints: []mount.MountPoint{
|
||||
{
|
||||
Device: "cgroup",
|
||||
Type: "cgroup",
|
||||
@ -182,8 +88,8 @@ func TestCgroupMountValidationMemoryMissing(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCgroupMountValidationMultipleSubsystem(t *testing.T) {
|
||||
mountInt := &fakeMountInterface{
|
||||
[]mount.MountPoint{
|
||||
mountInt := &mount.FakeMounter{
|
||||
MountPoints: []mount.MountPoint{
|
||||
{
|
||||
Device: "cgroup",
|
||||
Type: "cgroup",
|
||||
@ -212,8 +118,8 @@ func TestSoftRequirementsValidationSuccess(t *testing.T) {
|
||||
defer os.RemoveAll(tempDir)
|
||||
req.NoError(ioutil.WriteFile(path.Join(tempDir, "cpu.cfs_period_us"), []byte("0"), os.ModePerm))
|
||||
req.NoError(ioutil.WriteFile(path.Join(tempDir, "cpu.cfs_quota_us"), []byte("0"), os.ModePerm))
|
||||
mountInt := &fakeMountInterface{
|
||||
[]mount.MountPoint{
|
||||
mountInt := &mount.FakeMounter{
|
||||
MountPoints: []mount.MountPoint{
|
||||
{
|
||||
Device: "cgroup",
|
||||
Type: "cgroup",
|
||||
|
14
vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_stub.go
generated
vendored
@ -17,16 +17,18 @@ limitations under the License.
|
||||
package cm
|
||||
|
||||
import (
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/klog"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri"
|
||||
podresourcesapi "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
|
||||
"k8s.io/kubernetes/pkg/kubelet/config"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
|
||||
"k8s.io/kubernetes/pkg/kubelet/status"
|
||||
"k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher"
|
||||
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
|
||||
)
|
||||
|
||||
@ -35,7 +37,7 @@ type containerManagerStub struct{}
|
||||
var _ ContainerManager = &containerManagerStub{}
|
||||
|
||||
func (cm *containerManagerStub) Start(_ *v1.Node, _ ActivePodsFunc, _ config.SourcesReady, _ status.PodStatusProvider, _ internalapi.RuntimeService) error {
|
||||
glog.V(2).Infof("Starting stub container manager")
|
||||
klog.V(2).Infof("Starting stub container manager")
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -76,6 +78,10 @@ func (cm *containerManagerStub) GetCapacity() v1.ResourceList {
|
||||
return c
|
||||
}
|
||||
|
||||
func (cm *containerManagerStub) GetPluginRegistrationHandler() pluginwatcher.PluginHandler {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cm *containerManagerStub) GetDevicePluginResourceCapacity() (v1.ResourceList, v1.ResourceList, []string) {
|
||||
return nil, nil, []string{}
|
||||
}
|
||||
@ -100,6 +106,10 @@ func (cm *containerManagerStub) GetPodCgroupRoot() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (cm *containerManagerStub) GetDevices(_, _ string) []*podresourcesapi.ContainerDevices {
|
||||
return nil
|
||||
}
|
||||
|
||||
func NewStubContainerManager() ContainerManager {
|
||||
return &containerManagerStub{}
|
||||
}
|
||||
|
139
vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_windows.go
generated
vendored
@ -16,31 +16,158 @@ See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// containerManagerImpl implements container manager on Windows.
|
||||
// Only GetNodeAllocatableReservation() and GetCapacity() are implemented now.
|
||||
|
||||
package cm
|
||||
|
||||
import (
|
||||
"github.com/golang/glog"
|
||||
"fmt"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/klog"
|
||||
kubefeatures "k8s.io/kubernetes/pkg/features"
|
||||
internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri"
|
||||
podresourcesapi "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
|
||||
"k8s.io/kubernetes/pkg/kubelet/config"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
|
||||
"k8s.io/kubernetes/pkg/kubelet/status"
|
||||
"k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher"
|
||||
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
|
||||
"k8s.io/kubernetes/pkg/util/mount"
|
||||
)
|
||||
|
||||
type containerManagerImpl struct {
|
||||
containerManagerStub
|
||||
// Capacity of this node.
|
||||
capacity v1.ResourceList
|
||||
// Interface for cadvisor.
|
||||
cadvisorInterface cadvisor.Interface
|
||||
// Config of this node.
|
||||
nodeConfig NodeConfig
|
||||
}
|
||||
|
||||
var _ ContainerManager = &containerManagerImpl{}
|
||||
func (cm *containerManagerImpl) Start(node *v1.Node,
|
||||
activePods ActivePodsFunc,
|
||||
sourcesReady config.SourcesReady,
|
||||
podStatusProvider status.PodStatusProvider,
|
||||
runtimeService internalapi.RuntimeService) error {
|
||||
klog.V(2).Infof("Starting Windows container manager")
|
||||
|
||||
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.LocalStorageCapacityIsolation) {
|
||||
rootfs, err := cm.cadvisorInterface.RootFsInfo()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get rootfs info: %v", err)
|
||||
}
|
||||
for rName, rCap := range cadvisor.EphemeralStorageCapacityFromFsInfo(rootfs) {
|
||||
cm.capacity[rName] = rCap
|
||||
}
|
||||
}
|
||||
|
||||
func (cm *containerManagerImpl) Start(_ *v1.Node, _ ActivePodsFunc, _ config.SourcesReady, _ status.PodStatusProvider, _ internalapi.RuntimeService) error {
|
||||
glog.V(2).Infof("Starting Windows stub container manager")
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewContainerManager creates windows container manager.
|
||||
func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.Interface, nodeConfig NodeConfig, failSwapOn bool, devicePluginEnabled bool, recorder record.EventRecorder) (ContainerManager, error) {
|
||||
return &containerManagerImpl{}, nil
|
||||
var capacity = v1.ResourceList{}
|
||||
// It is safe to invoke `MachineInfo` on cAdvisor before logically initializing cAdvisor here because
|
||||
// machine info is computed and cached once as part of cAdvisor object creation.
|
||||
// But `RootFsInfo` and `ImagesFsInfo` are not available at this moment so they will be called later during manager starts
|
||||
machineInfo, err := cadvisorInterface.MachineInfo()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
capacity = cadvisor.CapacityFromMachineInfo(machineInfo)
|
||||
|
||||
return &containerManagerImpl{
|
||||
capacity: capacity,
|
||||
nodeConfig: nodeConfig,
|
||||
cadvisorInterface: cadvisorInterface,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (cm *containerManagerImpl) SystemCgroupsLimit() v1.ResourceList {
|
||||
return v1.ResourceList{}
|
||||
}
|
||||
|
||||
func (cm *containerManagerImpl) GetNodeConfig() NodeConfig {
|
||||
return NodeConfig{}
|
||||
}
|
||||
|
||||
func (cm *containerManagerImpl) GetMountedSubsystems() *CgroupSubsystems {
|
||||
return &CgroupSubsystems{}
|
||||
}
|
||||
|
||||
func (cm *containerManagerImpl) GetQOSContainersInfo() QOSContainersInfo {
|
||||
return QOSContainersInfo{}
|
||||
}
|
||||
|
||||
func (cm *containerManagerImpl) UpdateQOSCgroups() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cm *containerManagerImpl) Status() Status {
|
||||
return Status{}
|
||||
}
|
||||
|
||||
func (cm *containerManagerImpl) GetNodeAllocatableReservation() v1.ResourceList {
	evictionReservation := hardEvictionReservation(cm.nodeConfig.HardEvictionThresholds, cm.capacity)
	result := make(v1.ResourceList)
	for k := range cm.capacity {
		value := resource.NewQuantity(0, resource.DecimalSI)
		if cm.nodeConfig.SystemReserved != nil {
			value.Add(cm.nodeConfig.SystemReserved[k])
		}
		if cm.nodeConfig.KubeReserved != nil {
			value.Add(cm.nodeConfig.KubeReserved[k])
		}
		if evictionReservation != nil {
			value.Add(evictionReservation[k])
		}
		if !value.IsZero() {
			result[k] = *value
		}
	}
	return result
}
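A small worked example of the per-resource reservation arithmetic above, using hypothetical values (500Mi system-reserved, 1Gi kube-reserved, 100Mi memory hard-eviction threshold); only the resource.Quantity API is assumed, and the numbers are not from this diff.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Reservation for memory: SystemReserved + KubeReserved + hard eviction threshold.
	reserved := resource.MustParse("500Mi") // SystemReserved[memory] (hypothetical)
	reserved.Add(resource.MustParse("1Gi"))   // KubeReserved[memory] (hypothetical)
	reserved.Add(resource.MustParse("100Mi")) // memory eviction threshold (hypothetical)

	// 500Mi + 1Gi + 100Mi = 1624Mi reserved; allocatable = capacity - this value.
	fmt.Println(reserved.String())
}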
|
||||
func (cm *containerManagerImpl) GetCapacity() v1.ResourceList {
|
||||
return cm.capacity
|
||||
}
|
||||
|
||||
func (cm *containerManagerImpl) GetPluginRegistrationHandler() pluginwatcher.PluginHandler {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cm *containerManagerImpl) GetDevicePluginResourceCapacity() (v1.ResourceList, v1.ResourceList, []string) {
|
||||
return nil, nil, []string{}
|
||||
}
|
||||
|
||||
func (cm *containerManagerImpl) NewPodContainerManager() PodContainerManager {
|
||||
return &podContainerManagerStub{}
|
||||
}
|
||||
|
||||
func (cm *containerManagerImpl) GetResources(pod *v1.Pod, container *v1.Container) (*kubecontainer.RunContainerOptions, error) {
|
||||
return &kubecontainer.RunContainerOptions{}, nil
|
||||
}
|
||||
|
||||
func (cm *containerManagerImpl) UpdatePluginResources(*schedulercache.NodeInfo, *lifecycle.PodAdmitAttributes) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cm *containerManagerImpl) InternalContainerLifecycle() InternalContainerLifecycle {
|
||||
return &internalContainerLifecycleImpl{cpumanager.NewFakeManager()}
|
||||
}
|
||||
|
||||
func (cm *containerManagerImpl) GetPodCgroupRoot() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (cm *containerManagerImpl) GetDevices(_, _ string) []*podresourcesapi.ContainerDevices {
|
||||
return nil
|
||||
}
|
||||
|
14
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/BUILD
generated
vendored
@ -20,10 +20,10 @@ go_library(
|
||||
"//pkg/kubelet/cm/cpuset:go_default_library",
|
||||
"//pkg/kubelet/container:go_default_library",
|
||||
"//pkg/kubelet/status:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/github.com/google/cadvisor/info/v1:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/klog:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@ -42,11 +42,11 @@ go_test(
|
||||
"//pkg/kubelet/cm/cpumanager/state:go_default_library",
|
||||
"//pkg/kubelet/cm/cpumanager/topology:go_default_library",
|
||||
"//pkg/kubelet/cm/cpuset:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/github.com/google/cadvisor/info/v1:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
|
8
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_assignment.go
generated
vendored
@ -20,7 +20,7 @@ import (
	"fmt"
	"sort"

	"github.com/golang/glog"
	"k8s.io/klog"

	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
@ -160,7 +160,7 @@ func takeByTopology(topo *topology.CPUTopology, availableCPUs cpuset.CPUSet, num
	// least a socket's-worth of CPUs.
	for _, s := range acc.freeSockets() {
		if acc.needs(acc.topo.CPUsPerSocket()) {
			glog.V(4).Infof("[cpumanager] takeByTopology: claiming socket [%d]", s)
			klog.V(4).Infof("[cpumanager] takeByTopology: claiming socket [%d]", s)
			acc.take(acc.details.CPUsInSocket(s))
			if acc.isSatisfied() {
				return acc.result, nil
@ -172,7 +172,7 @@ func takeByTopology(topo *topology.CPUTopology, availableCPUs cpuset.CPUSet, num
	// a core's-worth of CPUs.
	for _, c := range acc.freeCores() {
		if acc.needs(acc.topo.CPUsPerCore()) {
			glog.V(4).Infof("[cpumanager] takeByTopology: claiming core [%d]", c)
			klog.V(4).Infof("[cpumanager] takeByTopology: claiming core [%d]", c)
			acc.take(acc.details.CPUsInCore(c))
			if acc.isSatisfied() {
				return acc.result, nil
@ -184,7 +184,7 @@ func takeByTopology(topo *topology.CPUTopology, availableCPUs cpuset.CPUSet, num
	// on the same sockets as the whole cores we have already taken in this
	// allocation.
	for _, c := range acc.freeCPUs() {
		glog.V(4).Infof("[cpumanager] takeByTopology: claiming CPU [%d]", c)
		klog.V(4).Infof("[cpumanager] takeByTopology: claiming CPU [%d]", c)
		if acc.needs(1) {
			acc.take(cpuset.NewCPUSet(c))
		}
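To make the allocation order in takeByTopology above concrete, a simplified, self-contained sketch (not the vendored implementation): claim whole free sockets while a socket's worth of CPUs is still needed, then whole free cores, then single leftover CPUs. The topology below (2 sockets, 2 cores each) is made up.

package main

import "fmt"

// takeByTopologySketch greedily claims CPUs: whole sockets, then whole cores,
// then individual CPUs, skipping anything already claimed.
func takeByTopologySketch(need int, socketCPUs, coreCPUs [][]int) []int {
	claimed := map[int]bool{}
	var result []int

	free := func(group []int) []int {
		var f []int
		for _, cpu := range group {
			if !claimed[cpu] {
				f = append(f, cpu)
			}
		}
		return f
	}
	claim := func(group []int) {
		for _, cpu := range group {
			claimed[cpu] = true
			result = append(result, cpu)
			need--
		}
	}

	for _, s := range socketCPUs { // whole sockets while a socket's worth is still needed
		if f := free(s); len(f) > 0 && need >= len(f) {
			claim(f)
		}
	}
	for _, c := range coreCPUs { // then whole cores
		if f := free(c); len(f) > 0 && need >= len(f) {
			claim(f)
		}
	}
	for _, c := range coreCPUs { // finally single CPUs from partially free cores
		for _, cpu := range free(c) {
			if need > 0 {
				claim([]int{cpu})
			}
		}
	}
	return result
}

func main() {
	sockets := [][]int{{0, 1, 2, 3}, {4, 5, 6, 7}}
	cores := [][]int{{0, 1}, {2, 3}, {4, 5}, {6, 7}}
	fmt.Println(takeByTopologySketch(5, sockets, cores)) // [0 1 2 3 4]
}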
57
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_manager.go
generated
vendored
@ -22,10 +22,10 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
cadvisorapi "github.com/google/cadvisor/info/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/klog"
|
||||
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
|
||||
@ -33,7 +33,6 @@ import (
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
"k8s.io/kubernetes/pkg/kubelet/status"
|
||||
"path"
|
||||
)
|
||||
|
||||
// ActivePodsFunc is a function that returns a list of pods to reconcile.
|
||||
@ -45,8 +44,8 @@ type runtimeService interface {
|
||||
|
||||
type policyName string
|
||||
|
||||
// CPUManagerStateFileName is the name file name where cpu manager stores it's state
|
||||
const CPUManagerStateFileName = "cpu_manager_state"
|
||||
// cpuManagerStateFileName is the name file name where cpu manager stores it's state
|
||||
const cpuManagerStateFileName = "cpu_manager_state"
|
||||
|
||||
// Manager interface provides methods for Kubelet to manage pod cpus.
|
||||
type Manager interface {
|
||||
@ -98,7 +97,7 @@ type manager struct {
|
||||
var _ Manager = &manager{}
|
||||
|
||||
// NewManager creates new cpu manager based on provided policy
|
||||
func NewManager(cpuPolicyName string, reconcilePeriod time.Duration, machineInfo *cadvisorapi.MachineInfo, nodeAllocatableReservation v1.ResourceList, stateFileDirecory string) (Manager, error) {
|
||||
func NewManager(cpuPolicyName string, reconcilePeriod time.Duration, machineInfo *cadvisorapi.MachineInfo, nodeAllocatableReservation v1.ResourceList, stateFileDirectory string) (Manager, error) {
|
||||
var policy Policy
|
||||
|
||||
switch policyName(cpuPolicyName) {
|
||||
@ -111,7 +110,7 @@ func NewManager(cpuPolicyName string, reconcilePeriod time.Duration, machineInfo
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
glog.Infof("[cpumanager] detected CPU topology: %v", topo)
|
||||
klog.Infof("[cpumanager] detected CPU topology: %v", topo)
|
||||
reservedCPUs, ok := nodeAllocatableReservation[v1.ResourceCPU]
|
||||
if !ok {
|
||||
// The static policy cannot initialize without this information.
|
||||
@ -133,13 +132,14 @@ func NewManager(cpuPolicyName string, reconcilePeriod time.Duration, machineInfo
|
||||
policy = NewStaticPolicy(topo, numReservedCPUs)
|
||||
|
||||
default:
|
||||
glog.Errorf("[cpumanager] Unknown policy \"%s\", falling back to default policy \"%s\"", cpuPolicyName, PolicyNone)
|
||||
klog.Errorf("[cpumanager] Unknown policy \"%s\", falling back to default policy \"%s\"", cpuPolicyName, PolicyNone)
|
||||
policy = NewNonePolicy()
|
||||
}
|
||||
|
||||
stateImpl := state.NewFileState(
|
||||
path.Join(stateFileDirecory, CPUManagerStateFileName),
|
||||
policy.Name())
|
||||
stateImpl, err := state.NewCheckpointState(stateFileDirectory, cpuManagerStateFileName, policy.Name())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not initialize checkpoint manager: %v", err)
|
||||
}
|
||||
|
||||
manager := &manager{
|
||||
policy: policy,
|
||||
@ -152,8 +152,8 @@ func NewManager(cpuPolicyName string, reconcilePeriod time.Duration, machineInfo
|
||||
}
|
||||
|
||||
func (m *manager) Start(activePods ActivePodsFunc, podStatusProvider status.PodStatusProvider, containerRuntime runtimeService) {
|
||||
glog.Infof("[cpumanager] starting with %s policy", m.policy.Name())
|
||||
glog.Infof("[cpumanager] reconciling every %v", m.reconcilePeriod)
|
||||
klog.Infof("[cpumanager] starting with %s policy", m.policy.Name())
|
||||
klog.Infof("[cpumanager] reconciling every %v", m.reconcilePeriod)
|
||||
|
||||
m.activePods = activePods
|
||||
m.podStatusProvider = podStatusProvider
|
||||
@ -170,7 +170,7 @@ func (m *manager) AddContainer(p *v1.Pod, c *v1.Container, containerID string) e
|
||||
m.Lock()
|
||||
err := m.policy.AddContainer(m.state, p, c, containerID)
|
||||
if err != nil {
|
||||
glog.Errorf("[cpumanager] AddContainer error: %v", err)
|
||||
klog.Errorf("[cpumanager] AddContainer error: %v", err)
|
||||
m.Unlock()
|
||||
return err
|
||||
}
|
||||
@ -180,13 +180,17 @@ func (m *manager) AddContainer(p *v1.Pod, c *v1.Container, containerID string) e
|
||||
if !cpus.IsEmpty() {
|
||||
err = m.updateContainerCPUSet(containerID, cpus)
|
||||
if err != nil {
|
||||
glog.Errorf("[cpumanager] AddContainer error: %v", err)
|
||||
return err
|
||||
klog.Errorf("[cpumanager] AddContainer error: %v", err)
|
||||
m.Lock()
|
||||
err := m.policy.RemoveContainer(m.state, containerID)
|
||||
if err != nil {
|
||||
klog.Errorf("[cpumanager] AddContainer rollback state error: %v", err)
|
||||
}
|
||||
m.Unlock()
|
||||
}
|
||||
} else {
|
||||
glog.V(5).Infof("[cpumanager] update container resources is skipped due to cpu set is empty")
|
||||
return err
|
||||
}
|
||||
|
||||
klog.V(5).Infof("[cpumanager] update container resources is skipped due to cpu set is empty")
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -196,7 +200,7 @@ func (m *manager) RemoveContainer(containerID string) error {
|
||||
|
||||
err := m.policy.RemoveContainer(m.state, containerID)
|
||||
if err != nil {
|
||||
glog.Errorf("[cpumanager] RemoveContainer error: %v", err)
|
||||
klog.Errorf("[cpumanager] RemoveContainer error: %v", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
@ -222,14 +226,14 @@ func (m *manager) reconcileState() (success []reconciledContainer, failure []rec
|
||||
for _, container := range allContainers {
|
||||
status, ok := m.podStatusProvider.GetPodStatus(pod.UID)
|
||||
if !ok {
|
||||
glog.Warningf("[cpumanager] reconcileState: skipping pod; status not found (pod: %s, container: %s)", pod.Name, container.Name)
|
||||
klog.Warningf("[cpumanager] reconcileState: skipping pod; status not found (pod: %s, container: %s)", pod.Name, container.Name)
|
||||
failure = append(failure, reconciledContainer{pod.Name, container.Name, ""})
|
||||
break
|
||||
}
|
||||
|
||||
containerID, err := findContainerIDByName(&status, container.Name)
|
||||
if err != nil {
|
||||
glog.Warningf("[cpumanager] reconcileState: skipping container; ID not found in status (pod: %s, container: %s, error: %v)", pod.Name, container.Name, err)
|
||||
klog.Warningf("[cpumanager] reconcileState: skipping container; ID not found in status (pod: %s, container: %s, error: %v)", pod.Name, container.Name, err)
|
||||
failure = append(failure, reconciledContainer{pod.Name, container.Name, ""})
|
||||
continue
|
||||
}
|
||||
@ -240,11 +244,12 @@ func (m *manager) reconcileState() (success []reconciledContainer, failure []rec
|
||||
// - container has been removed from state by RemoveContainer call (DeletionTimestamp is set)
|
||||
if _, ok := m.state.GetCPUSet(containerID); !ok {
|
||||
if status.Phase == v1.PodRunning && pod.DeletionTimestamp == nil {
|
||||
glog.V(4).Infof("[cpumanager] reconcileState: container is not present in state - trying to add (pod: %s, container: %s, container id: %s)", pod.Name, container.Name, containerID)
|
||||
klog.V(4).Infof("[cpumanager] reconcileState: container is not present in state - trying to add (pod: %s, container: %s, container id: %s)", pod.Name, container.Name, containerID)
|
||||
err := m.AddContainer(pod, &container, containerID)
|
||||
if err != nil {
|
||||
glog.Errorf("[cpumanager] reconcileState: failed to add container (pod: %s, container: %s, container id: %s, error: %v)", pod.Name, container.Name, containerID, err)
|
||||
klog.Errorf("[cpumanager] reconcileState: failed to add container (pod: %s, container: %s, container id: %s, error: %v)", pod.Name, container.Name, containerID, err)
|
||||
failure = append(failure, reconciledContainer{pod.Name, container.Name, containerID})
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
// if DeletionTimestamp is set, pod has already been removed from state
|
||||
@ -256,15 +261,15 @@ func (m *manager) reconcileState() (success []reconciledContainer, failure []rec
|
||||
cset := m.state.GetCPUSetOrDefault(containerID)
|
||||
if cset.IsEmpty() {
|
||||
// NOTE: This should not happen outside of tests.
|
||||
glog.Infof("[cpumanager] reconcileState: skipping container; assigned cpuset is empty (pod: %s, container: %s)", pod.Name, container.Name)
|
||||
klog.Infof("[cpumanager] reconcileState: skipping container; assigned cpuset is empty (pod: %s, container: %s)", pod.Name, container.Name)
|
||||
failure = append(failure, reconciledContainer{pod.Name, container.Name, containerID})
|
||||
continue
|
||||
}
|
||||
|
||||
glog.V(4).Infof("[cpumanager] reconcileState: updating container (pod: %s, container: %s, container id: %s, cpuset: \"%v\")", pod.Name, container.Name, containerID, cset)
|
||||
klog.V(4).Infof("[cpumanager] reconcileState: updating container (pod: %s, container: %s, container id: %s, cpuset: \"%v\")", pod.Name, container.Name, containerID, cset)
|
||||
err = m.updateContainerCPUSet(containerID, cset)
|
||||
if err != nil {
|
||||
glog.Errorf("[cpumanager] reconcileState: failed to update container (pod: %s, container: %s, container id: %s, cpuset: \"%v\", error: %v)", pod.Name, container.Name, containerID, cset, err)
|
||||
klog.Errorf("[cpumanager] reconcileState: failed to update container (pod: %s, container: %s, container id: %s, cpuset: \"%v\", error: %v)", pod.Name, container.Name, containerID, cset, err)
|
||||
failure = append(failure, reconciledContainer{pod.Name, container.Name, containerID})
|
||||
continue
|
||||
}
|
||||
|
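For readers skimming this diff, the hunks above swap the CPU manager's file-backed state for the checkpoint-backed implementation (state.NewCheckpointState) and rename the stateFileDirecory parameter to stateFileDirectory. A minimal sketch of the new initialization call, assuming only the vendored packages shown above; the helper name newCPUManagerState is hypothetical and error handling is abbreviated:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
)

// newCPUManagerState sketches the new initialization path: the CPU manager now
// builds its state on top of the kubelet checkpoint manager instead of writing
// its own JSON state file directly.
func newCPUManagerState(stateFileDirectory, policyName string) (state.State, error) {
	// "cpu_manager_state" matches the cpuManagerStateFileName constant in the hunk above.
	st, err := state.NewCheckpointState(stateFileDirectory, "cpu_manager_state", policyName)
	if err != nil {
		return nil, fmt.Errorf("could not initialize checkpoint manager: %v", err)
	}
	return st, nil
}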
53
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_manager_test.go
generated
vendored
@ -23,16 +23,18 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
cadvisorapi "github.com/google/cadvisor/info/v1"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
cadvisorapi "github.com/google/cadvisor/info/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
|
||||
"os"
|
||||
)
|
||||
|
||||
type mockState struct {
|
||||
@ -139,40 +141,56 @@ func makePod(cpuRequest, cpuLimit string) *v1.Pod {
|
||||
}
|
||||
|
||||
func TestCPUManagerAdd(t *testing.T) {
|
||||
testPolicy := NewStaticPolicy(
|
||||
&topology.CPUTopology{
|
||||
NumCPUs: 4,
|
||||
NumSockets: 1,
|
||||
NumCores: 4,
|
||||
CPUDetails: map[int]topology.CPUInfo{
|
||||
0: {CoreID: 0, SocketID: 0},
|
||||
1: {CoreID: 1, SocketID: 0},
|
||||
2: {CoreID: 2, SocketID: 0},
|
||||
3: {CoreID: 3, SocketID: 0},
|
||||
},
|
||||
}, 0)
|
||||
testCases := []struct {
|
||||
description string
|
||||
regErr error
|
||||
updateErr error
|
||||
policy Policy
|
||||
expCPUSet cpuset.CPUSet
|
||||
expErr error
|
||||
}{
|
||||
{
|
||||
description: "cpu manager add - no error",
|
||||
regErr: nil,
|
||||
updateErr: nil,
|
||||
policy: testPolicy,
|
||||
expCPUSet: cpuset.NewCPUSet(3, 4),
|
||||
expErr: nil,
|
||||
},
|
||||
{
|
||||
description: "cpu manager add - policy add container error",
|
||||
regErr: fmt.Errorf("fake reg error"),
|
||||
updateErr: nil,
|
||||
expErr: fmt.Errorf("fake reg error"),
|
||||
policy: &mockPolicy{
|
||||
err: fmt.Errorf("fake reg error"),
|
||||
},
|
||||
expCPUSet: cpuset.NewCPUSet(1, 2, 3, 4),
|
||||
expErr: fmt.Errorf("fake reg error"),
|
||||
},
|
||||
{
|
||||
description: "cpu manager add - container update error",
|
||||
regErr: nil,
|
||||
updateErr: fmt.Errorf("fake update error"),
|
||||
expErr: nil,
|
||||
policy: testPolicy,
|
||||
expCPUSet: cpuset.NewCPUSet(1, 2, 3, 4),
|
||||
expErr: fmt.Errorf("fake update error"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
mgr := &manager{
|
||||
policy: &mockPolicy{
|
||||
err: testCase.regErr,
|
||||
},
|
||||
policy: testCase.policy,
|
||||
state: &mockState{
|
||||
assignments: state.ContainerCPUAssignments{},
|
||||
defaultCPUSet: cpuset.NewCPUSet(),
|
||||
defaultCPUSet: cpuset.NewCPUSet(1, 2, 3, 4),
|
||||
},
|
||||
containerRuntime: mockRuntimeService{
|
||||
err: testCase.updateErr,
|
||||
@ -181,13 +199,17 @@ func TestCPUManagerAdd(t *testing.T) {
|
||||
podStatusProvider: mockPodStatusProvider{},
|
||||
}
|
||||
|
||||
pod := makePod("1000", "1000")
|
||||
pod := makePod("2", "2")
|
||||
container := &pod.Spec.Containers[0]
|
||||
err := mgr.AddContainer(pod, container, "fakeID")
|
||||
if !reflect.DeepEqual(err, testCase.expErr) {
|
||||
t.Errorf("CPU Manager AddContainer() error (%v). expected error: %v but got: %v",
|
||||
testCase.description, testCase.expErr, err)
|
||||
}
|
||||
if !testCase.expCPUSet.Equals(mgr.state.GetDefaultCPUSet()) {
|
||||
t.Errorf("CPU Manager AddContainer() error (%v). expected cpuset: %v but got: %v",
|
||||
testCase.description, testCase.expCPUSet, mgr.state.GetDefaultCPUSet())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -199,7 +221,6 @@ func TestCPUManagerGenerate(t *testing.T) {
|
||||
isTopologyBroken bool
|
||||
expectedPolicy string
|
||||
expectedError error
|
||||
skipIfPermissionsError bool
|
||||
}{
|
||||
{
|
||||
description: "set none policy",
|
||||
@ -218,7 +239,6 @@ func TestCPUManagerGenerate(t *testing.T) {
|
||||
cpuPolicyName: "static",
|
||||
nodeAllocatableReservation: v1.ResourceList{v1.ResourceCPU: *resource.NewQuantity(3, resource.DecimalSI)},
|
||||
expectedPolicy: "static",
|
||||
skipIfPermissionsError: true,
|
||||
},
|
||||
{
|
||||
description: "static policy - broken topology",
|
||||
@ -226,21 +246,18 @@ func TestCPUManagerGenerate(t *testing.T) {
|
||||
nodeAllocatableReservation: v1.ResourceList{},
|
||||
isTopologyBroken: true,
|
||||
expectedError: fmt.Errorf("could not detect number of cpus"),
|
||||
skipIfPermissionsError: true,
|
||||
},
|
||||
{
|
||||
description: "static policy - broken reservation",
|
||||
cpuPolicyName: "static",
|
||||
nodeAllocatableReservation: v1.ResourceList{},
|
||||
expectedError: fmt.Errorf("unable to determine reserved CPU resources for static policy"),
|
||||
skipIfPermissionsError: true,
|
||||
},
|
||||
{
|
||||
description: "static policy - no CPU resources",
|
||||
cpuPolicyName: "static",
|
||||
nodeAllocatableReservation: v1.ResourceList{v1.ResourceCPU: *resource.NewQuantity(0, resource.DecimalSI)},
|
||||
expectedError: fmt.Errorf("the static policy requires systemreserved.cpu + kubereserved.cpu to be greater than zero"),
|
||||
skipIfPermissionsError: true,
|
||||
},
|
||||
}
|
||||
|
||||
|
10
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go
generated
vendored
@ -17,8 +17,8 @@ limitations under the License.
package cpumanager

import (
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
"k8s.io/kubernetes/pkg/kubelet/status"
)
@ -28,21 +28,21 @@ type fakeManager struct {
}

func (m *fakeManager) Start(activePods ActivePodsFunc, podStatusProvider status.PodStatusProvider, containerRuntime runtimeService) {
glog.Info("[fake cpumanager] Start()")
klog.Info("[fake cpumanager] Start()")
}

func (m *fakeManager) Policy() Policy {
glog.Info("[fake cpumanager] Policy()")
klog.Info("[fake cpumanager] Policy()")
return NewNonePolicy()
}

func (m *fakeManager) AddContainer(pod *v1.Pod, container *v1.Container, containerID string) error {
glog.Infof("[fake cpumanager] AddContainer (pod: %s, container: %s, container id: %s)", pod.Name, container.Name, containerID)
klog.Infof("[fake cpumanager] AddContainer (pod: %s, container: %s, container id: %s)", pod.Name, container.Name, containerID)
return nil
}

func (m *fakeManager) RemoveContainer(containerID string) error {
glog.Infof("[fake cpumanager] RemoveContainer (container id: %s)", containerID)
klog.Infof("[fake cpumanager] RemoveContainer (container id: %s)", containerID)
return nil
}
4
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_none.go
generated
vendored
@ -17,8 +17,8 @@ limitations under the License.
package cpumanager

import (
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
)

@ -39,7 +39,7 @@ func (p *nonePolicy) Name() string {
}

func (p *nonePolicy) Start(s state.State) {
glog.Info("[cpumanager] none policy: Start")
klog.Info("[cpumanager] none policy: Start")
}

func (p *nonePolicy) AddContainer(s state.State, pod *v1.Pod, container *v1.Container, containerID string) error {
39
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_static.go
generated
vendored
@ -19,8 +19,8 @@ package cpumanager
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/klog"
|
||||
v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
|
||||
@ -30,8 +30,6 @@ import (
|
||||
// PolicyStatic is the name of the static policy
|
||||
const PolicyStatic policyName = "static"
|
||||
|
||||
var _ Policy = &staticPolicy{}
|
||||
|
||||
// staticPolicy is a CPU manager policy that does not change CPU
|
||||
// assignments for exclusively pinned guaranteed containers after the main
|
||||
// container process starts.
|
||||
@ -96,7 +94,7 @@ func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int) Policy
|
||||
panic(fmt.Sprintf("[cpumanager] unable to reserve the required amount of CPUs (size of %s did not equal %d)", reserved, numReservedCPUs))
|
||||
}
|
||||
|
||||
glog.Infof("[cpumanager] reserved %d CPUs (\"%s\") not available for exclusive assignment", reserved.Size(), reserved)
|
||||
klog.Infof("[cpumanager] reserved %d CPUs (\"%s\") not available for exclusive assignment", reserved.Size(), reserved)
|
||||
|
||||
return &staticPolicy{
|
||||
topology: topology,
|
||||
@ -110,7 +108,7 @@ func (p *staticPolicy) Name() string {
|
||||
|
||||
func (p *staticPolicy) Start(s state.State) {
|
||||
if err := p.validateState(s); err != nil {
|
||||
glog.Errorf("[cpumanager] static policy invalid state: %s\n", err.Error())
|
||||
klog.Errorf("[cpumanager] static policy invalid state: %s\n", err.Error())
|
||||
panic("[cpumanager] - please drain node and remove policy state file")
|
||||
}
|
||||
}
|
||||
@ -131,7 +129,7 @@ func (p *staticPolicy) validateState(s state.State) error {
|
||||
}
|
||||
|
||||
// State has already been initialized from file (is not empty)
|
||||
// 1 Check if the reserved cpuset is not part of default cpuset because:
|
||||
// 1. Check if the reserved cpuset is not part of default cpuset because:
|
||||
// - kube/system reserved have changed (increased) - may lead to some containers not being able to start
|
||||
// - user tampered with file
|
||||
if !p.reserved.Intersection(tmpDefaultCPUset).Equals(p.reserved) {
|
||||
@ -147,6 +145,23 @@ func (p *staticPolicy) validateState(s state.State) error {
|
||||
cID, cset.String(), tmpDefaultCPUset.String())
|
||||
}
|
||||
}
|
||||
|
||||
// 3. It's possible that the set of available CPUs has changed since
|
||||
// the state was written. This can be due to for example
|
||||
// offlining a CPU when kubelet is not running. If this happens,
|
||||
// CPU manager will run into trouble when later it tries to
|
||||
// assign non-existent CPUs to containers. Validate that the
|
||||
// topology that was received during CPU manager startup matches with
|
||||
// the set of CPUs stored in the state.
|
||||
totalKnownCPUs := tmpDefaultCPUset.Clone()
|
||||
for _, cset := range tmpAssignments {
|
||||
totalKnownCPUs = totalKnownCPUs.Union(cset)
|
||||
}
|
||||
if !totalKnownCPUs.Equals(p.topology.CPUDetails.CPUs()) {
|
||||
return fmt.Errorf("current set of available CPUs \"%s\" doesn't match with CPUs in state \"%s\"",
|
||||
p.topology.CPUDetails.CPUs().String(), totalKnownCPUs.String())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -157,17 +172,17 @@ func (p *staticPolicy) assignableCPUs(s state.State) cpuset.CPUSet {
|
||||
|
||||
func (p *staticPolicy) AddContainer(s state.State, pod *v1.Pod, container *v1.Container, containerID string) error {
|
||||
if numCPUs := guaranteedCPUs(pod, container); numCPUs != 0 {
|
||||
glog.Infof("[cpumanager] static policy: AddContainer (pod: %s, container: %s, container id: %s)", pod.Name, container.Name, containerID)
|
||||
klog.Infof("[cpumanager] static policy: AddContainer (pod: %s, container: %s, container id: %s)", pod.Name, container.Name, containerID)
|
||||
// container belongs in an exclusively allocated pool
|
||||
|
||||
if _, ok := s.GetCPUSet(containerID); ok {
|
||||
glog.Infof("[cpumanager] static policy: container already present in state, skipping (container: %s, container id: %s)", container.Name, containerID)
|
||||
klog.Infof("[cpumanager] static policy: container already present in state, skipping (container: %s, container id: %s)", container.Name, containerID)
|
||||
return nil
|
||||
}
|
||||
|
||||
cpuset, err := p.allocateCPUs(s, numCPUs)
|
||||
if err != nil {
|
||||
glog.Errorf("[cpumanager] unable to allocate %d CPUs (container id: %s, error: %v)", numCPUs, containerID, err)
|
||||
klog.Errorf("[cpumanager] unable to allocate %d CPUs (container id: %s, error: %v)", numCPUs, containerID, err)
|
||||
return err
|
||||
}
|
||||
s.SetCPUSet(containerID, cpuset)
|
||||
@ -177,7 +192,7 @@ func (p *staticPolicy) AddContainer(s state.State, pod *v1.Pod, container *v1.Co
|
||||
}
|
||||
|
||||
func (p *staticPolicy) RemoveContainer(s state.State, containerID string) error {
|
||||
glog.Infof("[cpumanager] static policy: RemoveContainer (container id: %s)", containerID)
|
||||
klog.Infof("[cpumanager] static policy: RemoveContainer (container id: %s)", containerID)
|
||||
if toRelease, ok := s.GetCPUSet(containerID); ok {
|
||||
s.Delete(containerID)
|
||||
// Mutate the shared pool, adding released cpus.
|
||||
@ -187,7 +202,7 @@ func (p *staticPolicy) RemoveContainer(s state.State, containerID string) error
|
||||
}
|
||||
|
||||
func (p *staticPolicy) allocateCPUs(s state.State, numCPUs int) (cpuset.CPUSet, error) {
|
||||
glog.Infof("[cpumanager] allocateCpus: (numCPUs: %d)", numCPUs)
|
||||
klog.Infof("[cpumanager] allocateCpus: (numCPUs: %d)", numCPUs)
|
||||
result, err := takeByTopology(p.topology, p.assignableCPUs(s), numCPUs)
|
||||
if err != nil {
|
||||
return cpuset.NewCPUSet(), err
|
||||
@ -195,7 +210,7 @@ func (p *staticPolicy) allocateCPUs(s state.State, numCPUs int) (cpuset.CPUSet,
|
||||
// Remove allocated CPUs from the shared CPUSet.
|
||||
s.SetDefaultCPUSet(s.GetDefaultCPUSet().Difference(result))
|
||||
|
||||
glog.Infof("[cpumanager] allocateCPUs: returning \"%v\"", result)
|
||||
klog.Infof("[cpumanager] allocateCPUs: returning \"%v\"", result)
|
||||
return result, nil
|
||||
}
|
||||
|
||||
|
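The third validation step added to validateState above unions the default cpuset with every container assignment and requires the result to match the CPUs discovered in the topology. A small standalone illustration of that set arithmetic with the cpuset package; the CPU IDs are made up:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
)

func main() {
	// CPUs reported by cadvisor at startup (hypothetical 4-CPU machine).
	topoCPUs := cpuset.NewCPUSet(0, 1, 2, 3)

	// CPUs recorded in the state: shared pool plus one exclusive container.
	defaultSet := cpuset.NewCPUSet(0, 1)
	assigned := cpuset.NewCPUSet(2, 3)

	totalKnown := defaultSet.Union(assigned)
	if !totalKnown.Equals(topoCPUs) {
		fmt.Printf("state/topology mismatch: state has %q, topology has %q\n",
			totalKnown.String(), topoCPUs.String())
		return
	}
	fmt.Println("state matches topology")
}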
20
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_static_test.go
generated
vendored
@ -87,6 +87,26 @@ func TestStaticPolicyStart(t *testing.T) {
stDefaultCPUSet: cpuset.NewCPUSet(2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
expPanic: true,
},
{
description: "core 12 is not present in topology but is in state cpuset",
topo: topoDualSocketHT,
stAssignments: state.ContainerCPUAssignments{
"0": cpuset.NewCPUSet(0, 1, 2),
"1": cpuset.NewCPUSet(3, 4),
},
stDefaultCPUSet: cpuset.NewCPUSet(5, 6, 7, 8, 9, 10, 11, 12),
expPanic: true,
},
{
description: "core 11 is present in topology but is not in state cpuset",
topo: topoDualSocketHT,
stAssignments: state.ContainerCPUAssignments{
"0": cpuset.NewCPUSet(0, 1, 2),
"1": cpuset.NewCPUSet(3, 4),
},
stDefaultCPUSet: cpuset.NewCPUSet(5, 6, 7, 8, 9, 10),
expPanic: true,
},
}
for _, testCase := range testCases {
t.Run(testCase.description, func(t *testing.T) {
24
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/BUILD
generated
vendored
@ -3,23 +3,36 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"checkpoint.go",
"state.go",
"state_checkpoint.go",
"state_file.go",
"state_mem.go",
],
importpath = "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state",
visibility = ["//visibility:public"],
deps = [
"//pkg/kubelet/checkpointmanager:go_default_library",
"//pkg/kubelet/checkpointmanager/checksum:go_default_library",
"//pkg/kubelet/checkpointmanager/errors:go_default_library",
"//pkg/kubelet/cm/cpuset:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

go_test(
name = "go_default_test",
srcs = ["state_file_test.go"],
srcs = [
"state_checkpoint_test.go",
"state_compatibility_test.go",
"state_file_test.go",
],
embed = [":go_default_library"],
deps = ["//pkg/kubelet/cm/cpuset:go_default_library"],
deps = [
"//pkg/kubelet/checkpointmanager:go_default_library",
"//pkg/kubelet/cm/cpumanager/state/testing:go_default_library",
"//pkg/kubelet/cm/cpuset:go_default_library",
],
)

filegroup(
@ -31,7 +44,10 @@ filegroup(

filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
srcs = [
":package-srcs",
"//pkg/kubelet/cm/cpumanager/state/testing:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
67
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/checkpoint.go
generated
vendored
Normal file
@ -0,0 +1,67 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package state
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
|
||||
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager/checksum"
|
||||
)
|
||||
|
||||
var _ checkpointmanager.Checkpoint = &CPUManagerCheckpoint{}
|
||||
|
||||
// CPUManagerCheckpoint struct is used to store cpu/pod assignments in a checkpoint
|
||||
type CPUManagerCheckpoint struct {
|
||||
PolicyName string `json:"policyName"`
|
||||
DefaultCPUSet string `json:"defaultCpuSet"`
|
||||
Entries map[string]string `json:"entries,omitempty"`
|
||||
Checksum checksum.Checksum `json:"checksum"`
|
||||
}
|
||||
|
||||
// NewCPUManagerCheckpoint returns an instance of Checkpoint
|
||||
func NewCPUManagerCheckpoint() *CPUManagerCheckpoint {
|
||||
return &CPUManagerCheckpoint{
|
||||
Entries: make(map[string]string),
|
||||
}
|
||||
}
|
||||
|
||||
// MarshalCheckpoint returns marshalled checkpoint
|
||||
func (cp *CPUManagerCheckpoint) MarshalCheckpoint() ([]byte, error) {
|
||||
// make sure checksum wasn't set before so it doesn't affect output checksum
|
||||
cp.Checksum = 0
|
||||
cp.Checksum = checksum.New(cp)
|
||||
return json.Marshal(*cp)
|
||||
}
|
||||
|
||||
// UnmarshalCheckpoint tries to unmarshal passed bytes to checkpoint
|
||||
func (cp *CPUManagerCheckpoint) UnmarshalCheckpoint(blob []byte) error {
|
||||
return json.Unmarshal(blob, cp)
|
||||
}
|
||||
|
||||
// VerifyChecksum verifies that current checksum of checkpoint is valid
|
||||
func (cp *CPUManagerCheckpoint) VerifyChecksum() error {
|
||||
if cp.Checksum == 0 {
|
||||
// accept empty checksum for compatibility with old file backend
|
||||
return nil
|
||||
}
|
||||
ck := cp.Checksum
|
||||
cp.Checksum = 0
|
||||
err := ck.Verify(cp)
|
||||
cp.Checksum = ck
|
||||
return err
|
||||
}
|
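checkpoint.go above defines the on-disk checkpoint format. A short sketch of the marshal/verify round trip it supports, assuming only the exported names in the new file; the values are illustrative:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
)

func main() {
	// Build a checkpoint, marshal it (which stamps the checksum), then
	// unmarshal and verify -- the same round trip the state backend relies on.
	cp := state.NewCPUManagerCheckpoint()
	cp.PolicyName = "none"
	cp.DefaultCPUSet = "0-3"
	cp.Entries["container1"] = "2-3"

	blob, err := cp.MarshalCheckpoint()
	if err != nil {
		fmt.Println("marshal failed:", err)
		return
	}

	restored := state.NewCPUManagerCheckpoint()
	if err := restored.UnmarshalCheckpoint(blob); err != nil {
		fmt.Println("unmarshal failed:", err)
		return
	}
	if err := restored.VerifyChecksum(); err != nil {
		fmt.Println("checkpoint is corrupted:", err)
		return
	}
	fmt.Println("restored policy:", restored.PolicyName)
}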
194
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_checkpoint.go
generated
vendored
Normal file
@ -0,0 +1,194 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package state
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path"
|
||||
"sync"
|
||||
|
||||
"k8s.io/klog"
|
||||
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
|
||||
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
|
||||
)
|
||||
|
||||
var _ State = &stateCheckpoint{}
|
||||
|
||||
type stateCheckpoint struct {
|
||||
mux sync.RWMutex
|
||||
policyName string
|
||||
cache State
|
||||
checkpointManager checkpointmanager.CheckpointManager
|
||||
checkpointName string
|
||||
}
|
||||
|
||||
// NewCheckpointState creates new State for keeping track of cpu/pod assignment with checkpoint backend
|
||||
func NewCheckpointState(stateDir, checkpointName, policyName string) (State, error) {
|
||||
checkpointManager, err := checkpointmanager.NewCheckpointManager(stateDir)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize checkpoint manager: %v", err)
|
||||
}
|
||||
stateCheckpoint := &stateCheckpoint{
|
||||
cache: NewMemoryState(),
|
||||
policyName: policyName,
|
||||
checkpointManager: checkpointManager,
|
||||
checkpointName: checkpointName,
|
||||
}
|
||||
|
||||
if err := stateCheckpoint.restoreState(); err != nil {
|
||||
return nil, fmt.Errorf("could not restore state from checkpoint: %v\n"+
|
||||
"Please drain this node and delete the CPU manager checkpoint file %q before restarting Kubelet.",
|
||||
err, path.Join(stateDir, checkpointName))
|
||||
}
|
||||
|
||||
return stateCheckpoint, nil
|
||||
}
|
||||
|
||||
// restores state from a checkpoint and creates it if it doesn't exist
|
||||
func (sc *stateCheckpoint) restoreState() error {
|
||||
sc.mux.Lock()
|
||||
defer sc.mux.Unlock()
|
||||
var err error
|
||||
|
||||
// used when all parsing is ok
|
||||
tmpAssignments := make(ContainerCPUAssignments)
|
||||
tmpDefaultCPUSet := cpuset.NewCPUSet()
|
||||
tmpContainerCPUSet := cpuset.NewCPUSet()
|
||||
|
||||
checkpoint := NewCPUManagerCheckpoint()
|
||||
if err = sc.checkpointManager.GetCheckpoint(sc.checkpointName, checkpoint); err != nil {
|
||||
if err == errors.ErrCheckpointNotFound {
|
||||
sc.storeState()
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
if sc.policyName != checkpoint.PolicyName {
|
||||
return fmt.Errorf("configured policy %q differs from state checkpoint policy %q", sc.policyName, checkpoint.PolicyName)
|
||||
}
|
||||
|
||||
if tmpDefaultCPUSet, err = cpuset.Parse(checkpoint.DefaultCPUSet); err != nil {
|
||||
return fmt.Errorf("could not parse default cpu set %q: %v", checkpoint.DefaultCPUSet, err)
|
||||
}
|
||||
|
||||
for containerID, cpuString := range checkpoint.Entries {
|
||||
if tmpContainerCPUSet, err = cpuset.Parse(cpuString); err != nil {
|
||||
return fmt.Errorf("could not parse cpuset %q for container id %q: %v", cpuString, containerID, err)
|
||||
}
|
||||
tmpAssignments[containerID] = tmpContainerCPUSet
|
||||
}
|
||||
|
||||
sc.cache.SetDefaultCPUSet(tmpDefaultCPUSet)
|
||||
sc.cache.SetCPUAssignments(tmpAssignments)
|
||||
|
||||
klog.V(2).Info("[cpumanager] state checkpoint: restored state from checkpoint")
|
||||
klog.V(2).Infof("[cpumanager] state checkpoint: defaultCPUSet: %s", tmpDefaultCPUSet.String())
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// saves state to a checkpoint, caller is responsible for locking
|
||||
func (sc *stateCheckpoint) storeState() {
|
||||
checkpoint := NewCPUManagerCheckpoint()
|
||||
checkpoint.PolicyName = sc.policyName
|
||||
checkpoint.DefaultCPUSet = sc.cache.GetDefaultCPUSet().String()
|
||||
|
||||
for containerID, cset := range sc.cache.GetCPUAssignments() {
|
||||
checkpoint.Entries[containerID] = cset.String()
|
||||
}
|
||||
|
||||
err := sc.checkpointManager.CreateCheckpoint(sc.checkpointName, checkpoint)
|
||||
|
||||
if err != nil {
|
||||
panic("[cpumanager] could not save checkpoint: " + err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// GetCPUSet returns current CPU set
|
||||
func (sc *stateCheckpoint) GetCPUSet(containerID string) (cpuset.CPUSet, bool) {
|
||||
sc.mux.RLock()
|
||||
defer sc.mux.RUnlock()
|
||||
|
||||
res, ok := sc.cache.GetCPUSet(containerID)
|
||||
return res, ok
|
||||
}
|
||||
|
||||
// GetDefaultCPUSet returns default CPU set
|
||||
func (sc *stateCheckpoint) GetDefaultCPUSet() cpuset.CPUSet {
|
||||
sc.mux.RLock()
|
||||
defer sc.mux.RUnlock()
|
||||
|
||||
return sc.cache.GetDefaultCPUSet()
|
||||
}
|
||||
|
||||
// GetCPUSetOrDefault returns current CPU set, or default one if it wasn't changed
|
||||
func (sc *stateCheckpoint) GetCPUSetOrDefault(containerID string) cpuset.CPUSet {
|
||||
sc.mux.RLock()
|
||||
defer sc.mux.RUnlock()
|
||||
|
||||
return sc.cache.GetCPUSetOrDefault(containerID)
|
||||
}
|
||||
|
||||
// GetCPUAssignments returns current CPU to pod assignments
|
||||
func (sc *stateCheckpoint) GetCPUAssignments() ContainerCPUAssignments {
|
||||
sc.mux.RLock()
|
||||
defer sc.mux.RUnlock()
|
||||
|
||||
return sc.cache.GetCPUAssignments()
|
||||
}
|
||||
|
||||
// SetCPUSet sets CPU set
|
||||
func (sc *stateCheckpoint) SetCPUSet(containerID string, cset cpuset.CPUSet) {
|
||||
sc.mux.Lock()
|
||||
defer sc.mux.Unlock()
|
||||
sc.cache.SetCPUSet(containerID, cset)
|
||||
sc.storeState()
|
||||
}
|
||||
|
||||
// SetDefaultCPUSet sets default CPU set
|
||||
func (sc *stateCheckpoint) SetDefaultCPUSet(cset cpuset.CPUSet) {
|
||||
sc.mux.Lock()
|
||||
defer sc.mux.Unlock()
|
||||
sc.cache.SetDefaultCPUSet(cset)
|
||||
sc.storeState()
|
||||
}
|
||||
|
||||
// SetCPUAssignments sets CPU to pod assignments
|
||||
func (sc *stateCheckpoint) SetCPUAssignments(a ContainerCPUAssignments) {
|
||||
sc.mux.Lock()
|
||||
defer sc.mux.Unlock()
|
||||
sc.cache.SetCPUAssignments(a)
|
||||
sc.storeState()
|
||||
}
|
||||
|
||||
// Delete deletes assignment for specified pod
|
||||
func (sc *stateCheckpoint) Delete(containerID string) {
|
||||
sc.mux.Lock()
|
||||
defer sc.mux.Unlock()
|
||||
sc.cache.Delete(containerID)
|
||||
sc.storeState()
|
||||
}
|
||||
|
||||
// ClearState clears the state and saves it in a checkpoint
|
||||
func (sc *stateCheckpoint) ClearState() {
|
||||
sc.mux.Lock()
|
||||
defer sc.mux.Unlock()
|
||||
sc.cache.ClearState()
|
||||
sc.storeState()
|
||||
}
|
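state_checkpoint.go above writes every mutation straight through to a checkpoint via storeState. A hedged usage sketch, assuming the exported NewCheckpointState constructor and the State methods shown in the diff; the directory and container ID are placeholders:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
)

func main() {
	// Hypothetical directory; the kubelet passes its own state directory here.
	st, err := state.NewCheckpointState("/var/lib/kubelet", "cpu_manager_state", "static")
	if err != nil {
		fmt.Println("could not restore state:", err)
		return
	}

	// Each mutation below is persisted to the checkpoint immediately.
	st.SetDefaultCPUSet(cpuset.NewCPUSet(0, 1, 2, 3))
	st.SetCPUSet("container-id-123", cpuset.NewCPUSet(2, 3))

	if cpus, ok := st.GetCPUSet("container-id-123"); ok {
		fmt.Println("pinned cpuset:", cpus.String())
	}
}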
326
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_checkpoint_test.go
generated
vendored
Normal file
@ -0,0 +1,326 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package state
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
|
||||
testutil "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/testing"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
|
||||
)
|
||||
|
||||
const testingCheckpoint = "cpumanager_checkpoint_test"
|
||||
|
||||
var testingDir = os.TempDir()
|
||||
|
||||
func TestCheckpointStateRestore(t *testing.T) {
|
||||
testCases := []struct {
|
||||
description string
|
||||
checkpointContent string
|
||||
policyName string
|
||||
expectedError string
|
||||
expectedState *stateMemory
|
||||
}{
|
||||
{
|
||||
"Restore non-existing checkpoint",
|
||||
"",
|
||||
"none",
|
||||
"",
|
||||
&stateMemory{},
|
||||
},
|
||||
{
|
||||
"Restore default cpu set",
|
||||
`{
|
||||
"policyName": "none",
|
||||
"defaultCPUSet": "4-6",
|
||||
"entries": {},
|
||||
"checksum": 2912033808
|
||||
}`,
|
||||
"none",
|
||||
"",
|
||||
&stateMemory{
|
||||
defaultCPUSet: cpuset.NewCPUSet(4, 5, 6),
|
||||
},
|
||||
},
|
||||
{
|
||||
"Restore valid checkpoint",
|
||||
`{
|
||||
"policyName": "none",
|
||||
"defaultCPUSet": "1-3",
|
||||
"entries": {
|
||||
"container1": "4-6",
|
||||
"container2": "1-3"
|
||||
},
|
||||
"checksum": 1535905563
|
||||
}`,
|
||||
"none",
|
||||
"",
|
||||
&stateMemory{
|
||||
assignments: ContainerCPUAssignments{
|
||||
"container1": cpuset.NewCPUSet(4, 5, 6),
|
||||
"container2": cpuset.NewCPUSet(1, 2, 3),
|
||||
},
|
||||
defaultCPUSet: cpuset.NewCPUSet(1, 2, 3),
|
||||
},
|
||||
},
|
||||
{
|
||||
"Restore checkpoint with invalid checksum",
|
||||
`{
|
||||
"policyName": "none",
|
||||
"defaultCPUSet": "4-6",
|
||||
"entries": {},
|
||||
"checksum": 1337
|
||||
}`,
|
||||
"none",
|
||||
"checkpoint is corrupted",
|
||||
&stateMemory{},
|
||||
},
|
||||
{
|
||||
"Restore checkpoint with invalid JSON",
|
||||
`{`,
|
||||
"none",
|
||||
"unexpected end of JSON input",
|
||||
&stateMemory{},
|
||||
},
|
||||
{
|
||||
"Restore checkpoint with invalid policy name",
|
||||
`{
|
||||
"policyName": "other",
|
||||
"defaultCPUSet": "1-3",
|
||||
"entries": {},
|
||||
"checksum": 4195836012
|
||||
}`,
|
||||
"none",
|
||||
`configured policy "none" differs from state checkpoint policy "other"`,
|
||||
&stateMemory{},
|
||||
},
|
||||
{
|
||||
"Restore checkpoint with unparsable default cpu set",
|
||||
`{
|
||||
"policyName": "none",
|
||||
"defaultCPUSet": "1.3",
|
||||
"entries": {},
|
||||
"checksum": 1025273327
|
||||
}`,
|
||||
"none",
|
||||
`could not parse default cpu set "1.3": strconv.Atoi: parsing "1.3": invalid syntax`,
|
||||
&stateMemory{},
|
||||
},
|
||||
{
|
||||
"Restore checkpoint with unparsable assignment entry",
|
||||
`{
|
||||
"policyName": "none",
|
||||
"defaultCPUSet": "1-3",
|
||||
"entries": {
|
||||
"container1": "4-6",
|
||||
"container2": "asd"
|
||||
},
|
||||
"checksum": 2764213924
|
||||
}`,
|
||||
"none",
|
||||
`could not parse cpuset "asd" for container id "container2": strconv.Atoi: parsing "asd": invalid syntax`,
|
||||
&stateMemory{},
|
||||
},
|
||||
}
|
||||
|
||||
// create checkpoint manager for testing
|
||||
cpm, err := checkpointmanager.NewCheckpointManager(testingDir)
|
||||
if err != nil {
|
||||
t.Fatalf("could not create testing checkpoint manager: %v", err)
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
// ensure there is no previous checkpoint
|
||||
cpm.RemoveCheckpoint(testingCheckpoint)
|
||||
|
||||
// prepare checkpoint for testing
|
||||
if strings.TrimSpace(tc.checkpointContent) != "" {
|
||||
checkpoint := &testutil.MockCheckpoint{Content: tc.checkpointContent}
|
||||
if err := cpm.CreateCheckpoint(testingCheckpoint, checkpoint); err != nil {
|
||||
t.Fatalf("could not create testing checkpoint: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
restoredState, err := NewCheckpointState(testingDir, testingCheckpoint, tc.policyName)
|
||||
if err != nil {
|
||||
if strings.TrimSpace(tc.expectedError) != "" {
|
||||
tc.expectedError = "could not restore state from checkpoint: " + tc.expectedError
|
||||
if strings.HasPrefix(err.Error(), tc.expectedError) {
|
||||
t.Logf("got expected error: %v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
t.Fatalf("unexpected error while creatng checkpointState: %v", err)
|
||||
}
|
||||
|
||||
// compare state after restoration with the one expected
|
||||
AssertStateEqual(t, restoredState, tc.expectedState)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckpointStateStore(t *testing.T) {
|
||||
testCases := []struct {
|
||||
description string
|
||||
expectedState *stateMemory
|
||||
}{
|
||||
{
|
||||
"Store default cpu set",
|
||||
&stateMemory{defaultCPUSet: cpuset.NewCPUSet(1, 2, 3)},
|
||||
},
|
||||
{
|
||||
"Store assignments",
|
||||
&stateMemory{
|
||||
assignments: map[string]cpuset.CPUSet{
|
||||
"container1": cpuset.NewCPUSet(1, 5, 8),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
cpm, err := checkpointmanager.NewCheckpointManager(testingDir)
|
||||
if err != nil {
|
||||
t.Fatalf("could not create testing checkpoint manager: %v", err)
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
// ensure there is no previous checkpoint
|
||||
cpm.RemoveCheckpoint(testingCheckpoint)
|
||||
|
||||
cs1, err := NewCheckpointState(testingDir, testingCheckpoint, "none")
|
||||
if err != nil {
|
||||
t.Fatalf("could not create testing checkpointState instance: %v", err)
|
||||
}
|
||||
|
||||
// set values of cs1 instance so they are stored in checkpoint and can be read by cs2
|
||||
cs1.SetDefaultCPUSet(tc.expectedState.defaultCPUSet)
|
||||
cs1.SetCPUAssignments(tc.expectedState.assignments)
|
||||
|
||||
// restore checkpoint with previously stored values
|
||||
cs2, err := NewCheckpointState(testingDir, testingCheckpoint, "none")
|
||||
if err != nil {
|
||||
t.Fatalf("could not create testing checkpointState instance: %v", err)
|
||||
}
|
||||
|
||||
AssertStateEqual(t, cs2, tc.expectedState)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckpointStateHelpers(t *testing.T) {
|
||||
testCases := []struct {
|
||||
description string
|
||||
defaultCPUset cpuset.CPUSet
|
||||
containers map[string]cpuset.CPUSet
|
||||
}{
|
||||
{
|
||||
description: "One container",
|
||||
defaultCPUset: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8),
|
||||
containers: map[string]cpuset.CPUSet{
|
||||
"c1": cpuset.NewCPUSet(0, 1),
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "Two containers",
|
||||
defaultCPUset: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8),
|
||||
containers: map[string]cpuset.CPUSet{
|
||||
"c1": cpuset.NewCPUSet(0, 1),
|
||||
"c2": cpuset.NewCPUSet(2, 3, 4, 5),
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "Container without assigned cpus",
|
||||
defaultCPUset: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8),
|
||||
containers: map[string]cpuset.CPUSet{
|
||||
"c1": cpuset.NewCPUSet(),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
cpm, err := checkpointmanager.NewCheckpointManager(testingDir)
|
||||
if err != nil {
|
||||
t.Fatalf("could not create testing checkpoint manager: %v", err)
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
// ensure there is no previous checkpoint
|
||||
cpm.RemoveCheckpoint(testingCheckpoint)
|
||||
|
||||
state, err := NewCheckpointState(testingDir, testingCheckpoint, "none")
|
||||
if err != nil {
|
||||
t.Fatalf("could not create testing checkpointState instance: %v", err)
|
||||
}
|
||||
state.SetDefaultCPUSet(tc.defaultCPUset)
|
||||
|
||||
for container, set := range tc.containers {
|
||||
state.SetCPUSet(container, set)
|
||||
if cpus, _ := state.GetCPUSet(container); !cpus.Equals(set) {
|
||||
t.Fatalf("state inconsistent, got %q instead of %q", set, cpus)
|
||||
}
|
||||
|
||||
state.Delete(container)
|
||||
if _, ok := state.GetCPUSet(container); ok {
|
||||
t.Fatal("deleted container still existing in state")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckpointStateClear(t *testing.T) {
|
||||
testCases := []struct {
|
||||
description string
|
||||
defaultCPUset cpuset.CPUSet
|
||||
containers map[string]cpuset.CPUSet
|
||||
}{
|
||||
{
|
||||
"Valid state",
|
||||
cpuset.NewCPUSet(1, 5, 10),
|
||||
map[string]cpuset.CPUSet{
|
||||
"container1": cpuset.NewCPUSet(1, 4),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
state, err := NewCheckpointState(testingDir, testingCheckpoint, "none")
|
||||
if err != nil {
|
||||
t.Fatalf("could not create testing checkpointState instance: %v", err)
|
||||
}
|
||||
|
||||
state.SetDefaultCPUSet(tc.defaultCPUset)
|
||||
state.SetCPUAssignments(tc.containers)
|
||||
|
||||
state.ClearState()
|
||||
if !cpuset.NewCPUSet().Equals(state.GetDefaultCPUSet()) {
|
||||
t.Fatal("cleared state with non-empty default cpu set")
|
||||
}
|
||||
for container := range tc.containers {
|
||||
if _, ok := state.GetCPUSet(container); ok {
|
||||
t.Fatalf("container %q with non-default cpu set in cleared state", container)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
78
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_compatibility_test.go
generated
vendored
Normal file
@ -0,0 +1,78 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package state
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path"
|
||||
"testing"
|
||||
|
||||
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
|
||||
)
|
||||
|
||||
const compatibilityTestingCheckpoint = "cpumanager_state_compatibility_test"
|
||||
|
||||
var state = &stateMemory{
|
||||
assignments: ContainerCPUAssignments{
|
||||
"container1": cpuset.NewCPUSet(4, 5, 6),
|
||||
"container2": cpuset.NewCPUSet(1, 2, 3),
|
||||
},
|
||||
defaultCPUSet: cpuset.NewCPUSet(1, 2, 3),
|
||||
}
|
||||
|
||||
func TestFileToCheckpointCompatibility(t *testing.T) {
|
||||
statePath := path.Join(testingDir, compatibilityTestingCheckpoint)
|
||||
|
||||
// ensure there is no previous state saved at testing path
|
||||
os.Remove(statePath)
|
||||
// ensure testing state is removed after testing
|
||||
defer os.Remove(statePath)
|
||||
|
||||
fileState := NewFileState(statePath, "none")
|
||||
|
||||
fileState.SetDefaultCPUSet(state.defaultCPUSet)
|
||||
fileState.SetCPUAssignments(state.assignments)
|
||||
|
||||
restoredState, err := NewCheckpointState(testingDir, compatibilityTestingCheckpoint, "none")
|
||||
if err != nil {
|
||||
t.Fatalf("could not restore file state: %v", err)
|
||||
}
|
||||
|
||||
AssertStateEqual(t, restoredState, state)
|
||||
}
|
||||
|
||||
func TestCheckpointToFileCompatibility(t *testing.T) {
|
||||
cpm, err := checkpointmanager.NewCheckpointManager(testingDir)
|
||||
if err != nil {
|
||||
t.Fatalf("could not create testing checkpoint manager: %v", err)
|
||||
}
|
||||
|
||||
// ensure there is no previous checkpoint
|
||||
cpm.RemoveCheckpoint(compatibilityTestingCheckpoint)
|
||||
// ensure testing checkpoint is removed after testing
|
||||
defer cpm.RemoveCheckpoint(compatibilityTestingCheckpoint)
|
||||
|
||||
checkpointState, err := NewCheckpointState(testingDir, compatibilityTestingCheckpoint, "none")
|
||||
|
||||
checkpointState.SetDefaultCPUSet(state.defaultCPUSet)
|
||||
checkpointState.SetCPUAssignments(state.assignments)
|
||||
|
||||
restoredState := NewFileState(path.Join(testingDir, compatibilityTestingCheckpoint), "none")
|
||||
|
||||
AssertStateEqual(t, restoredState, state)
|
||||
}
|
14
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_file.go
generated
vendored
@ -19,8 +19,8 @@ package state
import (
"encoding/json"
"fmt"
"github.com/golang/glog"
"io/ioutil"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
"os"
"sync"
@ -79,7 +79,7 @@ func (sf *stateFile) tryRestoreState() error {
// If the state file does not exist or has zero length, write a new file.
if os.IsNotExist(err) || len(content) == 0 {
sf.storeState()
glog.Infof("[cpumanager] state file: created new state file \"%s\"", sf.stateFilePath)
klog.Infof("[cpumanager] state file: created new state file \"%s\"", sf.stateFilePath)
return nil
}

@ -92,7 +92,7 @@ func (sf *stateFile) tryRestoreState() error {
var readState stateFileData

if err = json.Unmarshal(content, &readState); err != nil {
glog.Errorf("[cpumanager] state file: could not unmarshal, corrupted state file - \"%s\"", sf.stateFilePath)
klog.Errorf("[cpumanager] state file: could not unmarshal, corrupted state file - \"%s\"", sf.stateFilePath)
return err
}

@ -101,13 +101,13 @@ func (sf *stateFile) tryRestoreState() error {
}

if tmpDefaultCPUSet, err = cpuset.Parse(readState.DefaultCPUSet); err != nil {
glog.Errorf("[cpumanager] state file: could not parse state file - [defaultCpuSet:\"%s\"]", readState.DefaultCPUSet)
klog.Errorf("[cpumanager] state file: could not parse state file - [defaultCpuSet:\"%s\"]", readState.DefaultCPUSet)
return err
}

for containerID, cpuString := range readState.Entries {
if tmpContainerCPUSet, err = cpuset.Parse(cpuString); err != nil {
glog.Errorf("[cpumanager] state file: could not parse state file - container id: %s, cpuset: \"%s\"", containerID, cpuString)
klog.Errorf("[cpumanager] state file: could not parse state file - container id: %s, cpuset: \"%s\"", containerID, cpuString)
return err
}
tmpAssignments[containerID] = tmpContainerCPUSet
@ -116,8 +116,8 @@ func (sf *stateFile) tryRestoreState() error {
sf.cache.SetDefaultCPUSet(tmpDefaultCPUSet)
sf.cache.SetCPUAssignments(tmpAssignments)

glog.V(2).Infof("[cpumanager] state file: restored state from state file \"%s\"", sf.stateFilePath)
glog.V(2).Infof("[cpumanager] state file: defaultCPUSet: %s", tmpDefaultCPUSet.String())
klog.V(2).Infof("[cpumanager] state file: restored state from state file \"%s\"", sf.stateFilePath)
klog.V(2).Infof("[cpumanager] state file: defaultCPUSet: %s", tmpDefaultCPUSet.String())

return nil
}
8
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_file_test.go
generated
vendored
@ -34,7 +34,8 @@ func writeToStateFile(statefile string, content string) {
ioutil.WriteFile(statefile, []byte(content), 0644)
}

func stateEqual(t *testing.T, sf State, sm State) {
// AssertStateEqual marks provided test as failed if provided states differ
func AssertStateEqual(t *testing.T, sf State, sm State) {
cpusetSf := sf.GetDefaultCPUSet()
cpusetSm := sm.GetDefaultCPUSet()
if !cpusetSf.Equals(cpusetSm) {
@ -253,7 +254,7 @@ func TestFileStateTryRestore(t *testing.T) {
}
}

stateEqual(t, fileState, tc.expectedState)
AssertStateEqual(t, fileState, tc.expectedState)
})
}
}
@ -363,7 +364,7 @@ func TestUpdateStateFile(t *testing.T) {
}
}
newFileState := NewFileState(sfilePath.Name(), "static")
stateEqual(t, newFileState, tc.expectedState)
AssertStateEqual(t, newFileState, tc.expectedState)
})
}
}
@ -471,7 +472,6 @@ func TestClearStateStateFile(t *testing.T) {
t.Error("cleared state shoudn't has got information about containers")
}
}

})
}
}
14
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_mem.go
generated
vendored
@ -19,7 +19,7 @@ package state
import (
"sync"

"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
)

@ -33,7 +33,7 @@ var _ State = &stateMemory{}

// NewMemoryState creates new State for keeping track of cpu/pod assignment
func NewMemoryState() State {
glog.Infof("[cpumanager] initializing new in-memory state store")
klog.Infof("[cpumanager] initializing new in-memory state store")
return &stateMemory{
assignments: ContainerCPUAssignments{},
defaultCPUSet: cpuset.NewCPUSet(),
@ -73,7 +73,7 @@ func (s *stateMemory) SetCPUSet(containerID string, cset cpuset.CPUSet) {
defer s.Unlock()

s.assignments[containerID] = cset
glog.Infof("[cpumanager] updated desired cpuset (container id: %s, cpuset: \"%s\")", containerID, cset)
klog.Infof("[cpumanager] updated desired cpuset (container id: %s, cpuset: \"%s\")", containerID, cset)
}

func (s *stateMemory) SetDefaultCPUSet(cset cpuset.CPUSet) {
@ -81,7 +81,7 @@ func (s *stateMemory) SetDefaultCPUSet(cset cpuset.CPUSet) {
defer s.Unlock()

s.defaultCPUSet = cset
glog.Infof("[cpumanager] updated default cpuset: \"%s\"", cset)
klog.Infof("[cpumanager] updated default cpuset: \"%s\"", cset)
}

func (s *stateMemory) SetCPUAssignments(a ContainerCPUAssignments) {
@ -89,7 +89,7 @@ func (s *stateMemory) SetCPUAssignments(a ContainerCPUAssignments) {
defer s.Unlock()

s.assignments = a.Clone()
glog.Infof("[cpumanager] updated cpuset assignments: \"%v\"", a)
klog.Infof("[cpumanager] updated cpuset assignments: \"%v\"", a)
}

func (s *stateMemory) Delete(containerID string) {
@ -97,7 +97,7 @@ func (s *stateMemory) Delete(containerID string) {
defer s.Unlock()

delete(s.assignments, containerID)
glog.V(2).Infof("[cpumanager] deleted cpuset assignment (container id: %s)", containerID)
klog.V(2).Infof("[cpumanager] deleted cpuset assignment (container id: %s)", containerID)
}

func (s *stateMemory) ClearState() {
@ -106,5 +106,5 @@ func (s *stateMemory) ClearState() {

s.defaultCPUSet = cpuset.CPUSet{}
s.assignments = make(ContainerCPUAssignments)
glog.V(2).Infof("[cpumanager] cleared state")
klog.V(2).Infof("[cpumanager] cleared state")
}
23
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/testing/BUILD
generated
vendored
Normal file
@ -0,0 +1,23 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
name = "go_default_library",
srcs = ["util.go"],
importpath = "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/testing",
visibility = ["//visibility:public"],
deps = ["//pkg/kubelet/checkpointmanager:go_default_library"],
)

filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)

filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
41
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/testing/util.go
generated
vendored
Normal file
@ -0,0 +1,41 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package testing

import "k8s.io/kubernetes/pkg/kubelet/checkpointmanager"

var _ checkpointmanager.Checkpoint = &MockCheckpoint{}

// MockCheckpoint struct is used for mocking checkpoint values in testing
type MockCheckpoint struct {
Content string
}

// MarshalCheckpoint returns fake content
func (mc *MockCheckpoint) MarshalCheckpoint() ([]byte, error) {
return []byte(mc.Content), nil
}

// UnmarshalCheckpoint fakes unmarshaling
func (mc *MockCheckpoint) UnmarshalCheckpoint(blob []byte) error {
return nil
}

// VerifyChecksum fakes verifying checksum
func (mc *MockCheckpoint) VerifyChecksum() error {
return nil
}
|
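testing/util.go above exists so tests can inject arbitrary checkpoint content and then exercise the restore paths. A sketch of that pattern, assuming the checkpointmanager API used elsewhere in this diff; the package name, test name, and checkpoint name are hypothetical:

package example

import (
	"os"
	"testing"

	"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
	testutil "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/testing"
)

// TestInjectCorruptCheckpoint writes deliberately broken content through the
// checkpoint manager, mirroring what state_checkpoint_test.go does with MockCheckpoint.
func TestInjectCorruptCheckpoint(t *testing.T) {
	cpm, err := checkpointmanager.NewCheckpointManager(os.TempDir())
	if err != nil {
		t.Fatalf("could not create checkpoint manager: %v", err)
	}

	mock := &testutil.MockCheckpoint{Content: `{"policyName":"none"`} // truncated JSON on purpose
	if err := cpm.CreateCheckpoint("cpumanager_checkpoint_example", mock); err != nil {
		t.Fatalf("could not create checkpoint: %v", err)
	}
	defer cpm.RemoveCheckpoint("cpumanager_checkpoint_example")
}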
2
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/BUILD
generated
vendored
@ -10,8 +10,8 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//pkg/kubelet/cm/cpuset:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/google/cadvisor/info/v1:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
4
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/topology.go
generated
vendored
@ -20,8 +20,8 @@ import (
"fmt"
"sort"

"github.com/golang/glog"
cadvisorapi "github.com/google/cadvisor/info/v1"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
)

@ -156,7 +156,7 @@ func Discover(machineInfo *cadvisorapi.MachineInfo) (*CPUTopology, error) {
numPhysicalCores += len(socket.Cores)
for _, core := range socket.Cores {
if coreID, err = getUniqueCoreID(core.Threads); err != nil {
glog.Errorf("could not get unique coreID for socket: %d core %d threads: %v",
klog.Errorf("could not get unique coreID for socket: %d core %d threads: %v",
socket.Id, core.Id, core.Threads)
return nil, err
}
2
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpuset/BUILD
generated
vendored
@ -5,7 +5,7 @@ go_library(
    srcs = ["cpuset.go"],
    importpath = "k8s.io/kubernetes/pkg/kubelet/cm/cpuset",
    visibility = ["//visibility:public"],
    deps = ["//vendor/github.com/golang/glog:go_default_library"],
    deps = ["//vendor/k8s.io/klog:go_default_library"],
)

go_test(
4
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpuset/cpuset.go
generated
vendored
@ -19,7 +19,7 @@ package cpuset
import (
    "bytes"
    "fmt"
    "github.com/golang/glog"
    "k8s.io/klog"
    "reflect"
    "sort"
    "strconv"
@ -221,7 +221,7 @@ func (s CPUSet) String() string {
func MustParse(s string) CPUSet {
    res, err := Parse(s)
    if err != nil {
        glog.Fatalf("unable to parse [%s] as CPUSet: %v", s, err)
        klog.Fatalf("unable to parse [%s] as CPUSet: %v", s, err)
    }
    return res
}
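A short usage sketch (editorial, not part of the vendored diff) of the two parsing entry points shown above; after this change MustParse terminates the process through klog.Fatalf on bad input, so Parse is the safer choice for untrusted strings. The CPU list values are illustrative.

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
)

func main() {
    // MustParse is intended for values known to be well-formed.
    s := cpuset.MustParse("0-2,6")
    fmt.Println(s.String()) // prints the canonical list form, e.g. "0-2,6"

    // Parse returns an error instead of exiting.
    if _, err := cpuset.Parse("not-a-cpuset"); err != nil {
        fmt.Println("parse error:", err)
    }
}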
23
vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/BUILD
generated
vendored
@ -15,6 +15,8 @@ go_library(
    deps = [
        "//pkg/apis/core/v1/helper:go_default_library",
        "//pkg/kubelet/apis/deviceplugin/v1beta1:go_default_library",
        "//pkg/kubelet/apis/pluginregistration/v1:go_default_library",
        "//pkg/kubelet/apis/podresources/v1alpha1:go_default_library",
        "//pkg/kubelet/checkpointmanager:go_default_library",
        "//pkg/kubelet/checkpointmanager/errors:go_default_library",
        "//pkg/kubelet/cm/devicemanager/checkpoint:go_default_library",
@ -22,12 +24,13 @@ go_library(
        "//pkg/kubelet/container:go_default_library",
        "//pkg/kubelet/lifecycle:go_default_library",
        "//pkg/kubelet/metrics:go_default_library",
        "//pkg/kubelet/util/pluginwatcher:go_default_library",
        "//pkg/scheduler/cache:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/google.golang.org/grpc:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/klog:go_default_library",
    ],
)

@ -40,16 +43,18 @@ go_test(
    embed = [":go_default_library"],
    deps = [
        "//pkg/kubelet/apis/deviceplugin/v1beta1:go_default_library",
        "//pkg/kubelet/apis/pluginregistration/v1:go_default_library",
        "//pkg/kubelet/checkpointmanager:go_default_library",
        "//pkg/kubelet/lifecycle:go_default_library",
        "//pkg/kubelet/util/pluginwatcher:go_default_library",
        "//pkg/scheduler/cache:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//vendor/github.com/stretchr/testify/assert:go_default_library",
        "//vendor/github.com/stretchr/testify/require:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
    ],
)

56
vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/device_plugin_stub.go
generated
vendored
@ -28,12 +28,15 @@ import (
    "google.golang.org/grpc"

    pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1"
    watcherapi "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1"
)

// Stub implementation for DevicePlugin.
type Stub struct {
    devs   []*pluginapi.Device
    socket string
    devs                  []*pluginapi.Device
    socket                string
    resourceName          string
    preStartContainerFlag bool

    stop chan interface{}
    wg   sync.WaitGroup
@ -43,6 +46,10 @@ type Stub struct {

    // allocFunc is used for handling allocation request
    allocFunc stubAllocFunc

    registrationStatus chan watcherapi.RegistrationStatus // for testing
    endpoint           string                             // for testing

}

// stubAllocFunc is the function called when receive an allocation request from Kubelet
@ -55,10 +62,12 @@ func defaultAllocFunc(r *pluginapi.AllocateRequest, devs map[string]pluginapi.De
}

// NewDevicePluginStub returns an initialized DevicePlugin Stub.
func NewDevicePluginStub(devs []*pluginapi.Device, socket string) *Stub {
func NewDevicePluginStub(devs []*pluginapi.Device, socket string, name string, preStartContainerFlag bool) *Stub {
    return &Stub{
        devs:   devs,
        socket: socket,
        devs:                  devs,
        socket:                socket,
        resourceName:          name,
        preStartContainerFlag: preStartContainerFlag,

        stop:   make(chan interface{}),
        update: make(chan []*pluginapi.Device),
@ -88,6 +97,7 @@ func (m *Stub) Start() error {
    m.wg.Add(1)
    m.server = grpc.NewServer([]grpc.ServerOption{}...)
    pluginapi.RegisterDevicePluginServer(m.server, m)
    watcherapi.RegisterRegistrationServer(m.server, m)

    go func() {
        defer m.wg.Done()
@ -118,8 +128,36 @@ func (m *Stub) Stop() error {
    return m.cleanup()
}

// GetInfo is the RPC which return pluginInfo
func (m *Stub) GetInfo(ctx context.Context, req *watcherapi.InfoRequest) (*watcherapi.PluginInfo, error) {
    log.Println("GetInfo")
    return &watcherapi.PluginInfo{
        Type:              watcherapi.DevicePlugin,
        Name:              m.resourceName,
        Endpoint:          m.endpoint,
        SupportedVersions: []string{pluginapi.Version}}, nil
}

// NotifyRegistrationStatus receives the registration notification from watcher
func (m *Stub) NotifyRegistrationStatus(ctx context.Context, status *watcherapi.RegistrationStatus) (*watcherapi.RegistrationStatusResponse, error) {
    if m.registrationStatus != nil {
        m.registrationStatus <- *status
    }
    if !status.PluginRegistered {
        log.Println("Registration failed: ", status.Error)
    }
    return &watcherapi.RegistrationStatusResponse{}, nil
}

// Register registers the device plugin for the given resourceName with Kubelet.
func (m *Stub) Register(kubeletEndpoint, resourceName string, preStartContainerFlag bool) error {
func (m *Stub) Register(kubeletEndpoint, resourceName string, pluginSockDir string) error {
    if pluginSockDir != "" {
        if _, err := os.Stat(pluginSockDir + "DEPRECATION"); err == nil {
            log.Println("Deprecation file found. Skip registration.")
            return nil
        }
    }
    log.Println("Deprecation file not found. Invoke registration")
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

@ -127,16 +165,16 @@ func (m *Stub) Register(kubeletEndpoint, resourceName string, preStartContainerF
        grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
            return net.DialTimeout("unix", addr, timeout)
        }))
    defer conn.Close()
    if err != nil {
        return err
    }
    defer conn.Close()
    client := pluginapi.NewRegistrationClient(conn)
    reqt := &pluginapi.RegisterRequest{
        Version:      pluginapi.Version,
        Endpoint:     path.Base(m.socket),
        ResourceName: resourceName,
        Options:      &pluginapi.DevicePluginOptions{PreStartRequired: preStartContainerFlag},
        Options:      &pluginapi.DevicePluginOptions{PreStartRequired: m.preStartContainerFlag},
    }

    _, err = client.Register(context.Background(), reqt)
@ -148,7 +186,7 @@ func (m *Stub) Register(kubeletEndpoint, resourceName string, preStartContainerF

// GetDevicePluginOptions returns DevicePluginOptions settings for the device plugin.
func (m *Stub) GetDevicePluginOptions(ctx context.Context, e *pluginapi.Empty) (*pluginapi.DevicePluginOptions, error) {
    return &pluginapi.DevicePluginOptions{}, nil
    return &pluginapi.DevicePluginOptions{PreStartRequired: m.preStartContainerFlag}, nil
}

// PreStartContainer resets the devices received
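For context, a minimal sketch (editorial, not taken from the diff) of how the reworked Stub constructor and Register signature shown above might be wired up; the socket paths and resource name are placeholder assumptions.

package main

import (
    "log"

    pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1"
    "k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
)

func main() {
    devs := []*pluginapi.Device{
        {ID: "Dev1", Health: pluginapi.Healthy},
        {ID: "Dev2", Health: pluginapi.Healthy},
    }
    // New signature: the resource name and PreStartRequired flag are fixed at construction.
    p := devicemanager.NewDevicePluginStub(devs, "/tmp/device-plugin.sock", "vendor.example.com/dev", false)
    if err := p.Start(); err != nil {
        log.Fatal(err)
    }
    defer p.Stop()
    // Third argument is now the plugin socket dir used for the DEPRECATION check;
    // an empty string keeps the legacy registration path against the kubelet socket.
    if err := p.Register("/var/lib/kubelet/device-plugins/kubelet.sock", "vendor.example.com/dev", ""); err != nil {
        log.Fatal(err)
    }
}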
105
vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/endpoint.go
generated
vendored
@ -23,8 +23,8 @@ import (
    "sync"
    "time"

    "github.com/golang/glog"
    "google.golang.org/grpc"
    "k8s.io/klog"

    pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1"
)
@ -37,8 +37,7 @@ type endpoint interface {
    stop()
    allocate(devs []string) (*pluginapi.AllocateResponse, error)
    preStartContainer(devs []string) (*pluginapi.PreStartContainerResponse, error)
    getDevices() []pluginapi.Device
    callback(resourceName string, added, updated, deleted []pluginapi.Device)
    callback(resourceName string, devices []pluginapi.Device)
    isStopped() bool
    stopGracePeriodExpired() bool
}
@ -51,18 +50,16 @@ type endpointImpl struct {
    resourceName string
    stopTime     time.Time

    devices map[string]pluginapi.Device
    mutex   sync.Mutex

    cb monitorCallback
    mutex sync.Mutex
    cb    monitorCallback
}

// newEndpoint creates a new endpoint for the given resourceName.
// This is to be used during normal device plugin registration.
func newEndpointImpl(socketPath, resourceName string, devices map[string]pluginapi.Device, callback monitorCallback) (*endpointImpl, error) {
func newEndpointImpl(socketPath, resourceName string, callback monitorCallback) (*endpointImpl, error) {
    client, c, err := dial(socketPath)
    if err != nil {
        glog.Errorf("Can't create new endpoint with path %s err %v", socketPath, err)
        klog.Errorf("Can't create new endpoint with path %s err %v", socketPath, err)
        return nil, err
    }

@ -73,120 +70,52 @@ func newEndpointImpl(socketPath, resourceName string, devices map[string]plugina
        socketPath:   socketPath,
        resourceName: resourceName,

        devices: devices,
        cb:      callback,
        cb: callback,
    }, nil
}

// newStoppedEndpointImpl creates a new endpoint for the given resourceName with stopTime set.
// This is to be used during Kubelet restart, before the actual device plugin re-registers.
func newStoppedEndpointImpl(resourceName string, devices map[string]pluginapi.Device) *endpointImpl {
func newStoppedEndpointImpl(resourceName string) *endpointImpl {
    return &endpointImpl{
        resourceName: resourceName,
        devices:      devices,
        stopTime:     time.Now(),
    }
}

func (e *endpointImpl) callback(resourceName string, added, updated, deleted []pluginapi.Device) {
    e.cb(resourceName, added, updated, deleted)
}

func (e *endpointImpl) getDevices() []pluginapi.Device {
    e.mutex.Lock()
    defer e.mutex.Unlock()
    var devs []pluginapi.Device

    for _, d := range e.devices {
        devs = append(devs, d)
    }

    return devs
func (e *endpointImpl) callback(resourceName string, devices []pluginapi.Device) {
    e.cb(resourceName, devices)
}

// run initializes ListAndWatch gRPC call for the device plugin and
// blocks on receiving ListAndWatch gRPC stream updates. Each ListAndWatch
// stream update contains a new list of device states. listAndWatch compares the new
// device states with its cached states to get list of new, updated, and deleted devices.
// stream update contains a new list of device states.
// It then issues a callback to pass this information to the device manager which
// will adjust the resource available information accordingly.
func (e *endpointImpl) run() {
    stream, err := e.client.ListAndWatch(context.Background(), &pluginapi.Empty{})
    if err != nil {
        glog.Errorf(errListAndWatch, e.resourceName, err)
        klog.Errorf(errListAndWatch, e.resourceName, err)

        return
    }

    devices := make(map[string]pluginapi.Device)

    e.mutex.Lock()
    for _, d := range e.devices {
        devices[d.ID] = d
    }
    e.mutex.Unlock()

    for {
        response, err := stream.Recv()
        if err != nil {
            glog.Errorf(errListAndWatch, e.resourceName, err)
            klog.Errorf(errListAndWatch, e.resourceName, err)
            return
        }

        devs := response.Devices
        glog.V(2).Infof("State pushed for device plugin %s", e.resourceName)

        newDevs := make(map[string]*pluginapi.Device)
        var added, updated []pluginapi.Device
        klog.V(2).Infof("State pushed for device plugin %s", e.resourceName)

        var newDevs []pluginapi.Device
        for _, d := range devs {
            dOld, ok := devices[d.ID]
            newDevs[d.ID] = d

            if !ok {
                glog.V(2).Infof("New device for Endpoint %s: %v", e.resourceName, d)

                devices[d.ID] = *d
                added = append(added, *d)

                continue
            }

            if d.Health == dOld.Health {
                continue
            }

            if d.Health == pluginapi.Unhealthy {
                glog.Errorf("Device %s is now Unhealthy", d.ID)
            } else if d.Health == pluginapi.Healthy {
                glog.V(2).Infof("Device %s is now Healthy", d.ID)
            }

            devices[d.ID] = *d
            updated = append(updated, *d)
            newDevs = append(newDevs, *d)
        }

        var deleted []pluginapi.Device
        for id, d := range devices {
            if _, ok := newDevs[id]; ok {
                continue
            }

            glog.Errorf("Device %s was deleted", d.ID)

            deleted = append(deleted, d)
            delete(devices, id)
        }

        e.mutex.Lock()
        // NOTE: Return a copy of 'devices' instead of returning a direct reference to local 'devices'
        e.devices = make(map[string]pluginapi.Device)
        for _, d := range devices {
            e.devices[d.ID] = d
        }
        e.mutex.Unlock()

        e.callback(e.resourceName, added, updated, deleted)
        e.callback(e.resourceName, newDevs)
    }
}

60
vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/endpoint_test.go
generated
vendored
@ -37,7 +37,7 @@ func TestNewEndpoint(t *testing.T) {
        {ID: "ADeviceId", Health: pluginapi.Healthy},
    }

    p, e := esetup(t, devs, socket, "mock", func(n string, a, u, r []pluginapi.Device) {})
    p, e := esetup(t, devs, socket, "mock", func(n string, d []pluginapi.Device) {})
    defer ecleanup(t, p, e)
}

@ -58,7 +58,7 @@ func TestRun(t *testing.T) {

    callbackCount := 0
    callbackChan := make(chan int)
    callback := func(n string, a, u, r []pluginapi.Device) {
    callback := func(n string, devices []pluginapi.Device) {
        // Should be called twice:
        // one for plugin registration, one for plugin update.
        if callbackCount > 2 {
@ -67,23 +67,24 @@ func TestRun(t *testing.T) {

        // Check plugin registration
        if callbackCount == 0 {
            require.Len(t, a, 3)
            require.Len(t, u, 0)
            require.Len(t, r, 0)
            require.Len(t, devices, 3)
            require.Equal(t, devices[0].ID, devs[0].ID)
            require.Equal(t, devices[1].ID, devs[1].ID)
            require.Equal(t, devices[2].ID, devs[2].ID)
            require.Equal(t, devices[0].Health, devs[0].Health)
            require.Equal(t, devices[1].Health, devs[1].Health)
            require.Equal(t, devices[2].Health, devs[2].Health)
        }

        // Check plugin update
        if callbackCount == 1 {
            require.Len(t, a, 1)
            require.Len(t, u, 2)
            require.Len(t, r, 1)

            require.Equal(t, a[0].ID, updated[2].ID)
            require.Equal(t, u[0].ID, updated[0].ID)
            require.Equal(t, u[0].Health, updated[0].Health)
            require.Equal(t, u[1].ID, updated[1].ID)
            require.Equal(t, u[1].Health, updated[1].Health)
            require.Equal(t, r[0].ID, devs[1].ID)
            require.Len(t, devices, 3)
            require.Equal(t, devices[0].ID, updated[0].ID)
            require.Equal(t, devices[1].ID, updated[1].ID)
            require.Equal(t, devices[2].ID, updated[2].ID)
            require.Equal(t, devices[0].Health, updated[0].Health)
            require.Equal(t, devices[1].Health, updated[1].Health)
            require.Equal(t, devices[2].Health, updated[2].Health)
        }

        callbackCount++
@ -102,18 +103,7 @@ func TestRun(t *testing.T) {
    // Wait for the second callback to be issued.
    <-callbackChan

    e.mutex.Lock()
    defer e.mutex.Unlock()

    require.Len(t, e.devices, 3)
    for _, dref := range updated {
        d, ok := e.devices[dref.ID]

        require.True(t, ok)
        require.Equal(t, d.ID, dref.ID)
        require.Equal(t, d.Health, dref.Health)
    }

    require.Equal(t, callbackCount, 2)
}

func TestAllocate(t *testing.T) {
@ -123,7 +113,7 @@ func TestAllocate(t *testing.T) {
    }
    callbackCount := 0
    callbackChan := make(chan int)
    p, e := esetup(t, devs, socket, "mock", func(n string, a, u, r []pluginapi.Device) {
    p, e := esetup(t, devs, socket, "mock", func(n string, d []pluginapi.Device) {
        callbackCount++
        callbackChan <- callbackCount
    })
@ -169,23 +159,13 @@ func TestAllocate(t *testing.T) {
    require.Equal(t, resp, respOut)
}

func TestGetDevices(t *testing.T) {
    e := endpointImpl{
        devices: map[string]pluginapi.Device{
            "ADeviceId": {ID: "ADeviceId", Health: pluginapi.Healthy},
        },
    }
    devs := e.getDevices()
    require.Len(t, devs, 1)
}

func esetup(t *testing.T, devs []*pluginapi.Device, socket, resourceName string, callback monitorCallback) (*Stub, *endpointImpl) {
    p := NewDevicePluginStub(devs, socket)
    p := NewDevicePluginStub(devs, socket, resourceName, false)

    err := p.Start()
    require.NoError(t, err)

    e, err := newEndpointImpl(socket, resourceName, make(map[string]pluginapi.Device), callback)
    e, err := newEndpointImpl(socket, resourceName, callback)
    require.NoError(t, err)

    return p, e
288
vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/manager.go
generated
vendored
@ -25,20 +25,22 @@ import (
    "sync"
    "time"

    "github.com/golang/glog"
    "google.golang.org/grpc"
    "k8s.io/klog"

    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    "k8s.io/apimachinery/pkg/util/sets"
    v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
    pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1"
    podresourcesapi "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1"
    "k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
    "k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors"
    "k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/checkpoint"
    "k8s.io/kubernetes/pkg/kubelet/config"
    "k8s.io/kubernetes/pkg/kubelet/lifecycle"
    "k8s.io/kubernetes/pkg/kubelet/metrics"
    watcher "k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher"
    schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
)

@ -48,14 +50,14 @@ type ActivePodsFunc func() []*v1.Pod
// monitorCallback is the function called when a device's health state changes,
// or new devices are reported, or old devices are deleted.
// Updated contains the most recent state of the Device.
type monitorCallback func(resourceName string, added, updated, deleted []pluginapi.Device)
type monitorCallback func(resourceName string, devices []pluginapi.Device)

// ManagerImpl is the structure in charge of managing Device Plugins.
type ManagerImpl struct {
    socketname string
    socketdir  string

    endpoints map[string]endpoint // Key is ResourceName
    endpoints map[string]endpointInfo // Key is ResourceName
    mutex     sync.Mutex

    server *grpc.Server
@ -85,10 +87,14 @@ type ManagerImpl struct {

    // podDevices contains pod to allocated device mapping.
    podDevices        podDevices
    pluginOpts        map[string]*pluginapi.DevicePluginOptions
    checkpointManager checkpointmanager.CheckpointManager
}

type endpointInfo struct {
    e    endpoint
    opts *pluginapi.DevicePluginOptions
}

type sourcesReadyStub struct{}

func (s *sourcesReadyStub) AddSource(source string) {}
@ -100,21 +106,21 @@ func NewManagerImpl() (*ManagerImpl, error) {
}

func newManagerImpl(socketPath string) (*ManagerImpl, error) {
    glog.V(2).Infof("Creating Device Plugin manager at %s", socketPath)
    klog.V(2).Infof("Creating Device Plugin manager at %s", socketPath)

    if socketPath == "" || !filepath.IsAbs(socketPath) {
        return nil, fmt.Errorf(errBadSocket+" %v", socketPath)
        return nil, fmt.Errorf(errBadSocket+" %s", socketPath)
    }

    dir, file := filepath.Split(socketPath)
    manager := &ManagerImpl{
        endpoints: make(map[string]endpoint),
        endpoints: make(map[string]endpointInfo),

        socketname:       file,
        socketdir:        dir,
        healthyDevices:   make(map[string]sets.String),
        unhealthyDevices: make(map[string]sets.String),
        allocatedDevices: make(map[string]sets.String),
        pluginOpts:       make(map[string]*pluginapi.DevicePluginOptions),
        podDevices:       make(podDevices),
    }
    manager.callback = manager.genericDeviceUpdateCallback
@ -125,35 +131,24 @@ func newManagerImpl(socketPath string) (*ManagerImpl, error) {
    manager.sourcesReady = &sourcesReadyStub{}
    checkpointManager, err := checkpointmanager.NewCheckpointManager(dir)
    if err != nil {
        return nil, fmt.Errorf("failed to initialize checkpoint manager: %+v", err)
        return nil, fmt.Errorf("failed to initialize checkpoint manager: %v", err)
    }
    manager.checkpointManager = checkpointManager

    return manager, nil
}

func (m *ManagerImpl) genericDeviceUpdateCallback(resourceName string, added, updated, deleted []pluginapi.Device) {
    kept := append(updated, added...)
func (m *ManagerImpl) genericDeviceUpdateCallback(resourceName string, devices []pluginapi.Device) {
    m.mutex.Lock()
    if _, ok := m.healthyDevices[resourceName]; !ok {
        m.healthyDevices[resourceName] = sets.NewString()
    }
    if _, ok := m.unhealthyDevices[resourceName]; !ok {
        m.unhealthyDevices[resourceName] = sets.NewString()
    }
    for _, dev := range kept {
    m.healthyDevices[resourceName] = sets.NewString()
    m.unhealthyDevices[resourceName] = sets.NewString()
    for _, dev := range devices {
        if dev.Health == pluginapi.Healthy {
            m.healthyDevices[resourceName].Insert(dev.ID)
            m.unhealthyDevices[resourceName].Delete(dev.ID)
        } else {
            m.unhealthyDevices[resourceName].Insert(dev.ID)
            m.healthyDevices[resourceName].Delete(dev.ID)
        }
    }
    for _, dev := range deleted {
        m.healthyDevices[resourceName].Delete(dev.ID)
        m.unhealthyDevices[resourceName].Delete(dev.ID)
    }
    m.mutex.Unlock()
    m.writeCheckpoint()
}
@ -175,7 +170,7 @@ func (m *ManagerImpl) removeContents(dir string) error {
        }
        stat, err := os.Stat(filePath)
        if err != nil {
            glog.Errorf("Failed to stat file %v: %v", filePath, err)
            klog.Errorf("Failed to stat file %s: %v", filePath, err)
            continue
        }
        if stat.IsDir() {
@ -198,8 +193,7 @@ func (m *ManagerImpl) checkpointFile() string {
// podDevices and allocatedDevices information from checkpoint-ed state and
// starts device plugin registration service.
func (m *ManagerImpl) Start(activePods ActivePodsFunc, sourcesReady config.SourcesReady) error {
    glog.V(2).Infof("Starting Device Plugin manager")
    fmt.Println("Starting Device Plugin manager")
    klog.V(2).Infof("Starting Device Plugin manager")

    m.activePods = activePods
    m.sourcesReady = sourcesReady
@ -207,7 +201,7 @@ func (m *ManagerImpl) Start(activePods ActivePodsFunc, sourcesReady config.Sourc
    // Loads in allocatedDevices information from disk.
    err := m.readCheckpoint()
    if err != nil {
        glog.Warningf("Continue after failing to read checkpoint file. Device allocation info may NOT be up-to-date. Err: %v", err)
        klog.Warningf("Continue after failing to read checkpoint file. Device allocation info may NOT be up-to-date. Err: %v", err)
    }

    socketPath := filepath.Join(m.socketdir, m.socketname)
@ -216,12 +210,12 @@ func (m *ManagerImpl) Start(activePods ActivePodsFunc, sourcesReady config.Sourc
    // Removes all stale sockets in m.socketdir. Device plugins can monitor
    // this and use it as a signal to re-register with the new Kubelet.
    if err := m.removeContents(m.socketdir); err != nil {
        glog.Errorf("Fail to clean up stale contents under %s: %+v", m.socketdir, err)
        klog.Errorf("Fail to clean up stale contents under %s: %v", m.socketdir, err)
    }

    s, err := net.Listen("unix", socketPath)
    if err != nil {
        glog.Errorf(errListenSocket+" %+v", err)
        klog.Errorf(errListenSocket+" %v", err)
        return err
    }

@ -234,24 +228,87 @@ func (m *ManagerImpl) Start(activePods ActivePodsFunc, sourcesReady config.Sourc
        m.server.Serve(s)
    }()

    glog.V(2).Infof("Serving device plugin registration server on %q", socketPath)
    klog.V(2).Infof("Serving device plugin registration server on %q", socketPath)

    return nil
}

// Devices is the map of devices that are known by the Device
// Plugin manager with the kind of the devices as key
func (m *ManagerImpl) Devices() map[string][]pluginapi.Device {
// GetWatcherHandler returns the plugin handler
func (m *ManagerImpl) GetWatcherHandler() watcher.PluginHandler {
    if f, err := os.Create(m.socketdir + "DEPRECATION"); err != nil {
        klog.Errorf("Failed to create deprecation file at %s", m.socketdir)
    } else {
        f.Close()
        klog.V(4).Infof("created deprecation file %s", f.Name())
    }

    return watcher.PluginHandler(m)
}

// ValidatePlugin validates a plugin if the version is correct and the name has the format of an extended resource
func (m *ManagerImpl) ValidatePlugin(pluginName string, endpoint string, versions []string, foundInDeprecatedDir bool) error {
    klog.V(2).Infof("Got Plugin %s at endpoint %s with versions %v", pluginName, endpoint, versions)

    if !m.isVersionCompatibleWithPlugin(versions) {
        return fmt.Errorf("manager version, %s, is not among plugin supported versions %v", pluginapi.Version, versions)
    }

    if !v1helper.IsExtendedResourceName(v1.ResourceName(pluginName)) {
        return fmt.Errorf("invalid name of device plugin socket: %s", fmt.Sprintf(errInvalidResourceName, pluginName))
    }

    return nil
}

// RegisterPlugin starts the endpoint and registers it
// TODO: Start the endpoint and wait for the First ListAndWatch call
// before registering the plugin
func (m *ManagerImpl) RegisterPlugin(pluginName string, endpoint string, versions []string) error {
    klog.V(2).Infof("Registering Plugin %s at endpoint %s", pluginName, endpoint)

    e, err := newEndpointImpl(endpoint, pluginName, m.callback)
    if err != nil {
        return fmt.Errorf("Failed to dial device plugin with socketPath %s: %v", endpoint, err)
    }

    options, err := e.client.GetDevicePluginOptions(context.Background(), &pluginapi.Empty{})
    if err != nil {
        return fmt.Errorf("Failed to get device plugin options: %v", err)
    }

    m.registerEndpoint(pluginName, options, e)
    go m.runEndpoint(pluginName, e)

    return nil
}

// DeRegisterPlugin deregisters the plugin
// TODO work on the behavior for deregistering plugins
// e.g: Should we delete the resource
func (m *ManagerImpl) DeRegisterPlugin(pluginName string) {
    m.mutex.Lock()
    defer m.mutex.Unlock()

    devs := make(map[string][]pluginapi.Device)
    for k, e := range m.endpoints {
        glog.V(3).Infof("Endpoint: %+v: %p", k, e)
        devs[k] = e.getDevices()
    // Note: This will mark the resource unhealthy as per the behavior
    // in runEndpoint
    if eI, ok := m.endpoints[pluginName]; ok {
        eI.e.stop()
    }
}

    return devs
func (m *ManagerImpl) isVersionCompatibleWithPlugin(versions []string) bool {
    // TODO(vikasc): Currently this is fine as we only have a single supported version. When we do need to support
    // multiple versions in the future, we may need to extend this function to return a supported version.
    // E.g., say kubelet supports v1beta1 and v1beta2, and we get v1alpha1 and v1beta1 from a device plugin,
    // this function should return v1beta1
    for _, version := range versions {
        for _, supportedVersion := range pluginapi.SupportedVersions {
            if version == supportedVersion {
                return true
            }
        }
    }
    return false
}

// Allocate is the call that you can use to allocate a set of devices
@ -259,7 +316,6 @@ func (m *ManagerImpl) Devices() map[string][]pluginapi.Device {
func (m *ManagerImpl) Allocate(node *schedulercache.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
    pod := attrs.Pod
    devicesToReuse := make(map[string]sets.String)
    // TODO: Reuse devices between init containers and regular containers.
    for _, container := range pod.Spec.InitContainers {
        if err := m.allocateContainerResources(pod, &container, devicesToReuse); err != nil {
            return err
@ -287,7 +343,7 @@ func (m *ManagerImpl) Allocate(node *schedulercache.NodeInfo, attrs *lifecycle.P

// Register registers a device plugin.
func (m *ManagerImpl) Register(ctx context.Context, r *pluginapi.RegisterRequest) (*pluginapi.Empty, error) {
    glog.Infof("Got registration request from device plugin with resource name %q", r.ResourceName)
    klog.Infof("Got registration request from device plugin with resource name %q", r.ResourceName)
    metrics.DevicePluginRegistrationCount.WithLabelValues(r.ResourceName).Inc()
    var versionCompatible bool
    for _, v := range pluginapi.SupportedVersions {
@ -298,13 +354,13 @@ func (m *ManagerImpl) Register(ctx context.Context, r *pluginapi.RegisterRequest
    }
    if !versionCompatible {
        errorString := fmt.Sprintf(errUnsupportedVersion, r.Version, pluginapi.SupportedVersions)
        glog.Infof("Bad registration request from device plugin with resource name %q: %v", r.ResourceName, errorString)
        klog.Infof("Bad registration request from device plugin with resource name %q: %s", r.ResourceName, errorString)
        return &pluginapi.Empty{}, fmt.Errorf(errorString)
    }

    if !v1helper.IsExtendedResourceName(v1.ResourceName(r.ResourceName)) {
        errorString := fmt.Sprintf(errInvalidResourceName, r.ResourceName)
        glog.Infof("Bad registration request from device plugin: %v", errorString)
        klog.Infof("Bad registration request from device plugin: %s", errorString)
        return &pluginapi.Empty{}, fmt.Errorf(errorString)
    }

@ -323,8 +379,8 @@ func (m *ManagerImpl) Stop() error {
    m.mutex.Lock()
    defer m.mutex.Unlock()
    for _, e := range m.endpoints {
        e.stop()
    for _, eI := range m.endpoints {
        eI.e.stop()
    }

    if m.server == nil {
@ -336,66 +392,42 @@ func (m *ManagerImpl) Stop() error {
    return nil
}

func (m *ManagerImpl) registerEndpoint(resourceName string, options *pluginapi.DevicePluginOptions, e endpoint) {
    m.mutex.Lock()
    defer m.mutex.Unlock()

    m.endpoints[resourceName] = endpointInfo{e: e, opts: options}
    klog.V(2).Infof("Registered endpoint %v", e)
}

func (m *ManagerImpl) runEndpoint(resourceName string, e endpoint) {
    e.run()
    e.stop()

    m.mutex.Lock()
    defer m.mutex.Unlock()

    if old, ok := m.endpoints[resourceName]; ok && old.e == e {
        m.markResourceUnhealthy(resourceName)
    }

    klog.V(2).Infof("Endpoint (%s, %v) became unhealthy", resourceName, e)
}

func (m *ManagerImpl) addEndpoint(r *pluginapi.RegisterRequest) {
    existingDevs := make(map[string]pluginapi.Device)
    m.mutex.Lock()
    old, ok := m.endpoints[r.ResourceName]
    if ok && old != nil {
        // Pass devices of previous endpoint into re-registered one,
        // to avoid potential orphaned devices upon re-registration
        devices := make(map[string]pluginapi.Device)
        for _, device := range old.getDevices() {
            device.Health = pluginapi.Unhealthy
            devices[device.ID] = device
        }
        existingDevs = devices
    }
    m.mutex.Unlock()

    socketPath := filepath.Join(m.socketdir, r.Endpoint)
    e, err := newEndpointImpl(socketPath, r.ResourceName, existingDevs, m.callback)
    new, err := newEndpointImpl(filepath.Join(m.socketdir, r.Endpoint), r.ResourceName, m.callback)
    if err != nil {
        glog.Errorf("Failed to dial device plugin with request %v: %v", r, err)
        klog.Errorf("Failed to dial device plugin with request %v: %v", r, err)
        return
    }
    m.mutex.Lock()
    if r.Options != nil {
        m.pluginOpts[r.ResourceName] = r.Options
    }
    // Check for potential re-registration during the initialization of new endpoint,
    // and skip updating if re-registration happens.
    // TODO: simplify the part once we have a better way to handle registered devices
    ext := m.endpoints[r.ResourceName]
    if ext != old {
        glog.Warningf("Some other endpoint %v is added while endpoint %v is initialized", ext, e)
        m.mutex.Unlock()
        e.stop()
        return
    }
    // Associates the newly created endpoint with the corresponding resource name.
    // Stops existing endpoint if there is any.
    m.endpoints[r.ResourceName] = e
    glog.V(2).Infof("Registered endpoint %v", e)
    m.mutex.Unlock()

    if old != nil {
        old.stop()
    }

    m.registerEndpoint(r.ResourceName, r.Options, new)
    go func() {
        e.run()
        e.stop()
        m.mutex.Lock()
        if old, ok := m.endpoints[r.ResourceName]; ok && old == e {
            m.markResourceUnhealthy(r.ResourceName)
        }
        glog.V(2).Infof("Unregistered endpoint %v", e)
        m.mutex.Unlock()
        m.runEndpoint(r.ResourceName, new)
    }()
}

func (m *ManagerImpl) markResourceUnhealthy(resourceName string) {
    glog.V(2).Infof("Mark all resources Unhealthy for resource %s", resourceName)
    klog.V(2).Infof("Mark all resources Unhealthy for resource %s", resourceName)
    healthyDevices := sets.NewString()
    if _, ok := m.healthyDevices[resourceName]; ok {
        healthyDevices = m.healthyDevices[resourceName]
@ -426,13 +458,13 @@ func (m *ManagerImpl) GetCapacity() (v1.ResourceList, v1.ResourceList, []string)
    deletedResources := sets.NewString()
    m.mutex.Lock()
    for resourceName, devices := range m.healthyDevices {
        e, ok := m.endpoints[resourceName]
        if (ok && e.stopGracePeriodExpired()) || !ok {
        eI, ok := m.endpoints[resourceName]
        if (ok && eI.e.stopGracePeriodExpired()) || !ok {
            // The resources contained in endpoints and (un)healthyDevices
            // should always be consistent. Otherwise, we run with the risk
            // of failing to garbage collect non-existing resources or devices.
            if !ok {
                glog.Errorf("unexpected: healthyDevices and endpoints are out of sync")
                klog.Errorf("unexpected: healthyDevices and endpoints are out of sync")
            }
            delete(m.endpoints, resourceName)
            delete(m.healthyDevices, resourceName)
@ -444,10 +476,10 @@ func (m *ManagerImpl) GetCapacity() (v1.ResourceList, v1.ResourceList, []string)
        }
    }
    for resourceName, devices := range m.unhealthyDevices {
        e, ok := m.endpoints[resourceName]
        if (ok && e.stopGracePeriodExpired()) || !ok {
        eI, ok := m.endpoints[resourceName]
        if (ok && eI.e.stopGracePeriodExpired()) || !ok {
            if !ok {
                glog.Errorf("unexpected: unhealthyDevices and endpoints are out of sync")
                klog.Errorf("unexpected: unhealthyDevices and endpoints are out of sync")
            }
            delete(m.endpoints, resourceName)
            delete(m.unhealthyDevices, resourceName)
@ -493,7 +525,7 @@ func (m *ManagerImpl) readCheckpoint() error {
    err := m.checkpointManager.GetCheckpoint(kubeletDeviceManagerCheckpoint, cp)
    if err != nil {
        if err == errors.ErrCheckpointNotFound {
            glog.Warningf("Failed to retrieve checkpoint for %q: %v", kubeletDeviceManagerCheckpoint, err)
            klog.Warningf("Failed to retrieve checkpoint for %q: %v", kubeletDeviceManagerCheckpoint, err)
            return nil
        }
        return err
@ -508,7 +540,7 @@ func (m *ManagerImpl) readCheckpoint() error {
        // will stay zero till the corresponding device plugin re-registers.
        m.healthyDevices[resource] = sets.NewString()
        m.unhealthyDevices[resource] = sets.NewString()
        m.endpoints[resource] = newStoppedEndpointImpl(resource, make(map[string]pluginapi.Device))
        m.endpoints[resource] = endpointInfo{e: newStoppedEndpointImpl(resource), opts: nil}
    }
    return nil
}
@ -530,7 +562,7 @@ func (m *ManagerImpl) updateAllocatedDevices(activePods []*v1.Pod) {
    if len(podsToBeRemoved) <= 0 {
        return
    }
    glog.V(3).Infof("pods to be removed: %v", podsToBeRemoved.List())
    klog.V(3).Infof("pods to be removed: %v", podsToBeRemoved.List())
    m.podDevices.delete(podsToBeRemoved.List())
    // Regenerated allocatedDevices after we update pod allocation information.
    m.allocatedDevices = m.podDevices.devices()
@ -546,22 +578,22 @@ func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, requi
    // This can happen if a container restarts for example.
    devices := m.podDevices.containerDevices(podUID, contName, resource)
    if devices != nil {
        glog.V(3).Infof("Found pre-allocated devices for resource %s container %q in Pod %q: %v", resource, contName, podUID, devices.List())
        klog.V(3).Infof("Found pre-allocated devices for resource %s container %q in Pod %q: %v", resource, contName, podUID, devices.List())
        needed = needed - devices.Len()
        // A pod's resource is not expected to change once admitted by the API server,
        // so just fail loudly here. We can revisit this part if this no longer holds.
        if needed != 0 {
            return nil, fmt.Errorf("pod %v container %v changed request for resource %v from %v to %v", podUID, contName, resource, devices.Len(), required)
            return nil, fmt.Errorf("pod %q container %q changed request for resource %q from %d to %d", podUID, contName, resource, devices.Len(), required)
        }
    }
    if needed == 0 {
        // No change, no work.
        return nil, nil
    }
    glog.V(3).Infof("Needs to allocate %v %v for pod %q container %q", needed, resource, podUID, contName)
    klog.V(3).Infof("Needs to allocate %d %q for pod %q container %q", needed, resource, podUID, contName)
    // Needs to allocate additional devices.
    if _, ok := m.healthyDevices[resource]; !ok {
        return nil, fmt.Errorf("can't allocate unregistered device %v", resource)
        return nil, fmt.Errorf("can't allocate unregistered device %s", resource)
    }
    devices = sets.NewString()
    // Allocates from reusableDevices list first.
@ -609,7 +641,7 @@ func (m *ManagerImpl) allocateContainerResources(pod *v1.Pod, container *v1.Cont
    for k, v := range container.Resources.Limits {
        resource := string(k)
        needed := int(v.Value())
        glog.V(3).Infof("needs %d %s", needed, resource)
        klog.V(3).Infof("needs %d %s", needed, resource)
        if !m.isDevicePluginResource(resource) {
            continue
        }
@ -641,7 +673,7 @@ func (m *ManagerImpl) allocateContainerResources(pod *v1.Pod, container *v1.Cont
        // plugin Allocate grpc calls if it becomes common that a container may require
        // resources from multiple device plugins.
        m.mutex.Lock()
        e, ok := m.endpoints[resource]
        eI, ok := m.endpoints[resource]
        m.mutex.Unlock()
        if !ok {
            m.mutex.Lock()
@ -653,8 +685,8 @@ func (m *ManagerImpl) allocateContainerResources(pod *v1.Pod, container *v1.Cont
        devs := allocDevices.UnsortedList()
        // TODO: refactor this part of code to just append a ContainerAllocationRequest
        // in a passed in AllocateRequest pointer, and issues a single Allocate call per pod.
        glog.V(3).Infof("Making allocation request for devices %v for device plugin %s", devs, resource)
        resp, err := e.allocate(devs)
        klog.V(3).Infof("Making allocation request for devices %v for device plugin %s", devs, resource)
        resp, err := eI.e.allocate(devs)
        metrics.DevicePluginAllocationLatency.WithLabelValues(resource).Observe(metrics.SinceInMicroseconds(startRPCTime))
        if err != nil {
            // In case of allocation failure, we want to restore m.allocatedDevices
@ -665,6 +697,10 @@ func (m *ManagerImpl) allocateContainerResources(pod *v1.Pod, container *v1.Cont
            return err
        }

        if len(resp.ContainerResponses) == 0 {
            return fmt.Errorf("No containers return in allocation response %v", resp)
        }

        // Update internal cached podDevices state.
        m.mutex.Lock()
        m.podDevices.insert(podUID, contName, resource, allocDevices, resp.ContainerResponses[0])
@ -700,16 +736,15 @@ func (m *ManagerImpl) GetDeviceRunContainerOptions(pod *v1.Pod, container *v1.Co
// with PreStartRequired option set.
func (m *ManagerImpl) callPreStartContainerIfNeeded(podUID, contName, resource string) error {
    m.mutex.Lock()
    opts, ok := m.pluginOpts[resource]
    eI, ok := m.endpoints[resource]
    if !ok {
        m.mutex.Unlock()
        glog.V(4).Infof("Plugin options not found in cache for resource: %s. Skip PreStartContainer", resource)
        return nil
        return fmt.Errorf("endpoint not found in cache for a registered resource: %s", resource)
    }

    if !opts.PreStartRequired {
    if eI.opts == nil || !eI.opts.PreStartRequired {
        m.mutex.Unlock()
        glog.V(4).Infof("Plugin options indicate to skip PreStartContainer for resource, %v", resource)
        klog.V(4).Infof("Plugin options indicate to skip PreStartContainer for resource: %s", resource)
        return nil
    }

@ -719,16 +754,10 @@ func (m *ManagerImpl) callPreStartContainerIfNeeded(podUID, contName, resource s
        return fmt.Errorf("no devices found allocated in local cache for pod %s, container %s, resource %s", podUID, contName, resource)
    }

    e, ok := m.endpoints[resource]
    if !ok {
        m.mutex.Unlock()
        return fmt.Errorf("endpoint not found in cache for a registered resource: %s", resource)
    }

    m.mutex.Unlock()
    devs := devices.UnsortedList()
    glog.V(4).Infof("Issuing an PreStartContainer call for container, %s, of pod %s", contName, podUID)
    _, err := e.preStartContainer(devs)
    klog.V(4).Infof("Issuing an PreStartContainer call for container, %s, of pod %s", contName, podUID)
    _, err := eI.e.preStartContainer(devs)
    if err != nil {
        return fmt.Errorf("device plugin PreStartContainer rpc failed with err: %v", err)
    }
@ -774,3 +803,10 @@ func (m *ManagerImpl) isDevicePluginResource(resource string) bool {
    }
    return false
}

// GetDevices returns the devices used by the specified container
func (m *ManagerImpl) GetDevices(podUID, containerName string) []*podresourcesapi.ContainerDevices {
    m.mutex.Lock()
    defer m.mutex.Unlock()
    return m.podDevices.getContainerDevices(podUID, containerName)
}
18
vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/manager_stub.go
generated
vendored
@ -18,9 +18,10 @@ package devicemanager

import (
    "k8s.io/api/core/v1"
    pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1"
    podresourcesapi "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1"
    "k8s.io/kubernetes/pkg/kubelet/config"
    "k8s.io/kubernetes/pkg/kubelet/lifecycle"
    "k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher"
    schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
)

@ -42,11 +43,6 @@ func (h *ManagerStub) Stop() error {
    return nil
}

// Devices returns an empty map.
func (h *ManagerStub) Devices() map[string][]pluginapi.Device {
    return make(map[string][]pluginapi.Device)
}

// Allocate simply returns nil.
func (h *ManagerStub) Allocate(node *schedulercache.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
    return nil
@ -61,3 +57,13 @@ func (h *ManagerStub) GetDeviceRunContainerOptions(pod *v1.Pod, container *v1.Co
func (h *ManagerStub) GetCapacity() (v1.ResourceList, v1.ResourceList, []string) {
    return nil, nil, []string{}
}

// GetWatcherHandler returns plugin watcher interface
func (h *ManagerStub) GetWatcherHandler() pluginwatcher.PluginHandler {
    return nil
}

// GetDevices returns nil
func (h *ManagerStub) GetDevices(_, _ string) []*podresourcesapi.ContainerDevices {
    return nil
}
338
vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/manager_test.go
generated
vendored
338
vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/manager_test.go
generated
vendored
@ -20,8 +20,8 @@ import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@ -33,8 +33,10 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1"
|
||||
watcherapi "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1"
|
||||
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
|
||||
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
|
||||
"k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher"
|
||||
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
|
||||
)
|
||||
|
||||
@ -65,31 +67,18 @@ func TestNewManagerImplStart(t *testing.T) {
|
||||
socketDir, socketName, pluginSocketName, err := tmpSocketDir()
|
||||
require.NoError(t, err)
|
||||
defer os.RemoveAll(socketDir)
|
||||
m, p := setup(t, []*pluginapi.Device{}, func(n string, a, u, r []pluginapi.Device) {}, socketName, pluginSocketName)
|
||||
cleanup(t, m, p)
|
||||
m, _, p := setup(t, []*pluginapi.Device{}, func(n string, d []pluginapi.Device) {}, socketName, pluginSocketName)
|
||||
cleanup(t, m, p, nil)
|
||||
// Stop should tolerate being called more than once.
|
||||
cleanup(t, m, p)
|
||||
cleanup(t, m, p, nil)
|
||||
}
|
||||
|
||||
func TestNewManagerImplStop(t *testing.T) {
|
||||
func TestNewManagerImplStartProbeMode(t *testing.T) {
|
||||
socketDir, socketName, pluginSocketName, err := tmpSocketDir()
|
||||
require.NoError(t, err)
|
||||
defer os.RemoveAll(socketDir)
|
||||
|
||||
m, err := newManagerImpl(socketName)
|
||||
require.NoError(t, err)
|
||||
// No prior Start, but that should be okay.
|
||||
err = m.Stop()
|
||||
require.NoError(t, err)
|
||||
|
||||
devs := []*pluginapi.Device{
|
||||
{ID: "Dev1", Health: pluginapi.Healthy},
|
||||
{ID: "Dev2", Health: pluginapi.Healthy},
|
||||
}
|
||||
p := NewDevicePluginStub(devs, pluginSocketName)
|
||||
// Same here.
|
||||
err = p.Stop()
|
||||
require.NoError(t, err)
|
||||
m, _, p, w := setupInProbeMode(t, []*pluginapi.Device{}, func(n string, d []pluginapi.Device) {}, socketName, pluginSocketName)
|
||||
cleanup(t, m, p, w)
|
||||
}
|
||||
|
||||
// Tests that the device plugin manager correctly handles registration and re-registration by
|
||||
@ -107,91 +96,184 @@ func TestDevicePluginReRegistration(t *testing.T) {
|
||||
{ID: "Dev3", Health: pluginapi.Healthy},
|
||||
}
|
||||
for _, preStartContainerFlag := range []bool{false, true} {
|
||||
|
||||
expCallbackCount := int32(0)
|
||||
callbackCount := int32(0)
|
||||
callbackChan := make(chan int32)
|
||||
callback := func(n string, a, u, r []pluginapi.Device) {
|
||||
callbackCount++
|
||||
if callbackCount > atomic.LoadInt32(&expCallbackCount) {
|
||||
t.FailNow()
|
||||
}
|
||||
callbackChan <- callbackCount
|
||||
}
|
||||
m, p1 := setup(t, devs, callback, socketName, pluginSocketName)
|
||||
atomic.StoreInt32(&expCallbackCount, 1)
|
||||
p1.Register(socketName, testResourceName, preStartContainerFlag)
|
||||
// Wait for the first callback to be issued.
|
||||
m, ch, p1 := setup(t, devs, nil, socketName, pluginSocketName)
|
||||
p1.Register(socketName, testResourceName, "")
|
||||
|
||||
select {
|
||||
case <-callbackChan:
|
||||
break
|
||||
case <-time.After(time.Second):
|
||||
t.FailNow()
|
||||
case <-ch:
|
||||
case <-time.After(5 * time.Second):
|
||||
t.Fatalf("timeout while waiting for manager update")
|
||||
}
|
||||
devices := m.Devices()
|
||||
require.Equal(t, 2, len(devices[testResourceName]), "Devices are not updated.")
|
||||
capacity, allocatable, _ := m.GetCapacity()
|
||||
resourceCapacity, _ := capacity[v1.ResourceName(testResourceName)]
|
||||
resourceAllocatable, _ := allocatable[v1.ResourceName(testResourceName)]
|
||||
require.Equal(t, resourceCapacity.Value(), resourceAllocatable.Value(), "capacity should equal to allocatable")
|
||||
require.Equal(t, int64(2), resourceAllocatable.Value(), "Devices are not updated.")
|
||||
|
||||
p2 := NewDevicePluginStub(devs, pluginSocketName+".new")
|
||||
p2 := NewDevicePluginStub(devs, pluginSocketName+".new", testResourceName, preStartContainerFlag)
|
||||
err = p2.Start()
|
||||
require.NoError(t, err)
|
||||
atomic.StoreInt32(&expCallbackCount, 2)
|
||||
p2.Register(socketName, testResourceName, preStartContainerFlag)
|
||||
// Wait for the second callback to be issued.
|
||||
select {
|
||||
case <-callbackChan:
|
||||
break
|
||||
case <-time.After(time.Second):
|
||||
t.FailNow()
|
||||
}
|
||||
p2.Register(socketName, testResourceName, "")
|
||||
|
||||
devices2 := m.Devices()
|
||||
require.Equal(t, 2, len(devices2[testResourceName]), "Devices shouldn't change.")
|
||||
select {
|
||||
case <-ch:
|
||||
case <-time.After(5 * time.Second):
|
||||
t.Fatalf("timeout while waiting for manager update")
|
||||
}
|
||||
capacity, allocatable, _ = m.GetCapacity()
|
||||
resourceCapacity, _ = capacity[v1.ResourceName(testResourceName)]
|
||||
resourceAllocatable, _ = allocatable[v1.ResourceName(testResourceName)]
|
||||
require.Equal(t, resourceCapacity.Value(), resourceAllocatable.Value(), "capacity should equal to allocatable")
|
||||
require.Equal(t, int64(2), resourceAllocatable.Value(), "Devices shouldn't change.")
|
||||
|
||||
// Test the scenario that a plugin re-registers with different devices.
|
||||
p3 := NewDevicePluginStub(devsForRegistration, pluginSocketName+".third")
|
||||
p3 := NewDevicePluginStub(devsForRegistration, pluginSocketName+".third", testResourceName, preStartContainerFlag)
|
||||
err = p3.Start()
|
||||
require.NoError(t, err)
|
||||
atomic.StoreInt32(&expCallbackCount, 3)
|
||||
p3.Register(socketName, testResourceName, preStartContainerFlag)
|
||||
// Wait for the second callback to be issued.
|
||||
p3.Register(socketName, testResourceName, "")
|
||||
|
||||
select {
|
||||
case <-callbackChan:
|
||||
break
|
||||
case <-time.After(time.Second):
|
||||
t.FailNow()
|
||||
case <-ch:
|
||||
case <-time.After(5 * time.Second):
|
||||
t.Fatalf("timeout while waiting for manager update")
|
||||
}
|
||||
devices3 := m.Devices()
|
||||
require.Equal(t, 1, len(devices3[testResourceName]), "Devices of plugin previously registered should be removed.")
|
||||
capacity, allocatable, _ = m.GetCapacity()
|
||||
resourceCapacity, _ = capacity[v1.ResourceName(testResourceName)]
|
||||
resourceAllocatable, _ = allocatable[v1.ResourceName(testResourceName)]
|
||||
require.Equal(t, resourceCapacity.Value(), resourceAllocatable.Value(), "capacity should equal to allocatable")
|
||||
require.Equal(t, int64(1), resourceAllocatable.Value(), "Devices of plugin previously registered should be removed.")
|
||||
p2.Stop()
|
||||
p3.Stop()
|
||||
cleanup(t, m, p1)
|
||||
close(callbackChan)
|
||||
cleanup(t, m, p1, nil)
|
||||
}
|
||||
}
|
||||
|
||||
func setup(t *testing.T, devs []*pluginapi.Device, callback monitorCallback, socketName string, pluginSocketName string) (Manager, *Stub) {
// Tests that the device plugin manager correctly handles registration and re-registration by
// making sure that after registration, devices are correctly updated and if a re-registration
// happens, we will NOT delete devices; and no orphaned devices left.
// While testing above scenario, plugin discovery and registration will be done using
// Kubelet probe based mechanism
func TestDevicePluginReRegistrationProbeMode(t *testing.T) {
socketDir, socketName, pluginSocketName, err := tmpSocketDir()
require.NoError(t, err)
defer os.RemoveAll(socketDir)
devs := []*pluginapi.Device{
{ID: "Dev1", Health: pluginapi.Healthy},
{ID: "Dev2", Health: pluginapi.Healthy},
}
devsForRegistration := []*pluginapi.Device{
{ID: "Dev3", Health: pluginapi.Healthy},
}

m, ch, p1, w := setupInProbeMode(t, devs, nil, socketName, pluginSocketName)

// Wait for the first callback to be issued.
select {
case <-ch:
case <-time.After(5 * time.Second):
t.FailNow()
}
capacity, allocatable, _ := m.GetCapacity()
resourceCapacity, _ := capacity[v1.ResourceName(testResourceName)]
resourceAllocatable, _ := allocatable[v1.ResourceName(testResourceName)]
require.Equal(t, resourceCapacity.Value(), resourceAllocatable.Value(), "capacity should equal to allocatable")
require.Equal(t, int64(2), resourceAllocatable.Value(), "Devices are not updated.")

p2 := NewDevicePluginStub(devs, pluginSocketName+".new", testResourceName, false)
err = p2.Start()
require.NoError(t, err)
// Wait for the second callback to be issued.
select {
case <-ch:
case <-time.After(5 * time.Second):
t.FailNow()
}

capacity, allocatable, _ = m.GetCapacity()
resourceCapacity, _ = capacity[v1.ResourceName(testResourceName)]
resourceAllocatable, _ = allocatable[v1.ResourceName(testResourceName)]
require.Equal(t, resourceCapacity.Value(), resourceAllocatable.Value(), "capacity should equal to allocatable")
require.Equal(t, int64(2), resourceAllocatable.Value(), "Devices are not updated.")

// Test the scenario that a plugin re-registers with different devices.
p3 := NewDevicePluginStub(devsForRegistration, pluginSocketName+".third", testResourceName, false)
err = p3.Start()
require.NoError(t, err)
// Wait for the third callback to be issued.
select {
case <-ch:
case <-time.After(5 * time.Second):
t.FailNow()
}

capacity, allocatable, _ = m.GetCapacity()
resourceCapacity, _ = capacity[v1.ResourceName(testResourceName)]
resourceAllocatable, _ = allocatable[v1.ResourceName(testResourceName)]
require.Equal(t, resourceCapacity.Value(), resourceAllocatable.Value(), "capacity should equal to allocatable")
require.Equal(t, int64(1), resourceAllocatable.Value(), "Devices of previous registered should be removed")
p2.Stop()
p3.Stop()
cleanup(t, m, p1, w)
}

func setupDeviceManager(t *testing.T, devs []*pluginapi.Device, callback monitorCallback, socketName string) (Manager, <-chan interface{}) {
m, err := newManagerImpl(socketName)
require.NoError(t, err)
updateChan := make(chan interface{})

m.callback = callback
if callback != nil {
m.callback = callback
}

originalCallback := m.callback
m.callback = func(resourceName string, devices []pluginapi.Device) {
originalCallback(resourceName, devices)
updateChan <- new(interface{})
}
activePods := func() []*v1.Pod {
return []*v1.Pod{}
}

err = m.Start(activePods, &sourcesReadyStub{})
require.NoError(t, err)

p := NewDevicePluginStub(devs, pluginSocketName)
err = p.Start()
require.NoError(t, err)

return m, p
return m, updateChan
}

func cleanup(t *testing.T, m Manager, p *Stub) {
func setupDevicePlugin(t *testing.T, devs []*pluginapi.Device, pluginSocketName string) *Stub {
p := NewDevicePluginStub(devs, pluginSocketName, testResourceName, false)
err := p.Start()
require.NoError(t, err)
return p
}

func setupPluginWatcher(pluginSocketName string, m Manager) *pluginwatcher.Watcher {
w := pluginwatcher.NewWatcher(filepath.Dir(pluginSocketName), "" /* deprecatedSockDir */)
w.AddHandler(watcherapi.DevicePlugin, m.GetWatcherHandler())
w.Start()

return w
}

func setup(t *testing.T, devs []*pluginapi.Device, callback monitorCallback, socketName string, pluginSocketName string) (Manager, <-chan interface{}, *Stub) {
m, updateChan := setupDeviceManager(t, devs, callback, socketName)
p := setupDevicePlugin(t, devs, pluginSocketName)
return m, updateChan, p
}

func setupInProbeMode(t *testing.T, devs []*pluginapi.Device, callback monitorCallback, socketName string, pluginSocketName string) (Manager, <-chan interface{}, *Stub, *pluginwatcher.Watcher) {
m, updateChan := setupDeviceManager(t, devs, callback, socketName)
w := setupPluginWatcher(pluginSocketName, m)
p := setupDevicePlugin(t, devs, pluginSocketName)
return m, updateChan, p, w
}

func cleanup(t *testing.T, m Manager, p *Stub, w *pluginwatcher.Watcher) {
p.Stop()
m.Stop()
if w != nil {
require.NoError(t, w.Stop())
}
}

func TestUpdateCapacityAllocatable(t *testing.T) {
|
||||
@ -213,9 +295,9 @@ func TestUpdateCapacityAllocatable(t *testing.T) {
|
||||
// Adds three devices for resource1, two healthy and one unhealthy.
|
||||
// Expects capacity for resource1 to be 2.
|
||||
resourceName1 := "domain1.com/resource1"
|
||||
e1 := &endpointImpl{devices: make(map[string]pluginapi.Device)}
|
||||
testManager.endpoints[resourceName1] = e1
|
||||
callback(resourceName1, devs, []pluginapi.Device{}, []pluginapi.Device{})
|
||||
e1 := &endpointImpl{}
|
||||
testManager.endpoints[resourceName1] = endpointInfo{e: e1, opts: nil}
|
||||
callback(resourceName1, devs)
|
||||
capacity, allocatable, removedResources := testManager.GetCapacity()
|
||||
resource1Capacity, ok := capacity[v1.ResourceName(resourceName1)]
|
||||
as.True(ok)
|
||||
@ -226,7 +308,8 @@ func TestUpdateCapacityAllocatable(t *testing.T) {
|
||||
as.Equal(0, len(removedResources))
|
||||
|
||||
// Deletes an unhealthy device should NOT change allocatable but change capacity.
|
||||
callback(resourceName1, []pluginapi.Device{}, []pluginapi.Device{}, []pluginapi.Device{devs[2]})
|
||||
devs1 := devs[:len(devs)-1]
|
||||
callback(resourceName1, devs1)
|
||||
capacity, allocatable, removedResources = testManager.GetCapacity()
|
||||
resource1Capacity, ok = capacity[v1.ResourceName(resourceName1)]
|
||||
as.True(ok)
|
||||
@ -237,34 +320,34 @@ func TestUpdateCapacityAllocatable(t *testing.T) {
|
||||
as.Equal(0, len(removedResources))
|
||||
|
||||
// Updates a healthy device to unhealthy should reduce allocatable by 1.
|
||||
dev2 := devs[1]
|
||||
dev2.Health = pluginapi.Unhealthy
|
||||
callback(resourceName1, []pluginapi.Device{}, []pluginapi.Device{dev2}, []pluginapi.Device{})
|
||||
devs[1].Health = pluginapi.Unhealthy
|
||||
callback(resourceName1, devs)
|
||||
capacity, allocatable, removedResources = testManager.GetCapacity()
|
||||
resource1Capacity, ok = capacity[v1.ResourceName(resourceName1)]
|
||||
as.True(ok)
|
||||
resource1Allocatable, ok = allocatable[v1.ResourceName(resourceName1)]
|
||||
as.True(ok)
|
||||
as.Equal(int64(2), resource1Capacity.Value())
|
||||
as.Equal(int64(3), resource1Capacity.Value())
|
||||
as.Equal(int64(1), resource1Allocatable.Value())
|
||||
as.Equal(0, len(removedResources))
|
||||
|
||||
// Deletes a healthy device should reduce capacity and allocatable by 1.
|
||||
callback(resourceName1, []pluginapi.Device{}, []pluginapi.Device{}, []pluginapi.Device{devs[0]})
|
||||
devs2 := devs[1:]
|
||||
callback(resourceName1, devs2)
|
||||
capacity, allocatable, removedResources = testManager.GetCapacity()
|
||||
resource1Capacity, ok = capacity[v1.ResourceName(resourceName1)]
|
||||
as.True(ok)
|
||||
resource1Allocatable, ok = allocatable[v1.ResourceName(resourceName1)]
|
||||
as.True(ok)
|
||||
as.Equal(int64(0), resource1Allocatable.Value())
|
||||
as.Equal(int64(1), resource1Capacity.Value())
|
||||
as.Equal(int64(2), resource1Capacity.Value())
|
||||
as.Equal(0, len(removedResources))
|
||||
|
||||
// Tests adding another resource.
|
||||
resourceName2 := "resource2"
|
||||
e2 := &endpointImpl{devices: make(map[string]pluginapi.Device)}
|
||||
testManager.endpoints[resourceName2] = e2
|
||||
callback(resourceName2, devs, []pluginapi.Device{}, []pluginapi.Device{})
|
||||
e2 := &endpointImpl{}
|
||||
testManager.endpoints[resourceName2] = endpointInfo{e: e2, opts: nil}
|
||||
callback(resourceName2, devs)
|
||||
capacity, allocatable, removedResources = testManager.GetCapacity()
|
||||
as.Equal(2, len(capacity))
|
||||
resource2Capacity, ok := capacity[v1.ResourceName(resourceName2)]
|
||||
@ -272,7 +355,7 @@ func TestUpdateCapacityAllocatable(t *testing.T) {
|
||||
resource2Allocatable, ok := allocatable[v1.ResourceName(resourceName2)]
|
||||
as.True(ok)
|
||||
as.Equal(int64(3), resource2Capacity.Value())
|
||||
as.Equal(int64(2), resource2Allocatable.Value())
|
||||
as.Equal(int64(1), resource2Allocatable.Value())
|
||||
as.Equal(0, len(removedResources))
|
||||
|
||||
// Expires resourceName1 endpoint. Verifies testManager.GetCapacity() reports that resourceName1
|
||||
@ -374,7 +457,7 @@ func TestCheckpoint(t *testing.T) {
|
||||
ckm, err := checkpointmanager.NewCheckpointManager(tmpDir)
|
||||
as.Nil(err)
|
||||
testManager := &ManagerImpl{
|
||||
endpoints: make(map[string]endpoint),
|
||||
endpoints: make(map[string]endpointInfo),
|
||||
healthyDevices: make(map[string]sets.String),
|
||||
unhealthyDevices: make(map[string]sets.String),
|
||||
allocatedDevices: make(map[string]sets.String),
|
||||
@ -460,11 +543,7 @@ type MockEndpoint struct {
|
||||
func (m *MockEndpoint) stop() {}
|
||||
func (m *MockEndpoint) run() {}
|
||||
|
||||
func (m *MockEndpoint) getDevices() []pluginapi.Device {
|
||||
return []pluginapi.Device{}
|
||||
}
|
||||
|
||||
func (m *MockEndpoint) callback(resourceName string, added, updated, deleted []pluginapi.Device) {}
|
||||
func (m *MockEndpoint) callback(resourceName string, devices []pluginapi.Device) {}
|
||||
|
||||
func (m *MockEndpoint) preStartContainer(devs []string) (*pluginapi.PreStartContainerResponse, error) {
|
||||
m.initChan <- devs
|
||||
@ -499,8 +578,8 @@ func makePod(limits v1.ResourceList) *v1.Pod {
|
||||
}
|
||||
}
|
||||
|
||||
func getTestManager(tmpDir string, activePods ActivePodsFunc, testRes []TestResource, opts map[string]*pluginapi.DevicePluginOptions) (*ManagerImpl, error) {
|
||||
monitorCallback := func(resourceName string, added, updated, deleted []pluginapi.Device) {}
|
||||
func getTestManager(tmpDir string, activePods ActivePodsFunc, testRes []TestResource) (*ManagerImpl, error) {
|
||||
monitorCallback := func(resourceName string, devices []pluginapi.Device) {}
|
||||
ckm, err := checkpointmanager.NewCheckpointManager(tmpDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -511,41 +590,45 @@ func getTestManager(tmpDir string, activePods ActivePodsFunc, testRes []TestReso
|
||||
healthyDevices: make(map[string]sets.String),
|
||||
unhealthyDevices: make(map[string]sets.String),
|
||||
allocatedDevices: make(map[string]sets.String),
|
||||
endpoints: make(map[string]endpoint),
|
||||
pluginOpts: opts,
|
||||
endpoints: make(map[string]endpointInfo),
|
||||
podDevices: make(podDevices),
|
||||
activePods: activePods,
|
||||
sourcesReady: &sourcesReadyStub{},
|
||||
checkpointManager: ckm,
|
||||
}
|
||||
|
||||
for _, res := range testRes {
|
||||
testManager.healthyDevices[res.resourceName] = sets.NewString()
|
||||
for _, dev := range res.devs {
|
||||
testManager.healthyDevices[res.resourceName].Insert(dev)
|
||||
}
|
||||
if res.resourceName == "domain1.com/resource1" {
|
||||
testManager.endpoints[res.resourceName] = &MockEndpoint{
|
||||
allocateFunc: allocateStubFunc(),
|
||||
testManager.endpoints[res.resourceName] = endpointInfo{
|
||||
e: &MockEndpoint{allocateFunc: allocateStubFunc()},
|
||||
opts: nil,
|
||||
}
|
||||
}
|
||||
if res.resourceName == "domain2.com/resource2" {
|
||||
testManager.endpoints[res.resourceName] = &MockEndpoint{
|
||||
allocateFunc: func(devs []string) (*pluginapi.AllocateResponse, error) {
|
||||
resp := new(pluginapi.ContainerAllocateResponse)
|
||||
resp.Envs = make(map[string]string)
|
||||
for _, dev := range devs {
|
||||
switch dev {
|
||||
case "dev3":
|
||||
resp.Envs["key2"] = "val2"
|
||||
testManager.endpoints[res.resourceName] = endpointInfo{
|
||||
e: &MockEndpoint{
|
||||
allocateFunc: func(devs []string) (*pluginapi.AllocateResponse, error) {
|
||||
resp := new(pluginapi.ContainerAllocateResponse)
|
||||
resp.Envs = make(map[string]string)
|
||||
for _, dev := range devs {
|
||||
switch dev {
|
||||
case "dev3":
|
||||
resp.Envs["key2"] = "val2"
|
||||
|
||||
case "dev4":
|
||||
resp.Envs["key2"] = "val3"
|
||||
case "dev4":
|
||||
resp.Envs["key2"] = "val3"
|
||||
}
|
||||
}
|
||||
}
|
||||
resps := new(pluginapi.AllocateResponse)
|
||||
resps.ContainerResponses = append(resps.ContainerResponses, resp)
|
||||
return resps, nil
|
||||
resps := new(pluginapi.AllocateResponse)
|
||||
resps.ContainerResponses = append(resps.ContainerResponses, resp)
|
||||
return resps, nil
|
||||
},
|
||||
},
|
||||
opts: nil,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -591,8 +674,7 @@ func TestPodContainerDeviceAllocation(t *testing.T) {
|
||||
as.Nil(err)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
nodeInfo := getTestNodeInfo(v1.ResourceList{})
|
||||
pluginOpts := make(map[string]*pluginapi.DevicePluginOptions)
|
||||
testManager, err := getTestManager(tmpDir, podsStub.getActivePods, testResources, pluginOpts)
|
||||
testManager, err := getTestManager(tmpDir, podsStub.getActivePods, testResources)
|
||||
as.Nil(err)
|
||||
|
||||
testPods := []*v1.Pod{
|
||||
@ -619,7 +701,7 @@ func TestPodContainerDeviceAllocation(t *testing.T) {
|
||||
expectedContainerOptsLen: []int{3, 2, 2},
|
||||
expectedAllocatedResName1: 2,
|
||||
expectedAllocatedResName2: 1,
|
||||
expErr: nil,
|
||||
expErr: nil,
|
||||
},
|
||||
{
|
||||
description: "Requesting to create a pod without enough resources should fail",
|
||||
@ -627,7 +709,7 @@ func TestPodContainerDeviceAllocation(t *testing.T) {
|
||||
expectedContainerOptsLen: nil,
|
||||
expectedAllocatedResName1: 2,
|
||||
expectedAllocatedResName2: 1,
|
||||
expErr: fmt.Errorf("requested number of devices unavailable for domain1.com/resource1. Requested: 1, Available: 0"),
|
||||
expErr: fmt.Errorf("requested number of devices unavailable for domain1.com/resource1. Requested: 1, Available: 0"),
|
||||
},
|
||||
{
|
||||
description: "Successful allocation of all available Res1 resources and Res2 resources",
|
||||
@ -635,7 +717,7 @@ func TestPodContainerDeviceAllocation(t *testing.T) {
|
||||
expectedContainerOptsLen: []int{0, 0, 1},
|
||||
expectedAllocatedResName1: 2,
|
||||
expectedAllocatedResName2: 2,
|
||||
expErr: nil,
|
||||
expErr: nil,
|
||||
},
|
||||
}
|
||||
activePods := []*v1.Pod{}
|
||||
@ -687,8 +769,8 @@ func TestInitContainerDeviceAllocation(t *testing.T) {
|
||||
tmpDir, err := ioutil.TempDir("", "checkpoint")
|
||||
as.Nil(err)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
pluginOpts := make(map[string]*pluginapi.DevicePluginOptions)
|
||||
testManager, err := getTestManager(tmpDir, podsStub.getActivePods, testResources, pluginOpts)
|
||||
|
||||
testManager, err := getTestManager(tmpDir, podsStub.getActivePods, testResources)
|
||||
as.Nil(err)
|
||||
|
||||
podWithPluginResourcesInInitContainers := &v1.Pod{
|
||||
@ -766,7 +848,7 @@ func TestSanitizeNodeAllocatable(t *testing.T) {
|
||||
devID2 := "dev2"
|
||||
|
||||
as := assert.New(t)
|
||||
monitorCallback := func(resourceName string, added, updated, deleted []pluginapi.Device) {}
|
||||
monitorCallback := func(resourceName string, devices []pluginapi.Device) {}
|
||||
tmpDir, err := ioutil.TempDir("", "checkpoint")
|
||||
as.Nil(err)
|
||||
|
||||
@ -822,18 +904,18 @@ func TestDevicePreStartContainer(t *testing.T) {
|
||||
as.Nil(err)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
nodeInfo := getTestNodeInfo(v1.ResourceList{})
|
||||
pluginOpts := make(map[string]*pluginapi.DevicePluginOptions)
|
||||
pluginOpts[res1.resourceName] = &pluginapi.DevicePluginOptions{PreStartRequired: true}
|
||||
|
||||
testManager, err := getTestManager(tmpDir, podsStub.getActivePods, []TestResource{res1}, pluginOpts)
|
||||
testManager, err := getTestManager(tmpDir, podsStub.getActivePods, []TestResource{res1})
|
||||
as.Nil(err)
|
||||
|
||||
ch := make(chan []string, 1)
|
||||
testManager.endpoints[res1.resourceName] = &MockEndpoint{
|
||||
initChan: ch,
|
||||
allocateFunc: allocateStubFunc(),
|
||||
testManager.endpoints[res1.resourceName] = endpointInfo{
|
||||
e: &MockEndpoint{
|
||||
initChan: ch,
|
||||
allocateFunc: allocateStubFunc(),
|
||||
},
|
||||
opts: &pluginapi.DevicePluginOptions{PreStartRequired: true},
|
||||
}
|
||||
|
||||
pod := makePod(v1.ResourceList{
|
||||
v1.ResourceName(res1.resourceName): res1.resourceQuantity})
|
||||
activePods := []*v1.Pod{}
|
||||
|
53 vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/pod_devices.go generated vendored
@ -17,10 +17,11 @@ limitations under the License.
|
||||
package devicemanager
|
||||
|
||||
import (
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/klog"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1"
|
||||
podresourcesapi "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/checkpoint"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
)
|
||||
@ -135,13 +136,13 @@ func (pdev podDevices) toCheckpointData() []checkpoint.PodDevicesEntry {
|
||||
for resource, devices := range resources {
|
||||
devIds := devices.deviceIds.UnsortedList()
|
||||
if devices.allocResp == nil {
|
||||
glog.Errorf("Can't marshal allocResp for %v %v %v: allocation response is missing", podUID, conName, resource)
|
||||
klog.Errorf("Can't marshal allocResp for %v %v %v: allocation response is missing", podUID, conName, resource)
|
||||
continue
|
||||
}
|
||||
|
||||
allocResp, err := devices.allocResp.Marshal()
|
||||
if err != nil {
|
||||
glog.Errorf("Can't marshal allocResp for %v %v %v: %v", podUID, conName, resource, err)
|
||||
klog.Errorf("Can't marshal allocResp for %v %v %v: %v", podUID, conName, resource, err)
|
||||
continue
|
||||
}
|
||||
data = append(data, checkpoint.PodDevicesEntry{
|
||||
@ -159,7 +160,7 @@ func (pdev podDevices) toCheckpointData() []checkpoint.PodDevicesEntry {
|
||||
// Populates podDevices from the passed in checkpointData.
|
||||
func (pdev podDevices) fromCheckpointData(data []checkpoint.PodDevicesEntry) {
|
||||
for _, entry := range data {
|
||||
glog.V(2).Infof("Get checkpoint entry: %v %v %v %v %v\n",
|
||||
klog.V(2).Infof("Get checkpoint entry: %v %v %v %v %v\n",
|
||||
entry.PodUID, entry.ContainerName, entry.ResourceName, entry.DeviceIDs, entry.AllocResp)
|
||||
devIDs := sets.NewString()
|
||||
for _, devID := range entry.DeviceIDs {
|
||||
@ -168,7 +169,7 @@ func (pdev podDevices) fromCheckpointData(data []checkpoint.PodDevicesEntry) {
|
||||
allocResp := &pluginapi.ContainerAllocateResponse{}
|
||||
err := allocResp.Unmarshal(entry.AllocResp)
|
||||
if err != nil {
|
||||
glog.Errorf("Can't unmarshal allocResp for %v %v %v: %v", entry.PodUID, entry.ContainerName, entry.ResourceName, err)
|
||||
klog.Errorf("Can't unmarshal allocResp for %v %v %v: %v", entry.PodUID, entry.ContainerName, entry.ResourceName, err)
|
||||
continue
|
||||
}
|
||||
pdev.insert(entry.PodUID, entry.ContainerName, entry.ResourceName, devIDs, allocResp)
|
||||
@ -203,13 +204,13 @@ func (pdev podDevices) deviceRunContainerOptions(podUID, contName string) *Devic
|
||||
// Updates RunContainerOptions.Envs.
|
||||
for k, v := range resp.Envs {
|
||||
if e, ok := envsMap[k]; ok {
|
||||
glog.V(4).Infof("Skip existing env %s %s", k, v)
|
||||
klog.V(4).Infof("Skip existing env %s %s", k, v)
|
||||
if e != v {
|
||||
glog.Errorf("Environment variable %s has conflicting setting: %s and %s", k, e, v)
|
||||
klog.Errorf("Environment variable %s has conflicting setting: %s and %s", k, e, v)
|
||||
}
|
||||
continue
|
||||
}
|
||||
glog.V(4).Infof("Add env %s %s", k, v)
|
||||
klog.V(4).Infof("Add env %s %s", k, v)
|
||||
envsMap[k] = v
|
||||
opts.Envs = append(opts.Envs, kubecontainer.EnvVar{Name: k, Value: v})
|
||||
}
|
||||
@ -217,14 +218,14 @@ func (pdev podDevices) deviceRunContainerOptions(podUID, contName string) *Devic
|
||||
// Updates RunContainerOptions.Devices.
|
||||
for _, dev := range resp.Devices {
|
||||
if d, ok := devsMap[dev.ContainerPath]; ok {
|
||||
glog.V(4).Infof("Skip existing device %s %s", dev.ContainerPath, dev.HostPath)
|
||||
klog.V(4).Infof("Skip existing device %s %s", dev.ContainerPath, dev.HostPath)
|
||||
if d != dev.HostPath {
|
||||
glog.Errorf("Container device %s has conflicting mapping host devices: %s and %s",
|
||||
klog.Errorf("Container device %s has conflicting mapping host devices: %s and %s",
|
||||
dev.ContainerPath, d, dev.HostPath)
|
||||
}
|
||||
continue
|
||||
}
|
||||
glog.V(4).Infof("Add device %s %s", dev.ContainerPath, dev.HostPath)
|
||||
klog.V(4).Infof("Add device %s %s", dev.ContainerPath, dev.HostPath)
|
||||
devsMap[dev.ContainerPath] = dev.HostPath
|
||||
opts.Devices = append(opts.Devices, kubecontainer.DeviceInfo{
|
||||
PathOnHost: dev.HostPath,
|
||||
@ -236,14 +237,14 @@ func (pdev podDevices) deviceRunContainerOptions(podUID, contName string) *Devic
|
||||
// Updates RunContainerOptions.Mounts.
|
||||
for _, mount := range resp.Mounts {
|
||||
if m, ok := mountsMap[mount.ContainerPath]; ok {
|
||||
glog.V(4).Infof("Skip existing mount %s %s", mount.ContainerPath, mount.HostPath)
|
||||
klog.V(4).Infof("Skip existing mount %s %s", mount.ContainerPath, mount.HostPath)
|
||||
if m != mount.HostPath {
|
||||
glog.Errorf("Container mount %s has conflicting mapping host mounts: %s and %s",
|
||||
klog.Errorf("Container mount %s has conflicting mapping host mounts: %s and %s",
|
||||
mount.ContainerPath, m, mount.HostPath)
|
||||
}
|
||||
continue
|
||||
}
|
||||
glog.V(4).Infof("Add mount %s %s", mount.ContainerPath, mount.HostPath)
|
||||
klog.V(4).Infof("Add mount %s %s", mount.ContainerPath, mount.HostPath)
|
||||
mountsMap[mount.ContainerPath] = mount.HostPath
|
||||
opts.Mounts = append(opts.Mounts, kubecontainer.Mount{
|
||||
Name: mount.ContainerPath,
|
||||
@ -258,16 +259,34 @@ func (pdev podDevices) deviceRunContainerOptions(podUID, contName string) *Devic
|
||||
// Updates for Annotations
|
||||
for k, v := range resp.Annotations {
|
||||
if e, ok := annotationsMap[k]; ok {
|
||||
glog.V(4).Infof("Skip existing annotation %s %s", k, v)
|
||||
klog.V(4).Infof("Skip existing annotation %s %s", k, v)
|
||||
if e != v {
|
||||
glog.Errorf("Annotation %s has conflicting setting: %s and %s", k, e, v)
|
||||
klog.Errorf("Annotation %s has conflicting setting: %s and %s", k, e, v)
|
||||
}
|
||||
continue
|
||||
}
|
||||
glog.V(4).Infof("Add annotation %s %s", k, v)
|
||||
klog.V(4).Infof("Add annotation %s %s", k, v)
|
||||
annotationsMap[k] = v
|
||||
opts.Annotations = append(opts.Annotations, kubecontainer.Annotation{Name: k, Value: v})
|
||||
}
|
||||
}
|
||||
return opts
|
||||
}
|
||||
|
||||
// getContainerDevices returns the devices assigned to the provided container for all ResourceNames
func (pdev podDevices) getContainerDevices(podUID, contName string) []*podresourcesapi.ContainerDevices {
if _, podExists := pdev[podUID]; !podExists {
return nil
}
if _, contExists := pdev[podUID][contName]; !contExists {
return nil
}
cDev := []*podresourcesapi.ContainerDevices{}
for resource, allocateInfo := range pdev[podUID][contName] {
cDev = append(cDev, &podresourcesapi.ContainerDevices{
ResourceName: resource,
DeviceIds: allocateInfo.deviceIds.UnsortedList(),
})
}
return cDev
}
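A small usage sketch (not part of the vendored change) showing the shape of the data the new getContainerDevices helper returns; the resource name and device IDs below are made up for the example.

package main

import (
	"fmt"

	podresourcesapi "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1"
)

func main() {
	// getContainerDevices returns one ContainerDevices entry per resource name
	// assigned to the container; this literal stands in for its output.
	devices := []*podresourcesapi.ContainerDevices{
		{ResourceName: "vendor.example/gpu", DeviceIds: []string{"gpu-0", "gpu-1"}},
	}
	for _, d := range devices {
		fmt.Printf("%s -> %v\n", d.ResourceName, d.DeviceIds)
	}
}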
|
||||
|
22 vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/types.go generated vendored
@ -20,10 +20,11 @@ import (
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1"
|
||||
podresourcesapi "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1"
|
||||
"k8s.io/kubernetes/pkg/kubelet/config"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
|
||||
watcher "k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher"
|
||||
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
|
||||
)
|
||||
|
||||
@ -32,11 +33,6 @@ type Manager interface {
|
||||
// Start starts device plugin registration service.
|
||||
Start(activePods ActivePodsFunc, sourcesReady config.SourcesReady) error
|
||||
|
||||
// Devices is the map of devices that have registered themselves
|
||||
// against the manager.
|
||||
// The map key is the ResourceName of the device plugins.
|
||||
Devices() map[string][]pluginapi.Device
|
||||
|
||||
// Allocate configures and assigns devices to pods. The pods are provided
|
||||
// through the pod admission attributes in the attrs argument. From the
|
||||
// requested device resources, Allocate will communicate with the owning
|
||||
@ -58,6 +54,10 @@ type Manager interface {
|
||||
// GetCapacity returns the amount of available device plugin resource capacity, resource allocatable
|
||||
// and inactive device plugin resources previously registered on the node.
|
||||
GetCapacity() (v1.ResourceList, v1.ResourceList, []string)
|
||||
GetWatcherHandler() watcher.PluginHandler
|
||||
|
||||
// GetDevices returns information about the devices assigned to pods and containers
|
||||
GetDevices(podUID, containerName string) []*podresourcesapi.ContainerDevices
|
||||
}
|
||||
|
||||
// DeviceRunContainerOptions contains the combined container runtime settings to consume its allocated devices.
|
||||
@ -79,22 +79,14 @@ const (
|
||||
errFailedToDialDevicePlugin = "failed to dial device plugin:"
|
||||
// errUnsupportedVersion is the error raised when the device plugin uses an API version not
|
||||
// supported by the Kubelet registry
|
||||
errUnsupportedVersion = "requested API version %q is not supported by kubelet. Supported versions are %q"
|
||||
// errDevicePluginAlreadyExists is the error raised when a device plugin with the
|
||||
// same Resource Name tries to register itself
|
||||
errDevicePluginAlreadyExists = "another device plugin already registered this Resource Name"
|
||||
errUnsupportedVersion = "requested API version %q is not supported by kubelet. Supported version is %q"
|
||||
// errInvalidResourceName is the error raised when a device plugin is registering
|
||||
// itself with an invalid ResourceName
|
||||
errInvalidResourceName = "the ResourceName %q is invalid"
|
||||
// errEmptyResourceName is the error raised when the resource name field is empty
|
||||
errEmptyResourceName = "invalid Empty ResourceName"
|
||||
// errEndpointStopped indicates that the endpoint has been stopped
|
||||
errEndpointStopped = "endpoint %v has been stopped"
|
||||
|
||||
// errBadSocket is the error raised when the registry socket path is not absolute
|
||||
errBadSocket = "bad socketPath, must be an absolute path:"
|
||||
// errRemoveSocket is the error raised when the registry could not remove the existing socket
|
||||
errRemoveSocket = "failed to remove socket while starting device plugin registry, with error"
|
||||
// errListenSocket is the error raised when the registry could not listen on the socket
|
||||
errListenSocket = "failed to listen to socket while starting device plugin registry, with error"
|
||||
// errListAndWatch is the error raised when ListAndWatch ended unsuccessfully
|
||||
|
46 vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers.go generated vendored Normal file
@ -0,0 +1,46 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cm

import (
"k8s.io/api/core/v1"
evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
)

// hardEvictionReservation returns a resourcelist that includes reservation of resources based on hard eviction thresholds.
func hardEvictionReservation(thresholds []evictionapi.Threshold, capacity v1.ResourceList) v1.ResourceList {
if len(thresholds) == 0 {
return nil
}
ret := v1.ResourceList{}
for _, threshold := range thresholds {
if threshold.Operator != evictionapi.OpLessThan {
continue
}
switch threshold.Signal {
case evictionapi.SignalMemoryAvailable:
memoryCapacity := capacity[v1.ResourceMemory]
value := evictionapi.GetThresholdQuantity(threshold.Value, &memoryCapacity)
ret[v1.ResourceMemory] = *value
case evictionapi.SignalNodeFsAvailable:
storageCapacity := capacity[v1.ResourceEphemeralStorage]
value := evictionapi.GetThresholdQuantity(threshold.Value, &storageCapacity)
ret[v1.ResourceEphemeralStorage] = *value
}
}
return ret
}
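A hedged usage sketch (not part of the vendored change) of what hardEvictionReservation computes: each hard eviction threshold holds back either an absolute quantity or a share of node capacity. The helper below re-implements that arithmetic standalone; the capacity and threshold values are made up for the example.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// reservationFor returns the capacity held back for one hard eviction threshold,
// expressed either as an absolute quantity or as a fraction of node capacity.
func reservationFor(quantity *resource.Quantity, percentage float64, capacity resource.Quantity) *resource.Quantity {
	if quantity != nil {
		return quantity
	}
	return resource.NewQuantity(int64(percentage*float64(capacity.Value())), resource.BinarySI)
}

func main() {
	memoryCapacity := resource.MustParse("8Gi")

	// A threshold like memory.available<500Mi reserves a flat 500Mi.
	abs := resource.MustParse("500Mi")
	fmt.Println(reservationFor(&abs, 0, memoryCapacity).String())

	// A percentage threshold like memory.available<10% reserves a tenth of capacity.
	fmt.Println(reservationFor(nil, 0.10, memoryCapacity).String())
}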
|
19 vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers_linux.go generated vendored
@ -27,9 +27,11 @@ import (

"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/api/v1/resource"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
kubefeatures "k8s.io/kubernetes/pkg/features"
)

const (
@ -44,28 +46,29 @@
)

// MilliCPUToQuota converts milliCPU to CFS quota and period values.
func MilliCPUToQuota(milliCPU int64) (quota int64, period uint64) {
func MilliCPUToQuota(milliCPU int64, period int64) (quota int64) {
// CFS quota is measured in two values:
// - cfs_period_us=100ms (the amount of time to measure usage across)
// - cfs_period_us=100ms (the amount of time to measure usage across given by period)
// - cfs_quota=20ms (the amount of cpu time allowed to be used across a period)
// so in the above example, you are limited to 20% of a single CPU
// for multi-cpu environments, you just scale equivalent amounts
// see https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt for details

if milliCPU == 0 {
return
}

// we set the period to 100ms by default
period = QuotaPeriod
if !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.CPUCFSQuotaPeriod) {
period = QuotaPeriod
}

// we then convert your milliCPU to a value normalized over a period
quota = (milliCPU * QuotaPeriod) / MilliCPUToCPU
quota = (milliCPU * period) / MilliCPUToCPU

// quota needs to be a minimum of 1ms.
if quota < MinQuotaPeriod {
quota = MinQuotaPeriod
}

return
}
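A minimal standalone sketch (not part of the vendored change) of the quota arithmetic introduced above: the quota is now normalized over a caller-supplied CFS period instead of a hard-coded 100ms. The constants below (1000 milli-CPU per CPU, a 1ms minimum quota in microseconds) are assumptions for the example.

package main

import "fmt"

const (
	milliCPUToCPU  = 1000 // milli-CPU units per CPU, assumed
	minQuotaPeriod = 1000 // 1ms minimum quota in usec, assumed
)

// milliCPUToQuota mirrors the converted helper: quota scales with the period.
func milliCPUToQuota(milliCPU, period int64) int64 {
	if milliCPU == 0 {
		return 0
	}
	quota := (milliCPU * period) / milliCPUToCPU
	if quota < minQuotaPeriod {
		quota = minQuotaPeriod
	}
	return quota
}

func main() {
	// 200m CPU over the default 100ms (100000us) period -> 20000us, i.e. 20% of one CPU.
	fmt.Println(milliCPUToQuota(200, 100000))
	// The same 200m limit over a tuned 5ms period -> 1000us, matching the tunedQuota used in the tests.
	fmt.Println(milliCPUToQuota(200, 5000))
}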
|
||||
|
||||
@ -103,7 +106,7 @@ func HugePageLimits(resourceList v1.ResourceList) map[int64]int64 {
|
||||
}
|
||||
|
||||
// ResourceConfigForPod takes the input pod and outputs the cgroup resource config.
|
||||
func ResourceConfigForPod(pod *v1.Pod, enforceCPULimits bool) *ResourceConfig {
|
||||
func ResourceConfigForPod(pod *v1.Pod, enforceCPULimits bool, cpuPeriod uint64) *ResourceConfig {
|
||||
// sum requests and limits.
|
||||
reqs, limits := resource.PodRequestsAndLimits(pod)
|
||||
|
||||
@ -122,7 +125,7 @@ func ResourceConfigForPod(pod *v1.Pod, enforceCPULimits bool) *ResourceConfig {
|
||||
|
||||
// convert to CFS values
|
||||
cpuShares := MilliCPUToShares(cpuRequests)
|
||||
cpuQuota, cpuPeriod := MilliCPUToQuota(cpuLimits)
|
||||
cpuQuota := MilliCPUToQuota(cpuLimits, int64(cpuPeriod))
|
||||
|
||||
// track if limits were applied for each resource.
|
||||
memoryLimitsDeclared := true
|
||||
|
335 vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers_linux_test.go generated vendored
@ -20,11 +20,15 @@ package cm
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"strconv"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
|
||||
pkgfeatures "k8s.io/kubernetes/pkg/features"
|
||||
)
|
||||
|
||||
// getResourceList returns a ResourceList with the
|
||||
@ -49,14 +53,18 @@ func getResourceRequirements(requests, limits v1.ResourceList) v1.ResourceRequir
|
||||
}
|
||||
|
||||
func TestResourceConfigForPod(t *testing.T) {
|
||||
defaultQuotaPeriod := uint64(100 * time.Millisecond / time.Microsecond)
|
||||
tunedQuotaPeriod := uint64(5 * time.Millisecond / time.Microsecond)
|
||||
|
||||
minShares := uint64(MinShares)
|
||||
burstableShares := MilliCPUToShares(100)
|
||||
memoryQuantity := resource.MustParse("200Mi")
|
||||
burstableMemory := memoryQuantity.Value()
|
||||
burstablePartialShares := MilliCPUToShares(200)
|
||||
burstableQuota, burstablePeriod := MilliCPUToQuota(200)
|
||||
burstableQuota := MilliCPUToQuota(200, int64(defaultQuotaPeriod))
|
||||
guaranteedShares := MilliCPUToShares(100)
|
||||
guaranteedQuota, guaranteedPeriod := MilliCPUToQuota(100)
|
||||
guaranteedQuota := MilliCPUToQuota(100, int64(defaultQuotaPeriod))
|
||||
guaranteedTunedQuota := MilliCPUToQuota(100, int64(tunedQuotaPeriod))
|
||||
memoryQuantity = resource.MustParse("100Mi")
|
||||
cpuNoLimit := int64(-1)
|
||||
guaranteedMemory := memoryQuantity.Value()
|
||||
@ -64,6 +72,7 @@ func TestResourceConfigForPod(t *testing.T) {
|
||||
pod *v1.Pod
|
||||
expected *ResourceConfig
|
||||
enforceCPULimits bool
|
||||
quotaPeriod uint64
|
||||
}{
|
||||
"besteffort": {
|
||||
pod: &v1.Pod{
|
||||
@ -76,6 +85,7 @@ func TestResourceConfigForPod(t *testing.T) {
|
||||
},
|
||||
},
|
||||
enforceCPULimits: true,
|
||||
quotaPeriod: defaultQuotaPeriod,
|
||||
expected: &ResourceConfig{CpuShares: &minShares},
|
||||
},
|
||||
"burstable-no-limits": {
|
||||
@ -89,6 +99,7 @@ func TestResourceConfigForPod(t *testing.T) {
|
||||
},
|
||||
},
|
||||
enforceCPULimits: true,
|
||||
quotaPeriod: defaultQuotaPeriod,
|
||||
expected: &ResourceConfig{CpuShares: &burstableShares},
|
||||
},
|
||||
"burstable-with-limits": {
|
||||
@ -102,7 +113,8 @@ func TestResourceConfigForPod(t *testing.T) {
|
||||
},
|
||||
},
|
||||
enforceCPULimits: true,
|
||||
expected: &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &burstableQuota, CpuPeriod: &burstablePeriod, Memory: &burstableMemory},
|
||||
quotaPeriod: defaultQuotaPeriod,
|
||||
expected: &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &burstableQuota, CpuPeriod: &defaultQuotaPeriod, Memory: &burstableMemory},
|
||||
},
|
||||
"burstable-with-limits-no-cpu-enforcement": {
|
||||
pod: &v1.Pod{
|
||||
@ -115,7 +127,8 @@ func TestResourceConfigForPod(t *testing.T) {
|
||||
},
|
||||
},
|
||||
enforceCPULimits: false,
|
||||
expected: &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &cpuNoLimit, CpuPeriod: &burstablePeriod, Memory: &burstableMemory},
|
||||
quotaPeriod: defaultQuotaPeriod,
|
||||
expected: &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &cpuNoLimit, CpuPeriod: &defaultQuotaPeriod, Memory: &burstableMemory},
|
||||
},
|
||||
"burstable-partial-limits": {
|
||||
pod: &v1.Pod{
|
||||
@ -131,6 +144,52 @@ func TestResourceConfigForPod(t *testing.T) {
|
||||
},
|
||||
},
|
||||
enforceCPULimits: true,
|
||||
quotaPeriod: defaultQuotaPeriod,
|
||||
expected: &ResourceConfig{CpuShares: &burstablePartialShares},
|
||||
},
|
||||
"burstable-with-limits-with-tuned-quota": {
|
||||
pod: &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
enforceCPULimits: true,
|
||||
quotaPeriod: tunedQuotaPeriod,
|
||||
expected: &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &burstableQuota, CpuPeriod: &tunedQuotaPeriod, Memory: &burstableMemory},
|
||||
},
|
||||
"burstable-with-limits-no-cpu-enforcement-with-tuned-quota": {
|
||||
pod: &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
enforceCPULimits: false,
|
||||
quotaPeriod: tunedQuotaPeriod,
|
||||
expected: &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &cpuNoLimit, CpuPeriod: &tunedQuotaPeriod, Memory: &burstableMemory},
|
||||
},
|
||||
"burstable-partial-limits-with-tuned-quota": {
|
||||
pod: &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
|
||||
},
|
||||
{
|
||||
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("", "")),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
enforceCPULimits: true,
|
||||
quotaPeriod: tunedQuotaPeriod,
|
||||
expected: &ResourceConfig{CpuShares: &burstablePartialShares},
|
||||
},
|
||||
"guaranteed": {
|
||||
@ -144,7 +203,8 @@ func TestResourceConfigForPod(t *testing.T) {
|
||||
},
|
||||
},
|
||||
enforceCPULimits: true,
|
||||
expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedQuota, CpuPeriod: &guaranteedPeriod, Memory: &guaranteedMemory},
|
||||
quotaPeriod: defaultQuotaPeriod,
|
||||
expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedQuota, CpuPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory},
|
||||
},
|
||||
"guaranteed-no-cpu-enforcement": {
|
||||
pod: &v1.Pod{
|
||||
@ -157,11 +217,264 @@ func TestResourceConfigForPod(t *testing.T) {
|
||||
},
|
||||
},
|
||||
enforceCPULimits: false,
|
||||
expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &cpuNoLimit, CpuPeriod: &guaranteedPeriod, Memory: &guaranteedMemory},
|
||||
quotaPeriod: defaultQuotaPeriod,
|
||||
expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &cpuNoLimit, CpuPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory},
|
||||
},
|
||||
"guaranteed-with-tuned-quota": {
|
||||
pod: &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
enforceCPULimits: true,
|
||||
quotaPeriod: tunedQuotaPeriod,
|
||||
expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedTunedQuota, CpuPeriod: &tunedQuotaPeriod, Memory: &guaranteedMemory},
|
||||
},
|
||||
"guaranteed-no-cpu-enforcement-with-tuned-quota": {
|
||||
pod: &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
enforceCPULimits: false,
|
||||
quotaPeriod: tunedQuotaPeriod,
|
||||
expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &cpuNoLimit, CpuPeriod: &tunedQuotaPeriod, Memory: &guaranteedMemory},
|
||||
},
|
||||
}
|
||||
|
||||
for testName, testCase := range testCases {
|
||||
actual := ResourceConfigForPod(testCase.pod, testCase.enforceCPULimits)
|
||||
|
||||
actual := ResourceConfigForPod(testCase.pod, testCase.enforceCPULimits, testCase.quotaPeriod)
|
||||
|
||||
if !reflect.DeepEqual(actual.CpuPeriod, testCase.expected.CpuPeriod) {
|
||||
t.Errorf("unexpected result, test: %v, cpu period not as expected", testName)
|
||||
}
|
||||
if !reflect.DeepEqual(actual.CpuQuota, testCase.expected.CpuQuota) {
|
||||
t.Errorf("unexpected result, test: %v, cpu quota not as expected", testName)
|
||||
}
|
||||
if !reflect.DeepEqual(actual.CpuShares, testCase.expected.CpuShares) {
|
||||
t.Errorf("unexpected result, test: %v, cpu shares not as expected", testName)
|
||||
}
|
||||
if !reflect.DeepEqual(actual.Memory, testCase.expected.Memory) {
|
||||
t.Errorf("unexpected result, test: %v, memory not as expected", testName)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestResourceConfigForPodWithCustomCPUCFSQuotaPeriod(t *testing.T) {
|
||||
defaultQuotaPeriod := uint64(100 * time.Millisecond / time.Microsecond)
|
||||
tunedQuotaPeriod := uint64(5 * time.Millisecond / time.Microsecond)
|
||||
tunedQuota := int64(1 * time.Millisecond / time.Microsecond)
|
||||
|
||||
utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.CPUCFSQuotaPeriod, true)
|
||||
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.CPUCFSQuotaPeriod, false)
|
||||
|
||||
minShares := uint64(MinShares)
|
||||
burstableShares := MilliCPUToShares(100)
|
||||
memoryQuantity := resource.MustParse("200Mi")
|
||||
burstableMemory := memoryQuantity.Value()
|
||||
burstablePartialShares := MilliCPUToShares(200)
|
||||
burstableQuota := MilliCPUToQuota(200, int64(defaultQuotaPeriod))
|
||||
guaranteedShares := MilliCPUToShares(100)
|
||||
guaranteedQuota := MilliCPUToQuota(100, int64(defaultQuotaPeriod))
|
||||
guaranteedTunedQuota := MilliCPUToQuota(100, int64(tunedQuotaPeriod))
|
||||
memoryQuantity = resource.MustParse("100Mi")
|
||||
cpuNoLimit := int64(-1)
|
||||
guaranteedMemory := memoryQuantity.Value()
|
||||
testCases := map[string]struct {
|
||||
pod *v1.Pod
|
||||
expected *ResourceConfig
|
||||
enforceCPULimits bool
|
||||
quotaPeriod uint64
|
||||
}{
|
||||
"besteffort": {
|
||||
pod: &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Resources: getResourceRequirements(getResourceList("", ""), getResourceList("", "")),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
enforceCPULimits: true,
|
||||
quotaPeriod: defaultQuotaPeriod,
|
||||
expected: &ResourceConfig{CpuShares: &minShares},
|
||||
},
|
||||
"burstable-no-limits": {
|
||||
pod: &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("", "")),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
enforceCPULimits: true,
|
||||
quotaPeriod: defaultQuotaPeriod,
|
||||
expected: &ResourceConfig{CpuShares: &burstableShares},
|
||||
},
|
||||
"burstable-with-limits": {
|
||||
pod: &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
enforceCPULimits: true,
|
||||
quotaPeriod: defaultQuotaPeriod,
|
||||
expected: &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &burstableQuota, CpuPeriod: &defaultQuotaPeriod, Memory: &burstableMemory},
|
||||
},
|
||||
"burstable-with-limits-no-cpu-enforcement": {
|
||||
pod: &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
enforceCPULimits: false,
|
||||
quotaPeriod: defaultQuotaPeriod,
|
||||
expected: &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &cpuNoLimit, CpuPeriod: &defaultQuotaPeriod, Memory: &burstableMemory},
|
||||
},
|
||||
"burstable-partial-limits": {
|
||||
pod: &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
|
||||
},
|
||||
{
|
||||
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("", "")),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
enforceCPULimits: true,
|
||||
quotaPeriod: defaultQuotaPeriod,
|
||||
expected: &ResourceConfig{CpuShares: &burstablePartialShares},
|
||||
},
|
||||
"burstable-with-limits-with-tuned-quota": {
|
||||
pod: &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
enforceCPULimits: true,
|
||||
quotaPeriod: tunedQuotaPeriod,
|
||||
expected: &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &tunedQuota, CpuPeriod: &tunedQuotaPeriod, Memory: &burstableMemory},
|
||||
},
|
||||
"burstable-with-limits-no-cpu-enforcement-with-tuned-quota": {
|
||||
pod: &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
enforceCPULimits: false,
|
||||
quotaPeriod: tunedQuotaPeriod,
|
||||
expected: &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &cpuNoLimit, CpuPeriod: &tunedQuotaPeriod, Memory: &burstableMemory},
|
||||
},
|
||||
"burstable-partial-limits-with-tuned-quota": {
|
||||
pod: &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
|
||||
},
|
||||
{
|
||||
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("", "")),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
enforceCPULimits: true,
|
||||
quotaPeriod: tunedQuotaPeriod,
|
||||
expected: &ResourceConfig{CpuShares: &burstablePartialShares},
|
||||
},
|
||||
"guaranteed": {
|
||||
pod: &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
enforceCPULimits: true,
|
||||
quotaPeriod: defaultQuotaPeriod,
|
||||
expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedQuota, CpuPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory},
|
||||
},
|
||||
"guaranteed-no-cpu-enforcement": {
|
||||
pod: &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
enforceCPULimits: false,
|
||||
quotaPeriod: defaultQuotaPeriod,
|
||||
expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &cpuNoLimit, CpuPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory},
|
||||
},
|
||||
"guaranteed-with-tuned-quota": {
|
||||
pod: &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
enforceCPULimits: true,
|
||||
quotaPeriod: tunedQuotaPeriod,
|
||||
expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedTunedQuota, CpuPeriod: &tunedQuotaPeriod, Memory: &guaranteedMemory},
|
||||
},
|
||||
"guaranteed-no-cpu-enforcement-with-tuned-quota": {
|
||||
pod: &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
enforceCPULimits: false,
|
||||
quotaPeriod: tunedQuotaPeriod,
|
||||
expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &cpuNoLimit, CpuPeriod: &tunedQuotaPeriod, Memory: &guaranteedMemory},
|
||||
},
|
||||
}
|
||||
|
||||
for testName, testCase := range testCases {
|
||||
|
||||
actual := ResourceConfigForPod(testCase.pod, testCase.enforceCPULimits, testCase.quotaPeriod)
|
||||
|
||||
if !reflect.DeepEqual(actual.CpuPeriod, testCase.expected.CpuPeriod) {
|
||||
t.Errorf("unexpected result, test: %v, cpu period not as expected", testName)
|
||||
}
|
||||
@ -225,9 +538,9 @@ func TestMilliCPUToQuota(t *testing.T) {
|
||||
},
|
||||
}
|
||||
for _, testCase := range testCases {
|
||||
quota, period := MilliCPUToQuota(testCase.input)
|
||||
if quota != testCase.quota || period != testCase.period {
|
||||
t.Errorf("Input %v, expected quota %v period %v, but got quota %v period %v", testCase.input, testCase.quota, testCase.period, quota, period)
|
||||
quota := MilliCPUToQuota(testCase.input, int64(testCase.period))
|
||||
if quota != testCase.quota {
|
||||
t.Errorf("Input %v and %v, expected quota %v, but got quota %v", testCase.input, testCase.period, testCase.quota, quota)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
9 vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers_unsupported.go generated vendored
@ -28,13 +28,12 @@ const (
|
||||
SharesPerCPU = 0
|
||||
MilliCPUToCPU = 0
|
||||
|
||||
QuotaPeriod = 0
|
||||
MinQuotaPeriod = 0
|
||||
)
|
||||
|
||||
// MilliCPUToQuota converts milliCPU to CFS quota and period values.
|
||||
func MilliCPUToQuota(milliCPU int64) (int64, int64) {
|
||||
return 0, 0
|
||||
// MilliCPUToQuota converts milliCPU and period to CFS quota values.
|
||||
func MilliCPUToQuota(milliCPU, period int64) int64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
// MilliCPUToShares converts the milliCPU to CFS shares.
|
||||
@ -43,7 +42,7 @@ func MilliCPUToShares(milliCPU int64) int64 {
|
||||
}
|
||||
|
||||
// ResourceConfigForPod takes the input pod and outputs the cgroup resource config.
|
||||
func ResourceConfigForPod(pod *v1.Pod, enforceCPULimit bool) *ResourceConfig {
|
||||
func ResourceConfigForPod(pod *v1.Pod, enforceCPULimit bool, cpuPeriod uint64) *ResourceConfig {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -23,15 +23,13 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/klog"
|
||||
kubefeatures "k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/kubernetes/pkg/kubelet/events"
|
||||
evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
|
||||
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
)
|
||||
|
||||
@ -50,7 +48,7 @@ func (cm *containerManagerImpl) createNodeAllocatableCgroups() error {
|
||||
return nil
|
||||
}
|
||||
if err := cm.cgroupManager.Create(cgroupConfig); err != nil {
|
||||
glog.Errorf("Failed to create %q cgroup", cm.cgroupRoot)
|
||||
klog.Errorf("Failed to create %q cgroup", cm.cgroupRoot)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
@ -68,7 +66,7 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error {
|
||||
nodeAllocatable = cm.getNodeAllocatableAbsolute()
|
||||
}
|
||||
|
||||
glog.V(4).Infof("Attempting to enforce Node Allocatable with config: %+v", nc)
|
||||
klog.V(4).Infof("Attempting to enforce Node Allocatable with config: %+v", nc)
|
||||
|
||||
cgroupConfig := &CgroupConfig{
|
||||
Name: cm.cgroupRoot,
|
||||
@ -105,7 +103,7 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error {
|
||||
}
|
||||
// Now apply kube reserved and system reserved limits if required.
|
||||
if nc.EnforceNodeAllocatable.Has(kubetypes.SystemReservedEnforcementKey) {
|
||||
glog.V(2).Infof("Enforcing System reserved on cgroup %q with limits: %+v", nc.SystemReservedCgroupName, nc.SystemReserved)
|
||||
klog.V(2).Infof("Enforcing System reserved on cgroup %q with limits: %+v", nc.SystemReservedCgroupName, nc.SystemReserved)
|
||||
if err := enforceExistingCgroup(cm.cgroupManager, ParseCgroupfsToCgroupName(nc.SystemReservedCgroupName), nc.SystemReserved); err != nil {
|
||||
message := fmt.Sprintf("Failed to enforce System Reserved Cgroup Limits on %q: %v", nc.SystemReservedCgroupName, err)
|
||||
cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
|
||||
@ -114,7 +112,7 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error {
|
||||
cm.recorder.Eventf(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on system reserved cgroup %v", nc.SystemReservedCgroupName)
|
||||
}
|
||||
if nc.EnforceNodeAllocatable.Has(kubetypes.KubeReservedEnforcementKey) {
|
||||
glog.V(2).Infof("Enforcing kube reserved on cgroup %q with limits: %+v", nc.KubeReservedCgroupName, nc.KubeReserved)
|
||||
klog.V(2).Infof("Enforcing kube reserved on cgroup %q with limits: %+v", nc.KubeReservedCgroupName, nc.KubeReserved)
|
||||
if err := enforceExistingCgroup(cm.cgroupManager, ParseCgroupfsToCgroupName(nc.KubeReservedCgroupName), nc.KubeReserved); err != nil {
|
||||
message := fmt.Sprintf("Failed to enforce Kube Reserved Cgroup Limits on %q: %v", nc.KubeReservedCgroupName, err)
|
||||
cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
|
||||
@ -131,7 +129,10 @@ func enforceExistingCgroup(cgroupManager CgroupManager, cName CgroupName, rl v1.
|
||||
Name: cName,
|
||||
ResourceParameters: getCgroupConfig(rl),
|
||||
}
|
||||
glog.V(4).Infof("Enforcing limits on cgroup %q with %d cpu shares and %d bytes of memory", cName, cgroupConfig.ResourceParameters.CpuShares, cgroupConfig.ResourceParameters.Memory)
|
||||
if cgroupConfig.ResourceParameters == nil {
|
||||
return fmt.Errorf("%q cgroup is not config properly", cgroupConfig.Name)
|
||||
}
|
||||
klog.V(4).Infof("Enforcing limits on cgroup %q with %d cpu shares and %d bytes of memory", cName, cgroupConfig.ResourceParameters.CpuShares, cgroupConfig.ResourceParameters.Memory)
|
||||
if !cgroupManager.Exists(cgroupConfig.Name) {
|
||||
return fmt.Errorf("%q cgroup does not exist", cgroupConfig.Name)
|
||||
}
|
||||
@ -210,30 +211,6 @@ func (cm *containerManagerImpl) GetNodeAllocatableReservation() v1.ResourceList
|
||||
return result
|
||||
}
|
||||
|
||||
// hardEvictionReservation returns a resourcelist that includes reservation of resources based on hard eviction thresholds.
|
||||
func hardEvictionReservation(thresholds []evictionapi.Threshold, capacity v1.ResourceList) v1.ResourceList {
|
||||
if len(thresholds) == 0 {
|
||||
return nil
|
||||
}
|
||||
ret := v1.ResourceList{}
|
||||
for _, threshold := range thresholds {
|
||||
if threshold.Operator != evictionapi.OpLessThan {
|
||||
continue
|
||||
}
|
||||
switch threshold.Signal {
|
||||
case evictionapi.SignalMemoryAvailable:
|
||||
memoryCapacity := capacity[v1.ResourceMemory]
|
||||
value := evictionapi.GetThresholdQuantity(threshold.Value, &memoryCapacity)
|
||||
ret[v1.ResourceMemory] = *value
|
||||
case evictionapi.SignalNodeFsAvailable:
|
||||
storageCapacity := capacity[v1.ResourceEphemeralStorage]
|
||||
value := evictionapi.GetThresholdQuantity(threshold.Value, &storageCapacity)
|
||||
ret[v1.ResourceEphemeralStorage] = *value
|
||||
}
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// validateNodeAllocatable ensures that the user specified Node Allocatable Configuration doesn't reserve more than the node capacity.
|
||||
// Returns error if the configuration is invalid, nil otherwise.
|
||||
func (cm *containerManagerImpl) validateNodeAllocatable() error {
|
26 vendor/k8s.io/kubernetes/pkg/kubelet/cm/pod_container_manager_linux.go generated vendored
@ -23,11 +23,11 @@ import (
    "path"
    "strings"

    "github.com/golang/glog"
    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/types"
    utilerrors "k8s.io/apimachinery/pkg/util/errors"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    "k8s.io/klog"
    v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
    kubefeatures "k8s.io/kubernetes/pkg/features"
)
@ -51,6 +51,9 @@ type podContainerManagerImpl struct {
    podPidsLimit int64
    // enforceCPULimits controls whether cfs quota is enforced or not
    enforceCPULimits bool
    // cpuCFSQuotaPeriod is the cfs period value, cfs_period_us, setting per
    // node for all containers in usec
    cpuCFSQuotaPeriod uint64
}

// Make sure that podContainerManagerImpl implements the PodContainerManager interface
@ -81,7 +84,7 @@ func (m *podContainerManagerImpl) EnsureExists(pod *v1.Pod) error {
        // Create the pod container
        containerConfig := &CgroupConfig{
            Name: podContainerName,
            ResourceParameters: ResourceConfigForPod(pod, m.enforceCPULimits),
            ResourceParameters: ResourceConfigForPod(pod, m.enforceCPULimits, m.cpuCFSQuotaPeriod),
        }
        if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SupportPodPidsLimit) && m.podPidsLimit > 0 {
            containerConfig.ResourceParameters.PodPidsLimit = &m.podPidsLimit
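The new cpuCFSQuotaPeriod argument flows into the CFS quota calculation: for a pod CPU limit expressed in millicores, the quota written to cpu.cfs_quota_us is that limit's share of the configured cpu.cfs_period_us. A rough sketch of the conversion, not the kubelet's exact helper (which also clamps very small quotas):

package main

import "fmt"

// milliCPUToQuota converts a CPU limit in millicores into a CFS quota for the
// given period (both in microseconds). 1000m equals one full core, i.e. one
// whole period of quota.
func milliCPUToQuota(milliCPU int64, periodUsec uint64) int64 {
    if milliCPU == 0 {
        return 0
    }
    return milliCPU * int64(periodUsec) / 1000
}

func main() {
    const period = 100000 // the default 100ms cfs_period_us
    fmt.Println(milliCPUToQuota(500, period))  // 50000: half a core
    fmt.Println(milliCPUToQuota(2000, period)) // 200000: two cores
}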
@ -134,11 +137,10 @@ func (m *podContainerManagerImpl) killOnePid(pid int) error {
            // Hate parsing strings, but
            // vendor/github.com/opencontainers/runc/libcontainer/
            // also does this.
            glog.V(3).Infof("process with pid %v no longer exists", pid)
            klog.V(3).Infof("process with pid %v no longer exists", pid)
            return nil
        } else {
            return err
        }
        return err
    }
    return nil
}
@ -157,18 +159,18 @@ func (m *podContainerManagerImpl) tryKillingCgroupProcesses(podCgroup CgroupName
    // We try killing all the pids multiple times
    for i := 0; i < 5; i++ {
        if i != 0 {
            glog.V(3).Infof("Attempt %v failed to kill all unwanted process. Retyring", i)
            klog.V(3).Infof("Attempt %v failed to kill all unwanted process. Retyring", i)
        }
        errlist = []error{}
        for _, pid := range pidsToKill {
            glog.V(3).Infof("Attempt to kill process with pid: %v", pid)
            klog.V(3).Infof("Attempt to kill process with pid: %v", pid)
            if err := m.killOnePid(pid); err != nil {
                glog.V(3).Infof("failed to kill process with pid: %v", pid)
                klog.V(3).Infof("failed to kill process with pid: %v", pid)
                errlist = append(errlist, err)
            }
        }
        if len(errlist) == 0 {
            glog.V(3).Infof("successfully killed all unwanted processes.")
            klog.V(3).Infof("successfully killed all unwanted processes.")
            return nil
        }
    }
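tryKillingCgroupProcesses is a bounded retry loop: up to five passes over the pids, collecting per-pid errors and returning as soon as a whole pass succeeds. A generic sketch of the same pattern, with the kill operation injected so the example stays platform-neutral:

package main

import (
    "errors"
    "fmt"
)

// killAllWithRetries makes up to maxAttempts passes over pids, calling kill on
// each one and collecting failures; it stops early once a whole pass succeeds,
// otherwise it returns the aggregated errors from the last pass.
func killAllWithRetries(pids []int, maxAttempts int, kill func(pid int) error) error {
    var errs []error
    for attempt := 0; attempt < maxAttempts; attempt++ {
        errs = errs[:0]
        for _, pid := range pids {
            if err := kill(pid); err != nil {
                errs = append(errs, fmt.Errorf("pid %d: %w", pid, err))
            }
        }
        if len(errs) == 0 {
            return nil
        }
    }
    return errors.Join(errs...)
}

func main() {
    calls := 0
    // A fake kill that fails the first time it sees each pid, then succeeds.
    flaky := func(pid int) error {
        calls++
        if calls <= 2 {
            return errors.New("still running")
        }
        return nil
    }
    fmt.Println(killAllWithRetries([]int{101, 102}, 5, flaky)) // <nil>
}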
@ -179,7 +181,7 @@ func (m *podContainerManagerImpl) tryKillingCgroupProcesses(podCgroup CgroupName
func (m *podContainerManagerImpl) Destroy(podCgroup CgroupName) error {
    // Try killing all the processes attached to the pod cgroup
    if err := m.tryKillingCgroupProcesses(podCgroup); err != nil {
        glog.V(3).Infof("failed to kill all the processes attached to the %v cgroups", podCgroup)
        klog.V(3).Infof("failed to kill all the processes attached to the %v cgroups", podCgroup)
        return fmt.Errorf("failed to kill all the processes attached to the %v cgroups : %v", podCgroup, err)
    }
@ -267,7 +269,7 @@ func (m *podContainerManagerImpl) GetAllPodsFromCgroups() (map[types.UID]CgroupN
        parts := strings.Split(basePath, podCgroupNamePrefix)
        // the uid is missing, so we log the unexpected cgroup not of form pod<uid>
        if len(parts) != 2 {
            glog.Errorf("pod cgroup manager ignoring unexpected cgroup %v because it is not a pod", cgroupfsPath)
            klog.Errorf("pod cgroup manager ignoring unexpected cgroup %v because it is not a pod", cgroupfsPath)
            continue
        }
        podUID := parts[1]
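GetAllPodsFromCgroups recovers pod UIDs from cgroup directories named pod<uid>; anything that does not split cleanly around that prefix is logged and skipped. A small sketch of the same extraction (the value of podCgroupNamePrefix is assumed here):

package main

import (
    "fmt"
    "strings"
)

const podCgroupNamePrefix = "pod" // assumed value of the kubelet's prefix constant

// podUIDFromCgroupBase extracts the pod UID from a cgroup directory base name
// of the form "pod<uid>"; ok is false for anything that does not match.
func podUIDFromCgroupBase(base string) (uid string, ok bool) {
    parts := strings.Split(base, podCgroupNamePrefix)
    if len(parts) != 2 || parts[0] != "" || parts[1] == "" {
        return "", false
    }
    return parts[1], true
}

func main() {
    fmt.Println(podUIDFromCgroupBase("podabc123"))    // abc123 true
    fmt.Println(podUIDFromCgroupBase("system.slice")) // "" false
}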
@ -299,7 +301,7 @@ func (m *podContainerManagerNoop) EnsureExists(_ *v1.Pod) error {
}

func (m *podContainerManagerNoop) GetPodContainerName(_ *v1.Pod) (CgroupName, string) {
    return m.cgroupRoot, m.cgroupRoot.ToCgroupfs()
    return m.cgroupRoot, ""
}

func (m *podContainerManagerNoop) GetPodContainerNameForDriver(_ *v1.Pod) string {
18
vendor/k8s.io/kubernetes/pkg/kubelet/cm/qos_container_manager_linux.go
generated
vendored
@ -22,7 +22,7 @@ import (
    "sync"
    "time"

    "github.com/golang/glog"
    "k8s.io/klog"

    "k8s.io/apimachinery/pkg/util/wait"

@ -138,7 +138,7 @@ func (m *qosContainerManagerImpl) Start(getNodeAllocatable func() v1.ResourceLis
    go wait.Until(func() {
        err := m.UpdateCgroups()
        if err != nil {
            glog.Warningf("[ContainerManager] Failed to reserve QoS requests: %v", err)
            klog.Warningf("[ContainerManager] Failed to reserve QoS requests: %v", err)
        }
    }, periodicQOSCgroupUpdateInterval, wait.NeverStop)
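wait.Until, used in Start above, calls the given function, sleeps for the period, and repeats until the stop channel is closed; passing wait.NeverStop keeps the reconcile loop running for the life of the process. A minimal usage sketch with a local stop channel instead of wait.NeverStop:

package main

import (
    "fmt"
    "sync/atomic"
    "time"

    "k8s.io/apimachinery/pkg/util/wait"
)

func main() {
    stop := make(chan struct{})
    var passes int64

    // Runs the closure, waits 50ms, and repeats until stop is closed.
    go wait.Until(func() {
        n := atomic.AddInt64(&passes, 1)
        fmt.Println("reconcile pass", n)
    }, 50*time.Millisecond, stop)

    time.Sleep(200 * time.Millisecond)
    close(stop)
    fmt.Println("stopped after", atomic.LoadInt64(&passes), "passes")
}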
@ -222,17 +222,17 @@ func (m *qosContainerManagerImpl) setMemoryReserve(configs map[v1.PodQOSClass]*C
    resources := m.getNodeAllocatable()
    allocatableResource, ok := resources[v1.ResourceMemory]
    if !ok {
        glog.V(2).Infof("[Container Manager] Allocatable memory value could not be determined. Not setting QOS memory limts.")
        klog.V(2).Infof("[Container Manager] Allocatable memory value could not be determined. Not setting QOS memory limts.")
        return
    }
    allocatable := allocatableResource.Value()
    if allocatable == 0 {
        glog.V(2).Infof("[Container Manager] Memory allocatable reported as 0, might be in standalone mode. Not setting QOS memory limts.")
        klog.V(2).Infof("[Container Manager] Memory allocatable reported as 0, might be in standalone mode. Not setting QOS memory limts.")
        return
    }

    for qos, limits := range qosMemoryRequests {
        glog.V(2).Infof("[Container Manager] %s pod requests total %d bytes (reserve %d%%)", qos, limits, percentReserve)
        klog.V(2).Infof("[Container Manager] %s pod requests total %d bytes (reserve %d%%)", qos, limits, percentReserve)
    }

    // Calculate QOS memory limits
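setMemoryReserve works from node allocatable memory and the summed memory requests per QoS class: the configured percentage of each higher class's requests is held back, and what remains becomes the memory limit of the classes below it. A simplified arithmetic sketch of that cascade (class names follow the QoS scheme; the actual cgroup wiring is omitted):

package main

import "fmt"

// qosMemoryLimits derives memory limits for the Burstable and BestEffort QoS
// cgroups from node allocatable memory, the summed requests of the classes
// above them, and the configured reserve percentage. Guaranteed pods are not
// limited by this calculation.
func qosMemoryLimits(allocatable, guaranteedReq, burstableReq, percentReserve int64) (burstableLimit, bestEffortLimit int64) {
    burstableLimit = allocatable - guaranteedReq*percentReserve/100
    bestEffortLimit = burstableLimit - burstableReq*percentReserve/100
    return burstableLimit, bestEffortLimit
}

func main() {
    const gib = int64(1) << 30
    b, be := qosMemoryLimits(16*gib, 8*gib, 4*gib, 50)
    fmt.Printf("burstable limit: %d bytes, besteffort limit: %d bytes\n", b, be)
}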
@ -252,7 +252,7 @@ func (m *qosContainerManagerImpl) retrySetMemoryReserve(configs map[v1.PodQOSCla
    for qos, config := range configs {
        stats, err := m.cgroupManager.GetResourceStats(config.Name)
        if err != nil {
            glog.V(2).Infof("[Container Manager] %v", err)
            klog.V(2).Infof("[Container Manager] %v", err)
            return
        }
        usage := stats.MemoryStats.Usage
@ -312,7 +312,7 @@ func (m *qosContainerManagerImpl) UpdateCgroups() error {
        }
    }
    if updateSuccess {
        glog.V(4).Infof("[ContainerManager]: Updated QoS cgroup configuration")
        klog.V(4).Infof("[ContainerManager]: Updated QoS cgroup configuration")
        return nil
    }

@ -330,12 +330,12 @@ func (m *qosContainerManagerImpl) UpdateCgroups() error {
    for _, config := range qosConfigs {
        err := m.cgroupManager.Update(config)
        if err != nil {
            glog.Errorf("[ContainerManager]: Failed to update QoS cgroup configuration")
            klog.Errorf("[ContainerManager]: Failed to update QoS cgroup configuration")
            return err
        }
    }

    glog.V(4).Infof("[ContainerManager]: Updated QoS cgroup configuration")
    klog.V(4).Infof("[ContainerManager]: Updated QoS cgroup configuration")
    return nil
}
40
vendor/k8s.io/kubernetes/pkg/kubelet/cm/util/BUILD
generated
vendored
@ -7,42 +7,10 @@ load(

go_library(
    name = "go_default_library",
    srcs = select({
        "@io_bazel_rules_go//go/platform:android": [
            "cgroups_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:darwin": [
            "cgroups_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:dragonfly": [
            "cgroups_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:freebsd": [
            "cgroups_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:linux": [
            "cgroups_linux.go",
        ],
        "@io_bazel_rules_go//go/platform:nacl": [
            "cgroups_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:netbsd": [
            "cgroups_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:openbsd": [
            "cgroups_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:plan9": [
            "cgroups_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:solaris": [
            "cgroups_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:windows": [
            "cgroups_unsupported.go",
        ],
        "//conditions:default": [],
    }),
    srcs = [
        "cgroups_linux.go",
        "cgroups_unsupported.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/kubelet/cm/util",
    deps = select({
        "@io_bazel_rules_go//go/platform:linux": [
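With the platform select gone from srcs, both cgroups_linux.go and cgroups_unsupported.go are always listed, and the per-platform choice falls back to the Go build constraints inside the files themselves, which rules_go honors when compiling. The usual shape of such a pair of files looks roughly like this (the function shown is illustrative, not the package's real API):

// cgroups_linux.go (compiled only on Linux)
// +build linux

package util

// mountPoint reports where the cgroup hierarchy is expected to be mounted.
func mountPoint() string { return "/sys/fs/cgroup" }

// cgroups_unsupported.go (compiled everywhere else)
// +build !linux

package util

// mountPoint is a stub on platforms without cgroups.
func mountPoint() string { return "" }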