Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-13 10:33:35 +00:00)

Commit: vendor updates
52 vendor/k8s.io/kubernetes/test/e2e_node/BUILD (generated, vendored)
@@ -1,15 +1,12 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"container.go",
|
||||
"device_plugin.go",
|
||||
"doc.go",
|
||||
"docker_util.go",
|
||||
"framework.go",
|
||||
@@ -19,7 +16,7 @@ go_library(
|
||||
"simple_mount.go",
|
||||
"util.go",
|
||||
] + select({
|
||||
"@io_bazel_rules_go//go/platform:linux_amd64": [
|
||||
"@io_bazel_rules_go//go/platform:linux": [
|
||||
"benchmark_util.go",
|
||||
"node_problem_detector_linux.go",
|
||||
"resource_collector.go",
|
||||
@@ -27,15 +24,18 @@ go_library(
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
importpath = "k8s.io/kubernetes/test/e2e_node",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//pkg/api/v1/pod:go_default_library",
|
||||
"//pkg/features:go_default_library",
|
||||
"//pkg/kubelet/apis/cri:go_default_library",
|
||||
"//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library",
|
||||
"//pkg/kubelet/apis/cri/runtime/v1alpha2:go_default_library",
|
||||
"//pkg/kubelet/apis/deviceplugin/v1beta1:go_default_library",
|
||||
"//pkg/kubelet/apis/kubeletconfig:go_default_library",
|
||||
"//pkg/kubelet/apis/kubeletconfig/scheme:go_default_library",
|
||||
"//pkg/kubelet/apis/kubeletconfig/v1alpha1:go_default_library",
|
||||
"//pkg/kubelet/apis/kubeletconfig/v1beta1:go_default_library",
|
||||
"//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
|
||||
"//pkg/kubelet/cm/devicemanager:go_default_library",
|
||||
"//pkg/kubelet/metrics:go_default_library",
|
||||
"//pkg/kubelet/remote:go_default_library",
|
||||
"//test/e2e/common:go_default_library",
|
||||
@@ -44,12 +44,14 @@ go_library(
|
||||
"//test/utils/image:go_default_library",
|
||||
"//vendor/github.com/blang/semver:go_default_library",
|
||||
"//vendor/github.com/coreos/go-systemd/util:go_default_library",
|
||||
"//vendor/github.com/docker/docker/api/types:go_default_library",
|
||||
"//vendor/github.com/docker/docker/client:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo:go_default_library",
|
||||
"//vendor/github.com/onsi/gomega:go_default_library",
|
||||
"//vendor/github.com/prometheus/common/model:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
@@ -58,7 +60,7 @@ go_library(
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
|
||||
] + select({
|
||||
"@io_bazel_rules_go//go/platform:linux_amd64": [
|
||||
"@io_bazel_rules_go//go/platform:linux": [
|
||||
"//pkg/api/v1/node:go_default_library",
|
||||
"//pkg/util/procfs:go_default_library",
|
||||
"//test/e2e/perftype:go_default_library",
|
||||
@@ -82,20 +84,20 @@ go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"apparmor_test.go",
|
||||
"container_log_rotation_test.go",
|
||||
"cpu_manager_test.go",
|
||||
"critical_pod_test.go",
|
||||
"docker_test.go",
|
||||
"dockershim_checkpoint_test.go",
|
||||
"dynamic_kubelet_config_test.go",
|
||||
"e2e_node_suite_test.go",
|
||||
"eviction_test.go",
|
||||
"garbage_collector_test.go",
|
||||
"gke_environment_test.go",
|
||||
"hugepages_test.go",
|
||||
"image_id_test.go",
|
||||
"kubelet_test.go",
|
||||
"lifecycle_hook_test.go",
|
||||
"log_path_test.go",
|
||||
"memory_eviction_test.go",
|
||||
"mirror_pod_test.go",
|
||||
"pods_container_manager_test.go",
|
||||
"runtime_conformance_test.go",
|
||||
@@ -103,52 +105,49 @@ go_test(
|
||||
"summary_test.go",
|
||||
"volume_manager_test.go",
|
||||
] + select({
|
||||
"@io_bazel_rules_go//go/platform:linux_amd64": [
|
||||
"@io_bazel_rules_go//go/platform:linux": [
|
||||
"container_manager_test.go",
|
||||
"density_test.go",
|
||||
"e2e_node_suite_test.go",
|
||||
"node_container_manager_test.go",
|
||||
"resource_usage_test.go",
|
||||
"restart_test.go",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
importpath = "k8s.io/kubernetes/test/e2e_node",
|
||||
library = ":go_default_library",
|
||||
embed = [":go_default_library"],
|
||||
tags = ["e2e"],
|
||||
deps = [
|
||||
"//pkg/api/v1/node:go_default_library",
|
||||
"//pkg/apis/core:go_default_library",
|
||||
"//pkg/features:go_default_library",
|
||||
"//pkg/kubelet:go_default_library",
|
||||
"//pkg/kubelet/apis/cri:go_default_library",
|
||||
"//pkg/kubelet/apis/cri/runtime/v1alpha2:go_default_library",
|
||||
"//pkg/kubelet/apis/kubeletconfig:go_default_library",
|
||||
"//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
|
||||
"//pkg/kubelet/cm:go_default_library",
|
||||
"//pkg/kubelet/cm/cpumanager:go_default_library",
|
||||
"//pkg/kubelet/cm/cpuset:go_default_library",
|
||||
"//pkg/kubelet/container:go_default_library",
|
||||
"//pkg/kubelet/dockershim/libdocker:go_default_library",
|
||||
"//pkg/kubelet/images:go_default_library",
|
||||
"//pkg/kubelet/kubeletconfig:go_default_library",
|
||||
"//pkg/kubelet/kubeletconfig/status:go_default_library",
|
||||
"//pkg/kubelet/logs:go_default_library",
|
||||
"//pkg/kubelet/metrics:go_default_library",
|
||||
"//pkg/kubelet/types:go_default_library",
|
||||
"//pkg/security/apparmor:go_default_library",
|
||||
"//test/e2e/common:go_default_library",
|
||||
"//test/e2e/framework:go_default_library",
|
||||
"//test/e2e_node/services:go_default_library",
|
||||
"//test/e2e_node/system:go_default_library",
|
||||
"//test/utils/image:go_default_library",
|
||||
"//vendor/github.com/blang/semver:go_default_library",
|
||||
"//vendor/github.com/coreos/go-systemd/util:go_default_library",
|
||||
"//vendor/github.com/davecgh/go-spew/spew:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/github.com/kardianos/osext:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo/config:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo/reporters:go_default_library",
|
||||
"//vendor/github.com/onsi/gomega:go_default_library",
|
||||
"//vendor/github.com/onsi/gomega/gstruct:go_default_library",
|
||||
"//vendor/github.com/onsi/gomega/types:go_default_library",
|
||||
"//vendor/github.com/spf13/pflag:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
@@ -159,15 +158,21 @@ go_test(
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/yaml:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
] + select({
|
||||
"@io_bazel_rules_go//go/platform:linux_amd64": [
|
||||
"@io_bazel_rules_go//go/platform:linux": [
|
||||
"//test/e2e/common:go_default_library",
|
||||
"//test/e2e/framework/metrics:go_default_library",
|
||||
"//test/e2e_node/system:go_default_library",
|
||||
"//test/utils:go_default_library",
|
||||
"//vendor/github.com/kardianos/osext:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo/config:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo/reporters:go_default_library",
|
||||
"//vendor/github.com/spf13/pflag:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/yaml:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
@@ -204,4 +209,5 @@ filegroup(
|
||||
"//test/e2e_node/system:all-srcs",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
2 vendor/k8s.io/kubernetes/test/e2e_node/OWNERS (generated, vendored)
@@ -4,5 +4,7 @@ approvers:
- vishh
- derekwaynecarr
- yujuhong
- balajismaniam
- ConnorDoyle
reviewers:
- sig-node-reviewers
2 vendor/k8s.io/kubernetes/test/e2e_node/apparmor_test.go (generated, vendored)
@@ -50,7 +50,7 @@ var _ = framework.KubeDescribe("AppArmor [Feature:AppArmor]", func() {
f := framework.NewDefaultFramework("apparmor-test")

It("should reject an unloaded profile", func() {
status := runAppArmorTest(f, false, apparmor.ProfileNamePrefix+"non-existant-profile")
status := runAppArmorTest(f, false, apparmor.ProfileNamePrefix+"non-existent-profile")
expectSoftRejection(status)
})
It("should enforce a profile blocking writes", func() {
23 vendor/k8s.io/kubernetes/test/e2e_node/builder/build.go (generated, vendored)
@@ -87,8 +87,27 @@ func getK8sBin(bin string) (string, error) {
return "", fmt.Errorf("Unable to locate %s. Can be defined using --k8s-path.", bin)
}

// TODO: Dedup / merge this with comparable utilities in e2e/util.go
// GetK8sRootDir returns the root directory for kubernetes, if present in the gopath.
func GetK8sRootDir() (string, error) {
dir, err := RootDir()
if err != nil {
return "", err
}
return filepath.Join(dir, fmt.Sprintf("%s/", "k8s.io/kubernetes")), nil
}

// GetCAdvisorRootDir returns the root directory for cAdvisor, if present in the gopath.
func GetCAdvisorRootDir() (string, error) {
dir, err := RootDir()
if err != nil {
return "", err
}
return filepath.Join(dir, fmt.Sprintf("%s/", "github.com/google/cadvisor")), nil
}

// TODO: Dedup / merge this with comparable utilities in e2e/util.go
// RootDir returns the path to the directory containing the k8s.io directory
func RootDir() (string, error) {
// Get the directory of the current executable
_, testExec, _, _ := runtime.Caller(0)
path := filepath.Dir(testExec)
@@ -96,7 +115,7 @@ func GetK8sRootDir() (string, error) {
// Look for the kubernetes source root directory
if strings.Contains(path, "k8s.io/kubernetes") {
splitPath := strings.Split(path, "k8s.io/kubernetes")
return filepath.Join(splitPath[0], "k8s.io/kubernetes/"), nil
return splitPath[0], nil
}

return "", fmt.Errorf("Could not find kubernetes source root directory.")
8 vendor/k8s.io/kubernetes/test/e2e_node/conformance/build/Makefile (generated, vendored)
@@ -15,7 +15,7 @@
# Build the node-test image.
#
# Usage:
# [ARCH=amd64] [REGISTRY="gcr.io/google_containers"] [BIN_DIR="../../../../_output/bin"] make (build|push) VERSION={some_version_number e.g. 0.1}
# [ARCH=amd64] [REGISTRY="staging-k8s.gcr.io"] [BIN_DIR="../../../../_output/bin"] make (build|push) VERSION={some_version_number e.g. 0.1}

# SYSTEM_SPEC_NAME is the name of the system spec used for the node conformance
# test. The specs are expected to be in SYSTEM_SPEC_DIR.
@@ -23,7 +23,7 @@ SYSTEM_SPEC_NAME?=
SYSTEM_SPEC_DIR?=../../system/specs

# TODO(random-liu): Add this into release progress.
REGISTRY?=gcr.io/google_containers
REGISTRY?=staging-k8s.gcr.io
ARCH?=amd64
# BIN_DIR is the directory to find binaries, overwrite with ../../../../_output/bin
# for local development.
@@ -76,10 +76,10 @@ endif
docker build --pull -t ${IMAGE_NAME}-${ARCH}:${VERSION} ${TEMP_DIR}

push: build
gcloud docker -- push ${IMAGE_NAME}-${ARCH}:${VERSION}
docker push ${IMAGE_NAME}-${ARCH}:${VERSION}
ifeq ($(ARCH),amd64)
docker tag ${IMAGE_NAME}-${ARCH}:${VERSION} ${IMAGE_NAME}:${VERSION}
gcloud docker -- push ${IMAGE_NAME}:${VERSION}
docker push ${IMAGE_NAME}:${VERSION}
endif

.PHONY: all
2 vendor/k8s.io/kubernetes/test/e2e_node/conformance/run_test.sh (generated, vendored)
@@ -44,7 +44,7 @@ SKIP=${SKIP:-""}
TEST_ARGS=${TEST_ARGS:-""}

# REGISTRY is the image registry for node test image.
REGISTRY=${REGISTRY:-"gcr.io/google_containers"}
REGISTRY=${REGISTRY:-"k8s.gcr.io"}

# ARCH is the architecture of current machine, the script will use this to
# select corresponding test container image.
1 vendor/k8s.io/kubernetes/test/e2e_node/container.go (generated, vendored)
@@ -28,7 +28,6 @@ import (
)

// One pod one container
// TODO: This should be migrated to the e2e framework.
type ConformanceContainer struct {
Container v1.Container
RestartPolicy v1.RestartPolicy
107 vendor/k8s.io/kubernetes/test/e2e_node/container_log_rotation_test.go (generated, vendored, normal file)
@@ -0,0 +1,107 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
kubelogs "k8s.io/kubernetes/pkg/kubelet/logs"
|
||||
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
const (
|
||||
testContainerLogMaxFiles = 3
|
||||
testContainerLogMaxSize = "40Ki"
|
||||
rotationPollInterval = 5 * time.Second
|
||||
rotationEventuallyTimeout = 3 * time.Minute
|
||||
rotationConsistentlyTimeout = 2 * time.Minute
|
||||
)
|
||||
|
||||
var _ = framework.KubeDescribe("ContainerLogRotation [Slow] [Serial] [Disruptive]", func() {
|
||||
f := framework.NewDefaultFramework("container-log-rotation-test")
|
||||
Context("when a container generates a lot of log", func() {
|
||||
BeforeEach(func() {
|
||||
if framework.TestContext.ContainerRuntime != kubetypes.RemoteContainerRuntime {
|
||||
framework.Skipf("Skipping ContainerLogRotation test since the container runtime is not remote")
|
||||
}
|
||||
})
|
||||
|
||||
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
|
||||
initialConfig.FeatureGates[string(features.CRIContainerLogRotation)] = true
|
||||
initialConfig.ContainerLogMaxFiles = testContainerLogMaxFiles
|
||||
initialConfig.ContainerLogMaxSize = testContainerLogMaxSize
|
||||
})
|
||||
|
||||
It("should be rotated and limited to a fixed amount of files", func() {
|
||||
By("create log container")
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-container-log-rotation",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "log-container",
|
||||
Image: busyboxImage,
|
||||
Command: []string{
|
||||
"sh",
|
||||
"-c",
|
||||
// ~12Kb/s. Exceeding 40Kb in 4 seconds. Log rotation period is 10 seconds.
|
||||
"while true; do echo hello world; sleep 0.001; done;",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
pod = f.PodClient().CreateSync(pod)
|
||||
By("get container log path")
|
||||
Expect(len(pod.Status.ContainerStatuses)).To(Equal(1))
|
||||
id := kubecontainer.ParseContainerID(pod.Status.ContainerStatuses[0].ContainerID).ID
|
||||
r, _, err := getCRIClient()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
status, err := r.ContainerStatus(id)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
logPath := status.GetLogPath()
|
||||
By("wait for container log being rotated to max file limit")
|
||||
Eventually(func() (int, error) {
|
||||
logs, err := kubelogs.GetAllLogs(logPath)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return len(logs), nil
|
||||
}, rotationEventuallyTimeout, rotationPollInterval).Should(Equal(testContainerLogMaxFiles), "should eventually rotate to max file limit")
|
||||
By("make sure container log number won't exceed max file limit")
|
||||
Consistently(func() (int, error) {
|
||||
logs, err := kubelogs.GetAllLogs(logPath)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return len(logs), nil
|
||||
}, rotationConsistentlyTimeout, rotationPollInterval).Should(BeNumerically("<=", testContainerLogMaxFiles), "should never exceed max file limit")
|
||||
})
|
||||
})
|
||||
})
|
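(Illustrative note, not part of the vendored diff: the "~12Kb/s" comment in the test above can be checked with a rough estimate. Assuming `echo hello world` writes 12 bytes per line and `sleep 0.001` caps the loop at about 1000 lines per second,

$$ 12\ \mathrm{B/line} \times 1000\ \mathrm{lines/s} \approx 12\ \mathrm{kB/s}, \qquad \frac{40960\ \mathrm{B}}{12\,000\ \mathrm{B/s}} \approx 3.4\ \mathrm{s}, $$

so the 40Ki ContainerLogMaxSize is exceeded within roughly four seconds, well inside the 10-second rotation period mentioned in the comment.)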
27 vendor/k8s.io/kubernetes/test/e2e_node/container_manager_test.go (generated, vendored)
@@ -31,6 +31,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/uuid"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
"k8s.io/kubernetes/test/e2e/framework"

. "github.com/onsi/ginkgo"
@@ -76,10 +77,10 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
f := framework.NewDefaultFramework("kubelet-container-manager")
Describe("Validate OOM score adjustments", func() {
Context("once the node is setup", func() {
It("docker daemon's oom-score-adj should be -999", func() {
dockerPids, err := getPidsForProcess(dockerProcessName, dockerPidFile)
Expect(err).To(BeNil(), "failed to get list of docker daemon pids")
for _, pid := range dockerPids {
It("container runtime's oom-score-adj should be -999", func() {
runtimePids, err := getPidsForProcess(framework.TestContext.ContainerRuntimeProcessName, framework.TestContext.ContainerRuntimePidFile)
Expect(err).To(BeNil(), "failed to get list of container runtime pids")
for _, pid := range runtimePids {
Eventually(func() error {
return validateOOMScoreAdjSetting(pid, -999)
}, 5*time.Minute, 30*time.Second).Should(BeNil())
@@ -148,14 +149,22 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
return validateOOMScoreAdjSetting(shPids[0], 1000)
}, 2*time.Minute, time.Second*4).Should(BeNil())
})
// Log the running containers here to help debugging. Use `docker ps`
// directly for now because the test is already docker specific.
// Log the running containers here to help debugging.
AfterEach(func() {
if CurrentGinkgoTestDescription().Failed {
By("Dump all running docker containers")
output, err := exec.Command("docker", "ps").CombinedOutput()
By("Dump all running containers")
runtime, _, err := getCRIClient()
Expect(err).NotTo(HaveOccurred())
framework.Logf("Running docker containers:\n%s", string(output))
containers, err := runtime.ListContainers(&runtimeapi.ContainerFilter{
State: &runtimeapi.ContainerStateValue{
State: runtimeapi.ContainerState_CONTAINER_RUNNING,
},
})
Expect(err).NotTo(HaveOccurred())
framework.Logf("Running containers:")
for _, c := range containers {
framework.Logf("%+v", c)
}
}
})
})
70 vendor/k8s.io/kubernetes/test/e2e_node/cpu_manager_test.go (generated, vendored)
@@ -26,10 +26,11 @@ import (
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
|
||||
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
|
||||
"k8s.io/kubernetes/pkg/kubelet/types"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
@@ -101,14 +102,21 @@ func getLocalNodeCPUDetails(f *framework.Framework) (cpuCapVal int64, cpuAllocVa
|
||||
return cpuCap.Value(), (cpuCap.Value() - cpuRes.Value()), cpuRes.Value()
|
||||
}
|
||||
|
||||
// TODO(balajismaniam): Make this func generic to all container runtimes.
|
||||
func waitForContainerRemoval(ctnPartName string) {
|
||||
func waitForContainerRemoval(containerName, podName, podNS string) {
|
||||
rs, _, err := getCRIClient()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Eventually(func() bool {
|
||||
err := exec.Command("/bin/sh", "-c", fmt.Sprintf("if [ -n \"$(docker ps -a | grep -i %s)\" ]; then exit 1; fi", ctnPartName)).Run()
|
||||
containers, err := rs.ListContainers(&runtimeapi.ContainerFilter{
|
||||
LabelSelector: map[string]string{
|
||||
types.KubernetesPodNameLabel: podName,
|
||||
types.KubernetesPodNamespaceLabel: podNS,
|
||||
types.KubernetesContainerNameLabel: containerName,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
return len(containers) == 0
|
||||
}, 2*time.Minute, 1*time.Second).Should(BeTrue())
|
||||
}
|
||||
|
||||
@@ -128,24 +136,42 @@ func getCPUSiblingList(cpuRes int64) string {
|
||||
return string(out)
|
||||
}
|
||||
|
||||
func deleteStateFile() {
|
||||
err := exec.Command("/bin/sh", "-c", "rm -f /var/lib/kubelet/cpu_manager_state").Run()
|
||||
framework.ExpectNoError(err, "error deleting state file")
|
||||
}
|
||||
|
||||
func setOldKubeletConfig(f *framework.Framework, oldCfg *kubeletconfig.KubeletConfiguration) {
|
||||
// Delete the CPU Manager state file so that the old Kubelet configuration
|
||||
// can take effect.i
|
||||
deleteStateFile()
|
||||
|
||||
if oldCfg != nil {
|
||||
framework.ExpectNoError(setKubeletConfiguration(f, oldCfg))
|
||||
}
|
||||
}
|
||||
|
||||
func enableCPUManagerInKubelet(f *framework.Framework) (oldCfg *kubeletconfig.KubeletConfiguration) {
|
||||
// Run only if the container runtime is Docker.
|
||||
// TODO(balajismaniam): Make this test generic to all container runtimes.
|
||||
framework.RunIfContainerRuntimeIs("docker")
|
||||
// Run only if the container runtime is not docker or remote (not rkt).
|
||||
framework.RunIfContainerRuntimeIs("docker", "remote")
|
||||
|
||||
// Enable CPU Manager in Kubelet with static policy.
|
||||
oldCfg, err := getCurrentKubeletConfig()
|
||||
framework.ExpectNoError(err)
|
||||
newCfg := oldCfg.DeepCopy()
|
||||
if newCfg.FeatureGates == nil {
|
||||
newCfg.FeatureGates = make(map[string]bool)
|
||||
}
|
||||
|
||||
// Enable CPU Manager using feature gate.
|
||||
newCfg.FeatureGates[string(features.CPUManager)] = true
|
||||
// After graduation of the CPU Manager feature to Beta, the CPU Manager
|
||||
// "none" policy is ON by default. But when we set the CPU Manager policy to
|
||||
// "static" in this test and the Kubelet is restarted so that "static"
|
||||
// policy can take effect, there will always be a conflict with the state
|
||||
// checkpointed in the disk (i.e., the policy checkpointed in the disk will
|
||||
// be "none" whereas we are trying to restart Kubelet with "static"
|
||||
// policy). Therefore, we delete the state file so that we can proceed
|
||||
// with the tests.
|
||||
deleteStateFile()
|
||||
|
||||
// Set the CPU Manager policy to static.
|
||||
newCfg.CPUManagerPolicy = string(cpumanager.PolicyStatic)
|
||||
@@ -176,7 +202,7 @@ func enableCPUManagerInKubelet(f *framework.Framework) (oldCfg *kubeletconfig.Ku
|
||||
}
|
||||
|
||||
func runCPUManagerTests(f *framework.Framework) {
|
||||
var cpuCap, cpuAlloc, cpuRes int64
|
||||
var cpuCap, cpuAlloc int64
|
||||
var oldCfg *kubeletconfig.KubeletConfiguration
|
||||
var cpuListString, expAllowedCPUsListRegex string
|
||||
var cpuList []int
|
||||
@@ -187,7 +213,7 @@ func runCPUManagerTests(f *framework.Framework) {
|
||||
var pod, pod1, pod2 *v1.Pod
|
||||
|
||||
It("should assign CPUs as expected based on the Pod spec", func() {
|
||||
cpuCap, cpuAlloc, cpuRes = getLocalNodeCPUDetails(f)
|
||||
cpuCap, cpuAlloc, _ = getLocalNodeCPUDetails(f)
|
||||
|
||||
// Skip CPU Manager tests altogether if the CPU capacity < 2.
|
||||
if cpuCap < 2 {
|
||||
@@ -216,7 +242,7 @@ func runCPUManagerTests(f *framework.Framework) {
|
||||
|
||||
By("by deleting the pods and waiting for container removal")
|
||||
deletePods(f, []string{pod.Name})
|
||||
waitForContainerRemoval(fmt.Sprintf("%s_%s", pod.Spec.Containers[0].Name, pod.Name))
|
||||
waitForContainerRemoval(pod.Spec.Containers[0].Name, pod.Name, pod.Namespace)
|
||||
|
||||
By("running a Gu pod")
|
||||
ctnAttrs = []ctnAttribute{
|
||||
@@ -242,7 +268,7 @@ func runCPUManagerTests(f *framework.Framework) {
|
||||
|
||||
By("by deleting the pods and waiting for container removal")
|
||||
deletePods(f, []string{pod.Name})
|
||||
waitForContainerRemoval(fmt.Sprintf("%s_%s", pod.Spec.Containers[0].Name, pod.Name))
|
||||
waitForContainerRemoval(pod.Spec.Containers[0].Name, pod.Name, pod.Namespace)
|
||||
|
||||
By("running multiple Gu and non-Gu pods")
|
||||
ctnAttrs = []ctnAttribute{
|
||||
@@ -288,8 +314,8 @@ func runCPUManagerTests(f *framework.Framework) {
|
||||
|
||||
By("by deleting the pods and waiting for container removal")
|
||||
deletePods(f, []string{pod1.Name, pod2.Name})
|
||||
waitForContainerRemoval(fmt.Sprintf("%s_%s", pod1.Spec.Containers[0].Name, pod1.Name))
|
||||
waitForContainerRemoval(fmt.Sprintf("%s_%s", pod2.Spec.Containers[0].Name, pod2.Name))
|
||||
waitForContainerRemoval(pod1.Spec.Containers[0].Name, pod1.Name, pod1.Namespace)
|
||||
waitForContainerRemoval(pod2.Spec.Containers[0].Name, pod2.Name, pod2.Namespace)
|
||||
|
||||
// Skip rest of the tests if CPU capacity < 3.
|
||||
if cpuCap < 3 {
|
||||
@@ -324,7 +350,7 @@ func runCPUManagerTests(f *framework.Framework) {
|
||||
|
||||
By("by deleting the pods and waiting for container removal")
|
||||
deletePods(f, []string{pod.Name})
|
||||
waitForContainerRemoval(fmt.Sprintf("%s_%s", pod.Spec.Containers[0].Name, pod.Name))
|
||||
waitForContainerRemoval(pod.Spec.Containers[0].Name, pod.Name, pod.Namespace)
|
||||
|
||||
By("running a Gu pod with multiple containers requesting integer CPUs")
|
||||
ctnAttrs = []ctnAttribute{
|
||||
@@ -362,8 +388,8 @@ func runCPUManagerTests(f *framework.Framework) {
|
||||
|
||||
By("by deleting the pods and waiting for container removal")
|
||||
deletePods(f, []string{pod.Name})
|
||||
waitForContainerRemoval(fmt.Sprintf("%s_%s", pod.Spec.Containers[0].Name, pod.Name))
|
||||
waitForContainerRemoval(fmt.Sprintf("%s_%s", pod.Spec.Containers[1].Name, pod.Name))
|
||||
waitForContainerRemoval(pod.Spec.Containers[0].Name, pod.Name, pod.Namespace)
|
||||
waitForContainerRemoval(pod.Spec.Containers[1].Name, pod.Name, pod.Namespace)
|
||||
|
||||
By("running multiple Gu pods")
|
||||
ctnAttrs = []ctnAttribute{
|
||||
@@ -407,15 +433,15 @@ func runCPUManagerTests(f *framework.Framework) {
|
||||
|
||||
By("by deleting the pods and waiting for container removal")
|
||||
deletePods(f, []string{pod1.Name, pod2.Name})
|
||||
waitForContainerRemoval(fmt.Sprintf("%s_%s", pod1.Spec.Containers[0].Name, pod1.Name))
|
||||
waitForContainerRemoval(fmt.Sprintf("%s_%s", pod2.Spec.Containers[0].Name, pod2.Name))
|
||||
waitForContainerRemoval(pod1.Spec.Containers[0].Name, pod1.Name, pod1.Namespace)
|
||||
waitForContainerRemoval(pod2.Spec.Containers[0].Name, pod2.Name, pod2.Namespace)
|
||||
|
||||
setOldKubeletConfig(f, oldCfg)
|
||||
})
|
||||
}
|
||||
|
||||
// Serial because the test updates kubelet configuration.
|
||||
var _ = SIGDescribe("CPU Manager [Feature:CPUManager]", func() {
|
||||
var _ = SIGDescribe("CPU Manager [Serial] [Feature:CPUManager]", func() {
|
||||
f := framework.NewDefaultFramework("cpu-manager-test")
|
||||
|
||||
Context("With kubeconfig updated with static CPU Manager policy run the CPU Manager tests", func() {
|
||||
|
3 vendor/k8s.io/kubernetes/test/e2e_node/critical_pod_test.go (generated, vendored)
@@ -27,6 +27,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -130,7 +131,7 @@ func getTestPod(critical bool, name string, resources v1.ResourceRequirements) *
Containers: []v1.Container{
{
Name: "container",
Image: framework.GetPauseImageNameForHostArch(),
Image: imageutils.GetPauseImageNameForHostArch(),
Resources: resources,
},
},
65 vendor/k8s.io/kubernetes/test/e2e_node/density_test.go (generated, vendored)
@@ -31,10 +31,12 @@ import (
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
|
||||
stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
|
||||
kubemetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/framework/metrics"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
@@ -189,22 +191,28 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
|
||||
|
||||
for _, testArg := range dTests {
|
||||
itArg := testArg
|
||||
desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval (QPS %d) [Benchmark]", itArg.podsNr, itArg.interval, itArg.APIQPSLimit)
|
||||
It(desc, func() {
|
||||
itArg.createMethod = "batch"
|
||||
testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)
|
||||
Context("", func() {
|
||||
desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval (QPS %d) [Benchmark]", itArg.podsNr, itArg.interval, itArg.APIQPSLimit)
|
||||
// The latency caused by API QPS limit takes a large portion (up to ~33%) of e2e latency.
|
||||
// It makes the pod startup latency of Kubelet (creation throughput as well) under-estimated.
|
||||
// Here we set API QPS limit from default 5 to 60 in order to test real Kubelet performance.
|
||||
// Note that it will cause higher resource usage.
|
||||
setKubeletAPIQPSLimit(f, int32(itArg.APIQPSLimit))
|
||||
batchLag, e2eLags := runDensityBatchTest(f, rc, itArg, testInfo, true)
|
||||
tempSetCurrentKubeletConfig(f, func(cfg *kubeletconfig.KubeletConfiguration) {
|
||||
framework.Logf("Old QPS limit is: %d", cfg.KubeAPIQPS)
|
||||
// Set new API QPS limit
|
||||
cfg.KubeAPIQPS = int32(itArg.APIQPSLimit)
|
||||
})
|
||||
It(desc, func() {
|
||||
itArg.createMethod = "batch"
|
||||
testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)
|
||||
batchLag, e2eLags := runDensityBatchTest(f, rc, itArg, testInfo, true)
|
||||
|
||||
By("Verifying latency")
|
||||
logAndVerifyLatency(batchLag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testInfo, false)
|
||||
By("Verifying latency")
|
||||
logAndVerifyLatency(batchLag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testInfo, false)
|
||||
|
||||
By("Verifying resource")
|
||||
logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, false)
|
||||
By("Verifying resource")
|
||||
logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, false)
|
||||
})
|
||||
})
|
||||
}
|
||||
})
|
||||
@@ -324,7 +332,7 @@ func runDensityBatchTest(f *framework.Framework, rc *ResourceCollector, testArg
|
||||
)
|
||||
|
||||
// create test pod data structure
|
||||
pods := newTestPods(testArg.podsNr, true, framework.GetPauseImageNameForHostArch(), podType)
|
||||
pods := newTestPods(testArg.podsNr, true, imageutils.GetPauseImageNameForHostArch(), podType)
|
||||
|
||||
// the controller watches the change of pod status
|
||||
controller := newInformerWatchPod(f, mutex, watchTimes, podType)
|
||||
@@ -405,8 +413,8 @@ func runDensitySeqTest(f *framework.Framework, rc *ResourceCollector, testArg de
|
||||
podType = "density_test_pod"
|
||||
sleepBeforeCreatePods = 30 * time.Second
|
||||
)
|
||||
bgPods := newTestPods(testArg.bgPodsNr, true, framework.GetPauseImageNameForHostArch(), "background_pod")
|
||||
testPods := newTestPods(testArg.podsNr, true, framework.GetPauseImageNameForHostArch(), podType)
|
||||
bgPods := newTestPods(testArg.bgPodsNr, true, imageutils.GetPauseImageNameForHostArch(), "background_pod")
|
||||
testPods := newTestPods(testArg.podsNr, true, imageutils.GetPauseImageNameForHostArch(), podType)
|
||||
|
||||
By("Creating a batch of background pods")
|
||||
|
||||
@@ -569,34 +577,3 @@ func logAndVerifyLatency(batchLag time.Duration, e2eLags []framework.PodLatencyD
|
||||
func logPodCreateThroughput(batchLag time.Duration, e2eLags []framework.PodLatencyData, podsNr int, testInfo map[string]string) {
|
||||
logPerfData(getThroughputPerfData(batchLag, e2eLags, podsNr, testInfo), "throughput")
|
||||
}
|
||||
|
||||
// setKubeletAPIQPSLimit sets Kubelet API QPS via ConfigMap. Kubelet will restart with the new QPS.
|
||||
func setKubeletAPIQPSLimit(f *framework.Framework, newAPIQPS int32) {
|
||||
const restartGap = 40 * time.Second
|
||||
|
||||
resp := pollConfigz(2*time.Minute, 5*time.Second)
|
||||
kubeCfg, err := decodeConfigz(resp)
|
||||
framework.ExpectNoError(err)
|
||||
framework.Logf("Old QPS limit is: %d\n", kubeCfg.KubeAPIQPS)
|
||||
|
||||
// Set new API QPS limit
|
||||
kubeCfg.KubeAPIQPS = newAPIQPS
|
||||
// TODO(coufon): createConfigMap should firstly check whether configmap already exists, if so, use updateConfigMap.
|
||||
// Calling createConfigMap twice will result in error. It is fine for benchmark test because we only run one test on a new node.
|
||||
_, err = createConfigMap(f, kubeCfg)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Wait for Kubelet to restart
|
||||
time.Sleep(restartGap)
|
||||
|
||||
// Check new QPS has been set
|
||||
resp = pollConfigz(2*time.Minute, 5*time.Second)
|
||||
kubeCfg, err = decodeConfigz(resp)
|
||||
framework.ExpectNoError(err)
|
||||
framework.Logf("New QPS limit is: %d\n", kubeCfg.KubeAPIQPS)
|
||||
|
||||
// TODO(coufon): check test result to see if we need to retry here
|
||||
if kubeCfg.KubeAPIQPS != newAPIQPS {
|
||||
framework.Failf("Fail to set new kubelet API QPS limit.")
|
||||
}
|
||||
}
|
||||
|
255 vendor/k8s.io/kubernetes/test/e2e_node/device_plugin.go (generated, vendored, normal file)
@@ -0,0 +1,255 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"regexp"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1"
|
||||
dm "k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
const (
|
||||
// fake resource name
|
||||
resourceName = "fake.com/resource"
|
||||
)
|
||||
|
||||
// Serial because the test restarts Kubelet
|
||||
var _ = framework.KubeDescribe("Device Plugin [Feature:DevicePlugin]", func() {
|
||||
f := framework.NewDefaultFramework("device-plugin-errors")
|
||||
|
||||
Context("DevicePlugin", func() {
|
||||
It("Verifies the Kubelet device plugin functionality.", func() {
|
||||
By("Start stub device plugin")
|
||||
// fake devices for e2e test
|
||||
devs := []*pluginapi.Device{
|
||||
{ID: "Dev-1", Health: pluginapi.Healthy},
|
||||
{ID: "Dev-2", Health: pluginapi.Healthy},
|
||||
}
|
||||
|
||||
socketPath := pluginapi.DevicePluginPath + "dp." + fmt.Sprintf("%d", time.Now().Unix())
|
||||
|
||||
dp1 := dm.NewDevicePluginStub(devs, socketPath)
|
||||
dp1.SetAllocFunc(stubAllocFunc)
|
||||
err := dp1.Start()
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("Register resources")
|
||||
err = dp1.Register(pluginapi.KubeletSocket, resourceName, false)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("Waiting for the resource exported by the stub device plugin to become available on the local node")
|
||||
devsLen := int64(len(devs))
|
||||
Eventually(func() int64 {
|
||||
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
return numberOfDevices(node, resourceName)
|
||||
}, 30*time.Second, framework.Poll).Should(Equal(devsLen))
|
||||
|
||||
By("Creating one pod on node with at least one fake-device")
|
||||
podRECMD := "devs=$(ls /tmp/ | egrep '^Dev-[0-9]+$') && echo stub devices: $devs"
|
||||
pod1 := f.PodClient().CreateSync(makeBusyboxPod(resourceName, podRECMD))
|
||||
deviceIDRE := "stub devices: (Dev-[0-9]+)"
|
||||
count1, devId1 := parseLogFromNRuns(f, pod1.Name, pod1.Name, 0, deviceIDRE)
|
||||
Expect(devId1).To(Not(Equal("")))
|
||||
|
||||
pod1, err = f.PodClient().Get(pod1.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("Restarting Kubelet and waiting for the current running pod to restart")
|
||||
restartKubelet()
|
||||
|
||||
By("Confirming that after a kubelet and pod restart, fake-device assignement is kept")
|
||||
count1, devIdRestart1 := parseLogFromNRuns(f, pod1.Name, pod1.Name, count1+1, deviceIDRE)
|
||||
Expect(devIdRestart1).To(Equal(devId1))
|
||||
|
||||
By("Wait for node is ready")
|
||||
framework.WaitForAllNodesSchedulable(f.ClientSet, framework.TestContext.NodeSchedulableTimeout)
|
||||
|
||||
By("Re-Register resources")
|
||||
dp1 = dm.NewDevicePluginStub(devs, socketPath)
|
||||
dp1.SetAllocFunc(stubAllocFunc)
|
||||
err = dp1.Start()
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
err = dp1.Register(pluginapi.KubeletSocket, resourceName, false)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("Waiting for resource to become available on the local node after re-registration")
|
||||
Eventually(func() int64 {
|
||||
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
return numberOfDevices(node, resourceName)
|
||||
}, 30*time.Second, framework.Poll).Should(Equal(devsLen))
|
||||
|
||||
By("Creating another pod")
|
||||
pod2 := f.PodClient().CreateSync(makeBusyboxPod(resourceName, podRECMD))
|
||||
|
||||
By("Checking that pods got a different GPU")
|
||||
count2, devId2 := parseLogFromNRuns(f, pod2.Name, pod2.Name, 1, deviceIDRE)
|
||||
|
||||
Expect(devId1).To(Not(Equal(devId2)))
|
||||
|
||||
By("Deleting device plugin.")
|
||||
err = dp1.Stop()
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("Waiting for stub device plugin to become unavailable on the local node")
|
||||
Eventually(func() bool {
|
||||
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
return numberOfDevices(node, resourceName) <= 0
|
||||
}, 10*time.Minute, framework.Poll).Should(BeTrue())
|
||||
|
||||
By("Checking that scheduled pods can continue to run even after we delete device plugin.")
|
||||
count1, devIdRestart1 = parseLogFromNRuns(f, pod1.Name, pod1.Name, count1+1, deviceIDRE)
|
||||
Expect(devIdRestart1).To(Equal(devId1))
|
||||
count2, devIdRestart2 := parseLogFromNRuns(f, pod2.Name, pod2.Name, count2+1, deviceIDRE)
|
||||
Expect(devIdRestart2).To(Equal(devId2))
|
||||
|
||||
By("Restarting Kubelet.")
|
||||
restartKubelet()
|
||||
|
||||
By("Checking that scheduled pods can continue to run even after we delete device plugin and restart Kubelet.")
|
||||
count1, devIdRestart1 = parseLogFromNRuns(f, pod1.Name, pod1.Name, count1+2, deviceIDRE)
|
||||
Expect(devIdRestart1).To(Equal(devId1))
|
||||
count2, devIdRestart2 = parseLogFromNRuns(f, pod2.Name, pod2.Name, count2+2, deviceIDRE)
|
||||
Expect(devIdRestart2).To(Equal(devId2))
|
||||
|
||||
// Cleanup
|
||||
f.PodClient().DeleteSync(pod1.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
|
||||
f.PodClient().DeleteSync(pod2.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
// makeBusyboxPod returns a simple Pod spec with a busybox container
|
||||
// that requests resourceName and runs the specified command.
|
||||
func makeBusyboxPod(resourceName, cmd string) *v1.Pod {
|
||||
podName := "device-plugin-test-" + string(uuid.NewUUID())
|
||||
rl := v1.ResourceList{v1.ResourceName(resourceName): *resource.NewQuantity(1, resource.DecimalSI)}
|
||||
|
||||
return &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: podName},
|
||||
Spec: v1.PodSpec{
|
||||
RestartPolicy: v1.RestartPolicyAlways,
|
||||
Containers: []v1.Container{{
|
||||
Image: busyboxImage,
|
||||
Name: podName,
|
||||
// Runs the specified command in the test pod.
|
||||
Command: []string{"sh", "-c", cmd},
|
||||
Resources: v1.ResourceRequirements{
|
||||
Limits: rl,
|
||||
Requests: rl,
|
||||
},
|
||||
}},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// parseLogFromNRuns returns restart count of the specified container
|
||||
// after it has been restarted at least restartCount times,
|
||||
// and the matching string for the specified regular expression parsed from the container logs.
|
||||
func parseLogFromNRuns(f *framework.Framework, podName string, contName string, restartCount int32, re string) (int32, string) {
|
||||
var count int32
|
||||
// Wait till pod has been restarted at least restartCount times.
|
||||
Eventually(func() bool {
|
||||
p, err := f.PodClient().Get(podName, metav1.GetOptions{})
|
||||
if err != nil || len(p.Status.ContainerStatuses) < 1 {
|
||||
return false
|
||||
}
|
||||
count = p.Status.ContainerStatuses[0].RestartCount
|
||||
return count >= restartCount
|
||||
}, 5*time.Minute, framework.Poll).Should(BeTrue())
|
||||
|
||||
logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, contName)
|
||||
if err != nil {
|
||||
framework.Failf("GetPodLogs for pod %q failed: %v", podName, err)
|
||||
}
|
||||
|
||||
framework.Logf("got pod logs: %v", logs)
|
||||
regex := regexp.MustCompile(re)
|
||||
matches := regex.FindStringSubmatch(logs)
|
||||
if len(matches) < 2 {
|
||||
return count, ""
|
||||
}
|
||||
|
||||
return count, matches[1]
|
||||
}
|
||||
|
||||
// numberOfDevices returns the number of devices of resourceName advertised by a node
|
||||
func numberOfDevices(node *v1.Node, resourceName string) int64 {
|
||||
val, ok := node.Status.Capacity[v1.ResourceName(resourceName)]
|
||||
if !ok {
|
||||
return 0
|
||||
}
|
||||
|
||||
return val.Value()
|
||||
}
|
||||
|
||||
// stubAllocFunc will pass to stub device plugin
|
||||
func stubAllocFunc(r *pluginapi.AllocateRequest, devs map[string]pluginapi.Device) (*pluginapi.AllocateResponse, error) {
|
||||
var responses pluginapi.AllocateResponse
|
||||
for _, req := range r.ContainerRequests {
|
||||
response := &pluginapi.ContainerAllocateResponse{}
|
||||
for _, requestID := range req.DevicesIDs {
|
||||
dev, ok := devs[requestID]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid allocation request with non-existing device %s", requestID)
|
||||
}
|
||||
|
||||
if dev.Health != pluginapi.Healthy {
|
||||
return nil, fmt.Errorf("invalid allocation request with unhealthy device: %s", requestID)
|
||||
}
|
||||
|
||||
// create fake device file
|
||||
fpath := filepath.Join("/tmp", dev.ID)
|
||||
|
||||
// clean first
|
||||
os.RemoveAll(fpath)
|
||||
f, err := os.Create(fpath)
|
||||
if err != nil && !os.IsExist(err) {
|
||||
return nil, fmt.Errorf("failed to create fake device file: %s", err)
|
||||
}
|
||||
|
||||
f.Close()
|
||||
|
||||
response.Mounts = append(response.Mounts, &pluginapi.Mount{
|
||||
ContainerPath: fpath,
|
||||
HostPath: fpath,
|
||||
})
|
||||
}
|
||||
responses.ContainerResponses = append(responses.ContainerResponses, response)
|
||||
}
|
||||
|
||||
return &responses, nil
|
||||
}
|
47 vendor/k8s.io/kubernetes/test/e2e_node/docker_util.go (generated, vendored)
@@ -22,6 +22,7 @@ import (

"github.com/blang/semver"
systemdutil "github.com/coreos/go-systemd/util"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
)

@@ -73,20 +74,54 @@ func isDockerLiveRestoreSupported() (bool, error) {
return version.GTE(semver.MustParse("1.26.0")), nil
}

// getDockerInfo returns the Info struct for the running Docker daemon.
func getDockerInfo() (types.Info, error) {
var info types.Info
c, err := client.NewClient(defaultDockerEndpoint, "", nil, nil)
if err != nil {
return info, fmt.Errorf("failed to create docker client: %v", err)
}
info, err = c.Info(context.Background())
if err != nil {
return info, fmt.Errorf("failed to get docker info: %v", err)
}
return info, nil
}

// isDockerLiveRestoreEnabled returns true if live-restore is enabled in the
// Docker.
func isDockerLiveRestoreEnabled() (bool, error) {
c, err := client.NewClient(defaultDockerEndpoint, "", nil, nil)
info, err := getDockerInfo()
if err != nil {
return false, fmt.Errorf("failed to create docker client: %v", err)
}
info, err := c.Info(context.Background())
if err != nil {
return false, fmt.Errorf("failed to get docker info: %v", err)
return false, err
}
return info.LiveRestoreEnabled, nil
}

// getDockerLoggingDriver returns the name of the logging driver.
func getDockerLoggingDriver() (string, error) {
info, err := getDockerInfo()
if err != nil {
return "", err
}
return info.LoggingDriver, nil
}

// isDockerSELinuxSupportEnabled checks whether the Docker daemon was started
// with SELinux support enabled.
func isDockerSELinuxSupportEnabled() (bool, error) {
info, err := getDockerInfo()
if err != nil {
return false, err
}
for _, s := range info.SecurityOptions {
if s == "selinux" {
return true, nil
}
}
return false, nil
}

// startDockerDaemon starts the Docker daemon.
func startDockerDaemon() error {
switch {
6 vendor/k8s.io/kubernetes/test/e2e_node/dockershim_checkpoint_test.go (generated, vendored)
@@ -45,6 +45,10 @@ const (
var _ = SIGDescribe("Dockershim [Serial] [Disruptive] [Feature:Docker]", func() {
f := framework.NewDefaultFramework("dockerhism-checkpoint-test")

BeforeEach(func() {
framework.RunIfContainerRuntimeIs("docker")
})

It("should clean up pod sandbox checkpoint after pod deletion", func() {
podName := "pod-checkpoint-no-disrupt"
runPodCheckpointTest(f, podName, func() {
@@ -86,7 +90,7 @@ var _ = SIGDescribe("Dockershim [Serial] [Disruptive] [Feature:Docker]", func()
if len(filename) == 0 {
continue
}
framework.Logf("Removing checkpiont %q", filename)
framework.Logf("Removing checkpoint %q", filename)
_, err := exec.Command("sudo", "rm", filename).CombinedOutput()
framework.ExpectNoError(err, "Failed to remove checkpoint file %q: %v", string(filename), err)
}
252 vendor/k8s.io/kubernetes/test/e2e_node/dynamic_kubelet_config_test.go (generated, vendored)
@@ -26,6 +26,7 @@ import (
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
|
||||
controller "k8s.io/kubernetes/pkg/kubelet/kubeletconfig"
|
||||
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status"
|
||||
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
@@ -37,8 +38,13 @@ import (
|
||||
type configState struct {
|
||||
desc string
|
||||
configSource *apiv1.NodeConfigSource
|
||||
expectConfigOK *apiv1.NodeCondition
|
||||
expectConfigOk *apiv1.NodeCondition
|
||||
expectConfig *kubeletconfig.KubeletConfiguration
|
||||
// whether the state would cause a config change event as a result of the update to Node.Spec.ConfigSource,
|
||||
// assuming that the current source would have also caused a config change event.
|
||||
// for example, some malformed references may result in a download failure, in which case the Kubelet
|
||||
// does not restart to change config, while an invalid payload will be detected upon restart
|
||||
event bool
|
||||
}
|
||||
|
||||
// This test is marked [Disruptive] because the Kubelet restarts several times during this test.
|
||||
@@ -79,19 +85,18 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube
|
||||
UID: originalConfigMap.UID,
|
||||
Namespace: originalConfigMap.Namespace,
|
||||
Name: originalConfigMap.Name}},
|
||||
expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionTrue,
|
||||
Message: fmt.Sprintf(status.CurRemoteMessageFmt, originalConfigMap.UID),
|
||||
Reason: status.CurRemoteOKReason},
|
||||
expectConfig: originalKC})
|
||||
expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionTrue,
|
||||
Message: fmt.Sprintf(status.CurRemoteMessageFmt, configMapAPIPath(originalConfigMap)),
|
||||
Reason: status.CurRemoteOkayReason},
|
||||
expectConfig: originalKC,
|
||||
}, false)
|
||||
})
|
||||
|
||||
Context("When setting new NodeConfigSources that cause transitions between ConfigOK conditions", func() {
|
||||
Context("When setting new NodeConfigSources that cause transitions between ConfigOk conditions", func() {
|
||||
It("the Kubelet should report the appropriate status and configz", func() {
|
||||
var err error
|
||||
// we base the "correct" configmap off of the current configuration,
|
||||
// but we also set the trial duration very high to prevent changing the last-known-good
|
||||
// we base the "correct" configmap off of the current configuration
|
||||
correctKC := originalKC.DeepCopy()
|
||||
correctKC.ConfigTrialDuration = &metav1.Duration{Duration: time.Hour}
|
||||
correctConfigMap := newKubeletConfigMap("dynamic-kubelet-config-test-correct", correctKC)
|
||||
correctConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(correctConfigMap)
|
||||
framework.ExpectNoError(err)
|
||||
@@ -118,18 +123,22 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube
|
||||
// Node.Spec.ConfigSource is nil
|
||||
{desc: "Node.Spec.ConfigSource is nil",
|
||||
configSource: nil,
|
||||
expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionTrue,
|
||||
Message: status.CurDefaultMessage,
|
||||
Reason: status.CurDefaultOKReason},
|
||||
expectConfig: nil},
|
||||
expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionTrue,
|
||||
Message: status.CurLocalMessage,
|
||||
Reason: status.CurLocalOkayReason},
|
||||
expectConfig: nil,
|
||||
event: true,
|
||||
},
|
||||
|
||||
// Node.Spec.ConfigSource has all nil subfields
|
||||
{desc: "Node.Spec.ConfigSource has all nil subfields",
|
||||
configSource: &apiv1.NodeConfigSource{ConfigMapRef: nil},
|
||||
expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionFalse,
|
||||
expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionFalse,
|
||||
Message: "",
|
||||
Reason: fmt.Sprintf(status.FailSyncReasonFmt, status.FailSyncReasonAllNilSubfields)},
|
||||
expectConfig: nil},
|
||||
expectConfig: nil,
|
||||
event: false,
|
||||
},
|
||||
|
||||
// Node.Spec.ConfigSource.ConfigMapRef is partial
|
||||
{desc: "Node.Spec.ConfigSource.ConfigMapRef is partial",
|
||||
@@ -137,20 +146,24 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube
|
||||
configSource: &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{
|
||||
UID: "foo",
|
||||
Name: "bar"}}, // missing Namespace
|
||||
expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionFalse,
|
||||
expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionFalse,
|
||||
Message: "",
|
||||
Reason: fmt.Sprintf(status.FailSyncReasonFmt, status.FailSyncReasonPartialObjectReference)},
|
||||
expectConfig: nil},
|
||||
expectConfig: nil,
|
||||
event: false,
|
||||
},
|
||||
|
||||
// Node.Spec.ConfigSource's UID does not align with namespace/name
|
||||
{desc: "Node.Spec.ConfigSource's UID does not align with namespace/name",
|
||||
{desc: "Node.Spec.ConfigSource.ConfigMapRef.UID does not align with Namespace/Name",
|
||||
configSource: &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{UID: "foo",
|
||||
Namespace: correctConfigMap.Namespace,
|
||||
Name: correctConfigMap.Name}},
|
||||
expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionFalse,
|
||||
expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionFalse,
|
||||
Message: "",
|
||||
Reason: fmt.Sprintf(status.FailSyncReasonFmt, fmt.Sprintf(status.FailSyncReasonUIDMismatchFmt, "foo", correctConfigMap.UID))},
|
||||
expectConfig: nil},
|
||||
Reason: fmt.Sprintf(status.FailSyncReasonFmt, fmt.Sprintf(status.FailSyncReasonUIDMismatchFmt, "foo", configMapAPIPath(correctConfigMap), correctConfigMap.UID))},
|
||||
expectConfig: nil,
|
||||
event: false,
|
||||
},
|
||||
|
||||
// correct
|
||||
{desc: "correct",
|
||||
@@ -158,10 +171,12 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube
|
||||
UID: correctConfigMap.UID,
|
||||
Namespace: correctConfigMap.Namespace,
|
||||
Name: correctConfigMap.Name}},
|
||||
expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionTrue,
|
||||
Message: fmt.Sprintf(status.CurRemoteMessageFmt, correctConfigMap.UID),
|
||||
Reason: status.CurRemoteOKReason},
|
||||
expectConfig: correctKC},
|
||||
expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionTrue,
|
||||
Message: fmt.Sprintf(status.CurRemoteMessageFmt, configMapAPIPath(correctConfigMap)),
|
||||
Reason: status.CurRemoteOkayReason},
|
||||
expectConfig: correctKC,
|
||||
event: true,
|
||||
},
|
||||
|
||||
// fail-parse
|
||||
{desc: "fail-parse",
|
||||
@ -169,10 +184,12 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube
|
||||
UID: failParseConfigMap.UID,
|
||||
Namespace: failParseConfigMap.Namespace,
|
||||
Name: failParseConfigMap.Name}},
|
||||
expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionFalse,
|
||||
Message: status.LkgDefaultMessage,
|
||||
Reason: fmt.Sprintf(status.CurFailParseReasonFmt, failParseConfigMap.UID)},
|
||||
expectConfig: nil},
|
||||
expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionFalse,
|
||||
Message: status.LkgLocalMessage,
|
||||
Reason: fmt.Sprintf(status.CurFailParseReasonFmt, configMapAPIPath(failParseConfigMap))},
|
||||
expectConfig: nil,
|
||||
event: true,
|
||||
},
|
||||
|
||||
// fail-validate
|
||||
{desc: "fail-validate",
|
||||
@ -180,27 +197,27 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube
|
||||
UID: failValidateConfigMap.UID,
|
||||
Namespace: failValidateConfigMap.Namespace,
|
||||
Name: failValidateConfigMap.Name}},
|
||||
expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionFalse,
|
||||
Message: status.LkgDefaultMessage,
|
||||
Reason: fmt.Sprintf(status.CurFailValidateReasonFmt, failValidateConfigMap.UID)},
|
||||
expectConfig: nil},
|
||||
expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionFalse,
|
||||
Message: status.LkgLocalMessage,
|
||||
Reason: fmt.Sprintf(status.CurFailValidateReasonFmt, configMapAPIPath(failValidateConfigMap))},
|
||||
expectConfig: nil,
|
||||
event: true,
|
||||
},
|
||||
}
|
||||
|
||||
L := len(states)
|
||||
for i := 1; i <= L; i++ { // need one less iteration than the number of states
|
||||
testBothDirections(f, &states[i-1 : i][0], states[i:L])
|
||||
testBothDirections(f, &states[i-1 : i][0], states[i:L], 0)
|
||||
}
|
||||
|
||||
})
|
||||
})
|
||||
|
||||
Context("When a remote config becomes the new last-known-good before the Kubelet is updated to use a new, bad config", func() {
|
||||
It("it should report a status and configz indicating that it rolled back to the new last-known-good", func() {
|
||||
Context("When a remote config becomes the new last-known-good, and then the Kubelet is updated to use a new, bad config", func() {
|
||||
It("the Kubelet should report a status and configz indicating that it rolled back to the new last-known-good", func() {
|
||||
var err error
|
||||
// we base the "lkg" configmap off of the current configuration, but set the trial
|
||||
// duration very low so that it quickly becomes the last-known-good
|
||||
// we base the "lkg" configmap off of the current configuration
|
||||
lkgKC := originalKC.DeepCopy()
|
||||
lkgKC.ConfigTrialDuration = &metav1.Duration{Duration: time.Nanosecond}
|
||||
lkgConfigMap := newKubeletConfigMap("dynamic-kubelet-config-test-intended-lkg", lkgKC)
|
||||
lkgConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(lkgConfigMap)
|
||||
framework.ExpectNoError(err)
|
||||
@ -222,10 +239,12 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube
|
||||
UID: lkgConfigMap.UID,
|
||||
Namespace: lkgConfigMap.Namespace,
|
||||
Name: lkgConfigMap.Name}},
|
||||
expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionTrue,
|
||||
Message: fmt.Sprintf(status.CurRemoteMessageFmt, lkgConfigMap.UID),
|
||||
Reason: status.CurRemoteOKReason},
|
||||
expectConfig: lkgKC},
|
||||
expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionTrue,
|
||||
Message: fmt.Sprintf(status.CurRemoteMessageFmt, configMapAPIPath(lkgConfigMap)),
|
||||
Reason: status.CurRemoteOkayReason},
|
||||
expectConfig: lkgKC,
|
||||
event: true,
|
||||
},
|
||||
|
||||
// bad config
|
||||
{desc: "bad config",
|
||||
@ -233,13 +252,16 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube
|
||||
UID: badConfigMap.UID,
|
||||
Namespace: badConfigMap.Namespace,
|
||||
Name: badConfigMap.Name}},
|
||||
expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionFalse,
|
||||
Message: fmt.Sprintf(status.LkgRemoteMessageFmt, lkgConfigMap.UID),
|
||||
Reason: fmt.Sprintf(status.CurFailParseReasonFmt, badConfigMap.UID)},
|
||||
expectConfig: lkgKC},
|
||||
expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionFalse,
|
||||
Message: fmt.Sprintf(status.LkgRemoteMessageFmt, configMapAPIPath(lkgConfigMap)),
|
||||
Reason: fmt.Sprintf(status.CurFailParseReasonFmt, configMapAPIPath(badConfigMap))},
|
||||
expectConfig: lkgKC,
|
||||
event: true,
|
||||
},
|
||||
}
|
||||
|
||||
testBothDirections(f, &states[0], states[1:])
|
||||
// wait 12 minutes after setting the first config to ensure it has time to pass the trial duration
|
||||
testBothDirections(f, &states[0], states[1:], 12*time.Minute)
|
||||
})
|
||||
})
|
||||
|
||||
@ -268,23 +290,28 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube
|
||||
UID: cm1.UID,
|
||||
Namespace: cm1.Namespace,
|
||||
Name: cm1.Name}},
|
||||
expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionTrue,
|
||||
Message: fmt.Sprintf(status.CurRemoteMessageFmt, cm1.UID),
|
||||
Reason: status.CurRemoteOKReason},
|
||||
expectConfig: kc1},
|
||||
expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionTrue,
|
||||
Message: fmt.Sprintf(status.CurRemoteMessageFmt, configMapAPIPath(cm1)),
|
||||
Reason: status.CurRemoteOkayReason},
|
||||
expectConfig: kc1,
|
||||
event: true,
|
||||
},
|
||||
|
||||
{desc: "cm2",
|
||||
configSource: &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{
|
||||
UID: cm2.UID,
|
||||
Namespace: cm2.Namespace,
|
||||
Name: cm2.Name}},
|
||||
expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionTrue,
|
||||
Message: fmt.Sprintf(status.CurRemoteMessageFmt, cm2.UID),
|
||||
Reason: status.CurRemoteOKReason},
|
||||
expectConfig: kc2},
|
||||
expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionTrue,
|
||||
Message: fmt.Sprintf(status.CurRemoteMessageFmt, configMapAPIPath(cm2)),
|
||||
Reason: status.CurRemoteOkayReason},
|
||||
expectConfig: kc2,
|
||||
event: true,
|
||||
},
|
||||
}
|
||||
|
||||
for i := 0; i < 50; i++ { // change the config 101 times (changes 3 times in the first iteration, 2 times in each subsequent iteration)
|
||||
testBothDirections(f, &states[0], states[1:])
|
||||
testBothDirections(f, &states[0], states[1:], 0)
|
||||
}
|
||||
})
|
||||
})
|
||||
@ -293,64 +320,73 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube
|
||||
|
||||
// testBothDirections tests the state change represented by each edge, where each state is a vertex,
|
||||
// and there are edges in each direction between first and each of the states.
|
||||
func testBothDirections(f *framework.Framework, first *configState, states []configState) {
|
||||
func testBothDirections(f *framework.Framework, first *configState, states []configState, waitAfterFirst time.Duration) {
|
||||
// set to first and check that everything got set up properly
|
||||
By(fmt.Sprintf("setting configSource to state %q", first.desc))
|
||||
setAndTestKubeletConfigState(f, first)
|
||||
// we don't always expect an event here, because setting "first" might not represent
|
||||
// a change from the current configuration
|
||||
setAndTestKubeletConfigState(f, first, false)
|
||||
|
||||
time.Sleep(waitAfterFirst)
|
||||
|
||||
// for each state, set to that state, check condition and configz, then reset to first and check again
|
||||
for i := range states {
|
||||
By(fmt.Sprintf("from %q to %q", first.desc, states[i].desc))
|
||||
setAndTestKubeletConfigState(f, &states[i])
|
||||
// from first -> states[i], states[i].event fully describes whether we should get a config change event
|
||||
setAndTestKubeletConfigState(f, &states[i], states[i].event)
|
||||
|
||||
By(fmt.Sprintf("back to %q from %q", first.desc, states[i].desc))
|
||||
setAndTestKubeletConfigState(f, first)
|
||||
// whether first -> states[i] should have produced a config change event partially determines whether states[i] -> first should produce an event
|
||||
setAndTestKubeletConfigState(f, first, first.event && states[i].event)
|
||||
}
|
||||
}
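As a side note (not part of the vendored diff): the edge traversal described above can be pictured with a minimal, self-contained Go sketch. The state names are hypothetical; only the visiting order mirrors testBothDirections (set first, then for each remaining state go first -> state and back).

package main

import "fmt"

func main() {
	first := "F"
	states := []string{"A", "B"}
	fmt.Printf("set %s\n", first) // may or may not be a config change
	for _, s := range states {
		fmt.Printf("%s -> %s\n", first, s) // forward edge
		fmt.Printf("%s -> %s\n", s, first) // reverse edge
	}
	// Output:
	// set F
	// F -> A
	// A -> F
	// F -> B
	// B -> F
}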
|
||||
|
||||
// setAndTestKubeletConfigState tests that after setting the config source, the ConfigOK condition
|
||||
// setAndTestKubeletConfigState tests that after setting the config source, the ConfigOk condition
|
||||
// and (if appropriate) configuration exposed via configz are as expected.
|
||||
// The configuration will be converted to the internal type prior to comparison.
|
||||
func setAndTestKubeletConfigState(f *framework.Framework, state *configState) {
|
||||
func setAndTestKubeletConfigState(f *framework.Framework, state *configState, expectEvent bool) {
|
||||
// set the desired state, retry a few times in case we are competing with other editors
|
||||
Eventually(func() error {
|
||||
if err := setNodeConfigSource(f, state.configSource); err != nil {
|
||||
return err
|
||||
return fmt.Errorf("case %s: error setting Node.Spec.ConfigSource", err)
|
||||
}
|
||||
return nil
|
||||
}, time.Minute, time.Second).Should(BeNil())
|
||||
// check that config source actually got set to what we expect
|
||||
checkNodeConfigSource(f, state.configSource)
|
||||
checkNodeConfigSource(f, state.desc, state.configSource)
|
||||
// check condition
|
||||
checkConfigOKCondition(f, state.expectConfigOK)
|
||||
checkConfigOkCondition(f, state.desc, state.expectConfigOk)
|
||||
// check expectConfig
|
||||
if state.expectConfig != nil {
|
||||
checkConfig(f, state.expectConfig)
|
||||
checkConfig(f, state.desc, state.expectConfig)
|
||||
}
|
||||
// check that an event was sent for the config change
|
||||
if expectEvent {
|
||||
checkEvent(f, state.desc, state.configSource)
|
||||
}
|
||||
}
|
||||
|
||||
// make sure the node's config source matches what we expect, after setting it
|
||||
func checkNodeConfigSource(f *framework.Framework, expect *apiv1.NodeConfigSource) {
|
||||
func checkNodeConfigSource(f *framework.Framework, desc string, expect *apiv1.NodeConfigSource) {
|
||||
const (
|
||||
timeout = time.Minute
|
||||
interval = time.Second
|
||||
)
|
||||
|
||||
Eventually(func() error {
|
||||
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("checkNodeConfigSource: case %s: %v", desc, err)
|
||||
}
|
||||
actual := node.Spec.ConfigSource
|
||||
if !reflect.DeepEqual(expect, actual) {
|
||||
return fmt.Errorf(spew.Sprintf("expected %#v but got %#v", expect, actual))
|
||||
return fmt.Errorf(spew.Sprintf("checkNodeConfigSource: case %s: expected %#v but got %#v", desc, expect, actual))
|
||||
}
|
||||
return nil
|
||||
}, timeout, interval).Should(BeNil())
|
||||
}
|
||||
|
||||
// make sure the ConfigOK node condition eventually matches what we expect
|
||||
func checkConfigOKCondition(f *framework.Framework, expect *apiv1.NodeCondition) {
|
||||
// make sure the ConfigOk node condition eventually matches what we expect
|
||||
func checkConfigOkCondition(f *framework.Framework, desc string, expect *apiv1.NodeCondition) {
|
||||
const (
|
||||
timeout = time.Minute
|
||||
interval = time.Second
|
||||
@ -359,14 +395,14 @@ func checkConfigOKCondition(f *framework.Framework, expect *apiv1.NodeCondition)
|
||||
Eventually(func() error {
|
||||
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("checkConfigOkCondition: case %s: %v", desc, err)
|
||||
}
|
||||
actual := getConfigOKCondition(node.Status.Conditions)
|
||||
actual := getKubeletConfigOkCondition(node.Status.Conditions)
|
||||
if actual == nil {
|
||||
return fmt.Errorf("ConfigOK condition not found on node %q", framework.TestContext.NodeName)
|
||||
return fmt.Errorf("checkConfigOkCondition: case %s: ConfigOk condition not found on node %q", desc, framework.TestContext.NodeName)
|
||||
}
|
||||
if err := expectConfigOK(expect, actual); err != nil {
|
||||
return err
|
||||
if err := expectConfigOk(expect, actual); err != nil {
|
||||
return fmt.Errorf("checkConfigOkCondition: case %s: %v", desc, err)
|
||||
}
|
||||
return nil
|
||||
}, timeout, interval).Should(BeNil())
|
||||
@ -374,7 +410,7 @@ func checkConfigOKCondition(f *framework.Framework, expect *apiv1.NodeCondition)
|
||||
|
||||
// if the actual matches the expect, return nil, else error explaining the mismatch
|
||||
// if a subfield of the expect is the empty string, that check is skipped
|
||||
func expectConfigOK(expect, actual *apiv1.NodeCondition) error {
|
||||
func expectConfigOk(expect, actual *apiv1.NodeCondition) error {
|
||||
if expect.Status != actual.Status {
|
||||
return fmt.Errorf("expected condition Status %q but got %q", expect.Status, actual.Status)
|
||||
}
|
||||
@ -388,7 +424,7 @@ func expectConfigOK(expect, actual *apiv1.NodeCondition) error {
|
||||
}
|
||||
|
||||
// make sure config exposed on configz matches what we expect
|
||||
func checkConfig(f *framework.Framework, expect *kubeletconfig.KubeletConfiguration) {
|
||||
func checkConfig(f *framework.Framework, desc string, expect *kubeletconfig.KubeletConfiguration) {
|
||||
const (
|
||||
timeout = time.Minute
|
||||
interval = time.Second
|
||||
@ -396,11 +432,63 @@ func checkConfig(f *framework.Framework, expect *kubeletconfig.KubeletConfigurat
|
||||
Eventually(func() error {
|
||||
actual, err := getCurrentKubeletConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("checkConfig: case %s: %v", desc, err)
|
||||
}
|
||||
if !reflect.DeepEqual(expect, actual) {
|
||||
return fmt.Errorf(spew.Sprintf("expected %#v but got %#v", expect, actual))
|
||||
return fmt.Errorf(spew.Sprintf("checkConfig: case %s: expected %#v but got %#v", desc, expect, actual))
|
||||
}
|
||||
return nil
|
||||
}, timeout, interval).Should(BeNil())
|
||||
}
|
||||
|
||||
// checkEvent makes sure an event was sent marking the Kubelet's restart to use new config,
|
||||
// and that it mentions the config we expect.
|
||||
func checkEvent(f *framework.Framework, desc string, expect *apiv1.NodeConfigSource) {
|
||||
const (
|
||||
timeout = time.Minute
|
||||
interval = time.Second
|
||||
)
|
||||
Eventually(func() error {
|
||||
events, err := f.ClientSet.CoreV1().Events("").List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("checkEvent: case %s: %v", desc, err)
|
||||
}
|
||||
// find config changed event with most recent timestamp
|
||||
var recent *apiv1.Event
|
||||
for i := range events.Items {
|
||||
if events.Items[i].Reason == controller.KubeletConfigChangedEventReason {
|
||||
if recent == nil {
|
||||
recent = &events.Items[i]
|
||||
continue
|
||||
}
|
||||
// for these events, first and last timestamp are always the same
|
||||
if events.Items[i].FirstTimestamp.Time.After(recent.FirstTimestamp.Time) {
|
||||
recent = &events.Items[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// we expect at least one config change event
|
||||
if recent == nil {
|
||||
return fmt.Errorf("checkEvent: case %s: no events found with reason %s", desc, controller.KubeletConfigChangedEventReason)
|
||||
}
|
||||
|
||||
// ensure the message is what we expect (including the resource path)
|
||||
expectMessage := fmt.Sprintf(controller.EventMessageFmt, controller.LocalConfigMessage)
|
||||
if expect != nil {
|
||||
if expect.ConfigMapRef != nil {
|
||||
expectMessage = fmt.Sprintf(controller.EventMessageFmt, fmt.Sprintf("/api/v1/namespaces/%s/configmaps/%s", expect.ConfigMapRef.Namespace, expect.ConfigMapRef.Name))
|
||||
}
|
||||
}
|
||||
if expectMessage != recent.Message {
|
||||
return fmt.Errorf("checkEvent: case %s: expected event message %q but got %q", desc, expectMessage, recent.Message)
|
||||
}
|
||||
|
||||
return nil
|
||||
}, timeout, interval).Should(BeNil())
|
||||
}
|
||||
|
||||
// constructs the expected SelfLink for a config map
|
||||
func configMapAPIPath(cm *apiv1.ConfigMap) string {
|
||||
return fmt.Sprintf("/api/v1/namespaces/%s/configmaps/%s", cm.Namespace, cm.Name)
|
||||
}
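For reference, a small self-contained sketch (not part of the vendored diff) of the path this helper produces; the namespace and name below match the "correct" ConfigMap created earlier in this test, and the same string appears in the ConfigOk condition messages and the config-change event message.

package main

import "fmt"

// mirrors configMapAPIPath above, but takes plain strings so the sketch stays standalone
func configMapAPIPath(namespace, name string) string {
	return fmt.Sprintf("/api/v1/namespaces/%s/configmaps/%s", namespace, name)
}

func main() {
	fmt.Println(configMapAPIPath("kube-system", "dynamic-kubelet-config-test-correct"))
	// /api/v1/namespaces/kube-system/configmaps/dynamic-kubelet-config-test-correct
}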
|
||||
|
2
vendor/k8s.io/kubernetes/test/e2e_node/e2e_node_suite_test.go
generated
vendored
@ -1,3 +1,5 @@
|
||||
// +build linux
|
||||
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
|
3
vendor/k8s.io/kubernetes/test/e2e_node/environment/BUILD
generated
vendored
@ -8,8 +8,7 @@ load(
|
||||
|
||||
go_binary(
|
||||
name = "environment",
|
||||
importpath = "k8s.io/kubernetes/test/e2e_node/environment",
|
||||
library = ":go_default_library",
|
||||
embed = [":go_default_library"],
|
||||
)
|
||||
|
||||
go_library(
|
||||
|
10
vendor/k8s.io/kubernetes/test/e2e_node/environment/setup_host.sh
generated
vendored
@ -57,17 +57,17 @@ cat /etc/*-release | grep "ID=ubuntu"
|
||||
if [ $? -eq 0 ]; then
|
||||
if ! which nsenter > /dev/null; then
|
||||
echo "Do not find nsenter. Install it."
|
||||
mkdir -p /tmp/nsenter-install
|
||||
cd /tmp/nsenter-install
|
||||
curl https://www.kernel.org/pub/linux/utils/util-linux/v2.24/util-linux-2.24.tar.gz | tar -zxf-
|
||||
NSENTER_BUILD_DIR=$(mktemp -d /tmp/nsenter-build-XXXXXX)
|
||||
cd $NSENTER_BUILD_DIR
|
||||
curl https://www.kernel.org/pub/linux/utils/util-linux/v2.31/util-linux-2.31.tar.gz | tar -zxf-
|
||||
sudo apt-get update
|
||||
sudo apt-get --yes install make
|
||||
sudo apt-get --yes install gcc
|
||||
cd util-linux-2.24
|
||||
cd util-linux-2.31
|
||||
./configure --without-ncurses
|
||||
make nsenter
|
||||
sudo cp nsenter /usr/local/bin
|
||||
rm -rf /tmp/nsenter-install
|
||||
rm -rf $NSENTER_BUILD_DIR
|
||||
fi
|
||||
fi
|
||||
|
||||
|
121
vendor/k8s.io/kubernetes/test/e2e_node/eviction_test.go
generated
vendored
@ -19,6 +19,7 @@ package e2e_node
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
@ -28,9 +29,10 @@ import (
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
|
||||
stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm"
|
||||
kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
|
||||
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
@ -50,12 +52,13 @@ const (
|
||||
pressureDelay = 20 * time.Second
|
||||
testContextFmt = "when we run containers that should cause %s"
|
||||
noPressure = v1.NodeConditionType("NoPressure")
|
||||
lotsOfDisk = 10240 // 10 Gb in Mb
|
||||
lotsOfDisk = 10240 // 10 Gb in Mb
|
||||
lotsOfFiles = 1000000000 // 1 billion
|
||||
)
|
||||
|
||||
// InodeEviction tests that the node responds to node disk pressure by evicting only responsible pods.
|
||||
// Node disk pressure is induced by consuming all inodes on the node.
|
||||
var _ = framework.KubeDescribe("InodeEviction [Slow] [Serial] [Disruptive] [Flaky]", func() {
|
||||
var _ = framework.KubeDescribe("InodeEviction [Slow] [Serial] [Disruptive]", func() {
|
||||
f := framework.NewDefaultFramework("inode-eviction-test")
|
||||
expectedNodeCondition := v1.NodeDiskPressure
|
||||
pressureTimeout := 15 * time.Minute
|
||||
@ -74,11 +77,11 @@ var _ = framework.KubeDescribe("InodeEviction [Slow] [Serial] [Disruptive] [Flak
|
||||
runEvictionTest(f, pressureTimeout, expectedNodeCondition, logInodeMetrics, []podEvictSpec{
|
||||
{
|
||||
evictionPriority: 1,
|
||||
pod: inodeConsumingPod("container-inode-hog", nil),
|
||||
pod: inodeConsumingPod("container-inode-hog", lotsOfFiles, nil),
|
||||
},
|
||||
{
|
||||
evictionPriority: 1,
|
||||
pod: inodeConsumingPod("volume-inode-hog", &v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}),
|
||||
pod: inodeConsumingPod("volume-inode-hog", lotsOfFiles, &v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}),
|
||||
},
|
||||
{
|
||||
evictionPriority: 0,
|
||||
@ -88,9 +91,38 @@ var _ = framework.KubeDescribe("InodeEviction [Slow] [Serial] [Disruptive] [Flak
|
||||
})
|
||||
})
|
||||
|
||||
// ImageGCNoEviction tests that the node does not evict pods when inodes are consumed by images
|
||||
// Disk pressure is induced by pulling large images
|
||||
var _ = framework.KubeDescribe("ImageGCNoEviction [Slow] [Serial] [Disruptive]", func() {
|
||||
f := framework.NewDefaultFramework("image-gc-eviction-test")
|
||||
pressureTimeout := 10 * time.Minute
|
||||
expectedNodeCondition := v1.NodeDiskPressure
|
||||
inodesConsumed := uint64(100000)
|
||||
Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
|
||||
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
|
||||
// Set the eviction threshold to inodesFree - inodesConsumed, so that using inodesConsumed causes an eviction.
|
||||
summary := eventuallyGetSummary()
|
||||
inodesFree := *summary.Node.Fs.InodesFree
|
||||
if inodesFree <= inodesConsumed {
|
||||
framework.Skipf("Too few inodes free on the host for the InodeEviction test to run")
|
||||
}
|
||||
initialConfig.EvictionHard = map[string]string{"nodefs.inodesFree": fmt.Sprintf("%d", inodesFree-inodesConsumed)}
|
||||
initialConfig.EvictionMinimumReclaim = map[string]string{}
|
||||
})
|
||||
// Consume enough inodes to induce disk pressure,
|
||||
// but expect that image garbage collection can reduce it enough to avoid an eviction
|
||||
runEvictionTest(f, pressureTimeout, expectedNodeCondition, logDiskMetrics, []podEvictSpec{
|
||||
{
|
||||
evictionPriority: 0,
|
||||
pod: inodeConsumingPod("container-inode", 110000, nil),
|
||||
},
|
||||
})
|
||||
})
|
||||
})
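The threshold arithmetic used in the tempSetCurrentKubeletConfig block above is easy to check by hand; the numbers in this sketch are hypothetical and only illustrate that consuming inodesConsumed inodes is enough to cross the configured hard threshold.

package main

import "fmt"

func main() {
	inodesFree := uint64(1000000)    // would come from the node's stats summary
	inodesConsumed := uint64(100000) // headroom reserved by the test
	evictionHard := map[string]string{
		"nodefs.inodesFree": fmt.Sprintf("%d", inodesFree-inodesConsumed),
	}
	fmt.Println(evictionHard) // map[nodefs.inodesFree:900000]
}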
|
||||
|
||||
// MemoryAllocatableEviction tests that the node responds to node memory pressure by evicting only responsible pods.
|
||||
// Node memory pressure is only encountered because we reserve the majority of the node's capacity via kube-reserved.
|
||||
var _ = framework.KubeDescribe("MemoryAllocatableEviction [Slow] [Serial] [Disruptive] [Flaky]", func() {
|
||||
var _ = framework.KubeDescribe("MemoryAllocatableEviction [Slow] [Serial] [Disruptive]", func() {
|
||||
f := framework.NewDefaultFramework("memory-allocatable-eviction-test")
|
||||
expectedNodeCondition := v1.NodeMemoryPressure
|
||||
pressureTimeout := 10 * time.Minute
|
||||
@ -104,7 +136,7 @@ var _ = framework.KubeDescribe("MemoryAllocatableEviction [Slow] [Serial] [Disru
|
||||
initialConfig.KubeReserved = map[string]string{
|
||||
string(v1.ResourceMemory): kubeReserved.String(),
|
||||
}
|
||||
initialConfig.EnforceNodeAllocatable = []string{cm.NodeAllocatableEnforcementKey}
|
||||
initialConfig.EnforceNodeAllocatable = []string{kubetypes.NodeAllocatableEnforcementKey}
|
||||
initialConfig.CgroupsPerQOS = true
|
||||
})
|
||||
runEvictionTest(f, pressureTimeout, expectedNodeCondition, logMemoryMetrics, []podEvictSpec{
|
||||
@ -122,7 +154,7 @@ var _ = framework.KubeDescribe("MemoryAllocatableEviction [Slow] [Serial] [Disru
|
||||
|
||||
// LocalStorageEviction tests that the node responds to node disk pressure by evicting only responsible pods
|
||||
// Disk pressure is induced by running pods which consume disk space.
|
||||
var _ = framework.KubeDescribe("LocalStorageEviction [Slow] [Serial] [Disruptive] [Flaky]", func() {
|
||||
var _ = framework.KubeDescribe("LocalStorageEviction [Slow] [Serial] [Disruptive]", func() {
|
||||
f := framework.NewDefaultFramework("localstorage-eviction-test")
|
||||
pressureTimeout := 10 * time.Minute
|
||||
expectedNodeCondition := v1.NodeDiskPressure
|
||||
@ -150,7 +182,7 @@ var _ = framework.KubeDescribe("LocalStorageEviction [Slow] [Serial] [Disruptive
|
||||
// LocalStorageEviction tests that the node responds to node disk pressure by evicting only responsible pods
|
||||
// Disk pressure is induced by running pods which consume disk space, which exceed the soft eviction threshold.
|
||||
// Note: This test's purpose is to test Soft Evictions. Local storage was chosen since it is the least costly to run.
|
||||
var _ = framework.KubeDescribe("LocalStorageSoftEviction [Slow] [Serial] [Disruptive] [Flaky]", func() {
|
||||
var _ = framework.KubeDescribe("LocalStorageSoftEviction [Slow] [Serial] [Disruptive]", func() {
|
||||
f := framework.NewDefaultFramework("localstorage-eviction-test")
|
||||
pressureTimeout := 10 * time.Minute
|
||||
expectedNodeCondition := v1.NodeDiskPressure
|
||||
@ -168,7 +200,8 @@ var _ = framework.KubeDescribe("LocalStorageSoftEviction [Slow] [Serial] [Disrup
|
||||
initialConfig.EvictionMaxPodGracePeriod = 30
|
||||
initialConfig.EvictionMinimumReclaim = map[string]string{}
|
||||
// Ensure that pods are not evicted because of the eviction-hard threshold
|
||||
initialConfig.EvictionHard = map[string]string{}
|
||||
// setting a threshold to 0% disables; non-empty map overrides default value (necessary due to omitempty)
|
||||
initialConfig.EvictionHard = map[string]string{"memory.available": "0%"}
|
||||
})
|
||||
runEvictionTest(f, pressureTimeout, expectedNodeCondition, logDiskMetrics, []podEvictSpec{
|
||||
{
|
||||
@ -184,13 +217,14 @@ var _ = framework.KubeDescribe("LocalStorageSoftEviction [Slow] [Serial] [Disrup
|
||||
})
|
||||
|
||||
// LocalStorageCapacityIsolationEviction tests that container and volume local storage limits are enforced through evictions
|
||||
var _ = framework.KubeDescribe("LocalStorageCapacityIsolationEviction [Slow] [Serial] [Disruptive] [Flaky] [Feature:LocalStorageCapacityIsolation]", func() {
|
||||
var _ = framework.KubeDescribe("LocalStorageCapacityIsolationEviction [Slow] [Serial] [Disruptive] [Feature:LocalStorageCapacityIsolation]", func() {
|
||||
f := framework.NewDefaultFramework("localstorage-eviction-test")
|
||||
evictionTestTimeout := 10 * time.Minute
|
||||
Context(fmt.Sprintf(testContextFmt, "evictions due to pod local storage violations"), func() {
|
||||
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
|
||||
initialConfig.FeatureGates[string(features.LocalStorageCapacityIsolation)] = true
|
||||
initialConfig.EvictionHard = map[string]string{}
|
||||
// setting a threshold to 0% disables; non-empty map overrides default value (necessary due to omitempty)
|
||||
initialConfig.EvictionHard = map[string]string{"memory.available": "0%"}
|
||||
})
|
||||
sizeLimit := resource.MustParse("100Mi")
|
||||
useOverLimit := 101 /* Mb */
|
||||
@ -236,7 +270,7 @@ var _ = framework.KubeDescribe("LocalStorageCapacityIsolationEviction [Slow] [Se
|
||||
// PriorityMemoryEvictionOrdering tests that the node responds to node memory pressure by evicting pods.
|
||||
// This test tests that the guaranteed pod is never evicted, and that the lower-priority pod is evicted before
|
||||
// the higher priority pod.
|
||||
var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [Disruptive] [Flaky]", func() {
|
||||
var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [Disruptive]", func() {
|
||||
f := framework.NewDefaultFramework("priority-memory-eviction-ordering-test")
|
||||
expectedNodeCondition := v1.NodeMemoryPressure
|
||||
pressureTimeout := 10 * time.Minute
|
||||
@ -282,7 +316,7 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [
|
||||
// PriorityLocalStorageEvictionOrdering tests that the node responds to node disk pressure by evicting pods.
|
||||
// This test tests that the guaranteed pod is never evicted, and that the lower-priority pod is evicted before
|
||||
// the higher priority pod.
|
||||
var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Serial] [Disruptive] [Flaky]", func() {
|
||||
var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Serial] [Disruptive]", func() {
|
||||
f := framework.NewDefaultFramework("priority-disk-eviction-ordering-test")
|
||||
expectedNodeCondition := v1.NodeDiskPressure
|
||||
pressureTimeout := 10 * time.Minute
|
||||
@ -428,7 +462,7 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: framework.GetPauseImageNameForHostArch(),
|
||||
Image: imageutils.GetPauseImageNameForHostArch(),
|
||||
Name: podName,
|
||||
},
|
||||
},
|
||||
@ -626,19 +660,19 @@ const (
|
||||
volumeName = "test-volume"
|
||||
)
|
||||
|
||||
func inodeConsumingPod(name string, volumeSource *v1.VolumeSource) *v1.Pod {
|
||||
func inodeConsumingPod(name string, numFiles int, volumeSource *v1.VolumeSource) *v1.Pod {
|
||||
// Each iteration creates an empty file
|
||||
return podWithCommand(volumeSource, v1.ResourceRequirements{}, name, "i=0; while true; do touch %s${i}.txt; sleep 0.001; i=$((i+=1)); done;")
|
||||
return podWithCommand(volumeSource, v1.ResourceRequirements{}, numFiles, name, "touch %s${i}.txt; sleep 0.001")
|
||||
}
|
||||
|
||||
func diskConsumingPod(name string, diskConsumedMB int, volumeSource *v1.VolumeSource, resources v1.ResourceRequirements) *v1.Pod {
|
||||
// Each iteration writes 1 Mb, so do diskConsumedMB iterations.
|
||||
return podWithCommand(volumeSource, resources, name, fmt.Sprintf("i=0; while [ $i -lt %d ];", diskConsumedMB)+" do dd if=/dev/urandom of=%s${i} bs=1048576 count=1 2>/dev/null ; i=$(($i+1)); done; while true; do sleep 5; done")
|
||||
return podWithCommand(volumeSource, resources, diskConsumedMB, name, "dd if=/dev/urandom of=%s${i} bs=1048576 count=1 2>/dev/null")
|
||||
}
|
||||
|
||||
// podWithCommand returns a pod with the provided volumeSource and resourceRequirements.
|
||||
// If a volumeSource is provided, then the volumeMountPath to the volume is inserted into the provided command.
|
||||
func podWithCommand(volumeSource *v1.VolumeSource, resources v1.ResourceRequirements, name, command string) *v1.Pod {
|
||||
func podWithCommand(volumeSource *v1.VolumeSource, resources v1.ResourceRequirements, iterations int, name, command string) *v1.Pod {
|
||||
path := ""
|
||||
volumeMounts := []v1.VolumeMount{}
|
||||
volumes := []v1.Volume{}
|
||||
@ -658,7 +692,7 @@ func podWithCommand(volumeSource *v1.VolumeSource, resources v1.ResourceRequirem
|
||||
Command: []string{
|
||||
"sh",
|
||||
"-c",
|
||||
fmt.Sprintf(command, filepath.Join(path, "file")),
|
||||
fmt.Sprintf("i=0; while [ $i -lt %d ]; do %s; i=$(($i+1)); done; while true; do sleep 5; done", iterations, fmt.Sprintf(command, filepath.Join(path, "file"))),
|
||||
},
|
||||
Resources: resources,
|
||||
VolumeMounts: volumeMounts,
|
||||
@ -668,3 +702,50 @@ func podWithCommand(volumeSource *v1.VolumeSource, resources v1.ResourceRequirem
|
||||
},
|
||||
}
|
||||
}
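To make the two-level fmt.Sprintf above concrete, here is a standalone sketch of the command string that inodeConsumingPod("container-inode", 110000, nil) ends up running; the inputs mirror that call, and with a nil volumeSource the mount path is empty.

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	iterations := 110000
	command := "touch %s${i}.txt; sleep 0.001"
	path := "" // nil volumeSource, so files are created in the working directory

	inner := fmt.Sprintf(command, filepath.Join(path, "file"))
	full := fmt.Sprintf("i=0; while [ $i -lt %d ]; do %s; i=$(($i+1)); done; while true; do sleep 5; done", iterations, inner)
	fmt.Println(full)
	// i=0; while [ $i -lt 110000 ]; do touch file${i}.txt; sleep 0.001; i=$(($i+1)); done; while true; do sleep 5; done
}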
|
||||
|
||||
func getMemhogPod(podName string, ctnName string, res v1.ResourceRequirements) *v1.Pod {
|
||||
env := []v1.EnvVar{
|
||||
{
|
||||
Name: "MEMORY_LIMIT",
|
||||
ValueFrom: &v1.EnvVarSource{
|
||||
ResourceFieldRef: &v1.ResourceFieldSelector{
|
||||
Resource: "limits.memory",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// If there is a limit specified, pass 80% of it for -mem-total, otherwise use the downward API
|
||||
// to pass limits.memory, which will be the total memory available.
|
||||
// This helps prevent a guaranteed pod from triggering an OOM kill due to its low memory limit,
|
||||
// which will cause the test to fail inappropriately.
|
||||
var memLimit string
|
||||
if limit, ok := res.Limits[v1.ResourceMemory]; ok {
|
||||
memLimit = strconv.Itoa(int(
|
||||
float64(limit.Value()) * 0.8))
|
||||
} else {
|
||||
memLimit = "$(MEMORY_LIMIT)"
|
||||
}
|
||||
|
||||
return &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: ctnName,
|
||||
Image: "k8s.gcr.io/stress:v1",
|
||||
ImagePullPolicy: "Always",
|
||||
Env: env,
|
||||
// 60 min timeout * 60s / tick per 10s = 360 ticks before timeout => ~11.11Mi/tick
|
||||
// to fill ~4Gi of memory, so initial ballpark 12Mi/tick.
|
||||
// We might see flakes due to timeout if the total memory on the nodes increases.
|
||||
Args: []string{"-mem-alloc-size", "12Mi", "-mem-alloc-sleep", "10s", "-mem-total", memLimit},
|
||||
Resources: res,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
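A short sketch of the -mem-total derivation above, using a hypothetical 100Mi limit; when a limit is present, 80% of it is handed to the stress container so a guaranteed pod is not OOM-killed outright.

package main

import (
	"fmt"
	"strconv"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	limit := resource.MustParse("100Mi")                        // 104857600 bytes
	memLimit := strconv.Itoa(int(float64(limit.Value()) * 0.8)) // 80% of the limit
	fmt.Println(memLimit)                                       // 83886080, passed as -mem-total
}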
|
||||
|
73
vendor/k8s.io/kubernetes/test/e2e_node/garbage_collector_test.go
generated
vendored
@ -19,12 +19,13 @@ package e2e_node
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker"
|
||||
internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
|
||||
"k8s.io/kubernetes/pkg/kubelet/types"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
@ -130,18 +131,43 @@ var _ = framework.KubeDescribe("GarbageCollect [Serial]", func() {
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
// TODO (dashpole): Once the Container Runtime Interface (CRI) is complete, generalize run on other runtimes (other than docker)
|
||||
dockerContainerGCTest(f, test)
|
||||
containerGCTest(f, test)
|
||||
}
|
||||
})
|
||||
|
||||
// Tests the following:
|
||||
// pods are created, and all containers restart the specified number of times
|
||||
// while contianers are running, the number of copies of a single container does not exceed maxPerPodContainer
|
||||
// while containers are running, the number of copies of a single container does not exceed maxPerPodContainer
|
||||
// while containers are running, the total number of containers does not exceed maxTotalContainers
|
||||
// while containers are running, if not constrained by maxPerPodContainer or maxTotalContainers, keep an extra copy of each container
|
||||
// once pods are killed, all containers are eventually cleaned up
|
||||
func containerGCTest(f *framework.Framework, test testRun) {
|
||||
var runtime internalapi.RuntimeService
|
||||
BeforeEach(func() {
|
||||
var err error
|
||||
runtime, _, err = getCRIClient()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
for _, pod := range test.testPods {
|
||||
// Initialize the getContainerNames function to use CRI runtime client.
|
||||
pod.getContainerNames = func() ([]string, error) {
|
||||
relevantContainers := []string{}
|
||||
containers, err := runtime.ListContainers(&runtimeapi.ContainerFilter{
|
||||
LabelSelector: map[string]string{
|
||||
types.KubernetesPodNameLabel: pod.podName,
|
||||
types.KubernetesPodNamespaceLabel: f.Namespace.Name,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return relevantContainers, err
|
||||
}
|
||||
for _, container := range containers {
|
||||
relevantContainers = append(relevantContainers, container.Labels[types.KubernetesContainerNameLabel])
|
||||
}
|
||||
return relevantContainers, nil
|
||||
}
|
||||
}
|
||||
|
||||
Context(fmt.Sprintf("Garbage Collection Test: %s", test.testName), func() {
|
||||
BeforeEach(func() {
|
||||
realPods := getPods(test.testPods)
|
||||
@ -175,7 +201,7 @@ func containerGCTest(f *framework.Framework, test testRun) {
|
||||
for i := 0; i < pod.numContainers; i++ {
|
||||
containerCount := 0
|
||||
for _, containerName := range containerNames {
|
||||
if strings.Contains(containerName, pod.getContainerName(i)) {
|
||||
if containerName == pod.getContainerName(i) {
|
||||
containerCount += 1
|
||||
}
|
||||
}
|
||||
@ -203,7 +229,7 @@ func containerGCTest(f *framework.Framework, test testRun) {
|
||||
for i := 0; i < pod.numContainers; i++ {
|
||||
containerCount := 0
|
||||
for _, containerName := range containerNames {
|
||||
if strings.Contains(containerName, pod.getContainerName(i)) {
|
||||
if containerName == pod.getContainerName(i) {
|
||||
containerCount += 1
|
||||
}
|
||||
}
|
||||
@ -245,39 +271,6 @@ func containerGCTest(f *framework.Framework, test testRun) {
|
||||
})
|
||||
}
|
||||
|
||||
// Runs containerGCTest using the docker runtime.
|
||||
func dockerContainerGCTest(f *framework.Framework, test testRun) {
|
||||
var runtime libdocker.Interface
|
||||
BeforeEach(func() {
|
||||
runtime = libdocker.ConnectToDockerOrDie(
|
||||
defaultDockerEndpoint,
|
||||
defaultRuntimeRequestTimeoutDuration,
|
||||
defaultImagePullProgressDeadline,
|
||||
false,
|
||||
false,
|
||||
)
|
||||
})
|
||||
for _, pod := range test.testPods {
|
||||
// Initialize the getContainerNames function to use the libdocker api
|
||||
thisPrefix := pod.containerPrefix
|
||||
pod.getContainerNames = func() ([]string, error) {
|
||||
relevantContainers := []string{}
|
||||
dockerContainers, err := libdocker.GetKubeletDockerContainers(runtime, true)
|
||||
if err != nil {
|
||||
return relevantContainers, err
|
||||
}
|
||||
for _, container := range dockerContainers {
|
||||
// only look for containers from this testspec
|
||||
if strings.Contains(container.Names[0], thisPrefix) {
|
||||
relevantContainers = append(relevantContainers, container.Names[0])
|
||||
}
|
||||
}
|
||||
return relevantContainers, nil
|
||||
}
|
||||
}
|
||||
containerGCTest(f, test)
|
||||
}
|
||||
|
||||
func getPods(specs []*testPodSpec) (pods []*v1.Pod) {
|
||||
for _, spec := range specs {
|
||||
By(fmt.Sprintf("Creating %v containers with restartCount: %v", spec.numContainers, spec.restartCount))
|
||||
|
6
vendor/k8s.io/kubernetes/test/e2e_node/gke_environment_test.go
generated
vendored
@ -84,7 +84,7 @@ func checkIPTables() (err error) {
|
||||
// checkPublicGCR checks the access to the public Google Container Registry by
|
||||
// pulling the busybox image.
|
||||
func checkPublicGCR() error {
|
||||
const image = "gcr.io/google-containers/busybox"
|
||||
const image = "k8s.gcr.io/busybox"
|
||||
output, err := runCommand("docker", "images", "-q", image)
|
||||
if len(output) != 0 {
|
||||
if _, err := runCommand("docker", "rmi", "-f", image); err != nil {
|
||||
@ -170,7 +170,7 @@ func checkDockerConfig() error {
|
||||
// checkDockerNetworkClient checks client networking by pinging an external IP
|
||||
// address from a container.
|
||||
func checkDockerNetworkClient() error {
|
||||
const imageName = "gcr.io/google-containers/busybox"
|
||||
const imageName = "k8s.gcr.io/busybox"
|
||||
output, err := runCommand("docker", "run", "--rm", imageName, "sh", "-c", "ping -w 5 -q google.com")
|
||||
if err != nil {
|
||||
return err
|
||||
@ -185,7 +185,7 @@ func checkDockerNetworkClient() error {
|
||||
// within a container and accessing it from outside.
|
||||
func checkDockerNetworkServer() error {
|
||||
const (
|
||||
imageName = "gcr.io/google-containers/nginx:1.7.9"
|
||||
imageName = "k8s.gcr.io/nginx:1.7.9"
|
||||
hostAddr = "127.0.0.1"
|
||||
hostPort = "8088"
|
||||
containerPort = "80"
|
||||
|
109
vendor/k8s.io/kubernetes/test/e2e_node/gpu_device_plugin.go
generated
vendored
@ -17,17 +17,11 @@ limitations under the License.
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
|
||||
kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/framework/metrics"
|
||||
@ -38,8 +32,7 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
devicePluginFeatureGate = "DevicePlugins=true"
|
||||
testPodNamePrefix = "nvidia-gpu-"
|
||||
testPodNamePrefix = "nvidia-gpu-"
|
||||
)
|
||||
|
||||
// Serial because the test restarts Kubelet
|
||||
@ -47,11 +40,6 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
|
||||
f := framework.NewDefaultFramework("device-plugin-gpus-errors")
|
||||
|
||||
Context("DevicePlugin", func() {
|
||||
By("Enabling support for Device Plugin")
|
||||
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
|
||||
initialConfig.FeatureGates[string(features.DevicePlugins)] = true
|
||||
})
|
||||
|
||||
var devicePluginPod *v1.Pod
|
||||
BeforeEach(func() {
|
||||
By("Ensuring that Nvidia GPUs exists on the node")
|
||||
@ -59,8 +47,6 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
|
||||
Skip("Nvidia GPUs do not exist on the node. Skipping test.")
|
||||
}
|
||||
|
||||
framework.WaitForAllNodesSchedulable(f.ClientSet, framework.TestContext.NodeSchedulableTimeout)
|
||||
|
||||
By("Creating the Google Device Plugin pod for NVIDIA GPU in GKE")
|
||||
devicePluginPod = f.PodClient().CreateSync(framework.NVIDIADevicePlugin(f.Namespace.Name))
|
||||
|
||||
@ -89,24 +75,28 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
|
||||
|
||||
It("checks that when Kubelet restarts exclusive GPU assignation to pods is kept.", func() {
|
||||
By("Creating one GPU pod on a node with at least two GPUs")
|
||||
p1 := f.PodClient().CreateSync(makeCudaPauseImage())
|
||||
count1, devId1 := getDeviceId(f, p1.Name, p1.Name, 1)
|
||||
podRECMD := "devs=$(ls /dev/ | egrep '^nvidia[0-9]+$') && echo gpu devices: $devs"
|
||||
p1 := f.PodClient().CreateSync(makeBusyboxPod(framework.NVIDIAGPUResourceName, podRECMD))
|
||||
|
||||
deviceIDRE := "gpu devices: (nvidia[0-9]+)"
|
||||
count1, devId1 := parseLogFromNRuns(f, p1.Name, p1.Name, 1, deviceIDRE)
|
||||
p1, err := f.PodClient().Get(p1.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("Restarting Kubelet and waiting for the current running pod to restart")
|
||||
restartKubelet(f)
|
||||
restartKubelet()
|
||||
|
||||
By("Confirming that after a kubelet and pod restart, GPU assignement is kept")
|
||||
count1, devIdRestart1 := getDeviceId(f, p1.Name, p1.Name, count1+1)
|
||||
count1, devIdRestart1 := parseLogFromNRuns(f, p1.Name, p1.Name, count1+1, deviceIDRE)
|
||||
Expect(devIdRestart1).To(Equal(devId1))
|
||||
|
||||
By("Restarting Kubelet and creating another pod")
|
||||
restartKubelet(f)
|
||||
p2 := f.PodClient().CreateSync(makeCudaPauseImage())
|
||||
restartKubelet()
|
||||
p2 := f.PodClient().CreateSync(makeBusyboxPod(framework.NVIDIAGPUResourceName, podRECMD))
|
||||
|
||||
By("Checking that pods got a different GPU")
|
||||
count2, devId2 := getDeviceId(f, p2.Name, p2.Name, 1)
|
||||
count2, devId2 := parseLogFromNRuns(f, p2.Name, p2.Name, 1, deviceIDRE)
|
||||
|
||||
Expect(devId1).To(Not(Equal(devId2)))
|
||||
|
||||
By("Deleting device plugin.")
|
||||
@ -118,16 +108,16 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
|
||||
return framework.NumberOfNVIDIAGPUs(node) <= 0
|
||||
}, 10*time.Minute, framework.Poll).Should(BeTrue())
|
||||
By("Checking that scheduled pods can continue to run even after we delete device plugin.")
|
||||
count1, devIdRestart1 = getDeviceId(f, p1.Name, p1.Name, count1+1)
|
||||
count1, devIdRestart1 = parseLogFromNRuns(f, p1.Name, p1.Name, count1+1, deviceIDRE)
|
||||
Expect(devIdRestart1).To(Equal(devId1))
|
||||
count2, devIdRestart2 := getDeviceId(f, p2.Name, p2.Name, count2+1)
|
||||
count2, devIdRestart2 := parseLogFromNRuns(f, p2.Name, p2.Name, count2+1, deviceIDRE)
|
||||
Expect(devIdRestart2).To(Equal(devId2))
|
||||
By("Restarting Kubelet.")
|
||||
restartKubelet(f)
|
||||
restartKubelet()
|
||||
By("Checking that scheduled pods can continue to run even after we delete device plugin and restart Kubelet.")
|
||||
count1, devIdRestart1 = getDeviceId(f, p1.Name, p1.Name, count1+2)
|
||||
count1, devIdRestart1 = parseLogFromNRuns(f, p1.Name, p1.Name, count1+2, deviceIDRE)
|
||||
Expect(devIdRestart1).To(Equal(devId1))
|
||||
count2, devIdRestart2 = getDeviceId(f, p2.Name, p2.Name, count2+2)
|
||||
count2, devIdRestart2 = parseLogFromNRuns(f, p2.Name, p2.Name, count2+2, deviceIDRE)
|
||||
Expect(devIdRestart2).To(Equal(devId2))
|
||||
logDevicePluginMetrics()
|
||||
|
||||
@ -165,68 +155,3 @@ func logDevicePluginMetrics() {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func makeCudaPauseImage() *v1.Pod {
|
||||
podName := testPodNamePrefix + string(uuid.NewUUID())
|
||||
|
||||
return &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: podName},
|
||||
Spec: v1.PodSpec{
|
||||
RestartPolicy: v1.RestartPolicyAlways,
|
||||
Containers: []v1.Container{{
|
||||
Image: busyboxImage,
|
||||
Name: podName,
|
||||
// Retrieves the gpu devices created in the user pod.
|
||||
// Note the nvidia device plugin implementation doesn't do device id remapping currently.
|
||||
// Will probably need to use nvidia-smi if that changes.
|
||||
Command: []string{"sh", "-c", "devs=$(ls /dev/ | egrep '^nvidia[0-9]+$') && echo gpu devices: $devs"},
|
||||
|
||||
Resources: v1.ResourceRequirements{
|
||||
Limits: newDecimalResourceList(framework.NVIDIAGPUResourceName, 1),
|
||||
Requests: newDecimalResourceList(framework.NVIDIAGPUResourceName, 1),
|
||||
},
|
||||
}},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func newDecimalResourceList(name v1.ResourceName, quantity int64) v1.ResourceList {
|
||||
return v1.ResourceList{name: *resource.NewQuantity(quantity, resource.DecimalSI)}
|
||||
}
|
||||
|
||||
// TODO: Find a uniform way to deal with systemctl/initctl/service operations. #34494
|
||||
func restartKubelet(f *framework.Framework) {
|
||||
stdout, err := exec.Command("sudo", "systemctl", "list-units", "kubelet*", "--state=running").CombinedOutput()
|
||||
framework.ExpectNoError(err)
|
||||
regex := regexp.MustCompile("(kubelet-[0-9]+)")
|
||||
matches := regex.FindStringSubmatch(string(stdout))
|
||||
Expect(len(matches)).NotTo(BeZero())
|
||||
kube := matches[0]
|
||||
framework.Logf("Get running kubelet with systemctl: %v, %v", string(stdout), kube)
|
||||
stdout, err = exec.Command("sudo", "systemctl", "restart", kube).CombinedOutput()
|
||||
framework.ExpectNoError(err, "Failed to restart kubelet with systemctl: %v, %v", err, stdout)
|
||||
}
|
||||
|
||||
func getDeviceId(f *framework.Framework, podName string, contName string, restartCount int32) (int32, string) {
|
||||
var count int32
|
||||
// Wait till pod has been restarted at least restartCount times.
|
||||
Eventually(func() bool {
|
||||
p, err := f.PodClient().Get(podName, metav1.GetOptions{})
|
||||
if err != nil || len(p.Status.ContainerStatuses) < 1 {
|
||||
return false
|
||||
}
|
||||
count = p.Status.ContainerStatuses[0].RestartCount
|
||||
return count >= restartCount
|
||||
}, 5*time.Minute, framework.Poll).Should(BeTrue())
|
||||
logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, contName)
|
||||
if err != nil {
|
||||
framework.Failf("GetPodLogs for pod %q failed: %v", podName, err)
|
||||
}
|
||||
framework.Logf("got pod logs: %v", logs)
|
||||
regex := regexp.MustCompile("gpu devices: (nvidia[0-9]+)")
|
||||
matches := regex.FindStringSubmatch(logs)
|
||||
if len(matches) < 2 {
|
||||
return count, ""
|
||||
}
|
||||
return count, matches[1]
|
||||
}
|
||||
|
2
vendor/k8s.io/kubernetes/test/e2e_node/gubernator.sh
generated
vendored
@ -22,7 +22,7 @@ set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
source cluster/lib/logging.sh
|
||||
source hack/lib/logging.sh
|
||||
|
||||
|
||||
if [[ $# -eq 0 || ! $1 =~ ^[Yy]$ ]]; then
|
||||
|
248
vendor/k8s.io/kubernetes/test/e2e_node/hugepages_test.go
generated
vendored
Normal file
@ -0,0 +1,248 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
|
||||
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
// makePodToVerifyHugePages returns a pod that verifies specified cgroup with hugetlb
|
||||
func makePodToVerifyHugePages(cgroupName cm.CgroupName, hugePagesLimit resource.Quantity) *apiv1.Pod {
|
||||
// convert the cgroup name to its literal form
|
||||
cgroupFsName := ""
|
||||
cgroupName = cm.CgroupName(path.Join(defaultNodeAllocatableCgroup, string(cgroupName)))
|
||||
if framework.TestContext.KubeletConfig.CgroupDriver == "systemd" {
|
||||
cgroupFsName = cm.ConvertCgroupNameToSystemd(cgroupName, true)
|
||||
} else {
|
||||
cgroupFsName = string(cgroupName)
|
||||
}
|
||||
|
||||
// this command takes the expected value and compares it against the actual value for the pod cgroup hugetlb.2MB.limit_in_bytes
|
||||
command := fmt.Sprintf("expected=%v; actual=$(cat /tmp/hugetlb/%v/hugetlb.2MB.limit_in_bytes); if [ \"$expected\" -ne \"$actual\" ]; then exit 1; fi; ", hugePagesLimit.Value(), cgroupFsName)
|
||||
framework.Logf("Pod to run command: %v", command)
|
||||
pod := &apiv1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod" + string(uuid.NewUUID()),
|
||||
},
|
||||
Spec: apiv1.PodSpec{
|
||||
RestartPolicy: apiv1.RestartPolicyNever,
|
||||
Containers: []apiv1.Container{
|
||||
{
|
||||
Image: busyboxImage,
|
||||
Name: "container" + string(uuid.NewUUID()),
|
||||
Command: []string{"sh", "-c", command},
|
||||
VolumeMounts: []apiv1.VolumeMount{
|
||||
{
|
||||
Name: "sysfscgroup",
|
||||
MountPath: "/tmp",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Volumes: []apiv1.Volume{
|
||||
{
|
||||
Name: "sysfscgroup",
|
||||
VolumeSource: apiv1.VolumeSource{
|
||||
HostPath: &apiv1.HostPathVolumeSource{Path: "/sys/fs/cgroup"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
return pod
|
||||
}
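As a worked example (the cgroup name below is hypothetical), this is the verification command the helper above builds for a 50Mi hugepages limit; 50Mi resolves to 52428800 bytes.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	hugePagesLimit := resource.MustParse("50Mi")
	cgroupFsName := "kubepods/podabc123" // stands in for the converted pod cgroup name
	command := fmt.Sprintf("expected=%v; actual=$(cat /tmp/hugetlb/%v/hugetlb.2MB.limit_in_bytes); if [ \"$expected\" -ne \"$actual\" ]; then exit 1; fi; ", hugePagesLimit.Value(), cgroupFsName)
	fmt.Println(command)
	// expected=52428800; actual=$(cat /tmp/hugetlb/kubepods/podabc123/hugetlb.2MB.limit_in_bytes); if [ "$expected" -ne "$actual" ]; then exit 1; fi;
}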
|
||||
|
||||
// enableHugePagesInKubelet enables hugepages feature for kubelet
|
||||
func enableHugePagesInKubelet(f *framework.Framework) *kubeletconfig.KubeletConfiguration {
|
||||
oldCfg, err := getCurrentKubeletConfig()
|
||||
framework.ExpectNoError(err)
|
||||
newCfg := oldCfg.DeepCopy()
|
||||
if newCfg.FeatureGates == nil {
|
||||
newCfg.FeatureGates = make(map[string]bool)
|
||||
}
newCfg.FeatureGates["HugePages"] = true
|
||||
|
||||
// Update the Kubelet configuration.
|
||||
framework.ExpectNoError(setKubeletConfiguration(f, newCfg))
|
||||
|
||||
// Wait for the Kubelet to be ready.
|
||||
Eventually(func() bool {
|
||||
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
|
||||
return len(nodeList.Items) == 1
|
||||
}, time.Minute, time.Second).Should(BeTrue())
|
||||
|
||||
return oldCfg
|
||||
}
|
||||
|
||||
// configureHugePages attempts to allocate 100Mi of 2Mi hugepages for testing purposes
func configureHugePages() error {
	err := exec.Command("/bin/sh", "-c", "echo 50 > /proc/sys/vm/nr_hugepages").Run()
	if err != nil {
		return err
	}
	outData, err := exec.Command("/bin/sh", "-c", "cat /proc/meminfo | grep 'HugePages_Total' | awk '{print $2}'").Output()
	if err != nil {
		return err
	}
	numHugePages, err := strconv.Atoi(strings.TrimSpace(string(outData)))
	if err != nil {
		return err
	}
	framework.Logf("HugePages_Total is set to %v", numHugePages)
	if numHugePages == 50 {
		return nil
	}
	return fmt.Errorf("expected hugepages %v, but found %v", 50, numHugePages)
}

// releaseHugePages releases all pre-allocated hugepages
func releaseHugePages() error {
	return exec.Command("/bin/sh", "-c", "echo 0 > /proc/sys/vm/nr_hugepages").Run()
}

// isHugePageSupported returns true if the default hugepagesize on host is 2Mi (i.e. 2048 kB)
func isHugePageSupported() bool {
	outData, err := exec.Command("/bin/sh", "-c", "cat /proc/meminfo | grep 'Hugepagesize:' | awk '{print $2}'").Output()
	framework.ExpectNoError(err)
	pageSize, err := strconv.Atoi(strings.TrimSpace(string(outData)))
	framework.ExpectNoError(err)
	return pageSize == 2048
}

// pollResourceAsString polls for a specified resource and capacity from node
func pollResourceAsString(f *framework.Framework, resourceName string) string {
	node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
	framework.ExpectNoError(err)
	amount := amountOfResourceAsString(node, resourceName)
	framework.Logf("amount of %v: %v", resourceName, amount)
	return amount
}

// amountOfResourceAsString returns the amount of resourceName advertised by a node
func amountOfResourceAsString(node *apiv1.Node, resourceName string) string {
	val, ok := node.Status.Capacity[apiv1.ResourceName(resourceName)]
	if !ok {
		return ""
	}
	return val.String()
}

func runHugePagesTests(f *framework.Framework) {
	It("should assign hugepages as expected based on the Pod spec", func() {
		By("by running a G pod that requests hugepages")
		pod := f.PodClient().Create(&apiv1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "pod" + string(uuid.NewUUID()),
				Namespace: f.Namespace.Name,
			},
			Spec: apiv1.PodSpec{
				Containers: []apiv1.Container{
					{
						Image: framework.GetPauseImageName(f.ClientSet),
						Name:  "container" + string(uuid.NewUUID()),
						Resources: apiv1.ResourceRequirements{
							Limits: apiv1.ResourceList{
								apiv1.ResourceName("cpu"):           resource.MustParse("10m"),
								apiv1.ResourceName("memory"):        resource.MustParse("100Mi"),
								apiv1.ResourceName("hugepages-2Mi"): resource.MustParse("50Mi"),
							},
						},
					},
				},
			},
		})
		podUID := string(pod.UID)
		By("checking if the expected hugetlb settings were applied")
		verifyPod := makePodToVerifyHugePages(cm.CgroupName("pod"+podUID), resource.MustParse("50Mi"))
		f.PodClient().Create(verifyPod)
		err := framework.WaitForPodSuccessInNamespace(f.ClientSet, verifyPod.Name, f.Namespace.Name)
		Expect(err).NotTo(HaveOccurred())
	})
}

// Serial because the test updates kubelet configuration.
var _ = SIGDescribe("HugePages [Serial] [Feature:HugePages]", func() {
	f := framework.NewDefaultFramework("hugepages-test")

	Context("With config updated with hugepages feature enabled", func() {
		var oldCfg *kubeletconfig.KubeletConfiguration

		BeforeEach(func() {
			By("verifying hugepages are supported")
			if !isHugePageSupported() {
				framework.Skipf("skipping test because hugepages are not supported")
				return
			}
			By("configuring the host to reserve a number of pre-allocated hugepages")
			Eventually(func() error {
				err := configureHugePages()
				if err != nil {
					return err
				}
				return nil
			}, 30*time.Second, framework.Poll).Should(BeNil())
			By("enabling hugepages in kubelet")
			oldCfg = enableHugePagesInKubelet(f)
			By("restarting kubelet to pick up pre-allocated hugepages")
			restartKubelet()
			By("by waiting for hugepages resource to become available on the local node")
			Eventually(func() string {
				return pollResourceAsString(f, "hugepages-2Mi")
			}, 30*time.Second, framework.Poll).Should(Equal("100Mi"))
		})

		runHugePagesTests(f)

		AfterEach(func() {
			By("Releasing hugepages")
			Eventually(func() error {
				err := releaseHugePages()
				if err != nil {
					return err
				}
				return nil
			}, 30*time.Second, framework.Poll).Should(BeNil())
			if oldCfg != nil {
				By("Restoring old kubelet config")
				setOldKubeletConfig(f, oldCfg)
			}
			By("restarting kubelet to release hugepages")
			restartKubelet()
			By("by waiting for hugepages resource to not appear available on the local node")
			Eventually(func() string {
				return pollResourceAsString(f, "hugepages-2Mi")
			}, 30*time.Second, framework.Poll).Should(Equal("0"))
		})
	})
})
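For reference, a minimal standalone sketch of the /proc/meminfo bookkeeping the helpers above rely on; the helper name readMeminfoField is illustrative and not part of the vendored test:

package main

import (
	"bufio"
	"fmt"
	"os"
	"strconv"
	"strings"
)

// readMeminfoField returns the numeric value of a /proc/meminfo field,
// e.g. "HugePages_Total" or "Hugepagesize" (the latter is reported in kB).
func readMeminfoField(field string) (int, error) {
	f, err := os.Open("/proc/meminfo")
	if err != nil {
		return 0, err
	}
	defer f.Close()

	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		line := scanner.Text()
		if !strings.HasPrefix(line, field+":") {
			continue
		}
		// Lines look like "HugePages_Total:      50" or "Hugepagesize:    2048 kB".
		fields := strings.Fields(line)
		if len(fields) < 2 {
			return 0, fmt.Errorf("malformed meminfo line: %q", line)
		}
		return strconv.Atoi(fields[1])
	}
	return 0, fmt.Errorf("field %q not found in /proc/meminfo", field)
}

func main() {
	total, err := readMeminfoField("HugePages_Total")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	size, err := readMeminfoField("Hugepagesize")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// 50 pre-allocated 2048 kB pages correspond to the 100Mi the test expects.
	fmt.Printf("HugePages_Total=%d Hugepagesize=%d kB\n", total, size)
}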
2
vendor/k8s.io/kubernetes/test/e2e_node/image_id_test.go
generated
vendored
@ -28,7 +28,7 @@ import (

var _ = framework.KubeDescribe("ImageID", func() {

	busyBoxImage := "gcr.io/google_containers/busybox@sha256:4bdd623e848417d96127e16037743f0cd8b528c026e9175e22a84f639eca58ff"
	busyBoxImage := "k8s.gcr.io/busybox@sha256:4bdd623e848417d96127e16037743f0cd8b528c026e9175e22a84f639eca58ff"

	f := framework.NewDefaultFramework("image-id-test")

26
vendor/k8s.io/kubernetes/test/e2e_node/image_list.go
generated
vendored
@ -17,7 +17,6 @@ limitations under the License.
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"os/user"
|
||||
@ -27,8 +26,7 @@ import (
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
"k8s.io/kubernetes/pkg/kubelet/remote"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
|
||||
commontest "k8s.io/kubernetes/test/e2e/common"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
@ -39,23 +37,21 @@ const (
|
||||
maxImagePullRetries = 5
|
||||
// Sleep duration between image pull retry attempts.
|
||||
imagePullRetryDelay = time.Second
|
||||
// connection timeout for gRPC image service connection
|
||||
imageServiceConnectionTimeout = 15 * time.Minute
|
||||
)
|
||||
|
||||
// NodeImageWhiteList is a list of images used in node e2e test. These images will be prepulled
|
||||
// before test running so that the image pulling won't fail in actual test.
|
||||
var NodeImageWhiteList = sets.NewString(
|
||||
"google/cadvisor:latest",
|
||||
"gcr.io/google-containers/stress:v1",
|
||||
"k8s.gcr.io/stress:v1",
|
||||
busyboxImage,
|
||||
"gcr.io/google_containers/busybox@sha256:4bdd623e848417d96127e16037743f0cd8b528c026e9175e22a84f639eca58ff",
|
||||
"gcr.io/google_containers/node-problem-detector:v0.4.1",
|
||||
"k8s.gcr.io/busybox@sha256:4bdd623e848417d96127e16037743f0cd8b528c026e9175e22a84f639eca58ff",
|
||||
"k8s.gcr.io/node-problem-detector:v0.4.1",
|
||||
imageutils.GetE2EImage(imageutils.NginxSlim),
|
||||
imageutils.GetE2EImage(imageutils.ServeHostname),
|
||||
imageutils.GetE2EImage(imageutils.Netexec),
|
||||
imageutils.GetE2EImage(imageutils.Nonewprivs),
|
||||
framework.GetPauseImageNameForHostArch(),
|
||||
imageutils.GetPauseImageNameForHostArch(),
|
||||
framework.GetGPUDevicePluginImage(),
|
||||
)
|
||||
|
||||
@ -107,17 +103,7 @@ func getPuller() (puller, error) {
|
||||
case "docker":
|
||||
return &dockerPuller{}, nil
|
||||
case "remote":
|
||||
endpoint := framework.TestContext.ContainerRuntimeEndpoint
|
||||
if framework.TestContext.ImageServiceEndpoint != "" {
|
||||
//ImageServiceEndpoint is the same as ContainerRuntimeEndpoint if not
|
||||
//explicitly specified
|
||||
//https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/kubelet.go#L517
|
||||
endpoint = framework.TestContext.ImageServiceEndpoint
|
||||
}
|
||||
if endpoint == "" {
|
||||
return nil, errors.New("can't prepull images, no remote endpoint provided")
|
||||
}
|
||||
is, err := remote.NewRemoteImageService(endpoint, imageServiceConnectionTimeout)
|
||||
_, is, err := getCRIClient()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
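The image_list.go hunk above defines maxImagePullRetries and imagePullRetryDelay for prepulling the whitelist; a hedged sketch of the retry loop those constants imply (the puller interface here is a stand-in, not the vendored abstraction):

package prepull

import (
	"fmt"
	"time"
)

// puller is a stand-in for the test's image puller abstraction (illustrative).
type puller interface {
	Pull(image string) ([]byte, error)
}

const (
	maxImagePullRetries = 5
	imagePullRetryDelay = time.Second
)

// pullImageWithRetry retries a pull up to maxImagePullRetries times,
// sleeping imagePullRetryDelay between attempts and keeping the last error.
func pullImageWithRetry(p puller, image string) error {
	var lastErr error
	for i := 0; i < maxImagePullRetries; i++ {
		if i > 0 {
			time.Sleep(imagePullRetryDelay)
		}
		output, err := p.Pull(image)
		if err == nil {
			return nil
		}
		lastErr = fmt.Errorf("failed to pull %q: %v, output: %q", image, err, output)
	}
	return lastErr
}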
2
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/cos-init-docker.yaml
generated
vendored
@ -27,7 +27,7 @@ write_files:
  permissions: 0644
  owner: root
  content: |
    # This script reads a GCE metadata key for the user speficied Docker
    # This script reads a GCE metadata key for the user specified Docker
    # version, downloads, and replaces the builtin Docker with it.

    set -x

2
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/gci-init-gpu.yaml
generated
vendored
@ -2,7 +2,7 @@
|
||||
|
||||
runcmd:
|
||||
- modprobe configs
|
||||
- docker run -v /dev:/dev -v /home/kubernetes/bin/nvidia:/rootfs/nvidia -v /etc/os-release:/rootfs/etc/os-release -v /proc/sysrq-trigger:/sysrq -e BASE_DIR=/rootfs/nvidia --privileged gcr.io/google_containers/cos-nvidia-driver-install@sha256:cb55c7971c337fece62f2bfe858662522a01e43ac9984a2dd1dd5c71487d225c
|
||||
- docker run -v /dev:/dev -v /home/kubernetes/bin/nvidia:/rootfs/nvidia -v /etc/os-release:/rootfs/etc/os-release -v /proc/sysrq-trigger:/sysrq -e BASE_DIR=/rootfs/nvidia --privileged k8s.gcr.io/cos-nvidia-driver-install@sha256:cb55c7971c337fece62f2bfe858662522a01e43ac9984a2dd1dd5c71487d225c
|
||||
- mount /tmp /tmp -o remount,exec,suid
|
||||
- usermod -a -G docker jenkins
|
||||
- mkdir -p /var/lib/kubelet
|
||||
|
3
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/jenkins-ci-ubuntu.properties
generated
vendored
@ -6,7 +6,8 @@ GCE_ZONE=us-central1-f
|
||||
GCE_PROJECT=k8s-jkns-ubuntu-node
|
||||
CLEANUP=true
|
||||
GINKGO_FLAGS='--skip="\[Flaky\]|\[Serial\]"'
|
||||
KUBELET_ARGS='--cgroups-per-qos=true --cgroup-root=/'
|
||||
TEST_ARGS='--generate-kubelet-config-file=true'
|
||||
KUBELET_ARGS=''
|
||||
TIMEOUT=1h
|
||||
# Use the system spec defined in test/e2e_node/system/specs/gke.yaml.
|
||||
SYSTEM_SPEC_NAME=gke
|
||||
|
3
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/jenkins-ci.properties
generated
vendored
@ -4,5 +4,6 @@ GCE_ZONE=us-central1-f
GCE_PROJECT=k8s-jkns-ci-node-e2e
CLEANUP=true
GINKGO_FLAGS='--skip="\[Flaky\]|\[Serial\]"'
KUBELET_ARGS='--cgroups-per-qos=true --cgroup-root=/'
TEST_ARGS='--generate-kubelet-config-file=true'
KUBELET_ARGS=''
TIMEOUT=1h
4
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/jenkins-flaky.properties
generated
vendored
@ -4,8 +4,8 @@ GCE_ZONE=us-central1-f
|
||||
GCE_PROJECT=k8s-jkns-ci-node-e2e
|
||||
CLEANUP=true
|
||||
GINKGO_FLAGS='--focus="\[Flaky\]"'
|
||||
TEST_ARGS='--feature-gates=DynamicKubeletConfig=true,LocalStorageCapacityIsolation=true,PodPriority=true'
|
||||
KUBELET_ARGS='--cgroups-per-qos=true --cgroup-root=/'
|
||||
TEST_ARGS='--feature-gates=DynamicKubeletConfig=true,LocalStorageCapacityIsolation=true,PodPriority=true --generate-kubelet-config-file=true'
|
||||
KUBELET_ARGS=''
|
||||
PARALLELISM=1
|
||||
TIMEOUT=3h
|
||||
|
||||
|
3
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/jenkins-pull.properties
generated
vendored
@ -4,5 +4,6 @@ GCE_ZONE=us-central1-f
|
||||
GCE_PROJECT=k8s-jkns-pr-node-e2e
|
||||
CLEANUP=true
|
||||
GINKGO_FLAGS='--skip="\[Flaky\]|\[Slow\]|\[Serial\]" --flakeAttempts=2'
|
||||
KUBELET_ARGS='--cgroups-per-qos=true --cgroup-root=/'
|
||||
TEST_ARGS='--generate-kubelet-config-file=true'
|
||||
KUBELET_ARGS=''
|
||||
|
||||
|
4
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/jenkins-serial-ubuntu.properties
generated
vendored
@ -6,8 +6,8 @@ GCE_ZONE=us-central1-f
|
||||
GCE_PROJECT=k8s-jkns-ubuntu-node-serial
|
||||
CLEANUP=true
|
||||
GINKGO_FLAGS='--focus="\[Serial\]" --skip="\[Flaky\]|\[Benchmark\]"'
|
||||
TEST_ARGS='--feature-gates=DynamicKubeletConfig=true'
|
||||
KUBELET_ARGS='--cgroups-per-qos=true --cgroup-root=/'
|
||||
TEST_ARGS='--feature-gates=DynamicKubeletConfig=true --generate-kubelet-config-file=true'
|
||||
KUBELET_ARGS=''
|
||||
PARALLELISM=1
|
||||
TIMEOUT=3h
|
||||
# Use the system spec defined at test/e2e_node/system/specs/gke.yaml.
|
||||
|
4
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/jenkins-serial.properties
generated
vendored
@ -4,7 +4,7 @@ GCE_ZONE=us-west1-b
|
||||
GCE_PROJECT=k8s-jkns-ci-node-e2e
|
||||
CLEANUP=true
|
||||
GINKGO_FLAGS='--focus="\[Serial\]" --skip="\[Flaky\]|\[Benchmark\]"'
|
||||
TEST_ARGS='--feature-gates=DynamicKubeletConfig=true'
|
||||
KUBELET_ARGS='--cgroups-per-qos=true --cgroup-root=/'
|
||||
TEST_ARGS='--feature-gates=DynamicKubeletConfig=true --generate-kubelet-config-file=true'
|
||||
KUBELET_ARGS=''
|
||||
PARALLELISM=1
|
||||
TIMEOUT=3h
|
||||
|
2
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/ubuntu-init-docker.yaml
generated
vendored
@ -27,7 +27,7 @@ write_files:
|
||||
permissions: 0644
|
||||
owner: root
|
||||
content: |
|
||||
# This script reads a GCE metadata key for the user speficied Docker
|
||||
# This script reads a GCE metadata key for the user specified Docker
|
||||
# version, downloads, and replaces the builtin Docker with it.
|
||||
|
||||
set -x
|
||||
|
4
vendor/k8s.io/kubernetes/test/e2e_node/lifecycle_hook_test.go
generated
vendored
@ -116,7 +116,7 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
|
||||
},
|
||||
},
|
||||
}
|
||||
podWithHook := getPodWithHook("pod-with-poststart-http-hook", framework.GetPauseImageNameForHostArch(), lifecycle)
|
||||
podWithHook := getPodWithHook("pod-with-poststart-http-hook", imageutils.GetPauseImageNameForHostArch(), lifecycle)
|
||||
testPodWithHook(podWithHook)
|
||||
})
|
||||
framework.ConformanceIt("should execute prestop http hook properly", func() {
|
||||
@ -129,7 +129,7 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
|
||||
},
|
||||
},
|
||||
}
|
||||
podWithHook := getPodWithHook("pod-with-prestop-http-hook", framework.GetPauseImageNameForHostArch(), lifecycle)
|
||||
podWithHook := getPodWithHook("pod-with-prestop-http-hook", imageutils.GetPauseImageNameForHostArch(), lifecycle)
|
||||
testPodWithHook(podWithHook)
|
||||
})
|
||||
})
|
||||
|
24
vendor/k8s.io/kubernetes/test/e2e_node/log_path_test.go
generated
vendored
@ -39,6 +39,30 @@ var _ = framework.KubeDescribe("ContainerLogPath", func() {
|
||||
f := framework.NewDefaultFramework("kubelet-container-log-path")
|
||||
Describe("Pod with a container", func() {
|
||||
Context("printed log to stdout", func() {
|
||||
BeforeEach(func() {
|
||||
if framework.TestContext.ContainerRuntime == "docker" {
|
||||
// Container Log Path support requires JSON logging driver.
|
||||
// It does not work when Docker daemon is logging to journald.
|
||||
d, err := getDockerLoggingDriver()
|
||||
framework.ExpectNoError(err)
|
||||
if d != "json-file" {
|
||||
framework.Skipf("Skipping because Docker daemon is using a logging driver other than \"json-file\": %s", d)
|
||||
}
|
||||
// Even if JSON logging is in use, this test fails if SELinux support
|
||||
// is enabled, since the isolation provided by the SELinux policy
|
||||
// prevents processes running inside Docker containers (under SELinux
|
||||
// type svirt_lxc_net_t) from accessing the log files which are owned
|
||||
// by Docker (and labeled with the container_var_lib_t type.)
|
||||
//
|
||||
// Therefore, let's also skip this test when running with SELinux
|
||||
// support enabled.
|
||||
e, err := isDockerSELinuxSupportEnabled()
|
||||
framework.ExpectNoError(err)
|
||||
if e {
|
||||
framework.Skipf("Skipping because Docker daemon is running with SELinux support enabled")
|
||||
}
|
||||
}
|
||||
})
|
||||
It("should print log to correct log path", func() {
|
||||
podClient := f.PodClient()
|
||||
ns := f.Namespace.Name
|
||||
|
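The skip logic above depends on the Docker daemon's logging driver; a hedged sketch of querying it with the vendored docker client (this approximates, but is not, the test's getDockerLoggingDriver helper):

package main

import (
	"context"
	"fmt"
	"os"

	"github.com/docker/docker/client"
)

func main() {
	// NewEnvClient honours DOCKER_HOST/DOCKER_API_VERSION; the assumption here is
	// that the local daemon is reachable the same way the e2e_node helpers reach it.
	cli, err := client.NewEnvClient()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	info, err := cli.Info(context.Background())
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// ContainerLogPath only works with the json-file driver, hence the skip above.
	fmt.Println("logging driver:", info.LoggingDriver)
}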
287
vendor/k8s.io/kubernetes/test/e2e_node/memory_eviction_test.go
generated
vendored
@ -1,287 +0,0 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
nodeutil "k8s.io/kubernetes/pkg/api/v1/node"
|
||||
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
// Eviction Policy is described here:
|
||||
// https://github.com/kubernetes/kubernetes/blob/master/docs/proposals/kubelet-eviction.md
|
||||
|
||||
var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", func() {
|
||||
var (
|
||||
evictionHard = map[string]string{"memory.available": "40%"}
|
||||
)
|
||||
|
||||
f := framework.NewDefaultFramework("eviction-test")
|
||||
|
||||
// This is a dummy context to wrap the outer AfterEach, which will run after the inner AfterEach.
|
||||
// We want to list all of the node and pod events, including any that occur while waiting for
|
||||
// memory pressure reduction, even if we time out while waiting.
|
||||
Context("", func() {
|
||||
|
||||
AfterEach(func() {
|
||||
// Print events
|
||||
logNodeEvents(f)
|
||||
logPodEvents(f)
|
||||
})
|
||||
Context("", func() {
|
||||
tempSetCurrentKubeletConfig(f, func(c *kubeletconfig.KubeletConfiguration) {
|
||||
c.EvictionHard = evictionHard
|
||||
})
|
||||
|
||||
Context("when there is memory pressure", func() {
|
||||
AfterEach(func() {
|
||||
// Wait for the memory pressure condition to disappear from the node status before continuing.
|
||||
By("waiting for the memory pressure condition on the node to disappear before ending the test.")
|
||||
Eventually(func() error {
|
||||
nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("tried to get node list but got error: %v", err)
|
||||
}
|
||||
// Assuming that there is only one node, because this is a node e2e test.
|
||||
if len(nodeList.Items) != 1 {
|
||||
return fmt.Errorf("expected 1 node, but see %d. List: %v", len(nodeList.Items), nodeList.Items)
|
||||
}
|
||||
node := nodeList.Items[0]
|
||||
_, pressure := nodeutil.GetNodeCondition(&node.Status, v1.NodeMemoryPressure)
|
||||
if pressure != nil && pressure.Status == v1.ConditionTrue {
|
||||
return fmt.Errorf("node is still reporting memory pressure condition: %s", pressure)
|
||||
}
|
||||
return nil
|
||||
}, 5*time.Minute, 15*time.Second).Should(BeNil())
|
||||
|
||||
// Check available memory after condition disappears, just in case:
|
||||
// Wait for available memory to decrease to a reasonable level before ending the test.
|
||||
// This helps prevent interference with tests that start immediately after this one.
|
||||
By("waiting for available memory to decrease to a reasonable level before ending the test.")
|
||||
Eventually(func() error {
|
||||
summary, err := getNodeSummary()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if summary.Node.Memory.AvailableBytes == nil {
|
||||
return fmt.Errorf("summary.Node.Memory.AvailableBytes was nil, cannot get memory stats.")
|
||||
}
|
||||
if summary.Node.Memory.WorkingSetBytes == nil {
|
||||
return fmt.Errorf("summary.Node.Memory.WorkingSetBytes was nil, cannot get memory stats.")
|
||||
}
|
||||
avail := *summary.Node.Memory.AvailableBytes
|
||||
wset := *summary.Node.Memory.WorkingSetBytes
|
||||
|
||||
// memory limit = avail + wset
|
||||
limit := avail + wset
|
||||
halflimit := limit / 2
|
||||
|
||||
// Wait for at least half of memory limit to be available
|
||||
if avail >= halflimit {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("current available memory is: %d bytes. Expected at least %d bytes available.", avail, halflimit)
|
||||
}, 5*time.Minute, 15*time.Second).Should(BeNil())
|
||||
|
||||
// TODO(mtaufen): 5 minute wait to stop flaky test bleeding while we figure out what is actually going on.
|
||||
// If related to pressure transition period in eviction manager, probably only need to wait
|
||||
// just over 30s becasue that is the transition period set for node e2e tests. But since we
|
||||
// know 5 min works and we don't know if transition period is the problem, wait 5 min for now.
|
||||
time.Sleep(5 * time.Minute)
|
||||
|
||||
// Finally, try starting a new pod and wait for it to be scheduled and running.
|
||||
// This is the final check to try to prevent interference with subsequent tests.
|
||||
podName := "admit-best-effort-pod"
|
||||
f.PodClient().CreateSync(&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: framework.GetPauseImageNameForHostArch(),
|
||||
Name: podName,
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
It("should evict pods in the correct order (besteffort first, then burstable, then guaranteed)", func() {
|
||||
By("creating a guaranteed pod, a burstable pod, and a besteffort pod.")
|
||||
|
||||
// A pod is guaranteed only when requests and limits are specified for all the containers and they are equal.
|
||||
guaranteed := getMemhogPod("guaranteed-pod", "guaranteed", v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("100m"),
|
||||
v1.ResourceMemory: resource.MustParse("100Mi"),
|
||||
},
|
||||
Limits: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("100m"),
|
||||
v1.ResourceMemory: resource.MustParse("100Mi"),
|
||||
}})
|
||||
guaranteed = f.PodClient().CreateSync(guaranteed)
|
||||
glog.Infof("pod created with name: %s", guaranteed.Name)
|
||||
|
||||
// A pod is burstable if limits and requests do not match across all containers.
|
||||
burstable := getMemhogPod("burstable-pod", "burstable", v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("100m"),
|
||||
v1.ResourceMemory: resource.MustParse("100Mi"),
|
||||
}})
|
||||
burstable = f.PodClient().CreateSync(burstable)
|
||||
glog.Infof("pod created with name: %s", burstable.Name)
|
||||
|
||||
// A pod is besteffort if none of its containers have specified any requests or limits .
|
||||
besteffort := getMemhogPod("besteffort-pod", "besteffort", v1.ResourceRequirements{})
|
||||
besteffort = f.PodClient().CreateSync(besteffort)
|
||||
glog.Infof("pod created with name: %s", besteffort.Name)
|
||||
|
||||
// We poll until timeout or all pods are killed.
|
||||
// Inside the func, we check that all pods are in a valid phase with
|
||||
// respect to the eviction order of best effort, then burstable, then guaranteed.
|
||||
By("polling the Status.Phase of each pod and checking for violations of the eviction order.")
|
||||
Eventually(func() error {
|
||||
|
||||
gteed, gtErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(guaranteed.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(gtErr, fmt.Sprintf("getting pod %s", guaranteed.Name))
|
||||
gteedPh := gteed.Status.Phase
|
||||
|
||||
burst, buErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(burstable.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(buErr, fmt.Sprintf("getting pod %s", burstable.Name))
|
||||
burstPh := burst.Status.Phase
|
||||
|
||||
best, beErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(besteffort.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(beErr, fmt.Sprintf("getting pod %s", besteffort.Name))
|
||||
bestPh := best.Status.Phase
|
||||
|
||||
glog.Infof("pod phase: guaranteed: %v, burstable: %v, besteffort: %v", gteedPh, burstPh, bestPh)
|
||||
|
||||
// NOTE/TODO(mtaufen): This should help us debug why burstable appears to fail before besteffort in some
|
||||
// scenarios. We have seen some evidence that the eviction manager has in fact done the
|
||||
// right thing and evicted the besteffort first, and attempted to change the besteffort phase
|
||||
// to "Failed" when it evicts it, but that for some reason the test isn't seeing the updated
|
||||
// phase. I'm trying to confirm or deny this.
|
||||
// The eviction manager starts trying to evict things when the node comes under memory
|
||||
// pressure, and the eviction manager reports this information in the pressure condition. If we
|
||||
// see the eviction manager reporting a pressure condition for a while without the besteffort failing,
|
||||
// and we see that the manager did in fact evict the besteffort (this should be in the Kubelet log), we
|
||||
// will have more reason to believe the phase is out of date.
|
||||
nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
glog.Errorf("tried to get node list but got error: %v", err)
|
||||
}
|
||||
if len(nodeList.Items) != 1 {
|
||||
glog.Errorf("expected 1 node, but see %d. List: %v", len(nodeList.Items), nodeList.Items)
|
||||
}
|
||||
node := nodeList.Items[0]
|
||||
_, pressure := nodeutil.GetNodeCondition(&node.Status, v1.NodeMemoryPressure)
|
||||
glog.Infof("node pressure condition: %s", pressure)
|
||||
|
||||
// NOTE/TODO(mtaufen): Also log (at least temporarily) the actual memory consumption on the node.
|
||||
// I used this to plot memory usage from a successful test run and it looks the
|
||||
// way I would expect. I want to see what the plot from a flake looks like.
|
||||
summary, err := getNodeSummary()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if summary.Node.Memory.WorkingSetBytes != nil {
|
||||
wset := *summary.Node.Memory.WorkingSetBytes
|
||||
glog.Infof("Node's working set is (bytes): %v", wset)
|
||||
|
||||
}
|
||||
|
||||
if bestPh == v1.PodRunning {
|
||||
Expect(burstPh).NotTo(Equal(v1.PodFailed), "burstable pod failed before best effort pod")
|
||||
Expect(gteedPh).NotTo(Equal(v1.PodFailed), "guaranteed pod failed before best effort pod")
|
||||
} else if burstPh == v1.PodRunning {
|
||||
Expect(gteedPh).NotTo(Equal(v1.PodFailed), "guaranteed pod failed before burstable pod")
|
||||
}
|
||||
|
||||
// When both besteffort and burstable have been evicted, the test has completed.
|
||||
if bestPh == v1.PodFailed && burstPh == v1.PodFailed {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("besteffort and burstable have not yet both been evicted.")
|
||||
|
||||
}, 60*time.Minute, 5*time.Second).Should(BeNil())
|
||||
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
})
|
||||
|
||||
func getMemhogPod(podName string, ctnName string, res v1.ResourceRequirements) *v1.Pod {
|
||||
env := []v1.EnvVar{
|
||||
{
|
||||
Name: "MEMORY_LIMIT",
|
||||
ValueFrom: &v1.EnvVarSource{
|
||||
ResourceFieldRef: &v1.ResourceFieldSelector{
|
||||
Resource: "limits.memory",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// If there is a limit specified, pass 80% of it for -mem-total, otherwise use the downward API
|
||||
// to pass limits.memory, which will be the total memory available.
|
||||
// This helps prevent a guaranteed pod from triggering an OOM kill due to it's low memory limit,
|
||||
// which will cause the test to fail inappropriately.
|
||||
var memLimit string
|
||||
if limit, ok := res.Limits[v1.ResourceMemory]; ok {
|
||||
memLimit = strconv.Itoa(int(
|
||||
float64(limit.Value()) * 0.8))
|
||||
} else {
|
||||
memLimit = "$(MEMORY_LIMIT)"
|
||||
}
|
||||
|
||||
return &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: ctnName,
|
||||
Image: "gcr.io/google-containers/stress:v1",
|
||||
ImagePullPolicy: "Always",
|
||||
Env: env,
|
||||
// 60 min timeout * 60s / tick per 10s = 360 ticks before timeout => ~11.11Mi/tick
|
||||
// to fill ~4Gi of memory, so initial ballpark 12Mi/tick.
|
||||
// We might see flakes due to timeout if the total memory on the nodes increases.
|
||||
Args: []string{"-mem-alloc-size", "12Mi", "-mem-alloc-sleep", "10s", "-mem-total", memLimit},
|
||||
Resources: res,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
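getMemhogPod above passes 80% of the memory limit to the stress tool so a guaranteed pod does not OOM-kill itself; a small hedged sketch of just that calculation (memhogTarget is an illustrative name, not a helper from the vendored test):

package main

import (
	"fmt"
	"strconv"

	"k8s.io/apimachinery/pkg/api/resource"
)

// memhogTarget mirrors the idea in getMemhogPod: when a memory limit is known,
// aim the stress tool at 80% of it; otherwise defer to the downward-API env var.
func memhogTarget(limit *resource.Quantity) string {
	if limit != nil {
		return strconv.Itoa(int(float64(limit.Value()) * 0.8))
	}
	return "$(MEMORY_LIMIT)"
}

func main() {
	q := resource.MustParse("100Mi")
	fmt.Println(memhogTarget(&q)) // 83886080 bytes, i.e. 80% of 100Mi
	fmt.Println(memhogTarget(nil))
}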
12
vendor/k8s.io/kubernetes/test/e2e_node/mirror_pod_test.go
generated
vendored
@ -39,16 +39,16 @@ import (
|
||||
var _ = framework.KubeDescribe("MirrorPod", func() {
|
||||
f := framework.NewDefaultFramework("mirror-pod")
|
||||
Context("when create a mirror pod ", func() {
|
||||
var ns, manifestPath, staticPodName, mirrorPodName string
|
||||
var ns, podPath, staticPodName, mirrorPodName string
|
||||
BeforeEach(func() {
|
||||
ns = f.Namespace.Name
|
||||
staticPodName = "static-pod-" + string(uuid.NewUUID())
|
||||
mirrorPodName = staticPodName + "-" + framework.TestContext.NodeName
|
||||
|
||||
manifestPath = framework.TestContext.KubeletConfig.PodManifestPath
|
||||
podPath = framework.TestContext.KubeletConfig.StaticPodPath
|
||||
|
||||
By("create the static pod")
|
||||
err := createStaticPod(manifestPath, staticPodName, ns,
|
||||
err := createStaticPod(podPath, staticPodName, ns,
|
||||
imageutils.GetE2EImage(imageutils.NginxSlim), v1.RestartPolicyAlways)
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
|
||||
@ -64,8 +64,8 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
|
||||
uid := pod.UID
|
||||
|
||||
By("update the static pod container image")
|
||||
image := framework.GetPauseImageNameForHostArch()
|
||||
err = createStaticPod(manifestPath, staticPodName, ns, image, v1.RestartPolicyAlways)
|
||||
image := imageutils.GetPauseImageNameForHostArch()
|
||||
err = createStaticPod(podPath, staticPodName, ns, image, v1.RestartPolicyAlways)
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
|
||||
By("wait for the mirror pod to be updated")
|
||||
@ -111,7 +111,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
|
||||
})
|
||||
AfterEach(func() {
|
||||
By("delete the static pod")
|
||||
err := deleteStaticPod(manifestPath, staticPodName, ns)
|
||||
err := deleteStaticPod(podPath, staticPodName, ns)
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
|
||||
By("wait for the mirror pod to disappear")
|
||||
|
4
vendor/k8s.io/kubernetes/test/e2e_node/node_problem_detector_linux.go
generated
vendored
@ -45,7 +45,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
|
||||
pollInterval = 1 * time.Second
|
||||
pollConsistent = 5 * time.Second
|
||||
pollTimeout = 1 * time.Minute
|
||||
image = "gcr.io/google_containers/node-problem-detector:v0.4.1"
|
||||
image = "k8s.gcr.io/node-problem-detector:v0.4.1"
|
||||
)
|
||||
f := framework.NewDefaultFramework("node-problem-detector")
|
||||
var c clientset.Interface
|
||||
@ -66,7 +66,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
|
||||
framework.KubeDescribe("SystemLogMonitor", func() {
|
||||
const (
|
||||
// Use test condition to avoid changing the real node condition in use.
|
||||
// TODO(random-liu): Now node condition could be arbitrary string, consider wether we need to
|
||||
// TODO(random-liu): Now node condition could be arbitrary string, consider whether we need to
|
||||
// add TestCondition when switching to predefined condition list.
|
||||
condition = v1.NodeConditionType("TestCondition")
|
||||
|
||||
|
1
vendor/k8s.io/kubernetes/test/e2e_node/remote/BUILD
generated
vendored
@ -8,6 +8,7 @@ load(
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"cadvisor_e2e.go",
|
||||
"node_conformance.go",
|
||||
"node_e2e.go",
|
||||
"remote.go",
|
||||
|
77
vendor/k8s.io/kubernetes/test/e2e_node/remote/cadvisor_e2e.go
generated
vendored
Normal file
@ -0,0 +1,77 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package remote
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/kubernetes/test/e2e_node/builder"
|
||||
)
|
||||
|
||||
// CAdvisorE2ERemote contains the specific functions in the cadvisor e2e test suite.
|
||||
type CAdvisorE2ERemote struct{}
|
||||
|
||||
// InitCAdvisorE2ERemote performs initialization for cadvisor remote testing
|
||||
func InitCAdvisorE2ERemote() TestSuite {
|
||||
return &CAdvisorE2ERemote{}
|
||||
}
|
||||
|
||||
// SetupTestPackage implements TestSuite.SetupTestPackage
|
||||
func (n *CAdvisorE2ERemote) SetupTestPackage(tardir, systemSpecName string) error {
|
||||
cadvisorRootDir, err := builder.GetCAdvisorRootDir()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// build the cadvisor binary and tests
|
||||
if err := runCommand(fmt.Sprintf("%s/build/prow_e2e.sh", cadvisorRootDir)); err != nil {
|
||||
return err
|
||||
}
|
||||
// transfer the entire directory to each node
|
||||
if err := runCommand("cp", "-R", cadvisorRootDir, fmt.Sprintf("%s/", tardir)); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func runCommand(command string, args ...string) error {
|
||||
cmd := exec.Command(command, args...)
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
err := cmd.Run()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to run command %s. error: %v", command, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// RunTest implements TestSuite.RunTest
|
||||
func (n *CAdvisorE2ERemote) RunTest(host, workspace, results, imageDesc, junitFilePrefix, testArgs, ginkgoArgs, systemSpecName string, timeout time.Duration) (string, error) {
|
||||
// Kill any running node processes
|
||||
cleanupNodeProcesses(host)
|
||||
|
||||
glog.V(2).Infof("Starting tests on %q", host)
|
||||
return SSH(host, "sh", "-c", getSSHCommand(" && ",
|
||||
fmt.Sprintf("cd %s/cadvisor", workspace),
|
||||
fmt.Sprintf("timeout -k 30s %fs ./build/integration.sh ../results/cadvisor.log",
|
||||
timeout.Seconds()),
|
||||
))
|
||||
}
|
20
vendor/k8s.io/kubernetes/test/e2e_node/remote/node_conformance.go
generated
vendored
20
vendor/k8s.io/kubernetes/test/e2e_node/remote/node_conformance.go
generated
vendored
@ -53,7 +53,7 @@ func commandToString(c *exec.Cmd) string {
|
||||
|
||||
// Image path constants.
|
||||
const (
|
||||
conformanceRegistry = "gcr.io/google_containers"
|
||||
conformanceRegistry = "k8s.gcr.io"
|
||||
conformanceArch = runtime.GOARCH
|
||||
conformanceTarfile = "node_conformance.tar"
|
||||
conformanceTestBinary = "e2e_node.test"
|
||||
@ -102,7 +102,7 @@ func buildConformanceTest(binDir, systemSpecName string) error {
|
||||
func (c *ConformanceRemote) SetupTestPackage(tardir, systemSpecName string) error {
|
||||
// Build the executables
|
||||
if err := builder.BuildGo(); err != nil {
|
||||
return fmt.Errorf("failed to build the depedencies: %v", err)
|
||||
return fmt.Errorf("failed to build the dependencies: %v", err)
|
||||
}
|
||||
|
||||
// Make sure we can find the newly built binaries
|
||||
@ -146,15 +146,15 @@ func loadConformanceImage(host, workspace string) error {
|
||||
// kubeletLauncherLog is the log of kubelet launcher.
|
||||
const kubeletLauncherLog = "kubelet-launcher.log"
|
||||
|
||||
// kubeletPodManifestPath is a fixed known pod manifest path. We can not use the random pod
|
||||
// kubeletPodPath is a fixed known pod specification path. We can not use the random pod
|
||||
// manifest directory generated in e2e_node.test because we need to mount the directory into
|
||||
// the conformance test container, it's easier if it's a known directory.
|
||||
// TODO(random-liu): Get rid of this once we switch to cluster e2e node bootstrap script.
|
||||
var kubeletPodManifestPath = "conformance-pod-manifest-" + timestamp
|
||||
var kubeletPodPath = "conformance-pod-manifest-" + timestamp
|
||||
|
||||
// getPodManifestPath returns pod manifest full path.
|
||||
func getPodManifestPath(workspace string) string {
|
||||
return filepath.Join(workspace, kubeletPodManifestPath)
|
||||
// getPodPath returns pod manifest full path.
|
||||
func getPodPath(workspace string) string {
|
||||
return filepath.Join(workspace, kubeletPodPath)
|
||||
}
|
||||
|
||||
// isSystemd returns whether the node is a systemd node.
|
||||
@ -173,7 +173,7 @@ func isSystemd(host string) (bool, error) {
|
||||
// node conformance test.
|
||||
// TODO(random-liu): Switch to use standard node bootstrap script.
|
||||
func launchKubelet(host, workspace, results, testArgs string) error {
|
||||
podManifestPath := getPodManifestPath(workspace)
|
||||
podManifestPath := getPodPath(workspace)
|
||||
if output, err := SSH(host, "mkdir", podManifestPath); err != nil {
|
||||
return fmt.Errorf("failed to create kubelet pod manifest path %q: error - %v output - %q",
|
||||
podManifestPath, err, output)
|
||||
@ -249,7 +249,7 @@ func stopKubelet(host, workspace string) error {
|
||||
}
|
||||
glog.Info("Successfully stop kubelet")
|
||||
// Clean up the pod manifest path
|
||||
podManifestPath := getPodManifestPath(workspace)
|
||||
podManifestPath := getPodPath(workspace)
|
||||
if output, err := SSH(host, "rm", "-f", filepath.Join(workspace, podManifestPath)); err != nil {
|
||||
return fmt.Errorf("failed to cleanup pod manifest directory %q: error - %v, output - %q",
|
||||
podManifestPath, err, output)
|
||||
@ -291,7 +291,7 @@ func (c *ConformanceRemote) RunTest(host, workspace, results, imageDesc, junitFi
|
||||
|
||||
// Run the tests
|
||||
glog.V(2).Infof("Starting tests on %q", host)
|
||||
podManifestPath := getPodManifestPath(workspace)
|
||||
podManifestPath := getPodPath(workspace)
|
||||
cmd := fmt.Sprintf("'timeout -k 30s %fs docker run --rm --privileged=true --net=host -v /:/rootfs -v %s:%s -v %s:/var/result -e TEST_ARGS=--report-prefix=%s %s'",
|
||||
timeout.Seconds(), podManifestPath, podManifestPath, results, junitFilePrefix, getConformanceTestImageName(systemSpecName))
|
||||
testOutput, err := SSH(host, "sh", "-c", cmd)
|
||||
|
2
vendor/k8s.io/kubernetes/test/e2e_node/remote/node_e2e.go
generated
vendored
@ -45,7 +45,7 @@ func InitNodeE2ERemote() TestSuite {
|
||||
func (n *NodeE2ERemote) SetupTestPackage(tardir, systemSpecName string) error {
|
||||
// Build the executables
|
||||
if err := builder.BuildGo(); err != nil {
|
||||
return fmt.Errorf("failed to build the depedencies: %v", err)
|
||||
return fmt.Errorf("failed to build the dependencies: %v", err)
|
||||
}
|
||||
|
||||
// Make sure we can find the newly built binaries
|
||||
|
17
vendor/k8s.io/kubernetes/test/e2e_node/remote/remote.go
generated
vendored
@ -139,21 +139,20 @@ func getTestArtifacts(host, testDir string) error {
|
||||
return fmt.Errorf("failed to create log directory %q: %v", logPath, err)
|
||||
}
|
||||
// Copy logs to artifacts/hostname
|
||||
_, err := runSSHCommand("scp", "-r", fmt.Sprintf("%s:%s/results/*.log", GetHostnameOrIp(host), testDir), logPath)
|
||||
if err != nil {
|
||||
if _, err := runSSHCommand("scp", "-r", fmt.Sprintf("%s:%s/results/*.log", GetHostnameOrIp(host), testDir), logPath); err != nil {
|
||||
return err
|
||||
}
|
||||
// Copy json files (if any) to artifacts.
|
||||
if _, err = SSH(host, "ls", fmt.Sprintf("%s/results/*.json", testDir)); err == nil {
|
||||
_, err = runSSHCommand("scp", "-r", fmt.Sprintf("%s:%s/results/*.json", GetHostnameOrIp(host), testDir), *resultsDir)
|
||||
if err != nil {
|
||||
if _, err := SSH(host, "ls", fmt.Sprintf("%s/results/*.json", testDir)); err == nil {
|
||||
if _, err = runSSHCommand("scp", "-r", fmt.Sprintf("%s:%s/results/*.json", GetHostnameOrIp(host), testDir), *resultsDir); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// Copy junit to the top of artifacts
|
||||
_, err = runSSHCommand("scp", fmt.Sprintf("%s:%s/results/junit*", GetHostnameOrIp(host), testDir), *resultsDir)
|
||||
if err != nil {
|
||||
return err
|
||||
if _, err := SSH(host, "ls", fmt.Sprintf("%s/results/junit*", testDir)); err == nil {
|
||||
// Copy junit (if any) to the top of artifacts
|
||||
if _, err = runSSHCommand("scp", fmt.Sprintf("%s:%s/results/junit*", GetHostnameOrIp(host), testDir), *resultsDir); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
2
vendor/k8s.io/kubernetes/test/e2e_node/remote/types.go
generated
vendored
@ -14,6 +14,8 @@ See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package remote contains implementations of the TestSuite interface, which specify
|
||||
// how to run various node test suites remotely.
|
||||
package remote
|
||||
|
||||
import (
|
||||
|
33
vendor/k8s.io/kubernetes/test/e2e_node/resource_collector.go
generated
vendored
@ -45,6 +45,7 @@ import (
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e_node/perftype"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
@ -84,16 +85,16 @@ func NewResourceCollector(interval time.Duration) *ResourceCollector {
|
||||
// Start starts resource collector and connects to the standalone Cadvisor pod
|
||||
// then repeatedly runs collectStats.
|
||||
func (r *ResourceCollector) Start() {
|
||||
// Get the cgroup container names for kubelet and docker
|
||||
// Get the cgroup container names for kubelet and runtime
|
||||
kubeletContainer, err := getContainerNameForProcess(kubeletProcessName, "")
|
||||
dockerContainer, err := getContainerNameForProcess(dockerProcessName, dockerPidFile)
|
||||
runtimeContainer, err := getContainerNameForProcess(framework.TestContext.ContainerRuntimeProcessName, framework.TestContext.ContainerRuntimePidFile)
|
||||
if err == nil {
|
||||
systemContainers = map[string]string{
|
||||
stats.SystemContainerKubelet: kubeletContainer,
|
||||
stats.SystemContainerRuntime: dockerContainer,
|
||||
stats.SystemContainerRuntime: runtimeContainer,
|
||||
}
|
||||
} else {
|
||||
framework.Failf("Failed to get docker container name in test-e2e-node resource collector.")
|
||||
framework.Failf("Failed to get runtime container name in test-e2e-node resource collector.")
|
||||
}
|
||||
|
||||
wait.Poll(1*time.Second, 1*time.Minute, func() (bool, error) {
|
||||
@ -239,12 +240,11 @@ func (r *ResourceCollector) GetBasicCPUStats(containerName string) map[float64]f
|
||||
func formatResourceUsageStats(containerStats framework.ResourceUsagePerContainer) string {
|
||||
// Example output:
|
||||
//
|
||||
// Resource usage for node "e2e-test-foo-node-abcde":
|
||||
// container cpu(cores) memory(MB)
|
||||
// "/" 0.363 2942.09
|
||||
// "/docker-daemon" 0.088 521.80
|
||||
// "/kubelet" 0.086 424.37
|
||||
// "/system" 0.007 119.88
|
||||
// Resource usage:
|
||||
//container cpu(cores) memory_working_set(MB) memory_rss(MB)
|
||||
//"kubelet" 0.068 27.92 15.43
|
||||
//"runtime" 0.664 89.88 68.13
|
||||
|
||||
buf := &bytes.Buffer{}
|
||||
w := tabwriter.NewWriter(buf, 1, 0, 1, ' ', 0)
|
||||
fmt.Fprintf(w, "container\tcpu(cores)\tmemory_working_set(MB)\tmemory_rss(MB)\n")
|
||||
@ -257,7 +257,7 @@ func formatResourceUsageStats(containerStats framework.ResourceUsagePerContainer
|
||||
|
||||
func formatCPUSummary(summary framework.ContainersCPUSummary) string {
|
||||
// Example output for a node (the percentiles may differ):
|
||||
// CPU usage of containers on node "e2e-test-foo-node-0vj7":
|
||||
// CPU usage of containers:
|
||||
// container 5th% 50th% 90th% 95th%
|
||||
// "/" 0.051 0.159 0.387 0.455
|
||||
// "/runtime 0.000 0.000 0.146 0.166
|
||||
@ -373,6 +373,7 @@ func deletePodsSync(f *framework.Framework, pods []*v1.Pod) {
|
||||
for _, pod := range pods {
|
||||
wg.Add(1)
|
||||
go func(pod *v1.Pod) {
|
||||
defer GinkgoRecover()
|
||||
defer wg.Done()
|
||||
|
||||
err := f.PodClient().Delete(pod.ObjectMeta.Name, metav1.NewDeleteOptions(30))
|
||||
@ -459,15 +460,7 @@ func (r *ResourceCollector) GetResourceTimeSeries() map[string]*perftype.Resourc
|
||||
return resourceSeries
|
||||
}
|
||||
|
||||
// Code for getting container name of docker, copied from pkg/kubelet/cm/container_manager_linux.go
|
||||
// since they are not exposed
|
||||
const (
|
||||
kubeletProcessName = "kubelet"
|
||||
dockerProcessName = "docker"
|
||||
dockerPidFile = "/var/run/docker.pid"
|
||||
containerdProcessName = "docker-containerd"
|
||||
containerdPidFile = "/run/docker/libcontainerd/docker-containerd.pid"
|
||||
)
|
||||
const kubeletProcessName = "kubelet"
|
||||
|
||||
func getPidsForProcess(name, pidFile string) ([]int, error) {
|
||||
if len(pidFile) > 0 {
|
||||
|
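formatResourceUsageStats in the hunk above emits a tab-aligned table of per-container usage; a minimal text/tabwriter sketch with the same column layout (the numbers are made up to match the example output):

package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

type usage struct {
	cpuCores   float64
	workingSet float64 // MB
	rss        float64 // MB
}

func main() {
	stats := map[string]usage{
		"kubelet": {0.068, 27.92, 15.43},
		"runtime": {0.664, 89.88, 68.13},
	}
	// Same column layout as the example output in formatResourceUsageStats.
	w := tabwriter.NewWriter(os.Stdout, 1, 0, 1, ' ', 0)
	fmt.Fprintf(w, "container\tcpu(cores)\tmemory_working_set(MB)\tmemory_rss(MB)\n")
	for name, u := range stats {
		fmt.Fprintf(w, "%q\t%.3f\t%.2f\t%.2f\n", name, u.cpuCores, u.workingSet, u.rss)
	}
	w.Flush()
}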
3
vendor/k8s.io/kubernetes/test/e2e_node/resource_usage_test.go
generated
vendored
@ -26,6 +26,7 @@ import (
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
@ -142,7 +143,7 @@ func runResourceUsageTest(f *framework.Framework, rc *ResourceCollector, testArg
|
||||
// sleep for an interval here to measure steady data
|
||||
sleepAfterCreatePods = 10 * time.Second
|
||||
)
|
||||
pods := newTestPods(testArg.podsNr, true, framework.GetPauseImageNameForHostArch(), "test_pod")
|
||||
pods := newTestPods(testArg.podsNr, true, imageutils.GetPauseImageNameForHostArch(), "test_pod")
|
||||
|
||||
rc.Start()
|
||||
// Explicitly delete pods to prevent namespace controller cleanning up timeout
|
||||
|
41
vendor/k8s.io/kubernetes/test/e2e_node/restart_test.go
generated
vendored
@ -28,8 +28,10 @@ import (
|
||||
"os/exec"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"k8s.io/api/core/v1"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
|
||||
// waitForPods waits for timeout duration, for pod_count.
|
||||
@ -75,11 +77,11 @@ var _ = framework.KubeDescribe("Restart [Serial] [Slow] [Disruptive]", func() {
|
||||
)
|
||||
|
||||
f := framework.NewDefaultFramework("restart-test")
|
||||
Context("Docker Daemon", func() {
|
||||
Context("Container Runtime", func() {
|
||||
Context("Network", func() {
|
||||
It("should recover from ip leak", func() {
|
||||
|
||||
pods := newTestPods(podCount, false, framework.GetPauseImageNameForHostArch(), "restart-docker-test")
|
||||
pods := newTestPods(podCount, false, imageutils.GetPauseImageNameForHostArch(), "restart-container-runtime-test")
|
||||
By(fmt.Sprintf("Trying to create %d pods on node", len(pods)))
|
||||
createBatchPodWithRateControl(f, pods, podCreationInterval)
|
||||
defer deletePodsSync(f, pods)
|
||||
@ -88,34 +90,47 @@ var _ = framework.KubeDescribe("Restart [Serial] [Slow] [Disruptive]", func() {
|
||||
// startTimeout fit on the node and the node is now saturated.
|
||||
runningPods := waitForPods(f, podCount, startTimeout)
|
||||
if len(runningPods) < minPods {
|
||||
framework.Failf("Failed to start %d pods, cannot test that restarting docker doesn't leak IPs", minPods)
|
||||
framework.Failf("Failed to start %d pods, cannot test that restarting container runtime doesn't leak IPs", minPods)
|
||||
}
|
||||
|
||||
for i := 0; i < restartCount; i += 1 {
|
||||
By(fmt.Sprintf("Restarting Docker Daemon iteration %d", i))
|
||||
|
||||
// TODO: Find a uniform way to deal with systemctl/initctl/service operations. #34494
|
||||
if stdout, err := exec.Command("sudo", "systemctl", "restart", "docker").CombinedOutput(); err != nil {
|
||||
framework.Logf("Failed to trigger docker restart with systemd/systemctl: %v, stdout: %q", err, string(stdout))
|
||||
if stdout, err = exec.Command("sudo", "service", "docker", "restart").CombinedOutput(); err != nil {
|
||||
framework.Failf("Failed to trigger docker restart with upstart/service: %v, stdout: %q", err, string(stdout))
|
||||
By(fmt.Sprintf("Killing container runtime iteration %d", i))
|
||||
// Wait for container runtime to be running
|
||||
var pid int
|
||||
Eventually(func() error {
|
||||
runtimePids, err := getPidsForProcess(framework.TestContext.ContainerRuntimeProcessName, framework.TestContext.ContainerRuntimePidFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(runtimePids) != 1 {
|
||||
return fmt.Errorf("unexpected container runtime pid list: %+v", runtimePids)
|
||||
}
|
||||
// Make sure the container runtime is running, pid got from pid file may not be running.
|
||||
pid = runtimePids[0]
|
||||
if _, err := exec.Command("sudo", "ps", "-p", fmt.Sprintf("%d", pid)).CombinedOutput(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}, 1*time.Minute, 2*time.Second).Should(BeNil())
|
||||
if stdout, err := exec.Command("sudo", "kill", fmt.Sprintf("%d", pid)).CombinedOutput(); err != nil {
|
||||
framework.Failf("Failed to kill container runtime (pid=%d): %v, stdout: %q", pid, err, string(stdout))
|
||||
}
|
||||
// Assume that container runtime will be restarted by systemd/supervisord etc.
|
||||
time.Sleep(20 * time.Second)
|
||||
}
|
||||
|
||||
By("Checking currently Running/Ready pods")
|
||||
postRestartRunningPods := waitForPods(f, len(runningPods), recoverTimeout)
|
||||
if len(postRestartRunningPods) == 0 {
|
||||
framework.Failf("Failed to start *any* pods after docker restart, this might indicate an IP leak")
|
||||
framework.Failf("Failed to start *any* pods after container runtime restart, this might indicate an IP leak")
|
||||
}
|
||||
By("Confirm no containers have terminated")
|
||||
for _, pod := range postRestartRunningPods {
|
||||
if c := testutils.TerminatedContainers(pod); len(c) != 0 {
|
||||
framework.Failf("Pod %q has failed containers %+v after docker restart, this might indicate an IP leak", pod.Name, c)
|
||||
framework.Failf("Pod %q has failed containers %+v after container runtime restart, this might indicate an IP leak", pod.Name, c)
|
||||
}
|
||||
}
|
||||
By(fmt.Sprintf("Docker restart test passed with %d pods", len(postRestartRunningPods)))
|
||||
By(fmt.Sprintf("Container runtime restart test passed with %d pods", len(postRestartRunningPods)))
|
||||
})
|
||||
})
|
||||
})
|
||||
|
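The restart test above reads the runtime PID from a pid file, confirms the process is alive with ps -p, and only then kills it; a hedged standalone sketch of that liveness check (the pid file path is illustrative):

package main

import (
	"fmt"
	"os"
	"os/exec"
	"strconv"
	"strings"
)

// pidFromFile reads a single PID from a runtime pid file such as
// /var/run/docker.pid (path is illustrative).
func pidFromFile(path string) (int, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return 0, err
	}
	return strconv.Atoi(strings.TrimSpace(string(data)))
}

// isRunning mirrors the test's check: a stale pid file may name a process
// that no longer exists, so verify with `ps -p` before sending a signal.
func isRunning(pid int) bool {
	return exec.Command("ps", "-p", strconv.Itoa(pid)).Run() == nil
}

func main() {
	pid, err := pidFromFile("/var/run/docker.pid")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("pid %d running: %v\n", pid, isRunning(pid))
}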
3
vendor/k8s.io/kubernetes/test/e2e_node/runner/local/BUILD
generated
vendored
@ -8,8 +8,7 @@ load(
|
||||
|
||||
go_binary(
|
||||
name = "local",
|
||||
importpath = "k8s.io/kubernetes/test/e2e_node/runner/local",
|
||||
library = ":go_default_library",
|
||||
embed = [":go_default_library"],
|
||||
)
|
||||
|
||||
go_library(
|
||||
|
3
vendor/k8s.io/kubernetes/test/e2e_node/runner/remote/BUILD
generated
vendored
@ -8,8 +8,7 @@ load(
|
||||
|
||||
go_binary(
|
||||
name = "remote",
|
||||
importpath = "k8s.io/kubernetes/test/e2e_node/runner/remote",
|
||||
library = ":go_default_library",
|
||||
embed = [":go_default_library"],
|
||||
)
|
||||
|
||||
go_library(
|
||||
|
5
vendor/k8s.io/kubernetes/test/e2e_node/runner/remote/run_remote.go
generated
vendored
@ -147,7 +147,7 @@ type GCEImage struct {
|
||||
Project string `json:"project"`
|
||||
Metadata string `json:"metadata"`
|
||||
ImageRegex string `json:"image_regex, omitempty"`
|
||||
// Defaults to using only the latest image. Acceptible values are [0, # of images that match the regex).
|
||||
// Defaults to using only the latest image. Acceptable values are [0, # of images that match the regex).
|
||||
// If the number of existing previous images is lesser than what is desired, the test will use that is available.
|
||||
PreviousImages int `json:"previous_images, omitempty"`
|
||||
|
||||
@ -178,6 +178,8 @@ func main() {
|
||||
switch *testSuite {
|
||||
case "conformance":
|
||||
suite = remote.InitConformanceRemote()
|
||||
case "cadvisor":
|
||||
suite = remote.InitCAdvisorE2ERemote()
|
||||
// TODO: Add subcommand for node soaking, node conformance, cri validation.
|
||||
case "default":
|
||||
// Use node e2e suite by default if no subcommand is specified.
|
||||
@ -354,6 +356,7 @@ func main() {
|
||||
if !exitOk {
|
||||
fmt.Printf("Failure: %d errors encountered.\n", errCount)
|
||||
callGubernator(*gubernator)
|
||||
arc.deleteArchive()
|
||||
os.Exit(1)
|
||||
}
|
||||
callGubernator(*gubernator)
|
||||
|
4
vendor/k8s.io/kubernetes/test/e2e_node/runtime_conformance_test.go
generated
vendored
@ -275,13 +275,13 @@ while true; do sleep 1; done
|
||||
},
|
||||
{
|
||||
description: "should not be able to pull non-existing image from gcr.io",
|
||||
image: "gcr.io/google_containers/invalid-image:invalid-tag",
|
||||
image: "k8s.gcr.io/invalid-image:invalid-tag",
|
||||
phase: v1.PodPending,
|
||||
waiting: true,
|
||||
},
|
||||
{
|
||||
description: "should be able to pull image from gcr.io",
|
||||
image: "gcr.io/google_containers/alpine-with-bash:1.0",
|
||||
image: "k8s.gcr.io/alpine-with-bash:1.0",
|
||||
phase: v1.PodRunning,
|
||||
waiting: false,
|
||||
},
|
||||
|
36
vendor/k8s.io/kubernetes/test/e2e_node/security_context_test.go
generated
vendored
@ -140,7 +140,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
|
||||
}
|
||||
createAndWaitHostIPCPod := func(podName string, hostNetwork bool) {
|
||||
podClient.Create(makeHostIPCPod(podName,
|
||||
busyboxImage,
|
||||
imageutils.GetE2EImage(imageutils.IpcUtils),
|
||||
[]string{"sh", "-c", "ipcs -m | awk '{print $2}'"},
|
||||
hostNetwork,
|
||||
))
|
||||
@ -150,7 +150,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
|
||||
|
||||
hostSharedMemoryID := ""
|
||||
BeforeEach(func() {
|
||||
output, err := exec.Command("sh", "-c", "ipcmk -M 1M | awk '{print $NF}'").Output()
|
||||
output, err := exec.Command("sh", "-c", "ipcmk -M 1048576 | awk '{print $NF}'").Output()
|
||||
if err != nil {
|
||||
framework.Failf("Failed to create the shared memory on the host: %v", err)
|
||||
}
|
||||
@ -159,30 +159,30 @@ var _ = framework.KubeDescribe("Security Context", func() {
|
||||
})
|
||||
|
||||
It("should show the shared memory ID in the host IPC containers", func() {
|
||||
busyboxPodName := "busybox-hostipc-" + string(uuid.NewUUID())
|
||||
createAndWaitHostIPCPod(busyboxPodName, true)
|
||||
logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
|
||||
ipcutilsPodName := "ipcutils-hostipc-" + string(uuid.NewUUID())
|
||||
createAndWaitHostIPCPod(ipcutilsPodName, true)
|
||||
logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, ipcutilsPodName, ipcutilsPodName)
|
||||
if err != nil {
|
||||
framework.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err)
|
||||
framework.Failf("GetPodLogs for pod %q failed: %v", ipcutilsPodName, err)
|
||||
}
|
||||
|
||||
podSharedMemoryIDs := strings.TrimSpace(logs)
|
||||
framework.Logf("Got shared memory IDs %q from pod %q", podSharedMemoryIDs, busyboxPodName)
|
||||
framework.Logf("Got shared memory IDs %q from pod %q", podSharedMemoryIDs, ipcutilsPodName)
|
||||
if !strings.Contains(podSharedMemoryIDs, hostSharedMemoryID) {
|
||||
framework.Failf("hostIPC container should show shared memory IDs on host")
|
||||
}
|
||||
})
|
||||
|
||||
It("should not show the shared memory ID in the non-hostIPC containers", func() {
|
||||
busyboxPodName := "busybox-non-hostipc-" + string(uuid.NewUUID())
|
||||
createAndWaitHostIPCPod(busyboxPodName, false)
|
||||
logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
|
||||
ipcutilsPodName := "ipcutils-non-hostipc-" + string(uuid.NewUUID())
|
||||
createAndWaitHostIPCPod(ipcutilsPodName, false)
|
||||
logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, ipcutilsPodName, ipcutilsPodName)
|
||||
if err != nil {
|
||||
framework.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err)
|
||||
framework.Failf("GetPodLogs for pod %q failed: %v", ipcutilsPodName, err)
|
||||
}
|
||||
|
||||
podSharedMemoryIDs := strings.TrimSpace(logs)
|
||||
framework.Logf("Got shared memory IDs %q from pod %q", podSharedMemoryIDs, busyboxPodName)
|
||||
framework.Logf("Got shared memory IDs %q from pod %q", podSharedMemoryIDs, ipcutilsPodName)
|
||||
if strings.Contains(podSharedMemoryIDs, hostSharedMemoryID) {
|
||||
framework.Failf("non-hostIPC container should not show shared memory IDs on host")
|
||||
}
|
||||
@ -373,6 +373,18 @@ var _ = framework.KubeDescribe("Security Context", func() {
|
||||
if !isSupported {
|
||||
framework.Skipf("Skipping because no_new_privs is not supported in this docker")
|
||||
}
|
||||
// It turns out SELinux policy in RHEL 7 does not play well with
|
||||
// the "NoNewPrivileges" flag. So let's skip this test when running
|
||||
// with SELinux support enabled.
|
||||
//
|
||||
// TODO(filbranden): Remove this after the fix for
|
||||
// https://github.com/projectatomic/container-selinux/issues/45
|
||||
// has been backported to RHEL 7 (expected on RHEL 7.5)
|
||||
selinuxEnabled, err := isDockerSELinuxSupportEnabled()
|
||||
framework.ExpectNoError(err)
|
||||
if selinuxEnabled {
|
||||
framework.Skipf("Skipping because Docker daemon is running with SELinux support enabled")
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
|
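The hostIPC cases above are driven through a makeHostIPCPod helper whose body is not shown in this diff. A minimal sketch of what such a helper presumably builds, assuming the trailing boolean ultimately controls the pod-level HostIPC field (all names below are illustrative, not the test's real helper):

package e2e_node

import (
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// makeHostIPCPodSketch returns a single-container pod that optionally joins the
// host IPC namespace, so `ipcs -m` run inside the container can list the host's
// shared memory segment IDs when hostIPC is true.
func makeHostIPCPodSketch(podName, image string, command []string, hostIPC bool) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: podName},
		Spec: v1.PodSpec{
			RestartPolicy: v1.RestartPolicyNever,
			HostIPC:       hostIPC, // the property under test
			Containers: []v1.Container{
				{
					Name:    podName,
					Image:   image,
					Command: command,
				},
			},
		},
	}
}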
8
vendor/k8s.io/kubernetes/test/e2e_node/services/BUILD
generated
vendored
@ -22,9 +22,13 @@ go_library(
|
||||
deps = [
|
||||
"//cmd/kube-apiserver/app:go_default_library",
|
||||
"//cmd/kube-apiserver/app/options:go_default_library",
|
||||
"//cmd/kubelet/app/options:go_default_library",
|
||||
"//pkg/api/legacyscheme:go_default_library",
|
||||
"//pkg/controller/namespace:go_default_library",
|
||||
"//pkg/features:go_default_library",
|
||||
"//pkg/kubelet/apis/kubeletconfig:go_default_library",
|
||||
"//pkg/kubelet/apis/kubeletconfig/scheme:go_default_library",
|
||||
"//pkg/kubelet/apis/kubeletconfig/v1beta1:go_default_library",
|
||||
"//test/e2e/framework:go_default_library",
|
||||
"//test/e2e_node/builder:go_default_library",
|
||||
"//vendor/github.com/coreos/etcd/etcdserver:go_default_library",
|
||||
@ -33,8 +37,12 @@ go_library(
|
||||
"//vendor/github.com/coreos/etcd/pkg/types:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/github.com/kardianos/osext:go_default_library",
|
||||
"//vendor/github.com/spf13/pflag:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/util/flag:go_default_library",
|
||||
"//vendor/k8s.io/client-go/dynamic:go_default_library",
|
||||
"//vendor/k8s.io/client-go/informers:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
|
1
vendor/k8s.io/kubernetes/test/e2e_node/services/apiserver.go
generated
vendored
@ -52,6 +52,7 @@ func (a *APIServer) Start() error {
	}
	config.ServiceClusterIPRange = *ipnet
	config.AllowPrivileged = true
	config.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"}
	errCh := make(chan error)
	go func() {
		defer close(errCh)
1
vendor/k8s.io/kubernetes/test/e2e_node/services/etcd.go
generated
vendored
@ -77,6 +77,7 @@ func NewEtcd(dataDir string) *EtcdServer {
		MaxWALFiles:   maxWALFiles,
		TickMs:        tickMs,
		ElectionTicks: electionTicks,
		AuthToken:     "simple",
	}

	return &EtcdServer{
265
vendor/k8s.io/kubernetes/test/e2e_node/services/kubelet.go
generated
vendored
@ -25,11 +25,20 @@ import (
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/spf13/pflag"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
utilflag "k8s.io/apiserver/pkg/util/flag"
|
||||
"k8s.io/kubernetes/cmd/kubelet/app/options"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
|
||||
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/scheme"
|
||||
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1beta1"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e_node/builder"
|
||||
)
|
||||
@ -60,9 +69,15 @@ func (a *args) Set(value string) error {
|
||||
|
||||
// kubeletArgs is the override kubelet args specified by the test runner.
|
||||
var kubeletArgs args
|
||||
var kubeletContainerized bool
|
||||
var hyperkubeImage string
|
||||
var genKubeletConfigFile bool
|
||||
|
||||
func init() {
|
||||
flag.Var(&kubeletArgs, "kubelet-flags", "Kubelet flags passed to kubelet; this will override default kubelet flags in the test. Flags specified in multiple kubelet-flags will be concatenated.")
|
||||
flag.BoolVar(&kubeletContainerized, "kubelet-containerized", false, "Run kubelet in a docker container")
|
||||
flag.StringVar(&hyperkubeImage, "hyperkube-image", "", "Docker image with containerized kubelet")
|
||||
flag.BoolVar(&genKubeletConfigFile, "generate-kubelet-config-file", true, "The test runner will generate a Kubelet config file containing test defaults instead of passing default flags to the Kubelet.")
|
||||
}
|
||||
|
||||
// RunKubelet starts kubelet and waits for termination signal. Once receives the
|
||||
@ -93,10 +108,14 @@ const (
|
||||
// startKubelet starts the Kubelet in a separate process or returns an error
|
||||
// if the Kubelet fails to start.
|
||||
func (e *E2EServices) startKubelet() (*server, error) {
|
||||
if kubeletContainerized && hyperkubeImage == "" {
|
||||
return nil, fmt.Errorf("the --hyperkube-image option must be set")
|
||||
}
|
||||
|
||||
glog.Info("Starting kubelet")
|
||||
|
||||
// set feature gates so we can check which features are enabled and pass the appropriate flags
|
||||
utilfeature.DefaultFeatureGate.Set(framework.TestContext.FeatureGates)
|
||||
utilfeature.DefaultFeatureGate.SetFromMap(framework.TestContext.FeatureGates)
|
||||
|
||||
// Build kubeconfig
|
||||
kubeconfigPath, err := createKubeconfigCWD()
|
||||
@ -104,16 +123,71 @@ func (e *E2EServices) startKubelet() (*server, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Create pod manifest path
|
||||
manifestPath, err := createPodManifestDirectory()
|
||||
// KubeletConfiguration file path
|
||||
kubeletConfigPath, err := kubeletConfigCWDPath()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
e.rmDirs = append(e.rmDirs, manifestPath)
|
||||
|
||||
// Create pod directory
|
||||
podPath, err := createPodDirectory()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
e.rmDirs = append(e.rmDirs, podPath)
|
||||
err = createRootDirectory(KubeletRootDirectory)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// PLEASE NOTE: If you set new KubeletConfiguration values or stop setting values here,
|
||||
// you must also update the flag names in kubeletConfigFlags!
|
||||
kubeletConfigFlags := []string{}
|
||||
|
||||
// set up the default kubeletconfiguration
|
||||
kc, err := options.NewKubeletConfiguration()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
kc.CgroupRoot = "/"
|
||||
kubeletConfigFlags = append(kubeletConfigFlags, "cgroup-root")
|
||||
|
||||
kc.VolumeStatsAggPeriod = metav1.Duration{Duration: 10 * time.Second} // Aggregate volumes frequently so tests don't need to wait as long
|
||||
kubeletConfigFlags = append(kubeletConfigFlags, "volume-stats-agg-period")
|
||||
|
||||
kc.SerializeImagePulls = false
|
||||
kubeletConfigFlags = append(kubeletConfigFlags, "serialize-image-pulls")
|
||||
|
||||
kc.StaticPodPath = podPath
|
||||
kubeletConfigFlags = append(kubeletConfigFlags, "pod-manifest-path")
|
||||
|
||||
kc.FileCheckFrequency = metav1.Duration{Duration: 10 * time.Second} // Check file frequently so tests won't wait too long
|
||||
kubeletConfigFlags = append(kubeletConfigFlags, "file-check-frequency")
|
||||
|
||||
// Assign a fixed CIDR to the node because there is no node controller.
|
||||
// Note: this MUST be in sync with the IP in
|
||||
// - cluster/gce/config-test.sh and
|
||||
// - test/e2e_node/conformance/run_test.sh.
|
||||
kc.PodCIDR = "10.100.0.0/24"
|
||||
kubeletConfigFlags = append(kubeletConfigFlags, "pod-cidr")
|
||||
|
||||
kc.EvictionPressureTransitionPeriod = metav1.Duration{Duration: 30 * time.Second}
|
||||
kubeletConfigFlags = append(kubeletConfigFlags, "eviction-pressure-transition-period")
|
||||
|
||||
kc.EvictionHard = map[string]string{
|
||||
"memory.available": "250Mi",
|
||||
"nodefs.available": "10%",
|
||||
"nodefs.inodesFree": "5%",
|
||||
}
|
||||
kubeletConfigFlags = append(kubeletConfigFlags, "eviction-hard")
|
||||
|
||||
kc.EvictionMinimumReclaim = map[string]string{
|
||||
"nodefs.available": "5%",
|
||||
"nodefs.inodesFree": "5%",
|
||||
}
|
||||
kubeletConfigFlags = append(kubeletConfigFlags, "eviction-minimum-reclaim")
|
||||
|
||||
var killCommand, restartCommand *exec.Cmd
|
||||
var isSystemd bool
|
||||
// Apply default kubelet flags.
|
||||
@ -125,54 +199,77 @@ func (e *E2EServices) startKubelet() (*server, error) {
|
||||
// sense to test it that way
|
||||
isSystemd = true
|
||||
unitName := fmt.Sprintf("kubelet-%d.service", rand.Int31())
|
||||
cmdArgs = append(cmdArgs, systemdRun, "--unit="+unitName, "--slice=runtime.slice", "--remain-after-exit", builder.GetKubeletServerBin())
|
||||
if kubeletContainerized {
|
||||
cmdArgs = append(cmdArgs, systemdRun, "--unit="+unitName, "--slice=runtime.slice", "--remain-after-exit",
|
||||
"/usr/bin/docker", "run", "--name=kubelet",
|
||||
"--rm", "--privileged", "--net=host", "--pid=host",
|
||||
"-e HOST=/rootfs", "-e HOST_ETC=/host-etc",
|
||||
"-v", "/etc/localtime:/etc/localtime:ro",
|
||||
"-v", "/etc/machine-id:/etc/machine-id:ro",
|
||||
"-v", filepath.Dir(kubeconfigPath)+":/etc/kubernetes",
|
||||
"-v", "/:/rootfs:ro,rslave",
|
||||
"-v", "/run:/run",
|
||||
"-v", "/sys/fs/cgroup:/sys/fs/cgroup:rw",
|
||||
"-v", "/sys:/sys:rw",
|
||||
"-v", "/usr/bin/docker:/usr/bin/docker:ro",
|
||||
"-v", "/var/lib/cni:/var/lib/cni",
|
||||
"-v", "/var/lib/docker:/var/lib/docker",
|
||||
"-v", "/var/lib/kubelet:/var/lib/kubelet:rw,rslave",
|
||||
"-v", "/var/log:/var/log",
|
||||
"-v", podPath+":"+podPath+":rw",
|
||||
)
|
||||
|
||||
// if we will generate a kubelet config file, we need to mount that path into the container too
|
||||
if genKubeletConfigFile {
|
||||
cmdArgs = append(cmdArgs, "-v", filepath.Dir(kubeletConfigPath)+":"+filepath.Dir(kubeletConfigPath)+":ro")
|
||||
}
|
||||
|
||||
cmdArgs = append(cmdArgs, hyperkubeImage, "/hyperkube", "kubelet", "--containerized")
|
||||
kubeconfigPath = "/etc/kubernetes/kubeconfig"
|
||||
} else {
|
||||
cmdArgs = append(cmdArgs,
|
||||
systemdRun,
|
||||
"--unit="+unitName,
|
||||
"--slice=runtime.slice",
|
||||
"--remain-after-exit",
|
||||
builder.GetKubeletServerBin())
|
||||
}
|
||||
|
||||
killCommand = exec.Command("systemctl", "kill", unitName)
|
||||
restartCommand = exec.Command("systemctl", "restart", unitName)
|
||||
e.logs["kubelet.log"] = LogFileData{
|
||||
Name: "kubelet.log",
|
||||
JournalctlCommand: []string{"-u", unitName},
|
||||
}
|
||||
cmdArgs = append(cmdArgs,
|
||||
"--kubelet-cgroups=/kubelet.slice",
|
||||
"--cgroup-root=/",
|
||||
)
|
||||
|
||||
kc.KubeletCgroups = "/kubelet.slice"
|
||||
kubeletConfigFlags = append(kubeletConfigFlags, "kubelet-cgroups")
|
||||
} else {
|
||||
cmdArgs = append(cmdArgs, builder.GetKubeletServerBin())
|
||||
cmdArgs = append(cmdArgs,
|
||||
// TODO(random-liu): Get rid of this docker specific thing.
|
||||
"--runtime-cgroups=/docker-daemon",
|
||||
"--kubelet-cgroups=/kubelet",
|
||||
"--cgroup-root=/",
|
||||
"--system-cgroups=/system",
|
||||
)
|
||||
// TODO(random-liu): Get rid of this docker specific thing.
|
||||
cmdArgs = append(cmdArgs, "--runtime-cgroups=/docker-daemon")
|
||||
|
||||
kc.KubeletCgroups = "/kubelet"
|
||||
kubeletConfigFlags = append(kubeletConfigFlags, "kubelet-cgroups")
|
||||
|
||||
kc.SystemCgroups = "/system"
|
||||
kubeletConfigFlags = append(kubeletConfigFlags, "system-cgroups")
|
||||
}
|
||||
cmdArgs = append(cmdArgs,
|
||||
"--kubeconfig", kubeconfigPath,
|
||||
"--address", "0.0.0.0",
|
||||
"--port", kubeletPort,
|
||||
"--read-only-port", kubeletReadOnlyPort,
|
||||
"--root-dir", KubeletRootDirectory,
|
||||
"--volume-stats-agg-period", "10s", // Aggregate volumes frequently so tests don't need to wait as long
|
||||
"--allow-privileged", "true",
|
||||
"--serialize-image-pulls", "false",
|
||||
"--pod-manifest-path", manifestPath,
|
||||
"--file-check-frequency", "10s", // Check file frequently so tests won't wait too long
|
||||
"--docker-disable-shared-pid=false",
|
||||
// Assign a fixed CIDR to the node because there is no node controller.
|
||||
//
|
||||
// Note: this MUST be in sync with the IP in
|
||||
// - cluster/gce/config-test.sh and
|
||||
// - test/e2e_node/conformance/run_test.sh.
|
||||
"--pod-cidr", "10.100.0.0/24",
|
||||
"--eviction-pressure-transition-period", "30s",
|
||||
// Apply test framework feature gates by default. This could also be overridden
|
||||
// by kubelet-flags.
|
||||
"--feature-gates", framework.TestContext.FeatureGates,
|
||||
"--eviction-hard", "memory.available<250Mi,nodefs.available<10%,nodefs.inodesFree<5%", // The hard eviction thresholds.
|
||||
"--eviction-minimum-reclaim", "nodefs.available=5%,nodefs.inodesFree=5%", // The minimum reclaimed resources after eviction.
|
||||
"--v", LOG_VERBOSITY_LEVEL, "--logtostderr",
|
||||
"--allow-privileged", "true",
|
||||
)
|
||||
|
||||
// Apply test framework feature gates by default. This could also be overridden
|
||||
// by kubelet-flags.
|
||||
if len(framework.TestContext.FeatureGates) > 0 {
|
||||
cmdArgs = append(cmdArgs, "--feature-gates", utilflag.NewMapStringBool(&framework.TestContext.FeatureGates).String())
|
||||
kc.FeatureGates = framework.TestContext.FeatureGates
|
||||
}
|
||||
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) {
|
||||
// Enable dynamic config if the feature gate is enabled
|
||||
dynamicConfigDir, err := getDynamicConfigDir()
|
||||
@ -203,6 +300,30 @@ func (e *E2EServices) startKubelet() (*server, error) {
|
||||
cmdArgs = append(cmdArgs, "--hostname-override", framework.TestContext.NodeName)
|
||||
}
|
||||
|
||||
if framework.TestContext.ContainerRuntime != "" {
|
||||
cmdArgs = append(cmdArgs, "--container-runtime", framework.TestContext.ContainerRuntime)
|
||||
}
|
||||
|
||||
if framework.TestContext.ContainerRuntimeEndpoint != "" {
|
||||
cmdArgs = append(cmdArgs, "--container-runtime-endpoint", framework.TestContext.ContainerRuntimeEndpoint)
|
||||
}
|
||||
|
||||
if framework.TestContext.ImageServiceEndpoint != "" {
|
||||
cmdArgs = append(cmdArgs, "--image-service-endpoint", framework.TestContext.ImageServiceEndpoint)
|
||||
}
|
||||
|
||||
// Write config file or flags, depending on whether --generate-kubelet-config-file was provided
|
||||
if genKubeletConfigFile {
|
||||
if err := writeKubeletConfigFile(kc, kubeletConfigPath); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// add the flag to load config from a file
|
||||
cmdArgs = append(cmdArgs, "--config", kubeletConfigPath)
|
||||
} else {
|
||||
// generate command line flags from the default config, since --generate-kubelet-config-file was not provided
|
||||
addKubeletConfigFlags(&cmdArgs, kc, kubeletConfigFlags)
|
||||
}
|
||||
|
||||
// Override the default kubelet flags.
|
||||
cmdArgs = append(cmdArgs, kubeletArgs...)
|
||||
|
||||
@ -224,15 +345,70 @@ func (e *E2EServices) startKubelet() (*server, error) {
|
||||
return server, server.start()
|
||||
}
|
||||
|
||||
// createPodManifestDirectory creates pod manifest directory.
|
||||
func createPodManifestDirectory() (string, error) {
|
||||
// addKubeletConfigFlags adds the flags we care about from the provided kubelet configuration object
|
||||
func addKubeletConfigFlags(cmdArgs *[]string, kc *kubeletconfig.KubeletConfiguration, flags []string) {
|
||||
fs := pflag.NewFlagSet("kubelet", pflag.ExitOnError)
|
||||
options.AddKubeletConfigFlags(fs, kc)
|
||||
for _, name := range flags {
|
||||
*cmdArgs = append(*cmdArgs, "--"+name, fs.Lookup(name).Value.String())
|
||||
}
|
||||
}
|
||||
|
||||
// writeKubeletConfigFile writes the kubelet config file based on the args and returns the filename
|
||||
func writeKubeletConfigFile(internal *kubeletconfig.KubeletConfiguration, path string) error {
|
||||
// extract the KubeletConfiguration and convert to versioned
|
||||
versioned := &v1beta1.KubeletConfiguration{}
|
||||
scheme, _, err := scheme.NewSchemeAndCodecs()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := scheme.Convert(internal, versioned, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
// encode
|
||||
encoder, err := newKubeletConfigJSONEncoder()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
data, err := runtime.Encode(encoder, versioned)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// create the directory, if it does not exist
|
||||
dir := filepath.Dir(path)
|
||||
if err := os.MkdirAll(dir, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
// write the file
|
||||
if err := ioutil.WriteFile(path, data, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func newKubeletConfigJSONEncoder() (runtime.Encoder, error) {
|
||||
_, kubeletCodecs, err := scheme.NewSchemeAndCodecs()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
mediaType := "application/json"
|
||||
info, ok := runtime.SerializerInfoForMediaType(kubeletCodecs.SupportedMediaTypes(), mediaType)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unsupported media type %q", mediaType)
|
||||
}
|
||||
return kubeletCodecs.EncoderForVersion(info.Serializer, v1beta1.SchemeGroupVersion), nil
|
||||
}
|
||||
|
||||
// createPodDirectory creates pod directory.
|
||||
func createPodDirectory() (string, error) {
|
||||
cwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get current working directory: %v", err)
|
||||
}
|
||||
path, err := ioutil.TempDir(cwd, "pod-manifest")
|
||||
path, err := ioutil.TempDir(cwd, "static-pods")
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to create static pod manifest directory: %v", err)
|
||||
return "", fmt.Errorf("failed to create static pod directory: %v", err)
|
||||
}
|
||||
return path, nil
|
||||
}
|
||||
@ -280,6 +456,15 @@ func kubeconfigCWDPath() (string, error) {
|
||||
return filepath.Join(cwd, "kubeconfig"), nil
|
||||
}
|
||||
|
||||
func kubeletConfigCWDPath() (string, error) {
|
||||
cwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get current working directory: %v", err)
|
||||
}
|
||||
// DO NOT name this file "kubelet" - you will overwrite the kubelet binary and be very confused :)
|
||||
return filepath.Join(cwd, "kubelet-config"), nil
|
||||
}
|
||||
|
||||
// like createKubeconfig, but creates kubeconfig at current-working-directory/kubeconfig
|
||||
// returns a fully-qualified path to the kubeconfig file
|
||||
func createKubeconfigCWD() (string, error) {
|
||||
|
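The two paths above (write a KubeletConfiguration file versus emit individual flags) both start from the same defaulted object; addKubeletConfigFlags then reads the chosen values back out of a pflag.FlagSet. A self-contained sketch of that lookup pattern, using made-up flag names rather than the kubelet's real flag set:

package main

import (
	"fmt"
	"time"

	"github.com/spf13/pflag"
)

// flagsToArgs turns a chosen subset of registered flags into "--name value"
// arguments, mirroring how only explicitly overridden fields are emitted.
func flagsToArgs(fs *pflag.FlagSet, names []string) []string {
	args := []string{}
	for _, name := range names {
		if f := fs.Lookup(name); f != nil {
			args = append(args, "--"+name, f.Value.String())
		}
	}
	return args
}

func main() {
	fs := pflag.NewFlagSet("kubelet", pflag.ExitOnError)
	fs.String("cgroup-root", "/", "root cgroup to use for pods")
	fs.Bool("serialize-image-pulls", false, "pull images one at a time")
	fs.Duration("file-check-frequency", 10*time.Second, "static pod re-check interval")

	fmt.Println(flagsToArgs(fs, []string{"cgroup-root", "serialize-image-pulls", "file-check-frequency"}))
	// Prints: [--cgroup-root / --serialize-image-pulls false --file-check-frequency 10s]
}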
45
vendor/k8s.io/kubernetes/test/e2e_node/services/server.go
generated
vendored
@ -94,51 +94,6 @@ func (s *server) String() string {
|
||||
commandToString(s.startCommand), commandToString(s.killCommand), commandToString(s.restartCommand), s.healthCheckUrls, s.outFilename)
|
||||
}
|
||||
|
||||
// readinessCheck checks whether services are ready via the supplied health
|
||||
// check URLs. Once there is an error in errCh, the function will stop waiting
|
||||
// and return the error.
|
||||
// TODO(random-liu): Move this to util
|
||||
func readinessCheck(name string, urls []string, errCh <-chan error) error {
|
||||
glog.Infof("Running readiness check for service %q", name)
|
||||
endTime := time.Now().Add(*serverStartTimeout)
|
||||
blockCh := make(chan error)
|
||||
defer close(blockCh)
|
||||
for endTime.After(time.Now()) {
|
||||
select {
|
||||
// We *always* want to run the health check if there is no error on the channel.
|
||||
// With systemd, reads from errCh report nil because cmd.Run() waits
|
||||
// on systemd-run, rather than the service process. systemd-run quickly
|
||||
// exits with status 0, causing the channel to be closed with no error. In
|
||||
// this case, you want to wait for the health check to complete, rather
|
||||
// than returning from readinessCheck as soon as the channel is closed.
|
||||
case err, ok := <-errCh:
|
||||
if ok { // The channel is not closed, this is a real error
|
||||
if err != nil { // If there is an error, return it
|
||||
return err
|
||||
}
|
||||
// If not, keep checking readiness.
|
||||
} else { // The channel is closed, this is only a zero value.
|
||||
// Replace the errCh with blockCh to avoid busy loop,
|
||||
// and keep checking readiness.
|
||||
errCh = blockCh
|
||||
}
|
||||
case <-time.After(time.Second):
|
||||
ready := true
|
||||
for _, url := range urls {
|
||||
resp, err := http.Head(url)
|
||||
if err != nil || resp.StatusCode != http.StatusOK {
|
||||
ready = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if ready {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("e2e service %q readiness check timeout %v", name, *serverStartTimeout)
|
||||
}
|
||||
|
||||
// start starts the server by running its commands, monitors it with a health
|
||||
// check, and ensures that it is restarted if applicable.
|
||||
//
|
||||
|
2
vendor/k8s.io/kubernetes/test/e2e_node/services/services.go
generated
vendored
@ -108,7 +108,7 @@ func (e *E2EServices) Stop() {
func RunE2EServices() {
	// Populate global DefaultFeatureGate with value from TestContext.FeatureGates.
	// This way, statically-linked components see the same feature gate config as the test context.
	utilfeature.DefaultFeatureGate.Set(framework.TestContext.FeatureGates)
	utilfeature.DefaultFeatureGate.SetFromMap(framework.TestContext.FeatureGates)
	e := newE2EServices()
	if err := e.run(); err != nil {
		glog.Fatalf("Failed to run e2e services: %v", err)
48
vendor/k8s.io/kubernetes/test/e2e_node/services/util.go
generated
vendored
@ -17,9 +17,13 @@ limitations under the License.
|
||||
package services
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/golang/glog"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
// terminationSignals are signals that cause the program to exit in the
|
||||
@ -32,3 +36,47 @@ func waitForTerminationSignal() {
|
||||
signal.Notify(sig, terminationSignals...)
|
||||
<-sig
|
||||
}
|
||||
|
||||
// readinessCheck checks whether services are ready via the supplied health
|
||||
// check URLs. Once there is an error in errCh, the function will stop waiting
|
||||
// and return the error.
|
||||
func readinessCheck(name string, urls []string, errCh <-chan error) error {
|
||||
glog.Infof("Running readiness check for service %q", name)
|
||||
endTime := time.Now().Add(*serverStartTimeout)
|
||||
blockCh := make(chan error)
|
||||
defer close(blockCh)
|
||||
for endTime.After(time.Now()) {
|
||||
select {
|
||||
// We *always* want to run the health check if there is no error on the channel.
|
||||
// With systemd, reads from errCh report nil because cmd.Run() waits
|
||||
// on systemd-run, rather than the service process. systemd-run quickly
|
||||
// exits with status 0, causing the channel to be closed with no error. In
|
||||
// this case, you want to wait for the health check to complete, rather
|
||||
// than returning from readinessCheck as soon as the channel is closed.
|
||||
case err, ok := <-errCh:
|
||||
if ok { // The channel is not closed, this is a real error
|
||||
if err != nil { // If there is an error, return it
|
||||
return err
|
||||
}
|
||||
// If not, keep checking readiness.
|
||||
} else { // The channel is closed, this is only a zero value.
|
||||
// Replace the errCh with blockCh to avoid busy loop,
|
||||
// and keep checking readiness.
|
||||
errCh = blockCh
|
||||
}
|
||||
case <-time.After(time.Second):
|
||||
ready := true
|
||||
for _, url := range urls {
|
||||
resp, err := http.Head(url)
|
||||
if err != nil || resp.StatusCode != http.StatusOK {
|
||||
ready = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if ready {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("e2e service %q readiness check timeout %v", name, *serverStartTimeout)
|
||||
}
|
||||
|
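For reference, the readinessCheck helper added to this file HEADs each health-check URL once per second until every URL answers 200 or the start timeout expires. A stripped-down, standalone illustration of that polling loop against a throwaway httptest server (the timeout and handler are arbitrary):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"time"
)

// waitReady polls every URL until all return 200 or the deadline passes.
func waitReady(urls []string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		ready := true
		for _, url := range urls {
			resp, err := http.Head(url)
			if err != nil || resp.StatusCode != http.StatusOK {
				ready = false
				break
			}
		}
		if ready {
			return nil
		}
		time.Sleep(time.Second)
	}
	return fmt.Errorf("readiness check timed out after %v", timeout)
}

func main() {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))
	defer srv.Close()
	fmt.Println(waitReady([]string{srv.URL + "/healthz"}, 5*time.Second))
}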
3
vendor/k8s.io/kubernetes/test/e2e_node/simple_mount.go
generated
vendored
@ -20,6 +20,7 @@ import (
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/test/e2e/framework"
	imageutils "k8s.io/kubernetes/test/utils/image"

	. "github.com/onsi/ginkgo"
)
@ -42,7 +43,7 @@ var _ = framework.KubeDescribe("SimpleMount", func() {
			Containers: []v1.Container{
				{
					Name:  "simple-mount-container",
					Image: framework.GetPauseImageNameForHostArch(),
					Image: imageutils.GetPauseImageNameForHostArch(),
					VolumeMounts: []v1.VolumeMount{
						{
							Name:      "simply-mounted-volume",
43
vendor/k8s.io/kubernetes/test/e2e_node/summary_test.go
generated
vendored
@ -75,7 +75,11 @@ var _ = framework.KubeDescribe("Summary API", func() {
|
||||
maxStartAge = time.Hour * 24 * 365 // 1 year
|
||||
maxStatsAge = time.Minute
|
||||
)
|
||||
fsCapacityBounds := bounded(100*framework.Mb, 100*framework.Gb)
|
||||
// fetch node so we can know proper node memory bounds for unconstrained cgroups
|
||||
node := getLocalNode(f)
|
||||
memoryCapacity := node.Status.Capacity["memory"]
|
||||
memoryLimit := memoryCapacity.Value()
|
||||
fsCapacityBounds := bounded(100*framework.Mb, 10*framework.Tb)
|
||||
// Expectations for system containers.
|
||||
sysContExpectations := func() types.GomegaMatcher {
|
||||
return gstruct.MatchAllFields(gstruct.Fields{
|
||||
@ -90,10 +94,10 @@ var _ = framework.KubeDescribe("Summary API", func() {
|
||||
"Time": recent(maxStatsAge),
|
||||
// We don't limit system container memory.
|
||||
"AvailableBytes": BeNil(),
|
||||
"UsageBytes": bounded(1*framework.Mb, 10*framework.Gb),
|
||||
"WorkingSetBytes": bounded(1*framework.Mb, 10*framework.Gb),
|
||||
"UsageBytes": bounded(1*framework.Mb, memoryLimit),
|
||||
"WorkingSetBytes": bounded(1*framework.Mb, memoryLimit),
|
||||
// this now returns /sys/fs/cgroup/memory.stat total_rss
|
||||
"RSSBytes": bounded(1*framework.Mb, 1*framework.Gb),
|
||||
"RSSBytes": bounded(1*framework.Mb, memoryLimit),
|
||||
"PageFaults": bounded(1000, 1E9),
|
||||
"MajorPageFaults": bounded(0, 100000),
|
||||
}),
|
||||
@ -103,9 +107,21 @@ var _ = framework.KubeDescribe("Summary API", func() {
|
||||
"UserDefinedMetrics": BeEmpty(),
|
||||
})
|
||||
}
|
||||
podsContExpectations := sysContExpectations().(*gstruct.FieldsMatcher)
|
||||
podsContExpectations.Fields["Memory"] = ptrMatchAllFields(gstruct.Fields{
|
||||
"Time": recent(maxStatsAge),
|
||||
// Pods are limited by Node Allocatable
|
||||
"AvailableBytes": bounded(1*framework.Kb, memoryLimit),
|
||||
"UsageBytes": bounded(10*framework.Kb, 20*framework.Mb),
|
||||
"WorkingSetBytes": bounded(10*framework.Kb, 20*framework.Mb),
|
||||
"RSSBytes": bounded(1*framework.Kb, 20*framework.Mb),
|
||||
"PageFaults": bounded(0, 1000000),
|
||||
"MajorPageFaults": bounded(0, 10),
|
||||
})
|
||||
systemContainers := gstruct.Elements{
|
||||
"kubelet": sysContExpectations(),
|
||||
"runtime": sysContExpectations(),
|
||||
"pods": podsContExpectations,
|
||||
}
|
||||
// The Kubelet only manages the 'misc' system container if the host is not running systemd.
|
||||
if !systemdutil.IsRunningSystemd() {
|
||||
@ -116,9 +132,9 @@ var _ = framework.KubeDescribe("Summary API", func() {
|
||||
"Time": recent(maxStatsAge),
|
||||
// We don't limit system container memory.
|
||||
"AvailableBytes": BeNil(),
|
||||
"UsageBytes": bounded(100*framework.Kb, 10*framework.Gb),
|
||||
"WorkingSetBytes": bounded(100*framework.Kb, 10*framework.Gb),
|
||||
"RSSBytes": bounded(100*framework.Kb, 1*framework.Gb),
|
||||
"UsageBytes": bounded(100*framework.Kb, memoryLimit),
|
||||
"WorkingSetBytes": bounded(100*framework.Kb, memoryLimit),
|
||||
"RSSBytes": bounded(100*framework.Kb, memoryLimit),
|
||||
"PageFaults": bounded(1000, 1E9),
|
||||
"MajorPageFaults": bounded(0, 100000),
|
||||
})
|
||||
@ -231,11 +247,11 @@ var _ = framework.KubeDescribe("Summary API", func() {
|
||||
}),
|
||||
"Memory": ptrMatchAllFields(gstruct.Fields{
|
||||
"Time": recent(maxStatsAge),
|
||||
"AvailableBytes": bounded(100*framework.Mb, 100*framework.Gb),
|
||||
"UsageBytes": bounded(10*framework.Mb, 10*framework.Gb),
|
||||
"WorkingSetBytes": bounded(10*framework.Mb, 10*framework.Gb),
|
||||
"AvailableBytes": bounded(100*framework.Mb, memoryLimit),
|
||||
"UsageBytes": bounded(10*framework.Mb, memoryLimit),
|
||||
"WorkingSetBytes": bounded(10*framework.Mb, memoryLimit),
|
||||
// this now returns /sys/fs/cgroup/memory.stat total_rss
|
||||
"RSSBytes": bounded(1*framework.Kb, 1*framework.Gb),
|
||||
"RSSBytes": bounded(1*framework.Kb, memoryLimit),
|
||||
"PageFaults": bounded(1000, 1E9),
|
||||
"MajorPageFaults": bounded(0, 100000),
|
||||
}),
|
||||
@ -273,6 +289,11 @@ var _ = framework.KubeDescribe("Summary API", func() {
|
||||
"InodesUsed": bounded(0, 1E8),
|
||||
}),
|
||||
}),
|
||||
"Rlimit": ptrMatchAllFields(gstruct.Fields{
|
||||
"Time": recent(maxStatsAge),
|
||||
"MaxPID": bounded(0, 1E8),
|
||||
"NumOfRunningProcesses": bounded(0, 1E8),
|
||||
}),
|
||||
}),
|
||||
// Ignore extra pods since the tests run in parallel.
|
||||
"Pods": gstruct.MatchElements(summaryObjectID, gstruct.IgnoreExtras, gstruct.Elements{
|
||||
|
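The Summary API expectations above replace fixed upper bounds with the node's reported memory capacity. A small sketch of the gstruct matching style involved, assuming bounded() simply chains two numeric comparisons; the struct and limit below are invented for the example:

package main

import (
	"fmt"

	"github.com/onsi/gomega"
	"github.com/onsi/gomega/gstruct"
	"github.com/onsi/gomega/types"
)

// bounded matches a number in the closed interval [lower, upper].
func bounded(lower, upper interface{}) types.GomegaMatcher {
	return gomega.And(
		gomega.BeNumerically(">=", lower),
		gomega.BeNumerically("<=", upper),
	)
}

type memoryStats struct {
	UsageBytes      uint64
	WorkingSetBytes uint64
}

func main() {
	memoryLimit := uint64(8 * 1024 * 1024 * 1024) // pretend node capacity: 8Gi

	matcher := gstruct.MatchAllFields(gstruct.Fields{
		"UsageBytes":      bounded(1, memoryLimit),
		"WorkingSetBytes": bounded(1, memoryLimit),
	})

	ok, err := matcher.Match(memoryStats{UsageBytes: 1024, WorkingSetBytes: 2048})
	fmt.Println(ok, err) // true <nil>
}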
36
vendor/k8s.io/kubernetes/test/e2e_node/system/BUILD
generated
vendored
@ -17,10 +17,39 @@ go_library(
|
||||
"package_validator.go",
|
||||
"report.go",
|
||||
"types.go",
|
||||
"types_unix.go",
|
||||
"validators.go",
|
||||
] + select({
|
||||
"@io_bazel_rules_go//go/platform:windows_amd64": [
|
||||
"@io_bazel_rules_go//go/platform:android": [
|
||||
"types_unix.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:darwin": [
|
||||
"types_unix.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:dragonfly": [
|
||||
"types_unix.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:freebsd": [
|
||||
"types_unix.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:linux": [
|
||||
"types_unix.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:nacl": [
|
||||
"types_unix.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:netbsd": [
|
||||
"types_unix.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:openbsd": [
|
||||
"types_unix.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:plan9": [
|
||||
"types_unix.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:solaris": [
|
||||
"types_unix.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:windows": [
|
||||
"types_windows.go",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
@ -45,8 +74,7 @@ go_test(
|
||||
"os_validator_test.go",
|
||||
"package_validator_test.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/test/e2e_node/system",
|
||||
library = ":go_default_library",
|
||||
embed = [":go_default_library"],
|
||||
tags = ["e2e"],
|
||||
deps = [
|
||||
"//vendor/github.com/docker/docker/api/types:go_default_library",
|
||||
|
3
vendor/k8s.io/kubernetes/test/e2e_node/system/specs/gke.yaml
generated
vendored
@ -220,7 +220,6 @@ packageSpecs:
    versionRange: '>=4.2.0'
  - name: less
    versionRange: '>=481'
  - name: linux-headers-${KERNEL_RELEASE}
  - name: netcat-openbsd
    versionRange: '>=1.10'
  - name: python
@ -235,8 +234,6 @@ packageSpecs:
    versionRange: '>=1.28'
  - name: util-linux
    versionRange: '>=2.27.1'
  - name: vim
    versionRange: '>=7.4.712'
  - name: wget
    versionRange: '>=1.18'
  - name: gce-compute-image-packages
66
vendor/k8s.io/kubernetes/test/e2e_node/util.go
generated
vendored
@ -23,23 +23,26 @@ import (
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os/exec"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
apiequality "k8s.io/apimachinery/pkg/api/equality"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri"
|
||||
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
|
||||
kubeletscheme "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/scheme"
|
||||
kubeletconfigv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1"
|
||||
kubeletconfigv1beta1 "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1beta1"
|
||||
stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
|
||||
kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
|
||||
"k8s.io/kubernetes/pkg/kubelet/remote"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/framework/metrics"
|
||||
|
||||
@ -107,6 +110,10 @@ func tempSetCurrentKubeletConfig(f *framework.Framework, updateFunction func(ini
|
||||
framework.ExpectNoError(err)
|
||||
newCfg := oldCfg.DeepCopy()
|
||||
updateFunction(newCfg)
|
||||
if apiequality.Semantic.DeepEqual(*newCfg, *oldCfg) {
|
||||
return
|
||||
}
|
||||
|
||||
framework.ExpectNoError(setKubeletConfiguration(f, newCfg))
|
||||
})
|
||||
AfterEach(func() {
|
||||
@ -178,7 +185,7 @@ func setKubeletConfiguration(f *framework.Framework, kubeCfg *kubeletconfig.Kube
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed trying to get current Kubelet config, will retry, error: %v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(*kubeCfg, *newKubeCfg) {
|
||||
if !apiequality.Semantic.DeepEqual(*kubeCfg, *newKubeCfg) {
|
||||
return fmt.Errorf("still waiting for new configuration to take effect, will continue to watch /configz")
|
||||
}
|
||||
glog.Infof("new configuration has taken effect")
|
||||
@ -212,11 +219,11 @@ func setNodeConfigSource(f *framework.Framework, source *apiv1.NodeConfigSource)
|
||||
return nil
|
||||
}
|
||||
|
||||
// getConfigOK returns the first NodeCondition in `cs` with Type == apiv1.NodeConfigOK,
|
||||
// getKubeletConfigOkCondition returns the first NodeCondition in `cs` with Type == apiv1.NodeKubeletConfigOk,
|
||||
// or if no such condition exists, returns nil.
|
||||
func getConfigOKCondition(cs []apiv1.NodeCondition) *apiv1.NodeCondition {
|
||||
func getKubeletConfigOkCondition(cs []apiv1.NodeCondition) *apiv1.NodeCondition {
|
||||
for i := range cs {
|
||||
if cs[i].Type == apiv1.NodeConfigOK {
|
||||
if cs[i].Type == apiv1.NodeKubeletConfigOk {
|
||||
return &cs[i]
|
||||
}
|
||||
}
|
||||
@ -225,7 +232,7 @@ func getConfigOKCondition(cs []apiv1.NodeCondition) *apiv1.NodeCondition {
|
||||
|
||||
// Causes the test to fail, or returns a status 200 response from the /configz endpoint
|
||||
func pollConfigz(timeout time.Duration, pollInterval time.Duration) *http.Response {
|
||||
endpoint := fmt.Sprintf("http://127.0.0.1:8080/api/v1/proxy/nodes/%s/configz", framework.TestContext.NodeName)
|
||||
endpoint := fmt.Sprintf("http://127.0.0.1:8080/api/v1/nodes/%s/proxy/configz", framework.TestContext.NodeName)
|
||||
client := &http.Client{}
|
||||
req, err := http.NewRequest("GET", endpoint, nil)
|
||||
framework.ExpectNoError(err)
|
||||
@ -250,9 +257,9 @@ func pollConfigz(timeout time.Duration, pollInterval time.Duration) *http.Respon
|
||||
// Decodes the http response from /configz and returns a kubeletconfig.KubeletConfiguration (internal type).
|
||||
func decodeConfigz(resp *http.Response) (*kubeletconfig.KubeletConfiguration, error) {
|
||||
// This hack because /configz reports the following structure:
|
||||
// {"kubeletconfig": {the JSON representation of kubeletconfigv1alpha1.KubeletConfiguration}}
|
||||
// {"kubeletconfig": {the JSON representation of kubeletconfigv1beta1.KubeletConfiguration}}
|
||||
type configzWrapper struct {
|
||||
ComponentConfig kubeletconfigv1alpha1.KubeletConfiguration `json:"kubeletconfig"`
|
||||
ComponentConfig kubeletconfigv1beta1.KubeletConfiguration `json:"kubeletconfig"`
|
||||
}
|
||||
|
||||
configz := configzWrapper{}
|
||||
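decodeConfigz above depends on /configz nesting the kubelet configuration under a "kubeletconfig" key. A short sketch of unwrapping that shape with plain encoding/json, keeping a single illustrative field instead of the full KubeletConfiguration type:

package main

import (
	"encoding/json"
	"fmt"
)

// kubeletConfigSubset keeps only one field for the example.
type kubeletConfigSubset struct {
	StaticPodPath string `json:"staticPodPath"`
}

// configzWrapper mirrors the {"kubeletconfig": {...}} shape noted in the diff.
type configzWrapper struct {
	ComponentConfig kubeletConfigSubset `json:"kubeletconfig"`
}

func main() {
	payload := []byte(`{"kubeletconfig": {"staticPodPath": "/tmp/static-pods"}}`)
	var wrapped configzWrapper
	if err := json.Unmarshal(payload, &wrapped); err != nil {
		panic(err)
	}
	fmt.Println(wrapped.ComponentConfig.StaticPodPath) // /tmp/static-pods
}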
@ -291,7 +298,7 @@ func newKubeletConfigMap(name string, internalKC *kubeletconfig.KubeletConfigura
|
||||
scheme, _, err := kubeletscheme.NewSchemeAndCodecs()
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
versioned := &kubeletconfigv1alpha1.KubeletConfiguration{}
|
||||
versioned := &kubeletconfigv1beta1.KubeletConfiguration{}
|
||||
err = scheme.Convert(internalKC, versioned, nil)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
@ -302,7 +309,7 @@ func newKubeletConfigMap(name string, internalKC *kubeletconfig.KubeletConfigura
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
cmap := &apiv1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{GenerateName: name},
|
||||
ObjectMeta: metav1.ObjectMeta{GenerateName: name + "-"},
|
||||
Data: map[string]string{
|
||||
"kubelet": string(data),
|
||||
},
|
||||
@ -353,7 +360,7 @@ func newKubeletConfigJSONEncoder() (runtime.Encoder, error) {
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unsupported media type %q", mediaType)
|
||||
}
|
||||
return kubeletCodecs.EncoderForVersion(info.Serializer, kubeletconfigv1alpha1.SchemeGroupVersion), nil
|
||||
return kubeletCodecs.EncoderForVersion(info.Serializer, kubeletconfigv1beta1.SchemeGroupVersion), nil
|
||||
}
|
||||
|
||||
// runCommand runs the cmd and returns the combined stdout and stderr, or an
|
||||
@ -365,3 +372,38 @@ func runCommand(cmd ...string) (string, error) {
|
||||
}
|
||||
return string(output), nil
|
||||
}
|
||||
|
||||
// getCRIClient connects CRI and returns CRI runtime service clients and image service client.
|
||||
func getCRIClient() (internalapi.RuntimeService, internalapi.ImageManagerService, error) {
|
||||
// connection timeout for CRI service connection
|
||||
const connectionTimeout = 2 * time.Minute
|
||||
runtimeEndpoint := framework.TestContext.ContainerRuntimeEndpoint
|
||||
r, err := remote.NewRemoteRuntimeService(runtimeEndpoint, connectionTimeout)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
imageManagerEndpoint := runtimeEndpoint
|
||||
if framework.TestContext.ImageServiceEndpoint != "" {
|
||||
// ImageServiceEndpoint is the same as ContainerRuntimeEndpoint if not
|
||||
// explicitly specified
|
||||
imageManagerEndpoint = framework.TestContext.ImageServiceEndpoint
|
||||
}
|
||||
i, err := remote.NewRemoteImageService(imageManagerEndpoint, connectionTimeout)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return r, i, nil
|
||||
}
|
||||
|
||||
// TODO: Find a uniform way to deal with systemctl/initctl/service operations. #34494
|
||||
func restartKubelet() {
|
||||
stdout, err := exec.Command("sudo", "systemctl", "list-units", "kubelet*", "--state=running").CombinedOutput()
|
||||
framework.ExpectNoError(err)
|
||||
regex := regexp.MustCompile("(kubelet-[0-9]+)")
|
||||
matches := regex.FindStringSubmatch(string(stdout))
|
||||
Expect(len(matches)).NotTo(BeZero())
|
||||
kube := matches[0]
|
||||
framework.Logf("Get running kubelet with systemctl: %v, %v", string(stdout), kube)
|
||||
stdout, err = exec.Command("sudo", "systemctl", "restart", kube).CombinedOutput()
|
||||
framework.ExpectNoError(err, "Failed to restart kubelet with systemctl: %v, %v", err, stdout)
|
||||
}
|
||||
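restartKubelet above locates the transient systemd unit by matching a kubelet-<N> name in the systemctl output. A tiny standalone illustration of that regexp step (the sample output line is made up):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Stand-in for `systemctl list-units kubelet* --state=running` output.
	out := "kubelet-1234567.service loaded active running /usr/bin/systemd-run kubelet"
	re := regexp.MustCompile("(kubelet-[0-9]+)")
	matches := re.FindStringSubmatch(out)
	if len(matches) == 0 {
		panic("no running kubelet unit found")
	}
	fmt.Println(matches[0]) // kubelet-1234567
}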
|
Reference in New Issue
Block a user