Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-13 10:33:35 +00:00)
vendor update for CSI 0.3.0
vendor/k8s.io/kubernetes/test/e2e_node/BUILD | 13 (generated) (vendored)
@@ -11,9 +11,7 @@ go_library(
"docker_util.go",
"framework.go",
"gpu_device_plugin.go",
"gpus.go",
"image_list.go",
"simple_mount.go",
"util.go",
] + select({
"@io_bazel_rules_go//go/platform:linux": [
@@ -32,10 +30,11 @@ go_library(
"//pkg/kubelet/apis/cri/runtime/v1alpha2:go_default_library",
"//pkg/kubelet/apis/deviceplugin/v1beta1:go_default_library",
"//pkg/kubelet/apis/kubeletconfig:go_default_library",
"//pkg/kubelet/apis/kubeletconfig/scheme:go_default_library",
"//pkg/kubelet/apis/kubeletconfig/v1beta1:go_default_library",
"//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
"//pkg/kubelet/cm:go_default_library",
"//pkg/kubelet/cm/devicemanager:go_default_library",
"//pkg/kubelet/kubeletconfig/util/codec:go_default_library",
"//pkg/kubelet/metrics:go_default_library",
"//pkg/kubelet/remote:go_default_library",
"//test/e2e/common:go_default_library",
@@ -55,7 +54,6 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
@@ -130,6 +128,7 @@ go_test(
"//pkg/kubelet/cm/cpumanager:go_default_library",
"//pkg/kubelet/cm/cpuset:go_default_library",
"//pkg/kubelet/container:go_default_library",
"//pkg/kubelet/eviction:go_default_library",
"//pkg/kubelet/images:go_default_library",
"//pkg/kubelet/kubeletconfig:go_default_library",
"//pkg/kubelet/kubeletconfig/status:go_default_library",
@@ -138,6 +137,7 @@ go_test(
"//pkg/kubelet/types:go_default_library",
"//pkg/security/apparmor:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e_node/services:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/blang/semver:go_default_library",
@@ -148,10 +148,13 @@ go_test(
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/github.com/onsi/gomega/gstruct:go_default_library",
"//vendor/github.com/onsi/gomega/types:go_default_library",
"//vendor/github.com/prometheus/common/model:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
@@ -159,11 +162,11 @@ go_test(
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
] + select({
"@io_bazel_rules_go//go/platform:linux": [
"//test/e2e/common:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e_node/system:go_default_library",
"//test/utils:go_default_library",
"//vendor/github.com/kardianos/osext:go_default_library",
vendor/k8s.io/kubernetes/test/e2e_node/apparmor_test.go | 2 (generated) (vendored)
@@ -40,7 +40,7 @@ import (
. "github.com/onsi/gomega"
)

var _ = framework.KubeDescribe("AppArmor [Feature:AppArmor]", func() {
var _ = framework.KubeDescribe("AppArmor [Feature:AppArmor][NodeFeature:AppArmor]", func() {
if isAppArmorEnabled() {
BeforeEach(func() {
By("Loading AppArmor profiles for testing")
vendor/k8s.io/kubernetes/test/e2e_node/builder/BUILD | 5 (generated) (vendored)
@@ -9,7 +9,10 @@ go_library(
name = "go_default_library",
srcs = ["build.go"],
importpath = "k8s.io/kubernetes/test/e2e_node/builder",
deps = ["//vendor/github.com/golang/glog:go_default_library"],
deps = [
"//test/utils:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
],
)

filegroup(
vendor/k8s.io/kubernetes/test/e2e_node/builder/build.go | 54 (generated) (vendored)
@@ -22,10 +22,10 @@ import (
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"

"github.com/golang/glog"
"k8s.io/kubernetes/test/utils"
)

var k8sBinDir = flag.String("k8s-bin-dir", "", "Directory containing k8s kubelet binaries.")
@@ -39,7 +39,7 @@ var buildTargets = []string{

func BuildGo() error {
glog.Infof("Building k8s binaries...")
k8sRoot, err := GetK8sRootDir()
k8sRoot, err := utils.GetK8sRootDir()
if err != nil {
return fmt.Errorf("failed to locate kubernetes root directory %v.", err)
}
@@ -75,7 +75,7 @@ func getK8sBin(bin string) (string, error) {
return filepath.Join(path, bin), nil
}

buildOutputDir, err := GetK8sBuildOutputDir()
buildOutputDir, err := utils.GetK8sBuildOutputDir()
if err != nil {
return "", err
}
@@ -84,53 +84,7 @@ func getK8sBin(bin string) (string, error) {
}

// Give up with error
return "", fmt.Errorf("Unable to locate %s. Can be defined using --k8s-path.", bin)
}

// GetK8sRootDir returns the root directory for kubernetes, if present in the gopath.
func GetK8sRootDir() (string, error) {
dir, err := RootDir()
if err != nil {
return "", err
}
return filepath.Join(dir, fmt.Sprintf("%s/", "k8s.io/kubernetes")), nil
}

// GetCAdvisorRootDir returns the root directory for cAdvisor, if present in the gopath.
func GetCAdvisorRootDir() (string, error) {
dir, err := RootDir()
if err != nil {
return "", err
}
return filepath.Join(dir, fmt.Sprintf("%s/", "github.com/google/cadvisor")), nil
}

// TODO: Dedup / merge this with comparable utilities in e2e/util.go
// RootDir returns the path to the directory containing the k8s.io directory
func RootDir() (string, error) {
// Get the directory of the current executable
_, testExec, _, _ := runtime.Caller(0)
path := filepath.Dir(testExec)

// Look for the kubernetes source root directory
if strings.Contains(path, "k8s.io/kubernetes") {
splitPath := strings.Split(path, "k8s.io/kubernetes")
return splitPath[0], nil
}

return "", fmt.Errorf("Could not find kubernetes source root directory.")
}

func GetK8sBuildOutputDir() (string, error) {
k8sRoot, err := GetK8sRootDir()
if err != nil {
return "", err
}
buildOutputDir := filepath.Join(k8sRoot, "_output/local/go/bin")
if _, err := os.Stat(buildOutputDir); err != nil {
return "", err
}
return buildOutputDir, nil
return "", fmt.Errorf("unable to locate %s, Can be defined using --k8s-path", bin)
}

func GetKubeletServerBin() string {
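The hunks above drop the local GetK8sRootDir/GetK8sBuildOutputDir/RootDir helpers and switch callers to the shared ones in test/utils. A minimal, illustrative caller after the move, not part of this commit; it assumes the utils helpers keep the (string, error) signatures implied by the hunks:

package main

import (
	"fmt"

	"k8s.io/kubernetes/test/utils"
)

func main() {
	// Locate the kubernetes source root via the shared helper.
	root, err := utils.GetK8sRootDir()
	if err != nil {
		fmt.Printf("failed to locate kubernetes root directory: %v\n", err)
		return
	}
	// Locate the build output directory holding prebuilt binaries.
	out, err := utils.GetK8sBuildOutputDir()
	if err != nil {
		fmt.Printf("failed to locate build output directory: %v\n", err)
		return
	}
	fmt.Println("k8s root:", root, "build output:", out)
}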
vendor/k8s.io/kubernetes/test/e2e_node/conformance/run_test.sh | 2 (generated) (vendored)
@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash

# Copyright 2016 The Kubernetes Authors.
#
vendor/k8s.io/kubernetes/test/e2e_node/container_manager_test.go | 2 (generated) (vendored)
@@ -75,7 +75,7 @@ func validateOOMScoreAdjSettingIsInRange(pid int, expectedMinOOMScoreAdj, expect

var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
f := framework.NewDefaultFramework("kubelet-container-manager")
Describe("Validate OOM score adjustments", func() {
Describe("Validate OOM score adjustments [NodeFeature:OOMScoreAdj]", func() {
Context("once the node is setup", func() {
It("container runtime's oom-score-adj should be -999", func() {
runtimePids, err := getPidsForProcess(framework.TestContext.ContainerRuntimeProcessName, framework.TestContext.ContainerRuntimePidFile)
vendor/k8s.io/kubernetes/test/e2e_node/cpu_manager_test.go | 5 (generated) (vendored)
@@ -152,9 +152,6 @@ func setOldKubeletConfig(f *framework.Framework, oldCfg *kubeletconfig.KubeletCo
}

func enableCPUManagerInKubelet(f *framework.Framework) (oldCfg *kubeletconfig.KubeletConfiguration) {
// Run only if the container runtime is not docker or remote (not rkt).
framework.RunIfContainerRuntimeIs("docker", "remote")

// Enable CPU Manager in Kubelet with static policy.
oldCfg, err := getCurrentKubeletConfig()
framework.ExpectNoError(err)
@@ -441,7 +438,7 @@ func runCPUManagerTests(f *framework.Framework) {
}

// Serial because the test updates kubelet configuration.
var _ = SIGDescribe("CPU Manager [Serial] [Feature:CPUManager]", func() {
var _ = SIGDescribe("CPU Manager [Serial] [Feature:CPUManager][NodeAlphaFeature:CPUManager]", func() {
f := framework.NewDefaultFramework("cpu-manager-test")

Context("With kubeconfig updated with static CPU Manager policy run the CPU Manager tests", func() {
vendor/k8s.io/kubernetes/test/e2e_node/critical_pod_test.go | 4 (generated) (vendored)
@@ -40,7 +40,7 @@ const (
bestEffortPodName = "best-effort"
)

var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive]", func() {
var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:CriticalPod]", func() {
f := framework.NewDefaultFramework("critical-pod-test")

Context("when we need to admit a critical pod", func() {
@@ -131,7 +131,7 @@ func getTestPod(critical bool, name string, resources v1.ResourceRequirements) *
Containers: []v1.Container{
{
Name: "container",
Image: imageutils.GetPauseImageNameForHostArch(),
Image: imageutils.GetPauseImageName(),
Resources: resources,
},
},
vendor/k8s.io/kubernetes/test/e2e_node/density_test.go | 33 (generated) (vendored)
@ -154,7 +154,7 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
|
||||
|
||||
for _, testArg := range dTests {
|
||||
itArg := testArg
|
||||
desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval [Benchmark]", itArg.podsNr, itArg.interval)
|
||||
desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval [Benchmark][NodeSpecialFeature:Benchmark]", itArg.podsNr, itArg.interval)
|
||||
It(desc, func() {
|
||||
itArg.createMethod = "batch"
|
||||
testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)
|
||||
@ -192,7 +192,7 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
|
||||
for _, testArg := range dTests {
|
||||
itArg := testArg
|
||||
Context("", func() {
|
||||
desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval (QPS %d) [Benchmark]", itArg.podsNr, itArg.interval, itArg.APIQPSLimit)
|
||||
desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval (QPS %d) [Benchmark][NodeSpecialFeature:Benchmark]", itArg.podsNr, itArg.interval, itArg.APIQPSLimit)
|
||||
// The latency caused by API QPS limit takes a large portion (up to ~33%) of e2e latency.
|
||||
// It makes the pod startup latency of Kubelet (creation throughput as well) under-estimated.
|
||||
// Here we set API QPS limit from default 5 to 60 in order to test real Kubelet performance.
|
||||
@ -273,7 +273,7 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
|
||||
|
||||
for _, testArg := range dTests {
|
||||
itArg := testArg
|
||||
desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %d background pods [Benchmark]", itArg.podsNr, itArg.bgPodsNr)
|
||||
desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %d background pods [Benchmark][NodeSpeicalFeature:Benchmark]", itArg.podsNr, itArg.bgPodsNr)
|
||||
It(desc, func() {
|
||||
itArg.createMethod = "sequence"
|
||||
testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)
|
||||
@ -332,7 +332,7 @@ func runDensityBatchTest(f *framework.Framework, rc *ResourceCollector, testArg
|
||||
)
|
||||
|
||||
// create test pod data structure
|
||||
pods := newTestPods(testArg.podsNr, true, imageutils.GetPauseImageNameForHostArch(), podType)
|
||||
pods := newTestPods(testArg.podsNr, true, imageutils.GetPauseImageName(), podType)
|
||||
|
||||
// the controller watches the change of pod status
|
||||
controller := newInformerWatchPod(f, mutex, watchTimes, podType)
|
||||
@ -413,8 +413,8 @@ func runDensitySeqTest(f *framework.Framework, rc *ResourceCollector, testArg de
|
||||
podType = "density_test_pod"
|
||||
sleepBeforeCreatePods = 30 * time.Second
|
||||
)
|
||||
bgPods := newTestPods(testArg.bgPodsNr, true, imageutils.GetPauseImageNameForHostArch(), "background_pod")
|
||||
testPods := newTestPods(testArg.podsNr, true, imageutils.GetPauseImageNameForHostArch(), podType)
|
||||
bgPods := newTestPods(testArg.bgPodsNr, true, imageutils.GetPauseImageName(), "background_pod")
|
||||
testPods := newTestPods(testArg.podsNr, true, imageutils.GetPauseImageName(), podType)
|
||||
|
||||
By("Creating a batch of background pods")
|
||||
|
||||
@ -472,21 +472,6 @@ func getPodStartLatency(node string) (framework.KubeletLatencyMetrics, error) {
|
||||
return latencyMetrics, nil
|
||||
}
|
||||
|
||||
// verifyPodStartupLatency verifies whether 50, 90 and 99th percentiles of PodStartupLatency are
|
||||
// within the threshold.
|
||||
func verifyPodStartupLatency(expect, actual framework.LatencyMetric) error {
|
||||
if actual.Perc50 > expect.Perc50 {
|
||||
return fmt.Errorf("too high pod startup latency 50th percentile: %v", actual.Perc50)
|
||||
}
|
||||
if actual.Perc90 > expect.Perc90 {
|
||||
return fmt.Errorf("too high pod startup latency 90th percentile: %v", actual.Perc90)
|
||||
}
|
||||
if actual.Perc99 > expect.Perc99 {
|
||||
return fmt.Errorf("too high pod startup latency 99th percentile: %v", actual.Perc99)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// newInformerWatchPod creates an informer to check whether all pods are running.
|
||||
func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes map[string]metav1.Time, podType string) cache.Controller {
|
||||
ns := f.Namespace.Name
|
||||
@ -556,14 +541,14 @@ func logAndVerifyLatency(batchLag time.Duration, e2eLags []framework.PodLatencyD
|
||||
latencyMetrics, _ := getPodStartLatency(kubeletAddr)
|
||||
framework.Logf("Kubelet Prometheus metrics (not reset):\n%s", framework.PrettyPrintJSON(latencyMetrics))
|
||||
|
||||
podCreateLatency := framework.PodStartupLatency{Latency: framework.ExtractLatencyMetrics(e2eLags)}
|
||||
podStartupLatency := framework.ExtractLatencyMetrics(e2eLags)
|
||||
|
||||
// log latency perf data
|
||||
logPerfData(getLatencyPerfData(podCreateLatency.Latency, testInfo), "latency")
|
||||
logPerfData(getLatencyPerfData(podStartupLatency, testInfo), "latency")
|
||||
|
||||
if isVerify {
|
||||
// check whether e2e pod startup time is acceptable.
|
||||
framework.ExpectNoError(verifyPodStartupLatency(podStartupLimits, podCreateLatency.Latency))
|
||||
framework.ExpectNoError(framework.VerifyLatencyWithinThreshold(podStartupLimits, podStartupLatency, "pod startup"))
|
||||
|
||||
// check bactch pod creation latency
|
||||
if podBatchStartupLimit > 0 {
|
||||
|
vendor/k8s.io/kubernetes/test/e2e_node/device_plugin.go | 137 (generated) (vendored)
@@ -44,7 +44,7 @@ const (
)

// Serial because the test restarts Kubelet
var _ = framework.KubeDescribe("Device Plugin [Feature:DevicePlugin]", func() {
var _ = framework.KubeDescribe("Device Plugin [Feature:DevicePlugin][NodeFeature:DevicePlugin][Serial]", func() {
f := framework.NewDefaultFramework("device-plugin-errors")

Context("DevicePlugin", func() {
@@ -69,33 +69,41 @@ var _ = framework.KubeDescribe("Device Plugin [Feature:DevicePlugin]", func() {

By("Waiting for the resource exported by the stub device plugin to become available on the local node")
devsLen := int64(len(devs))
Eventually(func() int64 {
Eventually(func() bool {
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
framework.ExpectNoError(err)
return numberOfDevices(node, resourceName)
}, 30*time.Second, framework.Poll).Should(Equal(devsLen))
return numberOfDevicesCapacity(node, resourceName) == devsLen &&
numberOfDevicesAllocatable(node, resourceName) == devsLen
}, 30*time.Second, framework.Poll).Should(BeTrue())

By("Creating one pod on node with at least one fake-device")
podRECMD := "devs=$(ls /tmp/ | egrep '^Dev-[0-9]+$') && echo stub devices: $devs"
pod1 := f.PodClient().CreateSync(makeBusyboxPod(resourceName, podRECMD))
deviceIDRE := "stub devices: (Dev-[0-9]+)"
count1, devId1 := parseLogFromNRuns(f, pod1.Name, pod1.Name, 0, deviceIDRE)
devId1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
Expect(devId1).To(Not(Equal("")))

pod1, err = f.PodClient().Get(pod1.Name, metav1.GetOptions{})
framework.ExpectNoError(err)

By("Restarting Kubelet and waiting for the current running pod to restart")
ensurePodContainerRestart(f, pod1.Name, pod1.Name)

By("Confirming that device assignment persists even after container restart")
devIdAfterRestart := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
Expect(devIdAfterRestart).To(Equal(devId1))

By("Restarting Kubelet")
restartKubelet()

By("Confirming that after a kubelet and pod restart, fake-device assignement is kept")
count1, devIdRestart1 := parseLogFromNRuns(f, pod1.Name, pod1.Name, count1+1, deviceIDRE)
ensurePodContainerRestart(f, pod1.Name, pod1.Name)
By("Confirming that after a kubelet restart, fake-device assignement is kept")
devIdRestart1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
Expect(devIdRestart1).To(Equal(devId1))

By("Wait for node is ready")
framework.WaitForAllNodesSchedulable(f.ClientSet, framework.TestContext.NodeSchedulableTimeout)

By("Re-Register resources")
By("Re-Register resources after kubelet restart")
dp1 = dm.NewDevicePluginStub(devs, socketPath)
dp1.SetAllocFunc(stubAllocFunc)
err = dp1.Start()
@@ -105,17 +113,18 @@ var _ = framework.KubeDescribe("Device Plugin [Feature:DevicePlugin]", func() {
framework.ExpectNoError(err)

By("Waiting for resource to become available on the local node after re-registration")
Eventually(func() int64 {
Eventually(func() bool {
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
framework.ExpectNoError(err)
return numberOfDevices(node, resourceName)
}, 30*time.Second, framework.Poll).Should(Equal(devsLen))
return numberOfDevicesCapacity(node, resourceName) == devsLen &&
numberOfDevicesAllocatable(node, resourceName) == devsLen
}, 30*time.Second, framework.Poll).Should(BeTrue())

By("Creating another pod")
pod2 := f.PodClient().CreateSync(makeBusyboxPod(resourceName, podRECMD))

By("Checking that pods got a different GPU")
count2, devId2 := parseLogFromNRuns(f, pod2.Name, pod2.Name, 1, deviceIDRE)
By("Checking that pod got a different fake device")
devId2 := parseLog(f, pod2.Name, pod2.Name, deviceIDRE)

Expect(devId1).To(Not(Equal(devId2)))

@@ -123,26 +132,59 @@ var _ = framework.KubeDescribe("Device Plugin [Feature:DevicePlugin]", func() {
err = dp1.Stop()
framework.ExpectNoError(err)

By("Waiting for stub device plugin to become unhealthy on the local node")
Eventually(func() int64 {
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
framework.ExpectNoError(err)
return numberOfDevicesAllocatable(node, resourceName)
}, 30*time.Second, framework.Poll).Should(Equal(int64(0)))

By("Checking that scheduled pods can continue to run even after we delete device plugin.")
ensurePodContainerRestart(f, pod1.Name, pod1.Name)
devIdRestart1 = parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
Expect(devIdRestart1).To(Equal(devId1))

ensurePodContainerRestart(f, pod2.Name, pod2.Name)
devIdRestart2 := parseLog(f, pod2.Name, pod2.Name, deviceIDRE)
Expect(devIdRestart2).To(Equal(devId2))

By("Re-register resources")
dp1 = dm.NewDevicePluginStub(devs, socketPath)
dp1.SetAllocFunc(stubAllocFunc)
err = dp1.Start()
framework.ExpectNoError(err)

err = dp1.Register(pluginapi.KubeletSocket, resourceName, false)
framework.ExpectNoError(err)

By("Waiting for the resource exported by the stub device plugin to become healthy on the local node")
Eventually(func() int64 {
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
framework.ExpectNoError(err)
return numberOfDevicesAllocatable(node, resourceName)
}, 30*time.Second, framework.Poll).Should(Equal(devsLen))

By("Deleting device plugin again.")
err = dp1.Stop()
framework.ExpectNoError(err)

By("Waiting for stub device plugin to become unavailable on the local node")
Eventually(func() bool {
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
framework.ExpectNoError(err)
return numberOfDevices(node, resourceName) <= 0
return numberOfDevicesCapacity(node, resourceName) <= 0
}, 10*time.Minute, framework.Poll).Should(BeTrue())

By("Checking that scheduled pods can continue to run even after we delete device plugin.")
count1, devIdRestart1 = parseLogFromNRuns(f, pod1.Name, pod1.Name, count1+1, deviceIDRE)
Expect(devIdRestart1).To(Equal(devId1))
count2, devIdRestart2 := parseLogFromNRuns(f, pod2.Name, pod2.Name, count2+1, deviceIDRE)
Expect(devIdRestart2).To(Equal(devId2))

By("Restarting Kubelet.")
By("Restarting Kubelet second time.")
restartKubelet()

By("Checking that scheduled pods can continue to run even after we delete device plugin and restart Kubelet.")
count1, devIdRestart1 = parseLogFromNRuns(f, pod1.Name, pod1.Name, count1+2, deviceIDRE)
By("Checking that scheduled pods can continue to run even after we delete device plugin and restart Kubelet Eventually.")
ensurePodContainerRestart(f, pod1.Name, pod1.Name)
devIdRestart1 = parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
Expect(devIdRestart1).To(Equal(devId1))
count2, devIdRestart2 = parseLogFromNRuns(f, pod2.Name, pod2.Name, count2+2, deviceIDRE)

ensurePodContainerRestart(f, pod2.Name, pod2.Name)
devIdRestart2 = parseLog(f, pod2.Name, pod2.Name, deviceIDRE)
Expect(devIdRestart2).To(Equal(devId2))

// Cleanup
@@ -176,21 +218,28 @@ func makeBusyboxPod(resourceName, cmd string) *v1.Pod {
}
}

// parseLogFromNRuns returns restart count of the specified container
// after it has been restarted at least restartCount times,
// and the matching string for the specified regular expression parsed from the container logs.
func parseLogFromNRuns(f *framework.Framework, podName string, contName string, restartCount int32, re string) (int32, string) {
var count int32
// Wait till pod has been restarted at least restartCount times.
// ensurePodContainerRestart confirms that pod container has restarted at least once
func ensurePodContainerRestart(f *framework.Framework, podName string, contName string) {
var initialCount int32
var currentCount int32
p, err := f.PodClient().Get(podName, metav1.GetOptions{})
if err != nil || len(p.Status.ContainerStatuses) < 1 {
framework.Failf("ensurePodContainerRestart failed for pod %q: %v", podName, err)
}
initialCount = p.Status.ContainerStatuses[0].RestartCount
Eventually(func() bool {
p, err := f.PodClient().Get(podName, metav1.GetOptions{})
p, err = f.PodClient().Get(podName, metav1.GetOptions{})
if err != nil || len(p.Status.ContainerStatuses) < 1 {
return false
}
count = p.Status.ContainerStatuses[0].RestartCount
return count >= restartCount
}, 5*time.Minute, framework.Poll).Should(BeTrue())
currentCount = p.Status.ContainerStatuses[0].RestartCount
framework.Logf("initial %v, current %v", initialCount, currentCount)
return currentCount > initialCount
}, 2*time.Minute, framework.Poll).Should(BeTrue())
}

// parseLog returns the matching string for the specified regular expression parsed from the container logs.
func parseLog(f *framework.Framework, podName string, contName string, re string) string {
logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, contName)
if err != nil {
framework.Failf("GetPodLogs for pod %q failed: %v", podName, err)
@@ -200,14 +249,14 @@ func parseLogFromNRuns(f *framework.Framework, podName string, contName string,
regex := regexp.MustCompile(re)
matches := regex.FindStringSubmatch(logs)
if len(matches) < 2 {
return count, ""
return ""
}

return count, matches[1]
return matches[1]
}

// numberOfDevices returns the number of devices of resourceName advertised by a node
func numberOfDevices(node *v1.Node, resourceName string) int64 {
// numberOfDevicesCapacity returns the number of devices of resourceName advertised by a node capacity
func numberOfDevicesCapacity(node *v1.Node, resourceName string) int64 {
val, ok := node.Status.Capacity[v1.ResourceName(resourceName)]
if !ok {
return 0
@@ -216,6 +265,16 @@ func numberOfDevices(node *v1.Node, resourceName string) int64 {
return val.Value()
}

// numberOfDevicesAllocatable returns the number of devices of resourceName advertised by a node allocatable
func numberOfDevicesAllocatable(node *v1.Node, resourceName string) int64 {
val, ok := node.Status.Allocatable[v1.ResourceName(resourceName)]
if !ok {
return 0
}

return val.Value()
}

// stubAllocFunc will pass to stub device plugin
func stubAllocFunc(r *pluginapi.AllocateRequest, devs map[string]pluginapi.Device) (*pluginapi.AllocateResponse, error) {
var responses pluginapi.AllocateResponse
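For orientation, the hunks above split the old numberOfDevices helper into numberOfDevicesCapacity and numberOfDevicesAllocatable and only treat the stub resource as healthy when both counts match. A minimal, illustrative sketch of that check, not part of this commit; devicesReady is a hypothetical helper name and resourceName stands for the test's stub resource:

package example

import (
	v1 "k8s.io/api/core/v1"
)

// devicesReady reports whether a node advertises the expected number of
// devices for an extended resource in both Capacity and Allocatable.
func devicesReady(node *v1.Node, resourceName string, want int64) bool {
	capQty, ok := node.Status.Capacity[v1.ResourceName(resourceName)]
	if !ok {
		return false
	}
	allocQty, ok := node.Status.Allocatable[v1.ResourceName(resourceName)]
	if !ok {
		return false
	}
	// Healthy only when both views agree with the expected device count.
	return capQty.Value() == want && allocQty.Value() == want
}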
vendor/k8s.io/kubernetes/test/e2e_node/docker_test.go | 42 (generated) (vendored)
@@ -30,53 +30,13 @@ import (
. "github.com/onsi/gomega"
)

var _ = framework.KubeDescribe("Docker features [Feature:Docker]", func() {
var _ = framework.KubeDescribe("Docker features [Feature:Docker][Legacy:Docker]", func() {
f := framework.NewDefaultFramework("docker-feature-test")

BeforeEach(func() {
framework.RunIfContainerRuntimeIs("docker")
})

Context("when shared PID namespace is enabled", func() {
It("processes in different containers of the same pod should be able to see each other", func() {
// TODO(yguo0905): Change this test to run unless the runtime is
// Docker and its version is <1.13.
By("Check whether shared PID namespace is supported.")
isEnabled, err := isSharedPIDNamespaceSupported()
framework.ExpectNoError(err)
if !isEnabled {
framework.Skipf("Skipped because shared PID namespace is not supported by this docker version.")
}

By("Create a pod with two containers.")
f.PodClient().CreateSync(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "shared-pid-ns-test-pod"},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "test-container-1",
Image: "busybox",
Command: []string{"/bin/top"},
},
{
Name: "test-container-2",
Image: "busybox",
Command: []string{"/bin/sleep"},
Args: []string{"10000"},
},
},
},
})

By("Check if the process in one container is visible to the process in the other.")
pid1 := f.ExecCommandInContainer("shared-pid-ns-test-pod", "test-container-1", "/bin/pidof", "top")
pid2 := f.ExecCommandInContainer("shared-pid-ns-test-pod", "test-container-2", "/bin/pidof", "top")
if pid1 != pid2 {
framework.Failf("PIDs are not the same in different containers: test-container-1=%v, test-container-2=%v", pid1, pid2)
}
})
})

Context("when live-restore is enabled [Serial] [Slow] [Disruptive]", func() {
It("containers should not be disrupted when the daemon shuts down and restarts", func() {
const (
vendor/k8s.io/kubernetes/test/e2e_node/dockershim_checkpoint_test.go | 5 (generated) (vendored)
@@ -33,6 +33,7 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
)

const (
@@ -42,7 +43,7 @@ const (
testCheckpointContent = `{"version":"v1","name":"fluentd-gcp-v2.0-vmnqx","namespace":"kube-system","data":{},"checksum":1799154314}`
)

var _ = SIGDescribe("Dockershim [Serial] [Disruptive] [Feature:Docker]", func() {
var _ = SIGDescribe("Dockershim [Serial] [Disruptive] [Feature:Docker][Legacy:Docker]", func() {
f := framework.NewDefaultFramework("dockerhism-checkpoint-test")

BeforeEach(func() {
@@ -155,7 +156,7 @@ func runPodCheckpointTest(f *framework.Framework, podName string, twist func())
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Image: framework.GetPauseImageName(f.ClientSet),
Image: imageutils.GetPauseImageName(),
Name: "pause-container",
},
},
vendor/k8s.io/kubernetes/test/e2e_node/dynamic_kubelet_config_test.go | 1097 (generated) (vendored)
File diff suppressed because it is too large
vendor/k8s.io/kubernetes/test/e2e_node/environment/setup_host.sh | 2 (generated) (vendored)
@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash

# Copyright 2016 The Kubernetes Authors.
#
vendor/k8s.io/kubernetes/test/e2e_node/eviction_test.go | 138 (generated) (vendored)
@@ -20,15 +20,18 @@ import (
"fmt"
"path/filepath"
"strconv"
"strings"
"time"

"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
nodeutil "k8s.io/kubernetes/pkg/api/v1/node"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
"k8s.io/kubernetes/pkg/kubelet/eviction"
kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/test/e2e/framework"
@@ -49,18 +52,21 @@ const (
// pressure conditions often surface after evictions because the kubelet only updates
// node conditions periodically.
// we wait this period after evictions to make sure that we wait out this delay
pressureDelay = 20 * time.Second
testContextFmt = "when we run containers that should cause %s"
noPressure = v1.NodeConditionType("NoPressure")
lotsOfDisk = 10240 // 10 Gb in Mb
lotsOfFiles = 1000000000 // 1 billion
pressureDelay = 20 * time.Second
testContextFmt = "when we run containers that should cause %s"
noPressure = v1.NodeConditionType("NoPressure")
lotsOfDisk = 10240 // 10 Gb in Mb
lotsOfFiles = 1000000000 // 1 billion
resourceInodes = v1.ResourceName("inodes")
noStarvedResource = v1.ResourceName("none")
)

// InodeEviction tests that the node responds to node disk pressure by evicting only responsible pods.
// Node disk pressure is induced by consuming all inodes on the node.
var _ = framework.KubeDescribe("InodeEviction [Slow] [Serial] [Disruptive]", func() {
var _ = framework.KubeDescribe("InodeEviction [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() {
f := framework.NewDefaultFramework("inode-eviction-test")
expectedNodeCondition := v1.NodeDiskPressure
expectedStarvedResource := resourceInodes
pressureTimeout := 15 * time.Minute
inodesConsumed := uint64(200000)
Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
@@ -74,7 +80,7 @@ var _ = framework.KubeDescribe("InodeEviction [Slow] [Serial] [Disruptive]", fun
initialConfig.EvictionHard = map[string]string{"nodefs.inodesFree": fmt.Sprintf("%d", inodesFree-inodesConsumed)}
initialConfig.EvictionMinimumReclaim = map[string]string{}
})
runEvictionTest(f, pressureTimeout, expectedNodeCondition, logInodeMetrics, []podEvictSpec{
runEvictionTest(f, pressureTimeout, expectedNodeCondition, expectedStarvedResource, logInodeMetrics, []podEvictSpec{
{
evictionPriority: 1,
pod: inodeConsumingPod("container-inode-hog", lotsOfFiles, nil),
@@ -93,10 +99,11 @@ var _ = framework.KubeDescribe("InodeEviction [Slow] [Serial] [Disruptive]", fun

// ImageGCNoEviction tests that the node does not evict pods when inodes are consumed by images
// Disk pressure is induced by pulling large images
var _ = framework.KubeDescribe("ImageGCNoEviction [Slow] [Serial] [Disruptive]", func() {
var _ = framework.KubeDescribe("ImageGCNoEviction [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() {
f := framework.NewDefaultFramework("image-gc-eviction-test")
pressureTimeout := 10 * time.Minute
expectedNodeCondition := v1.NodeDiskPressure
expectedStarvedResource := resourceInodes
inodesConsumed := uint64(100000)
Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
@@ -111,7 +118,7 @@ var _ = framework.KubeDescribe("ImageGCNoEviction [Slow] [Serial] [Disruptive]",
})
// Consume enough inodes to induce disk pressure,
// but expect that image garbage collection can reduce it enough to avoid an eviction
runEvictionTest(f, pressureTimeout, expectedNodeCondition, logDiskMetrics, []podEvictSpec{
runEvictionTest(f, pressureTimeout, expectedNodeCondition, expectedStarvedResource, logDiskMetrics, []podEvictSpec{
{
evictionPriority: 0,
pod: inodeConsumingPod("container-inode", 110000, nil),
@@ -122,9 +129,10 @@ var _ = framework.KubeDescribe("ImageGCNoEviction [Slow] [Serial] [Disruptive]",

// MemoryAllocatableEviction tests that the node responds to node memory pressure by evicting only responsible pods.
// Node memory pressure is only encountered because we reserve the majority of the node's capacity via kube-reserved.
var _ = framework.KubeDescribe("MemoryAllocatableEviction [Slow] [Serial] [Disruptive]", func() {
var _ = framework.KubeDescribe("MemoryAllocatableEviction [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() {
f := framework.NewDefaultFramework("memory-allocatable-eviction-test")
expectedNodeCondition := v1.NodeMemoryPressure
expectedStarvedResource := v1.ResourceMemory
pressureTimeout := 10 * time.Minute
Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
@@ -139,7 +147,7 @@ var _ = framework.KubeDescribe("MemoryAllocatableEviction [Slow] [Serial] [Disru
initialConfig.EnforceNodeAllocatable = []string{kubetypes.NodeAllocatableEnforcementKey}
initialConfig.CgroupsPerQOS = true
})
runEvictionTest(f, pressureTimeout, expectedNodeCondition, logMemoryMetrics, []podEvictSpec{
runEvictionTest(f, pressureTimeout, expectedNodeCondition, expectedStarvedResource, logMemoryMetrics, []podEvictSpec{
{
evictionPriority: 1,
pod: getMemhogPod("memory-hog-pod", "memory-hog", v1.ResourceRequirements{}),
@@ -154,10 +162,11 @@ var _ = framework.KubeDescribe("MemoryAllocatableEviction [Slow] [Serial] [Disru

// LocalStorageEviction tests that the node responds to node disk pressure by evicting only responsible pods
// Disk pressure is induced by running pods which consume disk space.
var _ = framework.KubeDescribe("LocalStorageEviction [Slow] [Serial] [Disruptive]", func() {
var _ = framework.KubeDescribe("LocalStorageEviction [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() {
f := framework.NewDefaultFramework("localstorage-eviction-test")
pressureTimeout := 10 * time.Minute
expectedNodeCondition := v1.NodeDiskPressure
expectedStarvedResource := v1.ResourceEphemeralStorage
Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
diskConsumed := resource.MustParse("100Mi")
@@ -166,7 +175,7 @@ var _ = framework.KubeDescribe("LocalStorageEviction [Slow] [Serial] [Disruptive
initialConfig.EvictionHard = map[string]string{"nodefs.available": fmt.Sprintf("%d", availableBytes-uint64(diskConsumed.Value()))}
initialConfig.EvictionMinimumReclaim = map[string]string{}
})
runEvictionTest(f, pressureTimeout, expectedNodeCondition, logDiskMetrics, []podEvictSpec{
runEvictionTest(f, pressureTimeout, expectedNodeCondition, expectedStarvedResource, logDiskMetrics, []podEvictSpec{
{
evictionPriority: 1,
pod: diskConsumingPod("container-disk-hog", lotsOfDisk, nil, v1.ResourceRequirements{}),
@@ -182,10 +191,11 @@ var _ = framework.KubeDescribe("LocalStorageEviction [Slow] [Serial] [Disruptive
// LocalStorageEviction tests that the node responds to node disk pressure by evicting only responsible pods
// Disk pressure is induced by running pods which consume disk space, which exceed the soft eviction threshold.
// Note: This test's purpose is to test Soft Evictions. Local storage was chosen since it is the least costly to run.
var _ = framework.KubeDescribe("LocalStorageSoftEviction [Slow] [Serial] [Disruptive]", func() {
var _ = framework.KubeDescribe("LocalStorageSoftEviction [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() {
f := framework.NewDefaultFramework("localstorage-eviction-test")
pressureTimeout := 10 * time.Minute
expectedNodeCondition := v1.NodeDiskPressure
expectedStarvedResource := v1.ResourceEphemeralStorage
Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
diskConsumed := resource.MustParse("100Mi")
@@ -203,7 +213,7 @@ var _ = framework.KubeDescribe("LocalStorageSoftEviction [Slow] [Serial] [Disrup
// setting a threshold to 0% disables; non-empty map overrides default value (necessary due to omitempty)
initialConfig.EvictionHard = map[string]string{"memory.available": "0%"}
})
runEvictionTest(f, pressureTimeout, expectedNodeCondition, logDiskMetrics, []podEvictSpec{
runEvictionTest(f, pressureTimeout, expectedNodeCondition, expectedStarvedResource, logDiskMetrics, []podEvictSpec{
{
evictionPriority: 1,
pod: diskConsumingPod("container-disk-hog", lotsOfDisk, nil, v1.ResourceRequirements{}),
@@ -217,7 +227,7 @@ var _ = framework.KubeDescribe("LocalStorageSoftEviction [Slow] [Serial] [Disrup
})

// LocalStorageCapacityIsolationEviction tests that container and volume local storage limits are enforced through evictions
var _ = framework.KubeDescribe("LocalStorageCapacityIsolationEviction [Slow] [Serial] [Disruptive] [Feature:LocalStorageCapacityIsolation]", func() {
var _ = framework.KubeDescribe("LocalStorageCapacityIsolationEviction [Slow] [Serial] [Disruptive] [Feature:LocalStorageCapacityIsolation][NodeFeature:Eviction]", func() {
f := framework.NewDefaultFramework("localstorage-eviction-test")
evictionTestTimeout := 10 * time.Minute
Context(fmt.Sprintf(testContextFmt, "evictions due to pod local storage violations"), func() {
@@ -231,7 +241,7 @@ var _ = framework.KubeDescribe("LocalStorageCapacityIsolationEviction [Slow] [Se
useUnderLimit := 99 /* Mb */
containerLimit := v1.ResourceList{v1.ResourceEphemeralStorage: sizeLimit}

runEvictionTest(f, evictionTestTimeout, noPressure, logDiskMetrics, []podEvictSpec{
runEvictionTest(f, evictionTestTimeout, noPressure, noStarvedResource, logDiskMetrics, []podEvictSpec{
{
evictionPriority: 1, // This pod should be evicted because emptyDir (default storage type) usage violation
pod: diskConsumingPod("emptydir-disk-sizelimit", useOverLimit, &v1.VolumeSource{
@@ -270,9 +280,10 @@ var _ = framework.KubeDescribe("LocalStorageCapacityIsolationEviction [Slow] [Se
// PriorityMemoryEvictionOrdering tests that the node responds to node memory pressure by evicting pods.
// This test tests that the guaranteed pod is never evicted, and that the lower-priority pod is evicted before
// the higher priority pod.
var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [Disruptive]", func() {
var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() {
f := framework.NewDefaultFramework("priority-memory-eviction-ordering-test")
expectedNodeCondition := v1.NodeMemoryPressure
expectedStarvedResource := v1.ResourceMemory
pressureTimeout := 10 * time.Minute
Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
@@ -309,16 +320,17 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [
}
systemPriority := int32(2147483647)
specs[1].pod.Spec.Priority = &systemPriority
runEvictionTest(f, pressureTimeout, expectedNodeCondition, logMemoryMetrics, specs)
runEvictionTest(f, pressureTimeout, expectedNodeCondition, expectedStarvedResource, logMemoryMetrics, specs)
})
})

// PriorityLocalStorageEvictionOrdering tests that the node responds to node disk pressure by evicting pods.
// This test tests that the guaranteed pod is never evicted, and that the lower-priority pod is evicted before
// the higher priority pod.
var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Serial] [Disruptive]", func() {
var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() {
f := framework.NewDefaultFramework("priority-disk-eviction-ordering-test")
expectedNodeCondition := v1.NodeDiskPressure
expectedStarvedResource := v1.ResourceEphemeralStorage
pressureTimeout := 10 * time.Minute
Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
@@ -357,7 +369,7 @@ var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Ser
}
systemPriority := int32(2147483647)
specs[1].pod.Spec.Priority = &systemPriority
runEvictionTest(f, pressureTimeout, expectedNodeCondition, logDiskMetrics, specs)
runEvictionTest(f, pressureTimeout, expectedNodeCondition, expectedStarvedResource, logDiskMetrics, specs)
})
})

@@ -376,10 +388,12 @@ type podEvictSpec struct {
// It ensures that lower evictionPriority pods are always evicted before higher evictionPriority pods (2 evicted before 1, etc.)
// It ensures that all pods with non-zero evictionPriority are eventually evicted.
// runEvictionTest then cleans up the testing environment by deleting provided pods, and ensures that expectedNodeCondition no longer exists
func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expectedNodeCondition v1.NodeConditionType, logFunc func(), testSpecs []podEvictSpec) {
func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expectedNodeCondition v1.NodeConditionType, expectedStarvedResource v1.ResourceName, logFunc func(), testSpecs []podEvictSpec) {
// Place the remainder of the test within a context so that the kubelet config is set before and after the test.
Context("", func() {
BeforeEach(func() {
// reduce memory usage in the allocatable cgroup to ensure we do not have MemoryPressure
reduceAllocatableMemoryUsage()
// Nodes do not immediately report local storage capacity
// Sleep so that pods requesting local storage do not fail to schedule
time.Sleep(30 * time.Second)
@@ -409,7 +423,7 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
framework.Logf("Node does NOT have %s", expectedNodeCondition)
}
}
logKubeletMetrics(kubeletmetrics.EvictionStatsAgeKey)
logKubeletLatencyMetrics(kubeletmetrics.EvictionStatsAgeKey)
logFunc()
return verifyEvictionOrdering(f, testSpecs)
}, pressureTimeout, evictionPollInterval).Should(BeNil())
@@ -423,7 +437,7 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
By(fmt.Sprintf("Waiting for NodeCondition: %s to no longer exist on the node", expectedNodeCondition))
Eventually(func() error {
logFunc()
logKubeletMetrics(kubeletmetrics.EvictionStatsAgeKey)
logKubeletLatencyMetrics(kubeletmetrics.EvictionStatsAgeKey)
if expectedNodeCondition != noPressure && hasNodeCondition(f, expectedNodeCondition) {
return fmt.Errorf("Conditions havent returned to normal, node still has %s", expectedNodeCondition)
}
@@ -436,9 +450,12 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
return fmt.Errorf("%s dissappeared and then reappeared", expectedNodeCondition)
}
logFunc()
logKubeletMetrics(kubeletmetrics.EvictionStatsAgeKey)
logKubeletLatencyMetrics(kubeletmetrics.EvictionStatsAgeKey)
return verifyEvictionOrdering(f, testSpecs)
}, postTestConditionMonitoringPeriod, evictionPollInterval).Should(BeNil())

By("checking for correctly formatted eviction events")
verifyEvictionEvents(f, testSpecs, expectedStarvedResource)
})

AfterEach(func() {
@@ -447,6 +464,7 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
By(fmt.Sprintf("deleting pod: %s", spec.pod.Name))
f.PodClient().DeleteSync(spec.pod.Name, &metav1.DeleteOptions{}, 10*time.Minute)
}
reduceAllocatableMemoryUsage()
if expectedNodeCondition == v1.NodeDiskPressure && framework.TestContext.PrepullImages {
// The disk eviction test may cause the prepulled images to be evicted,
// prepull those images again to ensure this test not affect following tests.
@@ -462,7 +480,7 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Image: imageutils.GetPauseImageNameForHostArch(),
Image: imageutils.GetPauseImageName(),
Name: podName,
},
},
@@ -502,6 +520,8 @@ func verifyEvictionOrdering(f *framework.Framework, testSpecs []podEvictSpec) er
}
}
Expect(priorityPod).NotTo(BeNil())
Expect(priorityPod.Status.Phase).NotTo(Equal(v1.PodSucceeded),
fmt.Sprintf("pod: %s succeeded unexpectedly", priorityPod.Name))

// Check eviction ordering.
// Note: it is alright for a priority 1 and priority 2 pod (for example) to fail in the same round,
@@ -521,6 +541,11 @@ func verifyEvictionOrdering(f *framework.Framework, testSpecs []podEvictSpec) er
}
}

if priorityPod.Status.Phase == v1.PodFailed {
Expect(priorityPod.Status.Reason, eviction.Reason, "pod %s failed; expected Status.Reason to be %s, but got %s",
priorityPod.Name, eviction.Reason, priorityPod.Status.Reason)
}

// EvictionPriority 0 pods should not fail
if priorityPodSpec.evictionPriority == 0 {
Expect(priorityPod.Status.Phase).NotTo(Equal(v1.PodFailed),
@@ -538,6 +563,60 @@ func verifyEvictionOrdering(f *framework.Framework, testSpecs []podEvictSpec) er
return fmt.Errorf("pods that should be evicted are still running")
}

func verifyEvictionEvents(f *framework.Framework, testSpecs []podEvictSpec, expectedStarvedResource v1.ResourceName) {
for _, spec := range testSpecs {
pod := spec.pod
if spec.evictionPriority != 0 {
selector := fields.Set{
"involvedObject.kind": "Pod",
"involvedObject.name": pod.Name,
"involvedObject.namespace": f.Namespace.Name,
"reason": eviction.Reason,
}.AsSelector().String()
podEvictEvents, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(metav1.ListOptions{FieldSelector: selector})
Expect(err).To(BeNil(), "Unexpected error getting events during eviction test: %v", err)
Expect(len(podEvictEvents.Items)).To(Equal(1), "Expected to find 1 eviction event for pod %s, got %d", pod.Name, len(podEvictEvents.Items))
event := podEvictEvents.Items[0]

if expectedStarvedResource != noStarvedResource {
// Check the eviction.StarvedResourceKey
starved, found := event.Annotations[eviction.StarvedResourceKey]
Expect(found).To(BeTrue(), "Expected to find an annotation on the eviction event for pod %s containing the starved resource %s, but it was not found",
pod.Name, expectedStarvedResource)
starvedResource := v1.ResourceName(starved)
Expect(starvedResource).To(Equal(expectedStarvedResource), "Expected to the starved_resource annotation on pod %s to contain %s, but got %s instead",
pod.Name, expectedStarvedResource, starvedResource)

// We only check these keys for memory, because ephemeral storage evictions may be due to volume usage, in which case these values are not present
if expectedStarvedResource == v1.ResourceMemory {
// Check the eviction.OffendingContainersKey
offendersString, found := event.Annotations[eviction.OffendingContainersKey]
Expect(found).To(BeTrue(), "Expected to find an annotation on the eviction event for pod %s containing the offending containers, but it was not found",
pod.Name)
offendingContainers := strings.Split(offendersString, ",")
Expect(len(offendingContainers)).To(Equal(1), "Expected to find the offending container's usage in the %s annotation, but no container was found",
eviction.OffendingContainersKey)
Expect(offendingContainers[0]).To(Equal(pod.Spec.Containers[0].Name), "Expected to find the offending container: %s's usage in the %s annotation, but found %s instead",
pod.Spec.Containers[0].Name, eviction.OffendingContainersKey, offendingContainers[0])

// Check the eviction.OffendingContainersUsageKey
offendingUsageString, found := event.Annotations[eviction.OffendingContainersUsageKey]
Expect(found).To(BeTrue(), "Expected to find an annotation on the eviction event for pod %s containing the offending containers' usage, but it was not found",
pod.Name)
offendingContainersUsage := strings.Split(offendingUsageString, ",")
Expect(len(offendingContainersUsage)).To(Equal(1), "Expected to find the offending container's usage in the %s annotation, but found %+v",
eviction.OffendingContainersUsageKey, offendingContainersUsage)
usageQuantity, err := resource.ParseQuantity(offendingContainersUsage[0])
Expect(err).To(BeNil(), "Expected to be able to parse pod %s's %s annotation as a quantity, but got err: %v", pod.Name, eviction.OffendingContainersUsageKey, err)
request := pod.Spec.Containers[0].Resources.Requests[starvedResource]
Expect(usageQuantity.Cmp(request)).To(Equal(1), "Expected usage of offending container: %s in pod %s to exceed its request %s",
usageQuantity.String(), pod.Name, request.String())
}
}
}
}
}

// Returns TRUE if the node has the node condition, FALSE otherwise
func hasNodeCondition(f *framework.Framework, expectedNodeCondition v1.NodeConditionType) bool {
localNodeStatus := getLocalNode(f).Status
@@ -607,7 +686,12 @@ func logMemoryMetrics() {
return
}
if summary.Node.Memory != nil && summary.Node.Memory.WorkingSetBytes != nil && summary.Node.Memory.AvailableBytes != nil {
framework.Logf("Node.Memory.WorkingSetBytes: %d, summary.Node.Memory.AvailableBytes: %d", *summary.Node.Memory.WorkingSetBytes, *summary.Node.Memory.AvailableBytes)
framework.Logf("Node.Memory.WorkingSetBytes: %d, Node.Memory.AvailableBytes: %d", *summary.Node.Memory.WorkingSetBytes, *summary.Node.Memory.AvailableBytes)
}
for _, sysContainer := range summary.Node.SystemContainers {
if sysContainer.Name == stats.SystemContainerPods && sysContainer.Memory != nil && sysContainer.Memory.WorkingSetBytes != nil && sysContainer.Memory.AvailableBytes != nil {
framework.Logf("Allocatable.Memory.WorkingSetBytes: %d, Allocatable.Memory.AvailableBytes: %d", *sysContainer.Memory.WorkingSetBytes, *sysContainer.Memory.AvailableBytes)
}
}
for _, pod := range summary.Pods {
framework.Logf("Pod: %s", pod.PodRef.Name)
2
vendor/k8s.io/kubernetes/test/e2e_node/garbage_collector_test.go
generated
vendored
2
vendor/k8s.io/kubernetes/test/e2e_node/garbage_collector_test.go
generated
vendored
@ -71,7 +71,7 @@ type testRun struct {

// GarbageCollect tests that the Kubelet conforms to the Kubelet Garbage Collection Policy, found here:
// http://kubernetes.io/docs/admin/garbage-collection/
var _ = framework.KubeDescribe("GarbageCollect [Serial]", func() {
var _ = framework.KubeDescribe("GarbageCollect [Serial][NodeFeature:GarbageCollect]", func() {
f := framework.NewDefaultFramework("garbage-collect-test")
containerNamePrefix := "gc-test-container-"
podNamePrefix := "gc-test-pod-"

2
vendor/k8s.io/kubernetes/test/e2e_node/gke_environment_test.go
generated
vendored
@ -310,7 +310,7 @@ func checkDockerStorageDriver() error {
return fmt.Errorf("failed to find storage driver")
}

var _ = framework.KubeDescribe("GKE system requirements [Conformance] [Feature:GKEEnv]", func() {
var _ = framework.KubeDescribe("GKE system requirements [Conformance][NodeConformance][Feature:GKEEnv][NodeFeature:GKEEnv]", func() {
BeforeEach(func() {
framework.RunIfSystemSpecNameIs("gke")
})

37
vendor/k8s.io/kubernetes/test/e2e_node/gpu_device_plugin.go
generated
vendored
@ -17,6 +17,7 @@ limitations under the License.
package e2e_node

import (
"os/exec"
"strconv"
"time"

@ -36,7 +37,7 @@ const (
)

// Serial because the test restarts Kubelet
var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugin] [Serial] [Disruptive]", func() {
var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugin][NodeFeature:GPUDevicePlugin][Serial] [Disruptive]", func() {
f := framework.NewDefaultFramework("device-plugin-gpus-errors")

Context("DevicePlugin", func() {
@ -79,7 +80,7 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
p1 := f.PodClient().CreateSync(makeBusyboxPod(framework.NVIDIAGPUResourceName, podRECMD))

deviceIDRE := "gpu devices: (nvidia[0-9]+)"
count1, devId1 := parseLogFromNRuns(f, p1.Name, p1.Name, 1, deviceIDRE)
devId1 := parseLog(f, p1.Name, p1.Name, deviceIDRE)
p1, err := f.PodClient().Get(p1.Name, metav1.GetOptions{})
framework.ExpectNoError(err)

@ -87,15 +88,20 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
restartKubelet()

By("Confirming that after a kubelet and pod restart, GPU assignment is kept")
count1, devIdRestart1 := parseLogFromNRuns(f, p1.Name, p1.Name, count1+1, deviceIDRE)
ensurePodContainerRestart(f, p1.Name, p1.Name)
devIdRestart1 := parseLog(f, p1.Name, p1.Name, deviceIDRE)
Expect(devIdRestart1).To(Equal(devId1))

By("Restarting Kubelet and creating another pod")
restartKubelet()
framework.WaitForAllNodesSchedulable(f.ClientSet, framework.TestContext.NodeSchedulableTimeout)
Eventually(func() bool {
return framework.NumberOfNVIDIAGPUs(getLocalNode(f)) > 0
}, 10*time.Second, framework.Poll).Should(BeTrue())
p2 := f.PodClient().CreateSync(makeBusyboxPod(framework.NVIDIAGPUResourceName, podRECMD))

By("Checking that pods got a different GPU")
count2, devId2 := parseLogFromNRuns(f, p2.Name, p2.Name, 1, deviceIDRE)
devId2 := parseLog(f, p2.Name, p2.Name, deviceIDRE)

Expect(devId1).To(Not(Equal(devId2)))

@ -108,16 +114,21 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
return framework.NumberOfNVIDIAGPUs(node) <= 0
}, 10*time.Minute, framework.Poll).Should(BeTrue())
By("Checking that scheduled pods can continue to run even after we delete device plugin.")
count1, devIdRestart1 = parseLogFromNRuns(f, p1.Name, p1.Name, count1+1, deviceIDRE)
ensurePodContainerRestart(f, p1.Name, p1.Name)
devIdRestart1 = parseLog(f, p1.Name, p1.Name, deviceIDRE)
Expect(devIdRestart1).To(Equal(devId1))
count2, devIdRestart2 := parseLogFromNRuns(f, p2.Name, p2.Name, count2+1, deviceIDRE)

ensurePodContainerRestart(f, p2.Name, p2.Name)
devIdRestart2 := parseLog(f, p2.Name, p2.Name, deviceIDRE)
Expect(devIdRestart2).To(Equal(devId2))
By("Restarting Kubelet.")
restartKubelet()
By("Checking that scheduled pods can continue to run even after we delete device plugin and restart Kubelet.")
count1, devIdRestart1 = parseLogFromNRuns(f, p1.Name, p1.Name, count1+2, deviceIDRE)
ensurePodContainerRestart(f, p1.Name, p1.Name)
devIdRestart1 = parseLog(f, p1.Name, p1.Name, deviceIDRE)
Expect(devIdRestart1).To(Equal(devId1))
count2, devIdRestart2 = parseLogFromNRuns(f, p2.Name, p2.Name, count2+2, deviceIDRE)
ensurePodContainerRestart(f, p2.Name, p2.Name)
devIdRestart2 = parseLog(f, p2.Name, p2.Name, deviceIDRE)
Expect(devIdRestart2).To(Equal(devId2))
logDevicePluginMetrics()

@ -128,6 +139,16 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
})
})

func checkIfNvidiaGPUsExistOnNode() bool {
// Cannot use `lspci` because it is not installed on all distros by default.
err := exec.Command("/bin/sh", "-c", "find /sys/devices/pci* -type f | grep vendor | xargs cat | grep 0x10de").Run()
if err != nil {
framework.Logf("check for nvidia GPUs failed. Got Error: %v", err)
return false
}
return true
}

func logDevicePluginMetrics() {
ms, err := metrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName + ":10255")
framework.ExpectNoError(err)
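The devId lines above now rely on ensurePodContainerRestart plus a new parseLog helper whose body is not part of this hunk; conceptually it greps the pod's log for the deviceIDRE pattern defined earlier. A minimal standalone sketch of that extraction (the function name and sample log line are illustrative only):

package main

import (
	"fmt"
	"regexp"
)

// extractDeviceID returns the first capture group of deviceIDRE found in logs,
// e.g. "nvidia0" from a line such as "gpu devices: nvidia0".
func extractDeviceID(logs, deviceIDRE string) (string, bool) {
	m := regexp.MustCompile(deviceIDRE).FindStringSubmatch(logs)
	if len(m) < 2 {
		return "", false
	}
	return m[1], true
}

func main() {
	const deviceIDRE = "gpu devices: (nvidia[0-9]+)"
	if id, ok := extractDeviceID("gpu devices: nvidia0\n", deviceIDRE); ok {
		fmt.Println(id) // prints "nvidia0"
	}
}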
174
vendor/k8s.io/kubernetes/test/e2e_node/gpus.go
generated
vendored
@ -1,174 +0,0 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
func getGPUsAvailable(f *framework.Framework) int64 {
|
||||
nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
|
||||
framework.ExpectNoError(err, "getting node list")
|
||||
var gpusAvailable int64
|
||||
for _, node := range nodeList.Items {
|
||||
gpusAvailable += node.Status.Capacity.NvidiaGPU().Value()
|
||||
}
|
||||
return gpusAvailable
|
||||
}
|
||||
|
||||
func gpusExistOnAllNodes(f *framework.Framework) bool {
|
||||
nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
|
||||
framework.ExpectNoError(err, "getting node list")
|
||||
for _, node := range nodeList.Items {
|
||||
if node.Name == "kubernetes-master" {
|
||||
continue
|
||||
}
|
||||
if node.Status.Capacity.NvidiaGPU().Value() == 0 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func checkIfNvidiaGPUsExistOnNode() bool {
|
||||
// Cannot use `lspci` because it is not installed on all distros by default.
|
||||
err := exec.Command("/bin/sh", "-c", "find /sys/devices/pci* -type f | grep vendor | xargs cat | grep 0x10de").Run()
|
||||
if err != nil {
|
||||
framework.Logf("check for nvidia GPUs failed. Got Error: %v", err)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Serial because the test updates kubelet configuration.
|
||||
var _ = framework.KubeDescribe("GPU [Serial]", func() {
|
||||
f := framework.NewDefaultFramework("gpu-test")
|
||||
Context("attempt to use GPUs if available", func() {
|
||||
It("setup the node and create pods to test gpus", func() {
|
||||
By("ensuring that Nvidia GPUs exist on the node")
|
||||
if !checkIfNvidiaGPUsExistOnNode() {
|
||||
Skip("Nvidia GPUs do not exist on the node. Skipping test.")
|
||||
}
|
||||
By("ensuring that dynamic kubelet configuration is enabled")
|
||||
enabled, err := isKubeletConfigEnabled(f)
|
||||
framework.ExpectNoError(err)
|
||||
if !enabled {
|
||||
Skip("Dynamic Kubelet configuration is not enabled. Skipping test.")
|
||||
}
|
||||
|
||||
By("enabling support for GPUs")
|
||||
var oldCfg *kubeletconfig.KubeletConfiguration
|
||||
defer func() {
|
||||
if oldCfg != nil {
|
||||
framework.ExpectNoError(setKubeletConfiguration(f, oldCfg))
|
||||
}
|
||||
}()
|
||||
|
||||
// Enable Accelerators
|
||||
oldCfg, err = getCurrentKubeletConfig()
|
||||
framework.ExpectNoError(err)
|
||||
newCfg := oldCfg.DeepCopy()
|
||||
newCfg.FeatureGates[string(features.Accelerators)] = true
|
||||
framework.ExpectNoError(setKubeletConfiguration(f, newCfg))
|
||||
|
||||
By("Waiting for GPUs to become available on the local node")
|
||||
Eventually(gpusExistOnAllNodes(f), 10*time.Minute, time.Second).Should(BeTrue())
|
||||
|
||||
By("Creating a pod that will consume all GPUs")
|
||||
podSuccess := makePod(getGPUsAvailable(f), "gpus-success")
|
||||
podSuccess = f.PodClient().CreateSync(podSuccess)
|
||||
|
||||
By("Checking the containers in the pod had restarted at-least twice successfully thereby ensuring GPUs are reused")
|
||||
const minContainerRestartCount = 2
|
||||
Eventually(func() bool {
|
||||
p, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(podSuccess.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
framework.Logf("failed to get pod status: %v", err)
|
||||
return false
|
||||
}
|
||||
if p.Status.ContainerStatuses[0].RestartCount < minContainerRestartCount {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}, time.Minute, time.Second).Should(BeTrue())
|
||||
|
||||
By("Checking if the pod outputted Success to its logs")
|
||||
framework.ExpectNoError(f.PodClient().MatchContainerOutput(podSuccess.Name, podSuccess.Name, "Success"))
|
||||
|
||||
By("Creating a new pod requesting a GPU and noticing that it is rejected by the Kubelet")
|
||||
podFailure := makePod(1, "gpu-failure")
|
||||
framework.WaitForPodCondition(f.ClientSet, f.Namespace.Name, podFailure.Name, "pod rejected", framework.PodStartTimeout, func(pod *v1.Pod) (bool, error) {
|
||||
if pod.Status.Phase == v1.PodFailed {
|
||||
return true, nil
|
||||
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
|
||||
By("stopping the original Pod with GPUs")
|
||||
gp := int64(0)
|
||||
deleteOptions := metav1.DeleteOptions{
|
||||
GracePeriodSeconds: &gp,
|
||||
}
|
||||
f.PodClient().DeleteSync(podSuccess.Name, &deleteOptions, framework.DefaultPodDeletionTimeout)
|
||||
|
||||
By("attempting to start the failed pod again")
|
||||
f.PodClient().DeleteSync(podFailure.Name, &deleteOptions, framework.DefaultPodDeletionTimeout)
|
||||
podFailure = f.PodClient().CreateSync(podFailure)
|
||||
|
||||
By("Checking if the pod outputted Success to its logs")
|
||||
framework.ExpectNoError(f.PodClient().MatchContainerOutput(podFailure.Name, podFailure.Name, "Success"))
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
func makePod(gpus int64, name string) *v1.Pod {
|
||||
resources := v1.ResourceRequirements{
|
||||
Limits: v1.ResourceList{
|
||||
v1.ResourceNvidiaGPU: *resource.NewQuantity(gpus, resource.DecimalSI),
|
||||
},
|
||||
}
|
||||
gpuverificationCmd := fmt.Sprintf("if [[ %d -ne $(ls /dev/ | egrep '^nvidia[0-9]+$' | wc -l) ]]; then exit 1; else echo Success; fi", gpus)
|
||||
return &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
RestartPolicy: v1.RestartPolicyAlways,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: busyboxImage,
|
||||
Name: name,
|
||||
Command: []string{"sh", "-c", gpuverificationCmd},
|
||||
Resources: resources,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
2
vendor/k8s.io/kubernetes/test/e2e_node/gubernator.sh
generated
vendored
2
vendor/k8s.io/kubernetes/test/e2e_node/gubernator.sh
generated
vendored
@ -1,4 +1,4 @@
|
||||
#!/bin/bash
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2016 The Kubernetes Authors.
|
||||
#
|
||||
|
13
vendor/k8s.io/kubernetes/test/e2e_node/hugepages_test.go
generated
vendored
13
vendor/k8s.io/kubernetes/test/e2e_node/hugepages_test.go
generated
vendored
@ -19,7 +19,6 @@ package e2e_node
|
||||
import (
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
@ -38,14 +37,14 @@ import (
)

// makePodToVerifyHugePages returns a pod that verifies specified cgroup with hugetlb
func makePodToVerifyHugePages(cgroupName cm.CgroupName, hugePagesLimit resource.Quantity) *apiv1.Pod {
func makePodToVerifyHugePages(baseName string, hugePagesLimit resource.Quantity) *apiv1.Pod {
// convert the cgroup name to its literal form
cgroupFsName := ""
cgroupName = cm.CgroupName(path.Join(defaultNodeAllocatableCgroup, string(cgroupName)))
cgroupName := cm.NewCgroupName(cm.RootCgroupName, defaultNodeAllocatableCgroup, baseName)
if framework.TestContext.KubeletConfig.CgroupDriver == "systemd" {
cgroupFsName = cm.ConvertCgroupNameToSystemd(cgroupName, true)
cgroupFsName = cgroupName.ToSystemd()
} else {
cgroupFsName = string(cgroupName)
cgroupFsName = cgroupName.ToCgroupfs()
}

// this command takes the expected value and compares it against the actual value for the pod cgroup hugetlb.2MB.limit_in_bytes
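The hunk above replaces ad-hoc string joins with the structured cm.CgroupName API: names are built with cm.NewCgroupName and only rendered into an on-disk form at the last moment via ToSystemd or ToCgroupfs. A small sketch of that conversion, assuming the same kubelet cm package this test imports (the helper name podCgroupFsName is illustrative):

import "k8s.io/kubernetes/pkg/kubelet/cm"

// podCgroupFsName builds a pod cgroup under the node-allocatable cgroup
// ("kubepods") and renders it in the literal form the active cgroup driver uses.
func podCgroupFsName(baseName, cgroupDriver string) string {
	name := cm.NewCgroupName(cm.RootCgroupName, "kubepods", baseName)
	if cgroupDriver == "systemd" {
		return name.ToSystemd() // systemd slice notation
	}
	return name.ToCgroupfs() // plain cgroupfs path
}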
@ -184,7 +183,7 @@ func runHugePagesTests(f *framework.Framework) {
|
||||
})
|
||||
podUID := string(pod.UID)
|
||||
By("checking if the expected hugetlb settings were applied")
|
||||
verifyPod := makePodToVerifyHugePages(cm.CgroupName("pod"+podUID), resource.MustParse("50Mi"))
|
||||
verifyPod := makePodToVerifyHugePages("pod"+podUID, resource.MustParse("50Mi"))
|
||||
f.PodClient().Create(verifyPod)
|
||||
err := framework.WaitForPodSuccessInNamespace(f.ClientSet, verifyPod.Name, f.Namespace.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
@ -192,7 +191,7 @@ func runHugePagesTests(f *framework.Framework) {
|
||||
}
|
||||
|
||||
// Serial because the test updates kubelet configuration.
|
||||
var _ = SIGDescribe("HugePages [Serial] [Feature:HugePages]", func() {
|
||||
var _ = SIGDescribe("HugePages [Serial] [Feature:HugePages][NodeFeature:HugePages]", func() {
|
||||
f := framework.NewDefaultFramework("hugepages-test")
|
||||
|
||||
Context("With config updated with hugepages feature enabled", func() {
|
||||
|
2
vendor/k8s.io/kubernetes/test/e2e_node/image_id_test.go
generated
vendored
2
vendor/k8s.io/kubernetes/test/e2e_node/image_id_test.go
generated
vendored
@ -26,7 +26,7 @@ import (
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = framework.KubeDescribe("ImageID", func() {
|
||||
var _ = framework.KubeDescribe("ImageID [NodeFeature: ImageID]", func() {
|
||||
|
||||
busyBoxImage := "k8s.gcr.io/busybox@sha256:4bdd623e848417d96127e16037743f0cd8b528c026e9175e22a84f639eca58ff"
|
||||
|
||||
|
2
vendor/k8s.io/kubernetes/test/e2e_node/image_list.go
generated
vendored
2
vendor/k8s.io/kubernetes/test/e2e_node/image_list.go
generated
vendored
@ -51,7 +51,7 @@ var NodeImageWhiteList = sets.NewString(
|
||||
imageutils.GetE2EImage(imageutils.ServeHostname),
|
||||
imageutils.GetE2EImage(imageutils.Netexec),
|
||||
imageutils.GetE2EImage(imageutils.Nonewprivs),
|
||||
imageutils.GetPauseImageNameForHostArch(),
|
||||
imageutils.GetPauseImageName(),
|
||||
framework.GetGPUDevicePluginImage(),
|
||||
)
|
||||
|
||||
|
@ -1,9 +0,0 @@
|
||||
GCE_HOSTS=
|
||||
GCE_IMAGE_CONFIG_PATH=test/e2e_node/jenkins/benchmark/benchmark-config.yaml
|
||||
GCE_ZONE=us-central1-f
|
||||
GCE_PROJECT=k8s-jkns-ci-node-e2e
|
||||
CLEANUP=true
|
||||
GINKGO_FLAGS='--skip="\[Flaky\]"'
|
||||
TEST_ARGS='--feature-gates=DynamicKubeletConfig=true'
|
||||
KUBELET_ARGS='--cgroups-per-qos=true --cgroup-root=/'
|
||||
PARALLELISM=1
|
2
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/conformance/conformance-jenkins.sh
generated
vendored
2
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/conformance/conformance-jenkins.sh
generated
vendored
@ -1,4 +1,4 @@
|
||||
#!/bin/bash
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2016 The Kubernetes Authors.
|
||||
#
|
||||
|
@ -1,6 +0,0 @@
|
||||
GCE_HOSTS=
|
||||
GCE_IMAGE_CONFIG_PATH=test/e2e_node/jenkins/image-config.yaml
|
||||
GCE_ZONE=us-central1-f
|
||||
GCE_PROJECT=k8s-jkns-ci-node-e2e
|
||||
CLEANUP=true
|
||||
KUBELET_ARGS='--cgroups-per-qos=true --cgroup-root=/'
|
2
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/copy-e2e-image.sh
generated
vendored
2
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/copy-e2e-image.sh
generated
vendored
@ -1,4 +1,4 @@
|
||||
#!/bin/bash
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2016 The Kubernetes Authors.
|
||||
#
|
||||
|
@ -1,15 +0,0 @@
|
||||
GCI_IMAGE_PROJECT=container-vm-image-staging
|
||||
GCI_IMAGE_FAMILY=gci-next-canary
|
||||
GCI_IMAGE=$(gcloud compute images describe-from-family ${GCI_IMAGE_FAMILY} --project=${GCI_IMAGE_PROJECT} --format="value(name)")
|
||||
GCI_CLOUD_INIT=test/e2e_node/jenkins/gci-init.yaml
|
||||
|
||||
GCE_HOSTS=
|
||||
GCE_IMAGES=${GCI_IMAGE}
|
||||
GCE_IMAGE_PROJECT=${GCI_IMAGE_PROJECT}
|
||||
GCE_ZONE=us-central1-f
|
||||
GCE_PROJECT=node-cos-docker-validation
|
||||
# user-data is the GCI cloud init config file.
|
||||
GCE_INSTANCE_METADATA="user-data<${GCI_CLOUD_INIT},gci-update-strategy=update_disabled"
|
||||
CLEANUP=true
|
||||
GINKGO_FLAGS='--skip="\[Flaky\]|\[Serial\]"'
|
||||
TIMEOUT=1h
|
21
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/docker_validation/jenkins-perf.properties
generated
vendored
21
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/docker_validation/jenkins-perf.properties
generated
vendored
@ -1,21 +0,0 @@
|
||||
#!/bin/bash
|
||||
GCI_IMAGE_PROJECT=container-vm-image-staging
|
||||
GCI_IMAGE_FAMILY=gci-canary-test
|
||||
GCI_IMAGE=$(gcloud compute images describe-from-family ${GCI_IMAGE_FAMILY} --project=${GCI_IMAGE_PROJECT} --format="value(name)")
|
||||
DOCKER_VERSION=$(curl -fsSL --retry 3 https://api.github.com/repos/docker/docker/releases | tac | tac | grep -m 1 "\"tag_name\"\:" | grep -Eo "[0-9\.rc-]+")
|
||||
GCI_CLOUD_INIT=test/e2e_node/jenkins/gci-init.yaml
|
||||
|
||||
# Render the test config file
|
||||
GCE_IMAGE_CONFIG_PATH=`mktemp`
|
||||
CONFIG_FILE=test/e2e_node/jenkins/docker_validation/perf-config.yaml
|
||||
cp $CONFIG_FILE $GCE_IMAGE_CONFIG_PATH
|
||||
sed -i -e "s@{{IMAGE}}@${GCI_IMAGE}@g" $GCE_IMAGE_CONFIG_PATH
|
||||
sed -i -e "s@{{IMAGE_PROJECT}}@${GCI_IMAGE_PROJECT}@g" $GCE_IMAGE_CONFIG_PATH
|
||||
sed -i -e "s@{{METADATA}}@user-data<${GCI_CLOUD_INIT},gci-docker-version=${DOCKER_VERSION},gci-update-strategy=update_disabled@g" $GCE_IMAGE_CONFIG_PATH
|
||||
|
||||
GCE_HOSTS=
|
||||
GCE_ZONE=us-central1-f
|
||||
GCE_PROJECT=node-cos-docker-validation-ci
|
||||
CLEANUP=true
|
||||
GINKGO_FLAGS='--skip="\[Flaky\]"'
|
||||
PARALLELISM=1
|
@ -1,17 +0,0 @@
|
||||
GCI_IMAGE_PROJECT=container-vm-image-staging
|
||||
GCI_IMAGE_FAMILY=gci-canary-test
|
||||
GCI_IMAGE=$(gcloud compute images describe-from-family ${GCI_IMAGE_FAMILY} --project=${GCI_IMAGE_PROJECT} --format="value(name)")
|
||||
DOCKER_VERSION=$(curl -fsSL --retry 3 https://api.github.com/repos/docker/docker/releases | tac | tac | grep -m 1 "\"tag_name\"\:" | grep -Eo "[0-9\.rc-]+")
|
||||
GCI_CLOUD_INIT=test/e2e_node/jenkins/gci-init.yaml
|
||||
|
||||
GCE_HOSTS=
|
||||
GCE_IMAGES=${GCI_IMAGE}
|
||||
GCE_IMAGE_PROJECT=${GCI_IMAGE_PROJECT}
|
||||
GCE_ZONE=us-central1-f
|
||||
GCE_PROJECT=node-cos-docker-validation-ci
|
||||
# user-data is the GCI cloud init config file.
|
||||
# gci-docker-version specifies docker version in GCI image.
|
||||
GCE_INSTANCE_METADATA="user-data<${GCI_CLOUD_INIT},gci-docker-version=${DOCKER_VERSION},gci-update-strategy=update_disabled"
|
||||
CLEANUP=true
|
||||
GINKGO_FLAGS='--skip="\[Flaky\]|\[Serial\]"'
|
||||
TIMEOUT=1h
|
2
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/e2e-node-jenkins.sh
generated
vendored
2
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/e2e-node-jenkins.sh
generated
vendored
@ -1,4 +1,4 @@
|
||||
#!/bin/bash
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2016 The Kubernetes Authors.
|
||||
#
|
||||
|
13
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/jenkins-ci-ubuntu.properties
generated
vendored
13
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/jenkins-ci-ubuntu.properties
generated
vendored
@ -1,13 +0,0 @@
|
||||
GCE_HOSTS=
|
||||
GCE_IMAGE_CONFIG_PATH=
|
||||
GCE_IMAGES=ubuntu-gke-1604-xenial-v20170420-1
|
||||
GCE_IMAGE_PROJECT=ubuntu-os-gke-cloud
|
||||
GCE_ZONE=us-central1-f
|
||||
GCE_PROJECT=k8s-jkns-ubuntu-node
|
||||
CLEANUP=true
|
||||
GINKGO_FLAGS='--skip="\[Flaky\]|\[Serial\]"'
|
||||
TEST_ARGS='--generate-kubelet-config-file=true'
|
||||
KUBELET_ARGS=''
|
||||
TIMEOUT=1h
|
||||
# Use the system spec defined in test/e2e_node/system/specs/gke.yaml.
|
||||
SYSTEM_SPEC_NAME=gke
|
9
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/jenkins-ci.properties
generated
vendored
9
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/jenkins-ci.properties
generated
vendored
@ -1,9 +0,0 @@
|
||||
GCE_HOSTS=
|
||||
GCE_IMAGE_CONFIG_PATH=test/e2e_node/jenkins/image-config.yaml
|
||||
GCE_ZONE=us-central1-f
|
||||
GCE_PROJECT=k8s-jkns-ci-node-e2e
|
||||
CLEANUP=true
|
||||
GINKGO_FLAGS='--skip="\[Flaky\]|\[Serial\]"'
|
||||
TEST_ARGS='--generate-kubelet-config-file=true'
|
||||
KUBELET_ARGS=''
|
||||
TIMEOUT=1h
|
11
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/jenkins-flaky.properties
generated
vendored
11
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/jenkins-flaky.properties
generated
vendored
@ -1,11 +0,0 @@
|
||||
GCE_HOSTS=
|
||||
GCE_IMAGE_CONFIG_PATH=test/e2e_node/jenkins/image-config.yaml
|
||||
GCE_ZONE=us-central1-f
|
||||
GCE_PROJECT=k8s-jkns-ci-node-e2e
|
||||
CLEANUP=true
|
||||
GINKGO_FLAGS='--focus="\[Flaky\]"'
|
||||
TEST_ARGS='--feature-gates=DynamicKubeletConfig=true,LocalStorageCapacityIsolation=true,PodPriority=true --generate-kubelet-config-file=true'
|
||||
KUBELET_ARGS=''
|
||||
PARALLELISM=1
|
||||
TIMEOUT=3h
|
||||
|
9
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/jenkins-pull.properties
generated
vendored
9
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/jenkins-pull.properties
generated
vendored
@ -1,9 +0,0 @@
|
||||
GCE_HOSTS=
|
||||
GCE_IMAGE_CONFIG_PATH=test/e2e_node/jenkins/image-config.yaml
|
||||
GCE_ZONE=us-central1-f
|
||||
GCE_PROJECT=k8s-jkns-pr-node-e2e
|
||||
CLEANUP=true
|
||||
GINKGO_FLAGS='--skip="\[Flaky\]|\[Slow\]|\[Serial\]" --flakeAttempts=2'
|
||||
TEST_ARGS='--generate-kubelet-config-file=true'
|
||||
KUBELET_ARGS=''
|
||||
|
14
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/jenkins-serial-ubuntu.properties
generated
vendored
14
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/jenkins-serial-ubuntu.properties
generated
vendored
@ -1,14 +0,0 @@
|
||||
GCE_HOSTS=
|
||||
GCE_IMAGE_CONFIG_PATH=
|
||||
GCE_IMAGES=ubuntu-gke-1604-xenial-v20170420-1
|
||||
GCE_IMAGE_PROJECT=ubuntu-os-gke-cloud
|
||||
GCE_ZONE=us-central1-f
|
||||
GCE_PROJECT=k8s-jkns-ubuntu-node-serial
|
||||
CLEANUP=true
|
||||
GINKGO_FLAGS='--focus="\[Serial\]" --skip="\[Flaky\]|\[Benchmark\]"'
|
||||
TEST_ARGS='--feature-gates=DynamicKubeletConfig=true --generate-kubelet-config-file=true'
|
||||
KUBELET_ARGS=''
|
||||
PARALLELISM=1
|
||||
TIMEOUT=3h
|
||||
# Use the system spec defined at test/e2e_node/system/specs/gke.yaml.
|
||||
SYSTEM_SPEC_NAME=gke
|
10
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/jenkins-serial.properties
generated
vendored
10
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/jenkins-serial.properties
generated
vendored
@ -1,10 +0,0 @@
|
||||
GCE_HOSTS=
|
||||
GCE_IMAGE_CONFIG_PATH=test/e2e_node/jenkins/image-config-serial.yaml
|
||||
GCE_ZONE=us-west1-b
|
||||
GCE_PROJECT=k8s-jkns-ci-node-e2e
|
||||
CLEANUP=true
|
||||
GINKGO_FLAGS='--focus="\[Serial\]" --skip="\[Flaky\]|\[Benchmark\]"'
|
||||
TEST_ARGS='--feature-gates=DynamicKubeletConfig=true --generate-kubelet-config-file=true'
|
||||
KUBELET_ARGS=''
|
||||
PARALLELISM=1
|
||||
TIMEOUT=3h
|
25
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/template.properties
generated
vendored
25
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/template.properties
generated
vendored
@ -1,25 +0,0 @@
|
||||
# Copy this file to your home directory and modify
|
||||
# User used on the gce instances to run the test.
|
||||
GCE_USER=
|
||||
# Path to a yaml or json file describing images to run or empty
|
||||
GCE_IMAGE_CONFIG_PATH=
|
||||
# Names of gce hosts to test against (must be resolvable) or empty
|
||||
GCE_HOSTS=
|
||||
# Comma-separated names of gce images to test or empty (one or more of GCE_IMAGE_CONFIG_PATH, GCE_IMAGES, GCE_HOSTS is required)
|
||||
GCE_IMAGES=
|
||||
# Gce zone to use - required when using GCE_IMAGES
|
||||
GCE_ZONE=
|
||||
# Gce project to use for creating instances
|
||||
# required when using GCE_IMAGES or GCE_IMAGE_CONFIG_PATH
|
||||
GCE_PROJECT=
|
||||
# Gce project to use for GCE_IMAGES
|
||||
# required when using GCE_IMAGES
|
||||
GCE_IMAGE_PROJECT=
|
||||
# If true, delete instances created from GCE_IMAGES/GCE_IMAGE_CONFIG_PATH and files copied to GCE_HOSTS
|
||||
CLEANUP=true
|
||||
# KUBELET_ARGS are the arguments passed to kubelet. The args will override corresponding default kubelet
|
||||
# setting in the test framework and --kubelet-flags in TEST_ARGS.
|
||||
# If true QoS Cgroup Hierarchy is created and tests specifc to the cgroup hierarchy run
|
||||
KUBELET_ARGS='--cgroups-per-qos=true --cgroup-root=/'
|
||||
# TEST_ARGS are args passed to node e2e test.
|
||||
TEST_ARGS=''
|
2
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/ubuntu-14.04-nvidia-install.sh
generated
vendored
2
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/ubuntu-14.04-nvidia-install.sh
generated
vendored
@ -1,4 +1,4 @@
|
||||
#!/bin/bash
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
|
10
vendor/k8s.io/kubernetes/test/e2e_node/kubelet_test.go
generated
vendored
10
vendor/k8s.io/kubernetes/test/e2e_node/kubelet_test.go
generated
vendored
@ -39,7 +39,7 @@ var _ = framework.KubeDescribe("Kubelet", func() {
|
||||
})
|
||||
Context("when scheduling a busybox command in a pod", func() {
|
||||
podName := "busybox-scheduling-" + string(uuid.NewUUID())
|
||||
framework.ConformanceIt("it should print the output to logs", func() {
|
||||
framework.ConformanceIt("it should print the output to logs [NodeConformance]", func() {
|
||||
podClient.CreateSync(&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
@ -92,7 +92,7 @@ var _ = framework.KubeDescribe("Kubelet", func() {
|
||||
})
|
||||
})
|
||||
|
||||
It("should have an error terminated reason", func() {
|
||||
It("should have an error terminated reason [NodeConformance]", func() {
|
||||
Eventually(func() error {
|
||||
podData, err := podClient.Get(podName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
@ -112,7 +112,7 @@ var _ = framework.KubeDescribe("Kubelet", func() {
|
||||
}, time.Minute, time.Second*4).Should(BeNil())
|
||||
})
|
||||
|
||||
It("should be possible to delete", func() {
|
||||
It("should be possible to delete [NodeConformance]", func() {
|
||||
err := podClient.Delete(podName, &metav1.DeleteOptions{})
|
||||
Expect(err).To(BeNil(), fmt.Sprintf("Error deleting Pod %v", err))
|
||||
})
|
||||
@ -120,7 +120,7 @@ var _ = framework.KubeDescribe("Kubelet", func() {
|
||||
Context("when scheduling a busybox Pod with hostAliases", func() {
|
||||
podName := "busybox-host-aliases" + string(uuid.NewUUID())
|
||||
|
||||
It("it should write entries to /etc/hosts", func() {
|
||||
It("it should write entries to /etc/hosts [NodeConformance]", func() {
|
||||
podClient.CreateSync(&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
@ -164,7 +164,7 @@ var _ = framework.KubeDescribe("Kubelet", func() {
|
||||
})
|
||||
Context("when scheduling a read only busybox container", func() {
|
||||
podName := "busybox-readonly-fs" + string(uuid.NewUUID())
|
||||
framework.ConformanceIt("it should not write to root filesystem", func() {
|
||||
framework.ConformanceIt("it should not write to root filesystem [NodeConformance]", func() {
|
||||
isReadOnly := true
|
||||
podClient.CreateSync(&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
|
12
vendor/k8s.io/kubernetes/test/e2e_node/lifecycle_hook_test.go
generated
vendored
12
vendor/k8s.io/kubernetes/test/e2e_node/lifecycle_hook_test.go
generated
vendored
@ -84,7 +84,7 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
|
||||
}, preStopWaitTimeout, podCheckInterval).Should(BeNil())
|
||||
}
|
||||
}
|
||||
framework.ConformanceIt("should execute poststart exec hook properly", func() {
|
||||
framework.ConformanceIt("should execute poststart exec hook properly [NodeConformance]", func() {
|
||||
lifecycle := &v1.Lifecycle{
|
||||
PostStart: &v1.Handler{
|
||||
Exec: &v1.ExecAction{
|
||||
@ -95,7 +95,7 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
|
||||
podWithHook := getPodWithHook("pod-with-poststart-exec-hook", imageutils.GetE2EImage(imageutils.Hostexec), lifecycle)
|
||||
testPodWithHook(podWithHook)
|
||||
})
|
||||
framework.ConformanceIt("should execute prestop exec hook properly", func() {
|
||||
framework.ConformanceIt("should execute prestop exec hook properly [NodeConformance]", func() {
|
||||
lifecycle := &v1.Lifecycle{
|
||||
PreStop: &v1.Handler{
|
||||
Exec: &v1.ExecAction{
|
||||
@ -106,7 +106,7 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
|
||||
podWithHook := getPodWithHook("pod-with-prestop-exec-hook", imageutils.GetE2EImage(imageutils.Hostexec), lifecycle)
|
||||
testPodWithHook(podWithHook)
|
||||
})
|
||||
framework.ConformanceIt("should execute poststart http hook properly", func() {
|
||||
framework.ConformanceIt("should execute poststart http hook properly [NodeConformance]", func() {
|
||||
lifecycle := &v1.Lifecycle{
|
||||
PostStart: &v1.Handler{
|
||||
HTTPGet: &v1.HTTPGetAction{
|
||||
@ -116,10 +116,10 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
|
||||
},
|
||||
},
|
||||
}
|
||||
podWithHook := getPodWithHook("pod-with-poststart-http-hook", imageutils.GetPauseImageNameForHostArch(), lifecycle)
|
||||
podWithHook := getPodWithHook("pod-with-poststart-http-hook", imageutils.GetPauseImageName(), lifecycle)
|
||||
testPodWithHook(podWithHook)
|
||||
})
|
||||
framework.ConformanceIt("should execute prestop http hook properly", func() {
|
||||
framework.ConformanceIt("should execute prestop http hook properly [NodeConformance]", func() {
|
||||
lifecycle := &v1.Lifecycle{
|
||||
PreStop: &v1.Handler{
|
||||
HTTPGet: &v1.HTTPGetAction{
|
||||
@ -129,7 +129,7 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
|
||||
},
|
||||
},
|
||||
}
|
||||
podWithHook := getPodWithHook("pod-with-prestop-http-hook", imageutils.GetPauseImageNameForHostArch(), lifecycle)
|
||||
podWithHook := getPodWithHook("pod-with-prestop-http-hook", imageutils.GetPauseImageName(), lifecycle)
|
||||
testPodWithHook(podWithHook)
|
||||
})
|
||||
})
|
||||
|
2
vendor/k8s.io/kubernetes/test/e2e_node/log_path_test.go
generated
vendored
2
vendor/k8s.io/kubernetes/test/e2e_node/log_path_test.go
generated
vendored
@ -35,7 +35,7 @@ const (
|
||||
checkContName = "checker-container"
|
||||
)
|
||||
|
||||
var _ = framework.KubeDescribe("ContainerLogPath", func() {
|
||||
var _ = framework.KubeDescribe("ContainerLogPath [NodeConformance]", func() {
|
||||
f := framework.NewDefaultFramework("kubelet-container-log-path")
|
||||
Describe("Pod with a container", func() {
|
||||
Context("printed log to stdout", func() {
|
||||
|
8
vendor/k8s.io/kubernetes/test/e2e_node/mirror_pod_test.go
generated
vendored
8
vendor/k8s.io/kubernetes/test/e2e_node/mirror_pod_test.go
generated
vendored
@ -57,14 +57,14 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
|
||||
return checkMirrorPodRunning(f.ClientSet, mirrorPodName, ns)
|
||||
}, 2*time.Minute, time.Second*4).Should(BeNil())
|
||||
})
|
||||
framework.ConformanceIt("should be updated when static pod updated", func() {
|
||||
framework.ConformanceIt("should be updated when static pod updated [NodeConformance]", func() {
|
||||
By("get mirror pod uid")
|
||||
pod, err := f.ClientSet.CoreV1().Pods(ns).Get(mirrorPodName, metav1.GetOptions{})
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
uid := pod.UID
|
||||
|
||||
By("update the static pod container image")
|
||||
image := imageutils.GetPauseImageNameForHostArch()
|
||||
image := imageutils.GetPauseImageName()
|
||||
err = createStaticPod(podPath, staticPodName, ns, image, v1.RestartPolicyAlways)
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
|
||||
@ -79,7 +79,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
|
||||
Expect(len(pod.Spec.Containers)).Should(Equal(1))
|
||||
Expect(pod.Spec.Containers[0].Image).Should(Equal(image))
|
||||
})
|
||||
framework.ConformanceIt("should be recreated when mirror pod gracefully deleted", func() {
|
||||
framework.ConformanceIt("should be recreated when mirror pod gracefully deleted [NodeConformance]", func() {
|
||||
By("get mirror pod uid")
|
||||
pod, err := f.ClientSet.CoreV1().Pods(ns).Get(mirrorPodName, metav1.GetOptions{})
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
@ -94,7 +94,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
|
||||
return checkMirrorPodRecreatedAndRunnig(f.ClientSet, mirrorPodName, ns, uid)
|
||||
}, 2*time.Minute, time.Second*4).Should(BeNil())
|
||||
})
|
||||
framework.ConformanceIt("should be recreated when mirror pod forcibly deleted", func() {
|
||||
framework.ConformanceIt("should be recreated when mirror pod forcibly deleted [NodeConformance]", func() {
|
||||
By("get mirror pod uid")
|
||||
pod, err := f.ClientSet.CoreV1().Pods(ns).Get(mirrorPodName, metav1.GetOptions{})
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
|
36
vendor/k8s.io/kubernetes/test/e2e_node/node_container_manager_test.go
generated
vendored
36
vendor/k8s.io/kubernetes/test/e2e_node/node_container_manager_test.go
generated
vendored
@ -21,7 +21,6 @@ package e2e_node
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
@ -39,7 +38,7 @@ import (
|
||||
)
|
||||
|
||||
func setDesiredConfiguration(initialConfig *kubeletconfig.KubeletConfiguration) {
|
||||
initialConfig.EnforceNodeAllocatable = []string{"pods", "kube-reserved", "system-reserved"}
|
||||
initialConfig.EnforceNodeAllocatable = []string{"pods", kubeReservedCgroup, systemReservedCgroup}
|
||||
initialConfig.SystemReserved = map[string]string{
|
||||
string(v1.ResourceCPU): "100m",
|
||||
string(v1.ResourceMemory): "100Mi",
|
||||
@ -57,7 +56,7 @@ func setDesiredConfiguration(initialConfig *kubeletconfig.KubeletConfiguration)
|
||||
|
||||
var _ = framework.KubeDescribe("Node Container Manager [Serial]", func() {
|
||||
f := framework.NewDefaultFramework("node-container-manager")
|
||||
Describe("Validate Node Allocatable", func() {
|
||||
Describe("Validate Node Allocatable [NodeFeature:NodeAllocatable]", func() {
|
||||
It("set's up the node and runs the test", func() {
|
||||
framework.ExpectNoError(runTest(f))
|
||||
})
|
||||
@ -99,8 +98,8 @@ func getAllocatableLimits(cpu, memory string, capacity v1.ResourceList) (*resour
|
||||
}
|
||||
|
||||
const (
|
||||
kubeReservedCgroup = "/kube_reserved"
|
||||
systemReservedCgroup = "/system_reserved"
|
||||
kubeReservedCgroup = "kube-reserved"
|
||||
systemReservedCgroup = "system-reserved"
|
||||
)
|
||||
|
||||
func createIfNotExists(cm cm.CgroupManager, cgroupConfig *cm.CgroupConfig) error {
|
||||
@ -115,13 +114,13 @@ func createIfNotExists(cm cm.CgroupManager, cgroupConfig *cm.CgroupConfig) error
func createTemporaryCgroupsForReservation(cgroupManager cm.CgroupManager) error {
// Create kube reserved cgroup
cgroupConfig := &cm.CgroupConfig{
Name: cm.CgroupName(kubeReservedCgroup),
Name: cm.NewCgroupName(cm.RootCgroupName, kubeReservedCgroup),
}
if err := createIfNotExists(cgroupManager, cgroupConfig); err != nil {
return err
}
// Create system reserved cgroup
cgroupConfig.Name = cm.CgroupName(systemReservedCgroup)
cgroupConfig.Name = cm.NewCgroupName(cm.RootCgroupName, systemReservedCgroup)

return createIfNotExists(cgroupManager, cgroupConfig)
}
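A typical call site for the helper above (the exact wiring inside runTest appears further down in this file and is not repeated here) creates the reserved cgroups before the test body and tears them down afterwards; the wrapper name withReservationCgroups is illustrative:

// withReservationCgroups is a sketch of how the reservation cgroups are
// created for the duration of a test and destroyed again afterwards.
func withReservationCgroups(cgroupManager cm.CgroupManager, run func() error) error {
	if err := createTemporaryCgroupsForReservation(cgroupManager); err != nil {
		return err
	}
	defer func() {
		// Best-effort cleanup; mirrors destroyTemporaryCgroupsForReservation below.
		_ = destroyTemporaryCgroupsForReservation(cgroupManager)
	}()
	return run()
}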
@ -129,12 +128,12 @@ func createTemporaryCgroupsForReservation(cgroupManager cm.CgroupManager) error
|
||||
func destroyTemporaryCgroupsForReservation(cgroupManager cm.CgroupManager) error {
|
||||
// Create kube reserved cgroup
|
||||
cgroupConfig := &cm.CgroupConfig{
|
||||
Name: cm.CgroupName(kubeReservedCgroup),
|
||||
Name: cm.NewCgroupName(cm.RootCgroupName, kubeReservedCgroup),
|
||||
}
|
||||
if err := cgroupManager.Destroy(cgroupConfig); err != nil {
|
||||
return err
|
||||
}
|
||||
cgroupConfig.Name = cm.CgroupName(systemReservedCgroup)
|
||||
cgroupConfig.Name = cm.NewCgroupName(cm.RootCgroupName, systemReservedCgroup)
|
||||
return cgroupManager.Destroy(cgroupConfig)
|
||||
}
|
||||
|
||||
@ -173,8 +172,9 @@ func runTest(f *framework.Framework) error {
|
||||
// Set new config and current config.
|
||||
currentConfig := newCfg
|
||||
|
||||
expectedNAPodCgroup := path.Join(currentConfig.CgroupRoot, "kubepods")
|
||||
if !cgroupManager.Exists(cm.CgroupName(expectedNAPodCgroup)) {
|
||||
expectedNAPodCgroup := cm.ParseCgroupfsToCgroupName(currentConfig.CgroupRoot)
|
||||
expectedNAPodCgroup = cm.NewCgroupName(expectedNAPodCgroup, "kubepods")
|
||||
if !cgroupManager.Exists(expectedNAPodCgroup) {
|
||||
return fmt.Errorf("Expected Node Allocatable Cgroup Does not exist")
|
||||
}
|
||||
// TODO: Update cgroupManager to expose a Status interface to get current Cgroup Settings.
|
||||
@ -218,30 +218,32 @@ func runTest(f *framework.Framework) error {
|
||||
return nil
|
||||
}, time.Minute, 5*time.Second).Should(BeNil())
|
||||
|
||||
if !cgroupManager.Exists(cm.CgroupName(kubeReservedCgroup)) {
|
||||
kubeReservedCgroupName := cm.NewCgroupName(cm.RootCgroupName, kubeReservedCgroup)
|
||||
if !cgroupManager.Exists(kubeReservedCgroupName) {
|
||||
return fmt.Errorf("Expected kube reserved cgroup Does not exist")
|
||||
}
|
||||
// Expect CPU shares on kube reserved cgroup to equal it's reservation which is `100m`.
|
||||
kubeReservedCPU := resource.MustParse(currentConfig.KubeReserved[string(v1.ResourceCPU)])
|
||||
if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["cpu"], kubeReservedCgroup, "cpu.shares"), int64(cm.MilliCPUToShares(kubeReservedCPU.MilliValue())), 10); err != nil {
|
||||
if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["cpu"], cgroupManager.Name(kubeReservedCgroupName), "cpu.shares"), int64(cm.MilliCPUToShares(kubeReservedCPU.MilliValue())), 10); err != nil {
|
||||
return err
|
||||
}
|
||||
// Expect Memory limit kube reserved cgroup to equal configured value `100Mi`.
|
||||
kubeReservedMemory := resource.MustParse(currentConfig.KubeReserved[string(v1.ResourceMemory)])
|
||||
if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["memory"], kubeReservedCgroup, "memory.limit_in_bytes"), kubeReservedMemory.Value(), 0); err != nil {
|
||||
if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["memory"], cgroupManager.Name(kubeReservedCgroupName), "memory.limit_in_bytes"), kubeReservedMemory.Value(), 0); err != nil {
|
||||
return err
|
||||
}
|
||||
if !cgroupManager.Exists(cm.CgroupName(systemReservedCgroup)) {
|
||||
systemReservedCgroupName := cm.NewCgroupName(cm.RootCgroupName, systemReservedCgroup)
|
||||
if !cgroupManager.Exists(systemReservedCgroupName) {
|
||||
return fmt.Errorf("Expected system reserved cgroup Does not exist")
|
||||
}
|
||||
// Expect CPU shares on system reserved cgroup to equal it's reservation which is `100m`.
|
||||
systemReservedCPU := resource.MustParse(currentConfig.SystemReserved[string(v1.ResourceCPU)])
|
||||
if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["cpu"], systemReservedCgroup, "cpu.shares"), int64(cm.MilliCPUToShares(systemReservedCPU.MilliValue())), 10); err != nil {
|
||||
if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["cpu"], cgroupManager.Name(systemReservedCgroupName), "cpu.shares"), int64(cm.MilliCPUToShares(systemReservedCPU.MilliValue())), 10); err != nil {
|
||||
return err
|
||||
}
|
||||
// Expect Memory limit on node allocatable cgroup to equal allocatable.
|
||||
systemReservedMemory := resource.MustParse(currentConfig.SystemReserved[string(v1.ResourceMemory)])
|
||||
if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["memory"], systemReservedCgroup, "memory.limit_in_bytes"), systemReservedMemory.Value(), 0); err != nil {
|
||||
if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["memory"], cgroupManager.Name(systemReservedCgroupName), "memory.limit_in_bytes"), systemReservedMemory.Value(), 0); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
2
vendor/k8s.io/kubernetes/test/e2e_node/node_problem_detector_linux.go
generated
vendored
2
vendor/k8s.io/kubernetes/test/e2e_node/node_problem_detector_linux.go
generated
vendored
@ -40,7 +40,7 @@ import (
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = framework.KubeDescribe("NodeProblemDetector", func() {
|
||||
var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDetector]", func() {
|
||||
const (
|
||||
pollInterval = 1 * time.Second
|
||||
pollConsistent = 5 * time.Second
|
||||
|
52
vendor/k8s.io/kubernetes/test/e2e_node/pods_container_manager_test.go
generated
vendored
52
vendor/k8s.io/kubernetes/test/e2e_node/pods_container_manager_test.go
generated
vendored
@ -17,7 +17,7 @@ limitations under the License.
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
@ -25,6 +25,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
"github.com/golang/glog"
|
||||
. "github.com/onsi/ginkgo"
|
||||
@ -53,8 +54,6 @@ func getResourceRequirements(requests, limits v1.ResourceList) v1.ResourceRequir
|
||||
}
|
||||
|
||||
const (
|
||||
// Kubelet internal cgroup name for node allocatable cgroup.
|
||||
defaultNodeAllocatableCgroup = "kubepods"
|
||||
// Kubelet internal cgroup name for burstable tier
|
||||
burstableCgroup = "burstable"
|
||||
// Kubelet internal cgroup name for besteffort tier
|
||||
@ -62,17 +61,15 @@ const (
|
||||
)
|
||||
|
||||
// makePodToVerifyCgroups returns a pod that verifies the existence of the specified cgroups.
|
||||
func makePodToVerifyCgroups(cgroupNames []cm.CgroupName) *v1.Pod {
|
||||
func makePodToVerifyCgroups(cgroupNames []string) *v1.Pod {
|
||||
// convert the names to their literal cgroupfs forms...
|
||||
cgroupFsNames := []string{}
|
||||
for _, cgroupName := range cgroupNames {
|
||||
rootCgroupName := cm.NewCgroupName(cm.RootCgroupName, defaultNodeAllocatableCgroup)
|
||||
for _, baseName := range cgroupNames {
|
||||
// Add top level cgroup used to enforce node allocatable.
|
||||
cgroupName = cm.CgroupName(path.Join(defaultNodeAllocatableCgroup, string(cgroupName)))
|
||||
if framework.TestContext.KubeletConfig.CgroupDriver == "systemd" {
|
||||
cgroupFsNames = append(cgroupFsNames, cm.ConvertCgroupNameToSystemd(cgroupName, true))
|
||||
} else {
|
||||
cgroupFsNames = append(cgroupFsNames, string(cgroupName))
|
||||
}
|
||||
cgroupComponents := strings.Split(baseName, "/")
|
||||
cgroupName := cm.NewCgroupName(rootCgroupName, cgroupComponents...)
|
||||
cgroupFsNames = append(cgroupFsNames, toCgroupFsName(cgroupName))
|
||||
}
|
||||
glog.Infof("expecting %v cgroups to be found", cgroupFsNames)
|
||||
// build the pod command to either verify cgroups exist
|
||||
@ -115,11 +112,10 @@ func makePodToVerifyCgroups(cgroupNames []cm.CgroupName) *v1.Pod {
|
||||
}
|
||||
|
||||
// makePodToVerifyCgroupRemoved verifies the specified cgroup does not exist.
|
||||
func makePodToVerifyCgroupRemoved(cgroupName cm.CgroupName) *v1.Pod {
|
||||
cgroupFsName := string(cgroupName)
|
||||
if framework.TestContext.KubeletConfig.CgroupDriver == "systemd" {
|
||||
cgroupFsName = cm.ConvertCgroupNameToSystemd(cm.CgroupName(cgroupName), true)
|
||||
}
|
||||
func makePodToVerifyCgroupRemoved(baseName string) *v1.Pod {
|
||||
components := strings.Split(baseName, "/")
|
||||
cgroupName := cm.NewCgroupName(cm.RootCgroupName, components...)
|
||||
cgroupFsName := toCgroupFsName(cgroupName)
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod" + string(uuid.NewUUID()),
|
||||
@ -156,11 +152,11 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
|
||||
f := framework.NewDefaultFramework("kubelet-cgroup-manager")
|
||||
Describe("QOS containers", func() {
|
||||
Context("On enabling QOS cgroup hierarchy", func() {
|
||||
It("Top level QoS containers should have been created", func() {
|
||||
It("Top level QoS containers should have been created [NodeConformance]", func() {
|
||||
if !framework.TestContext.KubeletConfig.CgroupsPerQOS {
|
||||
return
|
||||
}
|
||||
cgroupsToVerify := []cm.CgroupName{cm.CgroupName(burstableCgroup), cm.CgroupName(bestEffortCgroup)}
|
||||
cgroupsToVerify := []string{burstableCgroup, bestEffortCgroup}
|
||||
pod := makePodToVerifyCgroups(cgroupsToVerify)
|
||||
f.PodClient().Create(pod)
|
||||
err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
|
||||
@ -169,7 +165,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
|
||||
})
|
||||
})
|
||||
|
||||
Describe("Pod containers", func() {
|
||||
Describe("Pod containers [NodeConformance]", func() {
|
||||
Context("On scheduling a Guaranteed Pod", func() {
|
||||
It("Pod containers should have been created under the cgroup-root", func() {
|
||||
if !framework.TestContext.KubeletConfig.CgroupsPerQOS {
|
||||
@ -188,7 +184,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: framework.GetPauseImageName(f.ClientSet),
|
||||
Image: imageutils.GetPauseImageName(),
|
||||
Name: "container" + string(uuid.NewUUID()),
|
||||
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
|
||||
},
|
||||
@ -198,7 +194,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
|
||||
podUID = string(guaranteedPod.UID)
|
||||
})
|
||||
By("Checking if the pod cgroup was created", func() {
|
||||
cgroupsToVerify := []cm.CgroupName{cm.CgroupName("pod" + podUID)}
|
||||
cgroupsToVerify := []string{"pod" + podUID}
|
||||
pod := makePodToVerifyCgroups(cgroupsToVerify)
|
||||
f.PodClient().Create(pod)
|
||||
err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
|
||||
@ -207,7 +203,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
|
||||
By("Checking if the pod cgroup was deleted", func() {
|
||||
gp := int64(1)
|
||||
Expect(f.PodClient().Delete(guaranteedPod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})).NotTo(HaveOccurred())
|
||||
pod := makePodToVerifyCgroupRemoved(cm.CgroupName("pod" + podUID))
|
||||
pod := makePodToVerifyCgroupRemoved("pod" + podUID)
|
||||
f.PodClient().Create(pod)
|
||||
err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
@ -232,7 +228,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: framework.GetPauseImageName(f.ClientSet),
|
||||
Image: imageutils.GetPauseImageName(),
|
||||
Name: "container" + string(uuid.NewUUID()),
|
||||
Resources: getResourceRequirements(getResourceList("", ""), getResourceList("", "")),
|
||||
},
|
||||
@ -242,7 +238,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
|
||||
podUID = string(bestEffortPod.UID)
|
||||
})
|
||||
By("Checking if the pod cgroup was created", func() {
|
||||
cgroupsToVerify := []cm.CgroupName{cm.CgroupName("besteffort/pod" + podUID)}
|
||||
cgroupsToVerify := []string{"besteffort/pod" + podUID}
|
||||
pod := makePodToVerifyCgroups(cgroupsToVerify)
|
||||
f.PodClient().Create(pod)
|
||||
err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
|
||||
@ -251,7 +247,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
|
||||
By("Checking if the pod cgroup was deleted", func() {
|
||||
gp := int64(1)
|
||||
Expect(f.PodClient().Delete(bestEffortPod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})).NotTo(HaveOccurred())
|
||||
pod := makePodToVerifyCgroupRemoved(cm.CgroupName("besteffort/pod" + podUID))
|
||||
pod := makePodToVerifyCgroupRemoved("besteffort/pod" + podUID)
|
||||
f.PodClient().Create(pod)
|
||||
err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
@ -276,7 +272,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: framework.GetPauseImageName(f.ClientSet),
|
||||
Image: imageutils.GetPauseImageName(),
|
||||
Name: "container" + string(uuid.NewUUID()),
|
||||
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
|
||||
},
|
||||
@ -286,7 +282,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
|
||||
podUID = string(burstablePod.UID)
|
||||
})
|
||||
By("Checking if the pod cgroup was created", func() {
|
||||
cgroupsToVerify := []cm.CgroupName{cm.CgroupName("burstable/pod" + podUID)}
|
||||
cgroupsToVerify := []string{"burstable/pod" + podUID}
|
||||
pod := makePodToVerifyCgroups(cgroupsToVerify)
|
||||
f.PodClient().Create(pod)
|
||||
err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
|
||||
@ -295,7 +291,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
|
||||
By("Checking if the pod cgroup was deleted", func() {
|
||||
gp := int64(1)
|
||||
Expect(f.PodClient().Delete(burstablePod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})).NotTo(HaveOccurred())
|
||||
pod := makePodToVerifyCgroupRemoved(cm.CgroupName("burstable/pod" + podUID))
|
||||
pod := makePodToVerifyCgroupRemoved("burstable/pod" + podUID)
|
||||
f.PodClient().Create(pod)
|
||||
err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
1
vendor/k8s.io/kubernetes/test/e2e_node/remote/BUILD
generated
vendored
1
vendor/k8s.io/kubernetes/test/e2e_node/remote/BUILD
generated
vendored
@ -19,6 +19,7 @@ go_library(
|
||||
importpath = "k8s.io/kubernetes/test/e2e_node/remote",
|
||||
deps = [
|
||||
"//test/e2e_node/builder:go_default_library",
|
||||
"//test/utils:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
|
||||
],
|
||||
|
5
vendor/k8s.io/kubernetes/test/e2e_node/remote/cadvisor_e2e.go
generated
vendored
5
vendor/k8s.io/kubernetes/test/e2e_node/remote/cadvisor_e2e.go
generated
vendored
@ -23,8 +23,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/kubernetes/test/e2e_node/builder"
|
||||
"k8s.io/kubernetes/test/utils"
|
||||
)
|
||||
|
||||
// CAdvisorE2ERemote contains the specific functions in the cadvisor e2e test suite.
|
||||
@ -37,7 +36,7 @@ func InitCAdvisorE2ERemote() TestSuite {
|
||||
|
||||
// SetupTestPackage implements TestSuite.SetupTestPackage
|
||||
func (n *CAdvisorE2ERemote) SetupTestPackage(tardir, systemSpecName string) error {
|
||||
cadvisorRootDir, err := builder.GetCAdvisorRootDir()
|
||||
cadvisorRootDir, err := utils.GetCAdvisorRootDir()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
5
vendor/k8s.io/kubernetes/test/e2e_node/remote/node_conformance.go
generated
vendored
5
vendor/k8s.io/kubernetes/test/e2e_node/remote/node_conformance.go
generated
vendored
@ -28,6 +28,7 @@ import (
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/kubernetes/test/e2e_node/builder"
|
||||
"k8s.io/kubernetes/test/utils"
|
||||
)
|
||||
|
||||
// ConformanceRemote contains the specific functions in the node conformance test suite.
|
||||
@ -39,7 +40,7 @@ func InitConformanceRemote() TestSuite {
|
||||
|
||||
// getConformanceDirectory gets node conformance test build directory.
|
||||
func getConformanceDirectory() (string, error) {
|
||||
k8sRoot, err := builder.GetK8sRootDir()
|
||||
k8sRoot, err := utils.GetK8sRootDir()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@ -106,7 +107,7 @@ func (c *ConformanceRemote) SetupTestPackage(tardir, systemSpecName string) erro
|
||||
}
|
||||
|
||||
// Make sure we can find the newly built binaries
|
||||
buildOutputDir, err := builder.GetK8sBuildOutputDir()
|
||||
buildOutputDir, err := utils.GetK8sBuildOutputDir()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to locate kubernetes build output directory %v", err)
|
||||
}
|
||||
|
5
vendor/k8s.io/kubernetes/test/e2e_node/remote/node_e2e.go
generated
vendored
5
vendor/k8s.io/kubernetes/test/e2e_node/remote/node_e2e.go
generated
vendored
@ -27,6 +27,7 @@ import (
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/kubernetes/test/e2e_node/builder"
|
||||
"k8s.io/kubernetes/test/utils"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -49,12 +50,12 @@ func (n *NodeE2ERemote) SetupTestPackage(tardir, systemSpecName string) error {
|
||||
}
|
||||
|
||||
// Make sure we can find the newly built binaries
|
||||
buildOutputDir, err := builder.GetK8sBuildOutputDir()
|
||||
buildOutputDir, err := utils.GetK8sBuildOutputDir()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to locate kubernetes build output directory: %v", err)
|
||||
}
|
||||
|
||||
rootDir, err := builder.GetK8sRootDir()
|
||||
rootDir, err := utils.GetK8sRootDir()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to locate kubernetes root directory: %v", err)
|
||||
}
|
||||
|
30
vendor/k8s.io/kubernetes/test/e2e_node/remote/remote.go
generated
vendored
@ -23,6 +23,8 @@ import (
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"time"

"github.com/golang/glog"
@ -66,7 +68,7 @@ func CreateTestArchive(suite TestSuite, systemSpecName string) (string, error) {
func RunRemote(suite TestSuite, archive string, host string, cleanup bool, imageDesc, junitFilePrefix string, testArgs string, ginkgoArgs string, systemSpecName string) (string, bool, error) {
// Create the temp staging directory
glog.V(2).Infof("Staging test binaries on %q", host)
workspace := fmt.Sprintf("/tmp/node-e2e-%s", getTimestamp())
workspace := newWorkspaceDir()
// Do not sudo here, so that we can use scp to copy test archive to the directory.
if output, err := SSHNoSudo(host, "mkdir", workspace); err != nil {
// Exit failure with the error
@ -126,13 +128,35 @@ func RunRemote(suite TestSuite, archive string, host string, cleanup bool, image
return output, len(aggErrs) == 0, utilerrors.NewAggregate(aggErrs)
}

// timestampFormat is the timestamp format used in the node e2e directory name.
const timestampFormat = "20060102T150405"
const (
// workspaceDirPrefix is the string prefix used in the workspace directory name.
workspaceDirPrefix = "node-e2e-"
// timestampFormat is the timestamp format used in the node e2e directory name.
timestampFormat = "20060102T150405"
)

func getTimestamp() string {
return fmt.Sprintf(time.Now().Format(timestampFormat))
}

func newWorkspaceDir() string {
return filepath.Join("/tmp", workspaceDirPrefix+getTimestamp())
}

// Parses the workspace directory name and gets the timestamp part of it.
// This can later be used to name other artifacts (such as the
// kubelet-${instance}.service systemd transient service used to launch
// Kubelet) so that they can be matched to each other.
func GetTimestampFromWorkspaceDir(dir string) string {
dirTimestamp := strings.TrimPrefix(filepath.Base(dir), workspaceDirPrefix)
re := regexp.MustCompile("^\\d{8}T\\d{6}$")
if re.MatchString(dirTimestamp) {
return dirTimestamp
}
// Fallback: if we can't find that timestamp, default to using Now()
return getTimestamp()
}

func getTestArtifacts(host, testDir string) error {
logPath := filepath.Join(*resultsDir, host)
if err := os.MkdirAll(logPath, 0755); err != nil {
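Illustrative sketch (not part of the vendored change): how the workspace-naming helpers added in the hunk above fit together. The package main wrapper and printed values are assumptions for demonstration; the helper bodies mirror the diff.

package main

import (
	"fmt"
	"path/filepath"
	"regexp"
	"strings"
	"time"
)

const (
	workspaceDirPrefix = "node-e2e-"       // same prefix as the vendored code
	timestampFormat    = "20060102T150405" // Go reference-time layout
)

// getTimestamp returns the current time in the workspace timestamp format.
func getTimestamp() string { return time.Now().Format(timestampFormat) }

// newWorkspaceDir builds a directory name such as /tmp/node-e2e-20060102T150405.
func newWorkspaceDir() string { return filepath.Join("/tmp", workspaceDirPrefix+getTimestamp()) }

// getTimestampFromWorkspaceDir recovers the timestamp portion, falling back to "now".
func getTimestampFromWorkspaceDir(dir string) string {
	dirTimestamp := strings.TrimPrefix(filepath.Base(dir), workspaceDirPrefix)
	if regexp.MustCompile(`^\d{8}T\d{6}$`).MatchString(dirTimestamp) {
		return dirTimestamp
	}
	return getTimestamp()
}

func main() {
	ws := newWorkspaceDir()
	// The recovered timestamp can then name matching artifacts, e.g. kubelet-<timestamp>.service.
	fmt.Println(ws, "->", getTimestampFromWorkspaceDir(ws))
}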
2
vendor/k8s.io/kubernetes/test/e2e_node/remote/utils.go
generated
vendored
@ -56,7 +56,7 @@ func setupCNI(host, workspace string) error {
cniPath := filepath.Join(workspace, cniDirectory)
cmd := getSSHCommand(" ; ",
fmt.Sprintf("mkdir -p %s", cniPath),
fmt.Sprintf("wget -O - %s | tar -xz -C %s", cniURL, cniPath),
fmt.Sprintf("curl -s -L %s | tar -xz -C %s", cniURL, cniPath),
)
if output, err := SSH(host, "sh", "-c", cmd); err != nil {
return fmt.Errorf("failed to install cni plugin on %q: %v output: %q", host, err, output)
4
vendor/k8s.io/kubernetes/test/e2e_node/resource_collector.go
generated
vendored
@ -220,9 +220,7 @@ func (r *ResourceCollector) GetBasicCPUStats(containerName string) map[float64]f

// We must make a copy of array, otherwise the timeseries order is changed.
usages := make([]*framework.ContainerResourceUsage, 0)
for _, usage := range r.buffers[containerName] {
usages = append(usages, usage)
}
usages = append(usages, r.buffers[containerName]...)

sort.Sort(resourceUsageByCPU(usages))
for _, q := range percentiles {
2
vendor/k8s.io/kubernetes/test/e2e_node/resource_usage_test.go
generated
vendored
@ -143,7 +143,7 @@ func runResourceUsageTest(f *framework.Framework, rc *ResourceCollector, testArg
// sleep for an interval here to measure steady data
sleepAfterCreatePods = 10 * time.Second
)
pods := newTestPods(testArg.podsNr, true, imageutils.GetPauseImageNameForHostArch(), "test_pod")
pods := newTestPods(testArg.podsNr, true, imageutils.GetPauseImageName(), "test_pod")

rc.Start()
// Explicitly delete pods to prevent namespace controller cleanup from timing out
4
vendor/k8s.io/kubernetes/test/e2e_node/restart_test.go
generated
vendored
@ -59,7 +59,7 @@ func waitForPods(f *framework.Framework, pod_count int, timeout time.Duration) (
return runningPods
}

var _ = framework.KubeDescribe("Restart [Serial] [Slow] [Disruptive]", func() {
var _ = framework.KubeDescribe("Restart [Serial] [Slow] [Disruptive] [NodeFeature:ContainerRuntimeRestart]", func() {
const (
// Saturate the node. It's not necessary that all these pods enter
// Running/Ready, because we don't know the number of cores in the
@ -81,7 +81,7 @@ var _ = framework.KubeDescribe("Restart [Serial] [Slow] [Disruptive]", func() {
Context("Network", func() {
It("should recover from ip leak", func() {

pods := newTestPods(podCount, false, imageutils.GetPauseImageNameForHostArch(), "restart-container-runtime-test")
pods := newTestPods(podCount, false, imageutils.GetPauseImageName(), "restart-container-runtime-test")
By(fmt.Sprintf("Trying to create %d pods on node", len(pods)))
createBatchPodWithRateControl(f, pods, podCreationInterval)
defer deletePodsSync(f, pods)
1
vendor/k8s.io/kubernetes/test/e2e_node/runner/local/BUILD
generated
vendored
@ -17,6 +17,7 @@ go_library(
importpath = "k8s.io/kubernetes/test/e2e_node/runner/local",
deps = [
"//test/e2e_node/builder:go_default_library",
"//test/utils:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
],
)
5
vendor/k8s.io/kubernetes/test/e2e_node/runner/local/run_local.go
generated
vendored
@ -25,6 +25,7 @@ import (
"strings"

"k8s.io/kubernetes/test/e2e_node/builder"
"k8s.io/kubernetes/test/utils"

"github.com/golang/glog"
)
@ -49,7 +50,7 @@ func main() {
}

// Run node e2e test
outputDir, err := builder.GetK8sBuildOutputDir()
outputDir, err := utils.GetK8sBuildOutputDir()
if err != nil {
glog.Fatalf("Failed to get build output directory: %v", err)
}
@ -59,7 +60,7 @@ func main() {

args := []string{*ginkgoFlags, test, "--", *testFlags}
if *systemSpecName != "" {
rootDir, err := builder.GetK8sRootDir()
rootDir, err := utils.GetK8sRootDir()
if err != nil {
glog.Fatalf("Failed to get k8s root directory: %v", err)
}
16
vendor/k8s.io/kubernetes/test/e2e_node/runner/remote/run_remote.go
generated
vendored
@ -134,7 +134,7 @@ type ImageConfig struct {

type Accelerator struct {
Type string `json:"type,omitempty"`
Count int64 `json:"count, omitempty"`
Count int64 `json:"count,omitempty"`
}

type Resources struct {
@ -142,19 +142,19 @@ type Resources struct {
}

type GCEImage struct {
Image string `json:"image, omitempty"`
ImageDesc string `json:"image_description, omitempty"`
Image string `json:"image,omitempty"`
ImageDesc string `json:"image_description,omitempty"`
Project string `json:"project"`
Metadata string `json:"metadata"`
ImageRegex string `json:"image_regex, omitempty"`
ImageRegex string `json:"image_regex,omitempty"`
// Defaults to using only the latest image. Acceptable values are [0, # of images that match the regex).
// If the number of existing previous images is less than what is desired, the test will use what is available.
PreviousImages int `json:"previous_images, omitempty"`
PreviousImages int `json:"previous_images,omitempty"`

Machine string `json:"machine, omitempty"`
Resources Resources `json:"resources, omitempty"`
Machine string `json:"machine,omitempty"`
Resources Resources `json:"resources,omitempty"`
// This test is for benchmark (no limit verification, more result log, node name has format 'machine-image-uuid') if 'Tests' is non-empty.
Tests []string `json:"tests, omitempty"`
Tests []string `json:"tests,omitempty"`
}

type internalImageConfig struct {
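Illustrative sketch (not part of the vendored change) of why the struct-tag whitespace above matters: encoding/json matches tag options by exact string comparison, so `json:"count, omitempty"` is parsed as the unrecognized option " omitempty" and the field is always emitted. The two types below are hypothetical and exist only to show the difference.

package main

import (
	"encoding/json"
	"fmt"
)

// withSpace mirrors the old, broken tag: the option " omitempty" is not recognized.
type withSpace struct {
	Count int64 `json:"count, omitempty"`
}

// withoutSpace mirrors the fixed tag: zero values are omitted as intended.
type withoutSpace struct {
	Count int64 `json:"count,omitempty"`
}

func main() {
	a, _ := json.Marshal(withSpace{})
	b, _ := json.Marshal(withoutSpace{})
	fmt.Println(string(a)) // {"count":0}
	fmt.Println(string(b)) // {}
}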
16
vendor/k8s.io/kubernetes/test/e2e_node/runtime_conformance_test.go
generated
vendored
@ -46,7 +46,7 @@ var _ = framework.KubeDescribe("Container Runtime Conformance Test", func() {

Describe("container runtime conformance blackbox test", func() {
Context("when starting a container that exits", func() {
framework.ConformanceIt("it should run with the expected status", func() {
framework.ConformanceIt("it should run with the expected status [NodeConformance]", func() {
restartCountVolumeName := "restart-count"
restartCountVolumePath := "/restart-count"
testContainer := v1.Container{
@ -127,7 +127,7 @@ while true; do sleep 1; done
By("it should get the expected 'State'")
Expect(GetContainerState(status.State)).To(Equal(testCase.State))

By("it should be possible to delete [Conformance]")
By("it should be possible to delete [Conformance][NodeConformance]")
Expect(terminateContainer.Delete()).To(Succeed())
Eventually(terminateContainer.Present, retryTimeout, pollInterval).Should(BeFalse())
}
@ -142,7 +142,7 @@ while true; do sleep 1; done
message gomegatypes.GomegaMatcher
}{
{
name: "if TerminationMessagePath is set [Conformance]",
name: "if TerminationMessagePath is set [Conformance][NodeConformance]",
container: v1.Container{
Image: busyboxImage,
Command: []string{"/bin/sh", "-c"},
@ -157,7 +157,7 @@ while true; do sleep 1; done
},

{
name: "if TerminationMessagePath is set as non-root user and at a non-default path [Conformance]",
name: "if TerminationMessagePath is set as non-root user and at a non-default path [Conformance][NodeConformance]",
container: v1.Container{
Image: busyboxImage,
Command: []string{"/bin/sh", "-c"},
@ -172,7 +172,7 @@ while true; do sleep 1; done
},

{
name: "from log output if TerminationMessagePolicy FallbackToLogOnError is set [Conformance]",
name: "from log output if TerminationMessagePolicy FallbackToLogOnError is set [Conformance][NodeConformance]",
container: v1.Container{
Image: busyboxImage,
Command: []string{"/bin/sh", "-c"},
@ -185,7 +185,7 @@ while true; do sleep 1; done
},

{
name: "as empty when pod succeeds and TerminationMessagePolicy FallbackToLogOnError is set",
name: "as empty when pod succeeds and TerminationMessagePolicy FallbackToLogOnError is set [NodeConformance]",
container: v1.Container{
Image: busyboxImage,
Command: []string{"/bin/sh", "-c"},
@ -198,7 +198,7 @@ while true; do sleep 1; done
},

{
name: "from file when pod succeeds and TerminationMessagePolicy FallbackToLogOnError is set [Conformance]",
name: "from file when pod succeeds and TerminationMessagePolicy FallbackToLogOnError is set [Conformance][NodeConformance]",
container: v1.Container{
Image: busyboxImage,
Command: []string{"/bin/sh", "-c"},
@ -313,7 +313,7 @@ while true; do sleep 1; done
},
} {
testCase := testCase
It(testCase.description+" [Conformance]", func() {
It(testCase.description+" [Conformance][NodeConformance]", func() {
name := "image-pull-test"
command := []string{"/bin/sh", "-c", "while true; do sleep 1; done"}
container := ConformanceContainer{
104
vendor/k8s.io/kubernetes/test/e2e_node/security_context_test.go
generated
vendored
@ -26,6 +26,8 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/uuid"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/test/e2e/framework"

. "github.com/onsi/ginkgo"
@ -39,6 +41,78 @@ var _ = framework.KubeDescribe("Security Context", func() {
podClient = f.PodClient()
})

Context("when pod PID namespace is configurable [Feature:ShareProcessNamespace][NodeAlphaFeature:ShareProcessNamespace]", func() {
It("containers in pods using isolated PID namespaces should all receive PID 1", func() {
By("Create a pod with isolated PID namespaces.")
f.PodClient().CreateSync(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "isolated-pid-ns-test-pod"},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "test-container-1",
Image: "busybox",
Command: []string{"/bin/top"},
},
{
Name: "test-container-2",
Image: "busybox",
Command: []string{"/bin/sleep"},
Args: []string{"10000"},
},
},
},
})

By("Check if both containers receive PID 1.")
pid1 := f.ExecCommandInContainer("isolated-pid-ns-test-pod", "test-container-1", "/bin/pidof", "top")
pid2 := f.ExecCommandInContainer("isolated-pid-ns-test-pod", "test-container-2", "/bin/pidof", "sleep")
if pid1 != "1" || pid2 != "1" {
framework.Failf("PIDs of different containers are not all 1: test-container-1=%v, test-container-2=%v", pid1, pid2)
}
})

It("processes in containers sharing a pod namespace should be able to see each other [Alpha]", func() {
By("Check whether shared PID namespace is supported.")
isEnabled, err := isSharedPIDNamespaceSupported()
framework.ExpectNoError(err)
if !isEnabled {
framework.Skipf("Skipped because shared PID namespace is not supported by this docker version.")
}
// It's not enough to set this flag in the kubelet because the apiserver needs it too
if !utilfeature.DefaultFeatureGate.Enabled(features.PodShareProcessNamespace) {
framework.Skipf("run test with --feature-gates=PodShareProcessNamespace=true to test PID namespace sharing")
}

By("Create a pod with shared PID namespace.")
f.PodClient().CreateSync(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "shared-pid-ns-test-pod"},
Spec: v1.PodSpec{
ShareProcessNamespace: &[]bool{true}[0],
Containers: []v1.Container{
{
Name: "test-container-1",
Image: "busybox",
Command: []string{"/bin/top"},
},
{
Name: "test-container-2",
Image: "busybox",
Command: []string{"/bin/sleep"},
Args: []string{"10000"},
},
},
},
})

By("Check if the process in one container is visible to the process in the other.")
pid1 := f.ExecCommandInContainer("shared-pid-ns-test-pod", "test-container-1", "/bin/pidof", "top")
pid2 := f.ExecCommandInContainer("shared-pid-ns-test-pod", "test-container-2", "/bin/pidof", "top")
if pid1 != pid2 {
framework.Failf("PIDs are not the same in different containers: test-container-1=%v, test-container-2=%v", pid1, pid2)
}
})
})

Context("when creating a pod in the host PID namespace", func() {
makeHostPidPod := func(podName, image string, command []string, hostPID bool) *v1.Pod {
return &v1.Pod{
@ -82,7 +156,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
nginxPid = strings.TrimSpace(output)
})

It("should show its pid in the host PID namespace", func() {
It("should show its pid in the host PID namespace [NodeFeature:HostAccess]", func() {
busyboxPodName := "busybox-hostpid-" + string(uuid.NewUUID())
createAndWaitHostPidPod(busyboxPodName, true)
logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
@ -102,7 +176,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
}
})

It("should not show its pid in the non-hostpid containers", func() {
It("should not show its pid in the non-hostpid containers [NodeFeature:HostAccess]", func() {
busyboxPodName := "busybox-non-hostpid-" + string(uuid.NewUUID())
createAndWaitHostPidPod(busyboxPodName, false)
logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
@ -158,7 +232,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
framework.Logf("Got host shared memory ID %q", hostSharedMemoryID)
})

It("should show the shared memory ID in the host IPC containers", func() {
It("should show the shared memory ID in the host IPC containers [NodeFeature:HostAccess]", func() {
ipcutilsPodName := "ipcutils-hostipc-" + string(uuid.NewUUID())
createAndWaitHostIPCPod(ipcutilsPodName, true)
logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, ipcutilsPodName, ipcutilsPodName)
@ -173,7 +247,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
}
})

It("should not show the shared memory ID in the non-hostIPC containers", func() {
It("should not show the shared memory ID in the non-hostIPC containers [NodeFeature:HostAccess]", func() {
ipcutilsPodName := "ipcutils-non-hostipc-" + string(uuid.NewUUID())
createAndWaitHostIPCPod(ipcutilsPodName, false)
logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, ipcutilsPodName, ipcutilsPodName)
@ -241,7 +315,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
framework.Logf("Opened a new tcp port %q", listeningPort)
})

It("should listen on same port in the host network containers", func() {
It("should listen on same port in the host network containers [NodeFeature:HostAccess]", func() {
busyboxPodName := "busybox-hostnetwork-" + string(uuid.NewUUID())
createAndWaitHostNetworkPod(busyboxPodName, true)
logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
@ -255,7 +329,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
}
})

It("shouldn't show the same port in the non-hostnetwork containers", func() {
It("shouldn't show the same port in the non-hostnetwork containers [NodeFeature:HostAccess]", func() {
busyboxPodName := "busybox-non-hostnetwork-" + string(uuid.NewUUID())
createAndWaitHostNetworkPod(busyboxPodName, false)
logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
@ -308,11 +382,11 @@ var _ = framework.KubeDescribe("Security Context", func() {
podClient.WaitForSuccess(podName, framework.PodStartTimeout)
}

It("should run the container with uid 65534", func() {
It("should run the container with uid 65534 [NodeConformance]", func() {
createAndWaitUserPod(65534)
})

It("should run the container with uid 0", func() {
It("should run the container with uid 0 [NodeConformance]", func() {
createAndWaitUserPod(0)
})
})
@ -355,11 +429,11 @@ var _ = framework.KubeDescribe("Security Context", func() {
return podName
}

It("should run the container with readonly rootfs when readOnlyRootFilesystem=true", func() {
It("should run the container with readonly rootfs when readOnlyRootFilesystem=true [NodeConformance]", func() {
createAndWaitUserPod(true)
})

It("should run the container with writable rootfs when readOnlyRootFilesystem=false", func() {
It("should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance]", func() {
createAndWaitUserPod(false)
})
})
@ -423,14 +497,14 @@ var _ = framework.KubeDescribe("Security Context", func() {
return nil
}

It("should allow privilege escalation when not explicitly set and uid != 0", func() {
It("should allow privilege escalation when not explicitly set and uid != 0 [NodeConformance]", func() {
podName := "alpine-nnp-nil-" + string(uuid.NewUUID())
if err := createAndMatchOutput(podName, "Effective uid: 0", nil, 1000); err != nil {
framework.Failf("Match output for pod %q failed: %v", podName, err)
}
})

It("should not allow privilege escalation when false", func() {
It("should not allow privilege escalation when false [NodeConformance]", func() {
podName := "alpine-nnp-false-" + string(uuid.NewUUID())
apeFalse := false
if err := createAndMatchOutput(podName, "Effective uid: 1000", &apeFalse, 1000); err != nil {
@ -438,7 +512,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
}
})

It("should allow privilege escalation when true", func() {
It("should allow privilege escalation when true [NodeConformance]", func() {
podName := "alpine-nnp-true-" + string(uuid.NewUUID())
apeTrue := true
if err := createAndMatchOutput(podName, "Effective uid: 0", &apeTrue, 1000); err != nil {
@ -481,7 +555,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
return podName
}

It("should run the container as privileged when true", func() {
It("should run the container as privileged when true [NodeFeature:HostAccess]", func() {
podName := createAndWaitUserPod(true)
logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName)
if err != nil {
@ -494,7 +568,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
}
})

It("should run the container as unprivileged when false", func() {
It("should run the container as unprivileged when false [NodeConformance]", func() {
podName := createAndWaitUserPod(false)
logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName)
if err != nil {
5
vendor/k8s.io/kubernetes/test/e2e_node/services/BUILD
generated
vendored
@ -23,14 +23,14 @@ go_library(
"//cmd/kube-apiserver/app:go_default_library",
"//cmd/kube-apiserver/app/options:go_default_library",
"//cmd/kubelet/app/options:go_default_library",
"//pkg/api/legacyscheme:go_default_library",
"//pkg/controller/namespace:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubelet/apis/kubeletconfig:go_default_library",
"//pkg/kubelet/apis/kubeletconfig/scheme:go_default_library",
"//pkg/kubelet/apis/kubeletconfig/v1beta1:go_default_library",
"//pkg/kubelet/kubeletconfig/util/codec:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e_node/builder:go_default_library",
"//test/e2e_node/remote:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/api/v2http:go_default_library",
"//vendor/github.com/coreos/etcd/pkg/transport:go_default_library",
@ -40,7 +40,6 @@ go_library(
"//vendor/github.com/spf13/pflag:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/flag:go_default_library",
"//vendor/k8s.io/client-go/dynamic:go_default_library",
20
vendor/k8s.io/kubernetes/test/e2e_node/services/apiserver.go
generated
vendored
@ -40,27 +40,33 @@ func NewAPIServer() *APIServer {

// Start starts the apiserver, returns when apiserver is ready.
func (a *APIServer) Start() error {
config := options.NewServerRunOptions()
config.Etcd.StorageConfig.ServerList = []string{getEtcdClientURL()}
o := options.NewServerRunOptions()
o.Etcd.StorageConfig.ServerList = []string{getEtcdClientURL()}
// TODO: Current setup of etcd in e2e-node tests doesn't support etcd v3
// protocol. We should migrate it to use the same infrastructure as all
// other tests (pkg/storage/etcd/testing).
config.Etcd.StorageConfig.Type = "etcd2"
o.Etcd.StorageConfig.Type = "etcd2"
_, ipnet, err := net.ParseCIDR(clusterIPRange)
if err != nil {
return err
}
config.ServiceClusterIPRange = *ipnet
config.AllowPrivileged = true
config.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"}
o.ServiceClusterIPRange = *ipnet
o.AllowPrivileged = true
o.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"}
errCh := make(chan error)
go func() {
defer close(errCh)
stopCh := make(chan struct{})
defer close(stopCh)
err := apiserver.Run(config, stopCh)
completedOptions, err := apiserver.Complete(o)
if err != nil {
errCh <- fmt.Errorf("set apiserver default options error: %v", err)
return
}
err = apiserver.Run(completedOptions, stopCh)
if err != nil {
errCh <- fmt.Errorf("run apiserver error: %v", err)
return
}
}()
59
vendor/k8s.io/kubernetes/test/e2e_node/services/kubelet.go
generated
vendored
@ -20,7 +20,6 @@ import (
"flag"
"fmt"
"io/ioutil"
"math/rand"
"os"
"os/exec"
"path/filepath"
@ -31,16 +30,16 @@ import (
"github.com/spf13/pflag"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilflag "k8s.io/apiserver/pkg/util/flag"
"k8s.io/kubernetes/cmd/kubelet/app/options"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/scheme"
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1beta1"
kubeletconfigv1beta1 "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1beta1"
kubeletconfigcodec "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e_node/builder"
"k8s.io/kubernetes/test/e2e_node/remote"
)

// TODO(random-liu): Replace this with standard kubelet launcher.
@ -55,12 +54,6 @@ func (a *args) String() string {

// Set function of flag.Value
func (a *args) Set(value string) error {
// Someone else is calling flag.Parse after the flags are parsed in the
// test framework. Use this to avoid the flag being parsed twice.
// TODO(random-liu): Figure out who is parsing the flags.
if flag.Parsed() {
return nil
}
// Note that we assume all white space in flag string is separating fields
na := strings.Fields(value)
*a = append(*a, na...)
@ -198,7 +191,12 @@ func (e *E2EServices) startKubelet() (*server, error) {
// Since kubelet will typically be run as a service it also makes more
// sense to test it that way
isSystemd = true
unitName := fmt.Sprintf("kubelet-%d.service", rand.Int31())
// We can ignore errors, to have GetTimestampFromWorkspaceDir() fallback
// to the current time.
cwd, _ := os.Getwd()
// Use the timestamp from the current directory to name the systemd unit.
unitTimestamp := remote.GetTimestampFromWorkspaceDir(cwd)
unitName := fmt.Sprintf("kubelet-%s.service", unitTimestamp)
if kubeletContainerized {
cmdArgs = append(cmdArgs, systemdRun, "--unit="+unitName, "--slice=runtime.slice", "--remain-after-exit",
"/usr/bin/docker", "run", "--name=kubelet",
@ -207,7 +205,7 @@ func (e *E2EServices) startKubelet() (*server, error) {
"-v", "/etc/localtime:/etc/localtime:ro",
"-v", "/etc/machine-id:/etc/machine-id:ro",
"-v", filepath.Dir(kubeconfigPath)+":/etc/kubernetes",
"-v", "/:/rootfs:ro,rslave",
"-v", "/:/rootfs:rw,rslave",
"-v", "/run:/run",
"-v", "/sys/fs/cgroup:/sys/fs/cgroup:rw",
"-v", "/sys:/sys:rw",
@ -221,7 +219,7 @@ func (e *E2EServices) startKubelet() (*server, error) {

// if we will generate a kubelet config file, we need to mount that path into the container too
if genKubeletConfigFile {
cmdArgs = append(cmdArgs, "-v", filepath.Dir(kubeletConfigPath)+":"+filepath.Dir(kubeletConfigPath)+":ro")
cmdArgs = append(cmdArgs, "-v", filepath.Dir(kubeletConfigPath)+":"+filepath.Dir(kubeletConfigPath)+":rw")
}

cmdArgs = append(cmdArgs, hyperkubeImage, "/hyperkube", "kubelet", "--containerized")
@ -258,9 +256,8 @@ func (e *E2EServices) startKubelet() (*server, error) {
cmdArgs = append(cmdArgs,
"--kubeconfig", kubeconfigPath,
"--root-dir", KubeletRootDirectory,
"--docker-disable-shared-pid=false",
"--v", LOG_VERBOSITY_LEVEL, "--logtostderr",
"--allow-privileged", "true",
"--allow-privileged=true",
)

// Apply test framework feature gates by default. This could also be overridden
@ -350,27 +347,13 @@ func addKubeletConfigFlags(cmdArgs *[]string, kc *kubeletconfig.KubeletConfigura
fs := pflag.NewFlagSet("kubelet", pflag.ExitOnError)
options.AddKubeletConfigFlags(fs, kc)
for _, name := range flags {
*cmdArgs = append(*cmdArgs, "--"+name, fs.Lookup(name).Value.String())
*cmdArgs = append(*cmdArgs, fmt.Sprintf("--%s=%s", name, fs.Lookup(name).Value.String()))
}
}

// writeKubeletConfigFile writes the kubelet config file based on the args and returns the filename
func writeKubeletConfigFile(internal *kubeletconfig.KubeletConfiguration, path string) error {
// extract the KubeletConfiguration and convert to versioned
versioned := &v1beta1.KubeletConfiguration{}
scheme, _, err := scheme.NewSchemeAndCodecs()
if err != nil {
return err
}
if err := scheme.Convert(internal, versioned, nil); err != nil {
return err
}
// encode
encoder, err := newKubeletConfigJSONEncoder()
if err != nil {
return err
}
data, err := runtime.Encode(encoder, versioned)
data, err := kubeletconfigcodec.EncodeKubeletConfig(internal, kubeletconfigv1beta1.SchemeGroupVersion)
if err != nil {
return err
}
@ -386,20 +369,6 @@ func writeKubeletConfigFile(internal *kubeletconfig.KubeletConfiguration, path s
return nil
}

func newKubeletConfigJSONEncoder() (runtime.Encoder, error) {
_, kubeletCodecs, err := scheme.NewSchemeAndCodecs()
if err != nil {
return nil, err
}

mediaType := "application/json"
info, ok := runtime.SerializerInfoForMediaType(kubeletCodecs.SupportedMediaTypes(), mediaType)
if !ok {
return nil, fmt.Errorf("unsupported media type %q", mediaType)
}
return kubeletCodecs.EncoderForVersion(info.Serializer, v1beta1.SchemeGroupVersion), nil
}

// createPodDirectory creates pod directory.
func createPodDirectory() (string, error) {
cwd, err := os.Getwd()
6
vendor/k8s.io/kubernetes/test/e2e_node/services/logs.go
generated
vendored
@ -44,12 +44,6 @@ func (l *logFiles) String() string {

// Set function of flag.Value
func (l *logFiles) Set(value string) error {
// Someone else is calling flag.Parse after the flags are parsed in the
// test framework. Use this to avoid the flag being parsed twice.
// TODO(random-liu): Figure out who is parsing the flags.
if flag.Parsed() {
return nil
}
var log LogFileData
if err := json.Unmarshal([]byte(value), &log); err != nil {
return err
14
vendor/k8s.io/kubernetes/test/e2e_node/services/namespace_controller.go
generated
vendored
@ -24,7 +24,6 @@ import (
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/pkg/api/legacyscheme"
namespacecontroller "k8s.io/kubernetes/pkg/controller/namespace"
)

@ -50,18 +49,25 @@ func NewNamespaceController(host string) *NamespaceController {

// Start starts the namespace controller.
func (n *NamespaceController) Start() error {
// Use the default QPS
config := restclient.AddUserAgent(&restclient.Config{Host: n.host}, ncName)

// the namespace cleanup controller is very chatty. It makes lots of discovery calls and then it makes lots of delete calls.
config.QPS = 50
config.Burst = 200

client, err := clientset.NewForConfig(config)
if err != nil {
return err
}
clientPool := dynamic.NewClientPool(config, legacyscheme.Registry.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
dynamicClient, err := dynamic.NewForConfig(config)
if err != nil {
return err
}
discoverResourcesFn := client.Discovery().ServerPreferredNamespacedResources
informerFactory := informers.NewSharedInformerFactory(client, ncResyncPeriod)
nc := namespacecontroller.NewNamespaceController(
client,
clientPool,
dynamicClient,
discoverResourcesFn,
informerFactory.Core().V1().Namespaces(),
ncResyncPeriod, v1.FinalizerKubernetes,
71
vendor/k8s.io/kubernetes/test/e2e_node/simple_mount.go
generated
vendored
@ -1,71 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e_node

import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"

. "github.com/onsi/ginkgo"
)

var _ = framework.KubeDescribe("SimpleMount", func() {
f := framework.NewDefaultFramework("simple-mount-test")

// This is a very simple test that exercises the Kubelet's mounter code path.
// If the mount fails, the pod will not be able to run, and CreateSync will timeout.
It("should be able to mount an emptydir on a container", func() {
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "simple-mount-pod",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "simple-mount-container",
Image: imageutils.GetPauseImageNameForHostArch(),
VolumeMounts: []v1.VolumeMount{
{
Name: "simply-mounted-volume",
MountPath: "/opt/",
},
},
},
},
Volumes: []v1.Volume{
{
Name: "simply-mounted-volume",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{
Medium: "Memory",
},
},
},
},
},
}
podClient := f.PodClient()
pod = podClient.CreateSync(pod)

})
})
39
vendor/k8s.io/kubernetes/test/e2e_node/summary_test.go
generated
vendored
@ -19,6 +19,7 @@ package e2e_node
import (
"fmt"
"io/ioutil"
"os/exec"
"strings"
"time"

@ -35,7 +36,7 @@ import (
"github.com/onsi/gomega/types"
)

var _ = framework.KubeDescribe("Summary API", func() {
var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
f := framework.NewDefaultFramework("summary-test")
Context("when querying /stats/summary", func() {
AfterEach(func() {
@ -112,15 +113,39 @@ var _ = framework.KubeDescribe("Summary API", func() {
"Time": recent(maxStatsAge),
// Pods are limited by Node Allocatable
"AvailableBytes": bounded(1*framework.Kb, memoryLimit),
"UsageBytes": bounded(10*framework.Kb, 20*framework.Mb),
"WorkingSetBytes": bounded(10*framework.Kb, 20*framework.Mb),
"UsageBytes": bounded(10*framework.Kb, 50*framework.Mb),
"WorkingSetBytes": bounded(10*framework.Kb, 50*framework.Mb),
"RSSBytes": bounded(1*framework.Kb, 20*framework.Mb),
"PageFaults": bounded(0, 1000000),
"MajorPageFaults": bounded(0, 10),
})
runtimeContExpectations := sysContExpectations().(*gstruct.FieldsMatcher)
if systemdutil.IsRunningSystemd() && framework.TestContext.ContainerRuntime == "docker" {
// Some Linux distributions still ship a docker.service that is missing
// a `Delegate=yes` setting (or equivalent CPUAccounting= and MemoryAccounting=)
// that allows us to monitor the container runtime resource usage through
// the "cpu" and "memory" cgroups.
//
// Make an exception here for those distros, only for Docker, so that they
// can pass the full node e2e tests even in that case.
//
// For newer container runtimes (using CRI) and even distros that still
// ship Docker, we should encourage them to always set `Delegate=yes` in
// order to make monitoring of the runtime possible.
stdout, err := exec.Command("systemctl", "show", "-p", "Delegate", "docker.service").CombinedOutput()
if err == nil && strings.TrimSpace(string(stdout)) == "Delegate=no" {
// Only make these optional if we can successfully confirm that
// Delegate is set to "no" (in other words, unset.) If we fail
// to check that, default to requiring it, which might cause
// false positives, but that should be the safer approach.
By("Making runtime container expectations optional, since systemd was not configured to Delegate=yes the cgroups")
runtimeContExpectations.Fields["Memory"] = Or(BeNil(), runtimeContExpectations.Fields["Memory"])
runtimeContExpectations.Fields["CPU"] = Or(BeNil(), runtimeContExpectations.Fields["CPU"])
}
}
systemContainers := gstruct.Elements{
"kubelet": sysContExpectations(),
"runtime": sysContExpectations(),
"runtime": runtimeContExpectations,
"pods": podsContExpectations,
}
// The Kubelet only manages the 'misc' system container if the host is not running systemd.
@ -150,7 +175,7 @@ var _ = framework.KubeDescribe("Summary API", func() {
"StartTime": recent(maxStartAge),
"CPU": ptrMatchAllFields(gstruct.Fields{
"Time": recent(maxStatsAge),
"UsageNanoCores": bounded(100000, 1E9),
"UsageNanoCores": bounded(10000, 1E9),
"UsageCoreNanoSeconds": bounded(10000000, 1E11),
}),
"Memory": ptrMatchAllFields(gstruct.Fields{
@ -197,7 +222,7 @@ var _ = framework.KubeDescribe("Summary API", func() {
}),
"CPU": ptrMatchAllFields(gstruct.Fields{
"Time": recent(maxStatsAge),
"UsageNanoCores": bounded(100000, 1E9),
"UsageNanoCores": bounded(10000, 1E9),
"UsageCoreNanoSeconds": bounded(10000000, 1E11),
}),
"Memory": ptrMatchAllFields(gstruct.Fields{
@ -324,7 +349,7 @@ func getSummaryTestPods(f *framework.Framework, numRestarts int32, names ...stri
{
Name: "busybox-container",
Image: busyboxImage,
Command: getRestartingContainerCommand("/test-empty-dir-mnt", 0, numRestarts, "ping -c 1 google.com; echo 'hello world' >> /test-empty-dir-mnt/file;"),
Command: getRestartingContainerCommand("/test-empty-dir-mnt", 0, numRestarts, "echo 'some bytes' >/outside_the_volume.txt; ping -c 1 google.com; echo 'hello world' >> /test-empty-dir-mnt/file;"),
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
// Must set memory limit to get MemoryStats.AvailableBytes
1
vendor/k8s.io/kubernetes/test/e2e_node/system/BUILD
generated
vendored
@ -60,7 +60,6 @@ go_library(
"//vendor/github.com/docker/docker/api/types:go_default_library",
"//vendor/github.com/docker/docker/client:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
],
)
2
vendor/k8s.io/kubernetes/test/e2e_node/system/docker_validator.go
generated
vendored
@ -17,12 +17,12 @@ limitations under the License.
package system

import (
"context"
"fmt"
"regexp"

"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
"golang.org/x/net/context"
)

var _ Validator = &DockerValidator{}
90
vendor/k8s.io/kubernetes/test/e2e_node/util.go
generated
vendored
@ -32,19 +32,20 @@ import (
apiv1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/kubernetes/pkg/features"
internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri"
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
kubeletscheme "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/scheme"
kubeletconfigv1beta1 "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1beta1"
stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
"k8s.io/kubernetes/pkg/kubelet/cm"
kubeletconfigcodec "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec"
kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubernetes/pkg/kubelet/remote"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/metrics"
frameworkmetrics "k8s.io/kubernetes/test/e2e/framework/metrics"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@ -57,6 +58,11 @@ var startServices = flag.Bool("start-services", true, "If true, start local node
var stopServices = flag.Bool("stop-services", true, "If true, stop local node services after running tests")
var busyboxImage = "busybox"

const (
// Kubelet internal cgroup name for node allocatable cgroup.
defaultNodeAllocatableCgroup = "kubepods"
)

func getNodeSummary() (*stats.Summary, error) {
req, err := http.NewRequest("GET", *kubeletAddress+"/stats/summary", nil)
if err != nil {
@ -132,7 +138,7 @@ func isKubeletConfigEnabled(f *framework.Framework) (bool, error) {
}
v, ok := cfgz.FeatureGates[string(features.DynamicKubeletConfig)]
if !ok {
return false, nil
return true, nil
}
return v, nil
}
@ -164,10 +170,10 @@ func setKubeletConfiguration(f *framework.Framework, kubeCfg *kubeletconfig.Kube

// create the reference and set Node.Spec.ConfigSource
src := &apiv1.NodeConfigSource{
ConfigMapRef: &apiv1.ObjectReference{
Namespace: "kube-system",
Name: cm.Name,
UID: cm.UID,
ConfigMap: &apiv1.ConfigMapNodeConfigSource{
Namespace: "kube-system",
Name: cm.Name,
KubeletConfigKey: "kubelet",
},
}

@ -219,17 +225,6 @@ func setNodeConfigSource(f *framework.Framework, source *apiv1.NodeConfigSource)
return nil
}

// getKubeletConfigOkCondition returns the first NodeCondition in `cs` with Type == apiv1.NodeKubeletConfigOk,
// or if no such condition exists, returns nil.
func getKubeletConfigOkCondition(cs []apiv1.NodeCondition) *apiv1.NodeCondition {
for i := range cs {
if cs[i].Type == apiv1.NodeKubeletConfigOk {
return &cs[i]
}
}
return nil
}

// Causes the test to fail, or returns a status 200 response from the /configz endpoint
func pollConfigz(timeout time.Duration, pollInterval time.Duration) *http.Response {
endpoint := fmt.Sprintf("http://127.0.0.1:8080/api/v1/nodes/%s/proxy/configz", framework.TestContext.NodeName)
@ -295,17 +290,7 @@ func createConfigMap(f *framework.Framework, internalKC *kubeletconfig.KubeletCo

// constructs a ConfigMap, populating one of its keys with the KubeletConfiguration. Always uses GenerateName to generate a suffix.
func newKubeletConfigMap(name string, internalKC *kubeletconfig.KubeletConfiguration) *apiv1.ConfigMap {
scheme, _, err := kubeletscheme.NewSchemeAndCodecs()
framework.ExpectNoError(err)

versioned := &kubeletconfigv1beta1.KubeletConfiguration{}
err = scheme.Convert(internalKC, versioned, nil)
framework.ExpectNoError(err)

encoder, err := newKubeletConfigJSONEncoder()
framework.ExpectNoError(err)

data, err := runtime.Encode(encoder, versioned)
data, err := kubeletconfigcodec.EncodeKubeletConfig(internalKC, kubeletconfigv1beta1.SchemeGroupVersion)
framework.ExpectNoError(err)

cmap := &apiv1.ConfigMap{
@ -335,32 +320,38 @@ func getLocalNode(f *framework.Framework) *apiv1.Node {
return &nodeList.Items[0]
}

// logs prometheus metrics from the local kubelet.
func logKubeletMetrics(metricKeys ...string) {
// logKubeletLatencyMetrics logs KubeletLatencyMetrics computed from the Prometheus
// metrics exposed on the current node and identified by the metricNames.
// The Kubelet subsystem prefix is automatically prepended to these metric names.
func logKubeletLatencyMetrics(metricNames ...string) {
metricSet := sets.NewString()
for _, key := range metricKeys {
for _, key := range metricNames {
metricSet.Insert(kubeletmetrics.KubeletSubsystem + "_" + key)
}
metric, err := metrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName + ":10255")
if err != nil {
framework.Logf("Error getting kubelet metrics: %v", err)
} else {
framework.Logf("Kubelet Metrics: %+v", framework.GetKubeletMetrics(metric, metricSet))
framework.Logf("Kubelet Metrics: %+v", framework.GetKubeletLatencyMetrics(metric, metricSet))
}
}

func newKubeletConfigJSONEncoder() (runtime.Encoder, error) {
_, kubeletCodecs, err := kubeletscheme.NewSchemeAndCodecs()
// returns config related metrics from the local kubelet, filtered to the filterMetricNames passed in
func getKubeletMetrics(filterMetricNames sets.String) (frameworkmetrics.KubeletMetrics, error) {
// grab Kubelet metrics
ms, err := metrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName + ":10255")
if err != nil {
return nil, err
}

mediaType := "application/json"
info, ok := runtime.SerializerInfoForMediaType(kubeletCodecs.SupportedMediaTypes(), mediaType)
if !ok {
return nil, fmt.Errorf("unsupported media type %q", mediaType)
filtered := metrics.NewKubeletMetrics()
for name := range ms {
if !filterMetricNames.Has(name) {
continue
}
filtered[name] = ms[name]
}
return kubeletCodecs.EncoderForVersion(info.Serializer, kubeletconfigv1beta1.SchemeGroupVersion), nil
return filtered, nil
}

// runCommand runs the cmd and returns the combined stdout and stderr, or an
@ -399,7 +390,7 @@ func getCRIClient() (internalapi.RuntimeService, internalapi.ImageManagerService
func restartKubelet() {
stdout, err := exec.Command("sudo", "systemctl", "list-units", "kubelet*", "--state=running").CombinedOutput()
framework.ExpectNoError(err)
regex := regexp.MustCompile("(kubelet-[0-9]+)")
regex := regexp.MustCompile("(kubelet-\\w+)")
matches := regex.FindStringSubmatch(string(stdout))
Expect(len(matches)).NotTo(BeZero())
kube := matches[0]
@ -407,3 +398,20 @@ func restartKubelet() {
stdout, err = exec.Command("sudo", "systemctl", "restart", kube).CombinedOutput()
framework.ExpectNoError(err, "Failed to restart kubelet with systemctl: %v, %v", err, stdout)
}

func toCgroupFsName(cgroupName cm.CgroupName) string {
	if framework.TestContext.KubeletConfig.CgroupDriver == "systemd" {
		return cgroupName.ToSystemd()
	} else {
		return cgroupName.ToCgroupfs()
	}
}

// reduceAllocatableMemoryUsage uses memory.force_empty (https://lwn.net/Articles/432224/)
// to make the kernel reclaim memory in the allocatable cgroup
// the time to reduce pressure may be unbounded, but usually finishes within a second
func reduceAllocatableMemoryUsage() {
cmd := fmt.Sprintf("echo 0 > /sys/fs/cgroup/memory/%s/memory.force_empty", toCgroupFsName(cm.NewCgroupName(cm.RootCgroupName, defaultNodeAllocatableCgroup)))
_, err := exec.Command("sudo", "sh", "-c", cmd).CombinedOutput()
framework.ExpectNoError(err)
}
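Illustrative sketch (not from the vendored code) of why the restartKubelet regex above was widened from kubelet-[0-9]+ to kubelet-\w+: the transient unit is now named after the workspace timestamp rather than a random integer. The sample unit name below is a hypothetical timestamp-based name of the form produced by the kubelet.go change earlier in this diff.

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Transient units are now named kubelet-<timestamp>.service instead of kubelet-<random int>.service.
	unit := "kubelet-20060102T150405.service"

	oldRe := regexp.MustCompile("(kubelet-[0-9]+)")
	newRe := regexp.MustCompile("(kubelet-\\w+)")

	fmt.Println(oldRe.FindStringSubmatch(unit)[1]) // kubelet-20060102 (truncated at the first letter)
	fmt.Println(newRe.FindStringSubmatch(unit)[1]) // kubelet-20060102T150405 (full unit name, minus .service)
}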
2
vendor/k8s.io/kubernetes/test/e2e_node/volume_manager_test.go
generated
vendored
@ -34,7 +34,7 @@ var _ = framework.KubeDescribe("Kubelet Volume Manager", func() {
f := framework.NewDefaultFramework("kubelet-volume-manager")
Describe("Volume Manager", func() {
Context("On termination of pod with memory backed volume", func() {
It("should remove the volume from the node", func() {
It("should remove the volume from the node [NodeConformance]", func() {
var (
memoryBackedPod *v1.Pod
volumeName string