Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-13 10:33:35 +00:00)
vendor files
vendor/k8s.io/kubernetes/test/e2e_node/BUILD (207 lines, generated, vendored, new file)
@@ -0,0 +1,207 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = [
        "container.go",
        "doc.go",
        "docker_util.go",
        "framework.go",
        "gpu_device_plugin.go",
        "gpus.go",
        "image_list.go",
        "simple_mount.go",
        "util.go",
    ] + select({
        "@io_bazel_rules_go//go/platform:linux_amd64": [
            "benchmark_util.go",
            "node_problem_detector_linux.go",
            "resource_collector.go",
        ],
        "//conditions:default": [],
    }),
    importpath = "k8s.io/kubernetes/test/e2e_node",
    deps = [
        "//pkg/api/v1/pod:go_default_library",
        "//pkg/features:go_default_library",
        "//pkg/kubelet/apis/cri:go_default_library",
        "//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library",
        "//pkg/kubelet/apis/kubeletconfig:go_default_library",
        "//pkg/kubelet/apis/kubeletconfig/scheme:go_default_library",
        "//pkg/kubelet/apis/kubeletconfig/v1alpha1:go_default_library",
        "//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
        "//pkg/kubelet/metrics:go_default_library",
        "//pkg/kubelet/remote:go_default_library",
        "//test/e2e/common:go_default_library",
        "//test/e2e/framework:go_default_library",
        "//test/e2e/framework/metrics:go_default_library",
        "//test/utils/image:go_default_library",
        "//vendor/github.com/blang/semver:go_default_library",
        "//vendor/github.com/coreos/go-systemd/util:go_default_library",
        "//vendor/github.com/docker/docker/client:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/github.com/onsi/ginkgo:go_default_library",
        "//vendor/github.com/onsi/gomega:go_default_library",
        "//vendor/github.com/prometheus/common/model:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
    ] + select({
        "@io_bazel_rules_go//go/platform:linux_amd64": [
            "//pkg/api/v1/node:go_default_library",
            "//pkg/util/procfs:go_default_library",
            "//test/e2e/perftype:go_default_library",
            "//test/e2e_node/perftype:go_default_library",
            "//vendor/github.com/google/cadvisor/client/v2:go_default_library",
            "//vendor/github.com/google/cadvisor/info/v2:go_default_library",
            "//vendor/github.com/opencontainers/runc/libcontainer/cgroups:go_default_library",
            "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
            "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
            "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
            "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
            "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
            "//vendor/k8s.io/client-go/kubernetes:go_default_library",
            "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        ],
        "//conditions:default": [],
    }),
)

go_test(
    name = "go_default_test",
    srcs = [
        "apparmor_test.go",
        "cpu_manager_test.go",
        "critical_pod_test.go",
        "docker_test.go",
        "dockershim_checkpoint_test.go",
        "dynamic_kubelet_config_test.go",
        "e2e_node_suite_test.go",
        "eviction_test.go",
        "garbage_collector_test.go",
        "gke_environment_test.go",
        "image_id_test.go",
        "kubelet_test.go",
        "lifecycle_hook_test.go",
        "log_path_test.go",
        "memory_eviction_test.go",
        "mirror_pod_test.go",
        "pods_container_manager_test.go",
        "runtime_conformance_test.go",
        "security_context_test.go",
        "summary_test.go",
        "volume_manager_test.go",
    ] + select({
        "@io_bazel_rules_go//go/platform:linux_amd64": [
            "container_manager_test.go",
            "density_test.go",
            "node_container_manager_test.go",
            "resource_usage_test.go",
            "restart_test.go",
        ],
        "//conditions:default": [],
    }),
    importpath = "k8s.io/kubernetes/test/e2e_node",
    library = ":go_default_library",
    tags = ["e2e"],
    deps = [
        "//pkg/api/v1/node:go_default_library",
        "//pkg/apis/core:go_default_library",
        "//pkg/features:go_default_library",
        "//pkg/kubelet:go_default_library",
        "//pkg/kubelet/apis/kubeletconfig:go_default_library",
        "//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
        "//pkg/kubelet/cm:go_default_library",
        "//pkg/kubelet/cm/cpumanager:go_default_library",
        "//pkg/kubelet/cm/cpuset:go_default_library",
        "//pkg/kubelet/container:go_default_library",
        "//pkg/kubelet/dockershim/libdocker:go_default_library",
        "//pkg/kubelet/images:go_default_library",
        "//pkg/kubelet/kubeletconfig/status:go_default_library",
        "//pkg/kubelet/metrics:go_default_library",
        "//pkg/kubelet/types:go_default_library",
        "//pkg/security/apparmor:go_default_library",
        "//test/e2e/common:go_default_library",
        "//test/e2e/framework:go_default_library",
        "//test/e2e_node/services:go_default_library",
        "//test/e2e_node/system:go_default_library",
        "//test/utils/image:go_default_library",
        "//vendor/github.com/blang/semver:go_default_library",
        "//vendor/github.com/coreos/go-systemd/util:go_default_library",
        "//vendor/github.com/davecgh/go-spew/spew:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/github.com/kardianos/osext:go_default_library",
        "//vendor/github.com/onsi/ginkgo:go_default_library",
        "//vendor/github.com/onsi/ginkgo/config:go_default_library",
        "//vendor/github.com/onsi/ginkgo/reporters:go_default_library",
        "//vendor/github.com/onsi/gomega:go_default_library",
        "//vendor/github.com/onsi/gomega/gstruct:go_default_library",
        "//vendor/github.com/onsi/gomega/types:go_default_library",
        "//vendor/github.com/spf13/pflag:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/yaml:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
    ] + select({
        "@io_bazel_rules_go//go/platform:linux_amd64": [
            "//test/e2e/framework/metrics:go_default_library",
            "//test/utils:go_default_library",
            "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
            "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
            "//vendor/k8s.io/client-go/tools/cache:go_default_library",
        ],
        "//conditions:default": [],
    }),
)

genrule(
    name = "gen_e2e_node.test",
    testonly = 1,
    srcs = [":go_default_test"],
    outs = ["e2e_node.test"],
    cmd = "srcs=($(SRCS)); cp $$(dirname $${srcs[0]})/go_default_test $@;",
    output_to_bindir = 1,
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//test/e2e_node/builder:all-srcs",
        "//test/e2e_node/environment:all-srcs",
        "//test/e2e_node/perftype:all-srcs",
        "//test/e2e_node/remote:all-srcs",
        "//test/e2e_node/runner/local:all-srcs",
        "//test/e2e_node/runner/remote:all-srcs",
        "//test/e2e_node/services:all-srcs",
        "//test/e2e_node/system:all-srcs",
    ],
    tags = ["automanaged"],
)
vendor/k8s.io/kubernetes/test/e2e_node/OWNERS (8 lines, generated, vendored, new file)
@@ -0,0 +1,8 @@
approvers:
- Random-Liu
- tallclair
- vishh
- derekwaynecarr
- yujuhong
reviewers:
- sig-node-reviewers
vendor/k8s.io/kubernetes/test/e2e_node/README.md (3 lines, generated, vendored, new file)
@@ -0,0 +1,3 @@
See [e2e-node-tests](https://git.k8s.io/community/contributors/devel/e2e-node-tests.md)

[]()
vendor/k8s.io/kubernetes/test/e2e_node/apparmor_test.go (221 lines, generated, vendored, new file)
@ -0,0 +1,221 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/kubernetes/pkg/security/apparmor"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
"github.com/golang/glog"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = framework.KubeDescribe("AppArmor [Feature:AppArmor]", func() {
|
||||
if isAppArmorEnabled() {
|
||||
BeforeEach(func() {
|
||||
By("Loading AppArmor profiles for testing")
|
||||
framework.ExpectNoError(loadTestProfiles(), "Could not load AppArmor test profiles")
|
||||
})
|
||||
Context("when running with AppArmor", func() {
|
||||
f := framework.NewDefaultFramework("apparmor-test")
|
||||
|
||||
It("should reject an unloaded profile", func() {
|
||||
status := runAppArmorTest(f, false, apparmor.ProfileNamePrefix+"non-existant-profile")
|
||||
expectSoftRejection(status)
|
||||
})
|
||||
It("should enforce a profile blocking writes", func() {
|
||||
status := runAppArmorTest(f, true, apparmor.ProfileNamePrefix+apparmorProfilePrefix+"deny-write")
|
||||
if len(status.ContainerStatuses) == 0 {
|
||||
framework.Failf("Unexpected pod status: %s", spew.Sdump(status))
|
||||
return
|
||||
}
|
||||
state := status.ContainerStatuses[0].State.Terminated
|
||||
Expect(state).ToNot(BeNil(), "ContainerState: %+v", status.ContainerStatuses[0].State)
|
||||
Expect(state.ExitCode).To(Not(BeZero()), "ContainerStateTerminated: %+v", state)
|
||||
|
||||
})
|
||||
It("should enforce a permissive profile", func() {
|
||||
status := runAppArmorTest(f, true, apparmor.ProfileNamePrefix+apparmorProfilePrefix+"audit-write")
|
||||
if len(status.ContainerStatuses) == 0 {
|
||||
framework.Failf("Unexpected pod status: %s", spew.Sdump(status))
|
||||
return
|
||||
}
|
||||
state := status.ContainerStatuses[0].State.Terminated
|
||||
Expect(state).ToNot(BeNil(), "ContainerState: %+v", status.ContainerStatuses[0].State)
|
||||
Expect(state.ExitCode).To(BeZero(), "ContainerStateTerminated: %+v", state)
|
||||
})
|
||||
})
|
||||
} else {
|
||||
Context("when running without AppArmor", func() {
|
||||
f := framework.NewDefaultFramework("apparmor-test")
|
||||
|
||||
It("should reject a pod with an AppArmor profile", func() {
|
||||
status := runAppArmorTest(f, false, apparmor.ProfileRuntimeDefault)
|
||||
expectSoftRejection(status)
|
||||
})
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
const apparmorProfilePrefix = "e2e-node-apparmor-test-"
|
||||
const testProfiles = `
|
||||
#include <tunables/global>
|
||||
|
||||
profile e2e-node-apparmor-test-deny-write flags=(attach_disconnected) {
|
||||
#include <abstractions/base>
|
||||
|
||||
file,
|
||||
|
||||
# Deny all file writes.
|
||||
deny /** w,
|
||||
}
|
||||
|
||||
profile e2e-node-apparmor-test-audit-write flags=(attach_disconnected) {
|
||||
#include <abstractions/base>
|
||||
|
||||
file,
|
||||
|
||||
# Only audit file writes.
|
||||
audit /** w,
|
||||
}
|
||||
`
|
||||
|
||||
func loadTestProfiles() error {
|
||||
f, err := ioutil.TempFile("/tmp", "apparmor")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open temp file: %v", err)
|
||||
}
|
||||
defer os.Remove(f.Name())
|
||||
defer f.Close()
|
||||
|
||||
if _, err := f.WriteString(testProfiles); err != nil {
|
||||
return fmt.Errorf("failed to write profiles to file: %v", err)
|
||||
}
|
||||
|
||||
// TODO(random-liu): The test is run as root now, no need to use sudo here.
|
||||
cmd := exec.Command("sudo", "apparmor_parser", "-r", "-W", f.Name())
|
||||
stderr := &bytes.Buffer{}
|
||||
cmd.Stderr = stderr
|
||||
out, err := cmd.Output()
|
||||
// apparmor_parser does not always return an error code, so consider any stderr output an error.
|
||||
if err != nil || stderr.Len() > 0 {
|
||||
if stderr.Len() > 0 {
|
||||
glog.Warning(stderr.String())
|
||||
}
|
||||
if len(out) > 0 {
|
||||
glog.Infof("apparmor_parser: %s", out)
|
||||
}
|
||||
return fmt.Errorf("failed to load profiles: %v", err)
|
||||
}
|
||||
glog.V(2).Infof("Loaded profiles: %v", out)
|
||||
return nil
|
||||
}
|
||||
|
||||
func runAppArmorTest(f *framework.Framework, shouldRun bool, profile string) v1.PodStatus {
|
||||
pod := createPodWithAppArmor(f, profile)
|
||||
if shouldRun {
|
||||
// The pod needs to start before it stops, so wait for the longer start timeout.
|
||||
framework.ExpectNoError(framework.WaitTimeoutForPodNoLongerRunningInNamespace(
|
||||
f.ClientSet, pod.Name, f.Namespace.Name, framework.PodStartTimeout))
|
||||
} else {
|
||||
// Pod should remain in the pending state. Wait for the Reason to be set to "AppArmor".
|
||||
w, err := f.PodClient().Watch(metav1.SingleObject(metav1.ObjectMeta{Name: pod.Name}))
|
||||
framework.ExpectNoError(err)
|
||||
_, err = watch.Until(framework.PodStartTimeout, w, func(e watch.Event) (bool, error) {
|
||||
switch e.Type {
|
||||
case watch.Deleted:
|
||||
return false, errors.NewNotFound(schema.GroupResource{Resource: "pods"}, pod.Name)
|
||||
}
|
||||
switch t := e.Object.(type) {
|
||||
case *v1.Pod:
|
||||
if t.Status.Reason == "AppArmor" {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
p, err := f.PodClient().Get(pod.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
return p.Status
|
||||
}
|
||||
|
||||
func createPodWithAppArmor(f *framework.Framework, profile string) *v1.Pod {
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("test-apparmor-%s", strings.Replace(profile, "/", "-", -1)),
|
||||
Annotations: map[string]string{
|
||||
apparmor.ContainerAnnotationKeyPrefix + "test": profile,
|
||||
},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{
|
||||
Name: "test",
|
||||
Image: busyboxImage,
|
||||
Command: []string{"touch", "foo"},
|
||||
}},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
return f.PodClient().Create(pod)
|
||||
}
|
||||
|
||||
func expectSoftRejection(status v1.PodStatus) {
|
||||
args := []interface{}{"PodStatus: %+v", status}
|
||||
Expect(status.Phase).To(Equal(v1.PodPending), args...)
|
||||
Expect(status.Reason).To(Equal("AppArmor"), args...)
|
||||
Expect(status.Message).To(ContainSubstring("AppArmor"), args...)
|
||||
Expect(status.ContainerStatuses[0].State.Waiting.Reason).To(Equal("Blocked"), args...)
|
||||
}
|
||||
|
||||
func isAppArmorEnabled() bool {
|
||||
// TODO(tallclair): Pass this through the image setup rather than hardcoding.
|
||||
if strings.Contains(framework.TestContext.NodeName, "-gci-dev-") {
|
||||
gciVersionRe := regexp.MustCompile("-gci-dev-([0-9]+)-")
|
||||
matches := gciVersionRe.FindStringSubmatch(framework.TestContext.NodeName)
|
||||
if len(matches) == 2 {
|
||||
version, err := strconv.Atoi(matches[1])
|
||||
if err != nil {
|
||||
glog.Errorf("Error parsing GCI version from NodeName %q: %v", framework.TestContext.NodeName, err)
|
||||
return false
|
||||
}
|
||||
return version >= 54
|
||||
}
|
||||
return false
|
||||
}
|
||||
if strings.Contains(framework.TestContext.NodeName, "-ubuntu-") {
|
||||
return true
|
||||
}
|
||||
return apparmor.IsAppArmorEnabled()
|
||||
}
|
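The test above drives AppArmor purely through pod annotations: loadTestProfiles registers the profiles with apparmor_parser, and createPodWithAppArmor points a container at one of them by name. A minimal sketch of that annotation wiring, not part of the vendored file (the pod and container names are invented; busyboxImage, apparmorProfilePrefix, and the apparmor package constants are the ones used above):

	// Illustrative sketch: request the "deny-write" profile loaded above for a
	// container named "demo". The annotation key is built from the per-container
	// prefix constant plus the container name, mirroring createPodWithAppArmor.
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: "apparmor-demo",
			Annotations: map[string]string{
				apparmor.ContainerAnnotationKeyPrefix + "demo": apparmor.ProfileNamePrefix + apparmorProfilePrefix + "deny-write",
			},
		},
		Spec: v1.PodSpec{
			Containers:    []v1.Container{{Name: "demo", Image: busyboxImage, Command: []string{"touch", "foo"}}},
			RestartPolicy: v1.RestartPolicyNever,
		},
	}
	_ = pod // would be submitted via f.PodClient().Create(pod), as the test does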
vendor/k8s.io/kubernetes/test/e2e_node/benchmark_util.go (191 lines, generated, vendored, new file)
@ -0,0 +1,191 @@
|
||||
// +build linux
|
||||
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"path"
|
||||
"sort"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/perftype"
|
||||
nodeperftype "k8s.io/kubernetes/test/e2e_node/perftype"
|
||||
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
const (
|
||||
TimeSeriesTag = "[Result:TimeSeries]"
|
||||
TimeSeriesEnd = "[Finish:TimeSeries]"
|
||||
)
|
||||
|
||||
// dumpDataToFile inserts the current timestamp into the labels and writes the
|
||||
// data for the test into the file with the specified prefix.
|
||||
func dumpDataToFile(data interface{}, labels map[string]string, prefix string) {
|
||||
testName := labels["test"]
|
||||
fileName := path.Join(framework.TestContext.ReportDir, fmt.Sprintf("%s-%s-%s.json", prefix, framework.TestContext.ReportPrefix, testName))
|
||||
labels["timestamp"] = strconv.FormatInt(time.Now().UTC().Unix(), 10)
|
||||
framework.Logf("Dumping perf data for test %q to %q.", testName, fileName)
|
||||
if err := ioutil.WriteFile(fileName, []byte(framework.PrettyPrintJSON(data)), 0644); err != nil {
|
||||
framework.Logf("Failed to write perf data for test %q to %q: %v", testName, fileName, err)
|
||||
}
|
||||
}
|
||||
|
||||
// logPerfData writes the perf data to a standalone json file if the
|
||||
// framework.TestContext.ReportDir is non-empty, or to the general build log
|
||||
// otherwise. The perfType identifies which type of the perf data it is, such
|
||||
// as "cpu" and "memory". If an error occurs, no perf data will be logged.
|
||||
func logPerfData(p *perftype.PerfData, perfType string) {
|
||||
if framework.TestContext.ReportDir == "" {
|
||||
framework.PrintPerfData(p)
|
||||
return
|
||||
}
|
||||
dumpDataToFile(p, p.Labels, "performance-"+perfType)
|
||||
}
|
||||
|
||||
// logDensityTimeSeries writes the time series data of operation and resource
|
||||
// usage to a standalone json file if the framework.TestContext.ReportDir is
|
||||
// non-empty, or to the general build log otherwise. If an error occurs,
|
||||
// no perf data will be logged.
|
||||
func logDensityTimeSeries(rc *ResourceCollector, create, watch map[string]metav1.Time, testInfo map[string]string) {
|
||||
timeSeries := &nodeperftype.NodeTimeSeries{
|
||||
Labels: testInfo,
|
||||
Version: framework.CurrentKubeletPerfMetricsVersion,
|
||||
}
|
||||
// Attach operation time series.
|
||||
timeSeries.OperationData = map[string][]int64{
|
||||
"create": getCumulatedPodTimeSeries(create),
|
||||
"running": getCumulatedPodTimeSeries(watch),
|
||||
}
|
||||
// Attach resource time series.
|
||||
timeSeries.ResourceData = rc.GetResourceTimeSeries()
|
||||
|
||||
if framework.TestContext.ReportDir == "" {
|
||||
framework.Logf("%s %s\n%s", TimeSeriesTag, framework.PrettyPrintJSON(timeSeries), TimeSeriesEnd)
|
||||
return
|
||||
}
|
||||
dumpDataToFile(timeSeries, timeSeries.Labels, "time_series")
|
||||
}
|
||||
|
||||
type int64arr []int64
|
||||
|
||||
func (a int64arr) Len() int { return len(a) }
|
||||
func (a int64arr) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a int64arr) Less(i, j int) bool { return a[i] < a[j] }
|
||||
|
||||
// getCumulatedPodTimeSeries gets the cumulative pod number time series.
|
||||
func getCumulatedPodTimeSeries(timePerPod map[string]metav1.Time) []int64 {
|
||||
timeSeries := make(int64arr, 0)
|
||||
for _, ts := range timePerPod {
|
||||
timeSeries = append(timeSeries, ts.Time.UnixNano())
|
||||
}
|
||||
// Sort all timestamps.
|
||||
sort.Sort(timeSeries)
|
||||
return timeSeries
|
||||
}
|
||||
|
||||
// getLatencyPerfData returns perf data of pod startup latency.
|
||||
func getLatencyPerfData(latency framework.LatencyMetric, testInfo map[string]string) *perftype.PerfData {
|
||||
return &perftype.PerfData{
|
||||
Version: framework.CurrentKubeletPerfMetricsVersion,
|
||||
DataItems: []perftype.DataItem{
|
||||
{
|
||||
Data: map[string]float64{
|
||||
"Perc50": float64(latency.Perc50) / 1000000,
|
||||
"Perc90": float64(latency.Perc90) / 1000000,
|
||||
"Perc99": float64(latency.Perc99) / 1000000,
|
||||
"Perc100": float64(latency.Perc100) / 1000000,
|
||||
},
|
||||
Unit: "ms",
|
||||
Labels: map[string]string{
|
||||
"datatype": "latency",
|
||||
"latencytype": "create-pod",
|
||||
},
|
||||
},
|
||||
},
|
||||
Labels: testInfo,
|
||||
}
|
||||
}
|
||||
|
||||
// getThroughputPerfData returns perf data of pod creation startup throughput.
|
||||
func getThroughputPerfData(batchLag time.Duration, e2eLags []framework.PodLatencyData, podsNr int, testInfo map[string]string) *perftype.PerfData {
|
||||
return &perftype.PerfData{
|
||||
Version: framework.CurrentKubeletPerfMetricsVersion,
|
||||
DataItems: []perftype.DataItem{
|
||||
{
|
||||
Data: map[string]float64{
|
||||
"batch": float64(podsNr) / batchLag.Minutes(),
|
||||
"single-worst": 1.0 / e2eLags[len(e2eLags)-1].Latency.Minutes(),
|
||||
},
|
||||
Unit: "pods/min",
|
||||
Labels: map[string]string{
|
||||
"datatype": "throughput",
|
||||
"latencytype": "create-pod",
|
||||
},
|
||||
},
|
||||
},
|
||||
Labels: testInfo,
|
||||
}
|
||||
}
|
||||
|
||||
// getTestNodeInfo returns a label map containing the test name and
|
||||
// description, the name of the node on which the test will be run, the image
|
||||
// name of the node, and the node capacities.
|
||||
func getTestNodeInfo(f *framework.Framework, testName, testDesc string) map[string]string {
|
||||
nodeName := framework.TestContext.NodeName
|
||||
node, err := f.ClientSet.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
cpu, ok := node.Status.Capacity[v1.ResourceCPU]
|
||||
if !ok {
|
||||
framework.Failf("Fail to fetch CPU capacity value of test node.")
|
||||
}
|
||||
|
||||
memory, ok := node.Status.Capacity[v1.ResourceMemory]
|
||||
if !ok {
|
||||
framework.Failf("Fail to fetch Memory capacity value of test node.")
|
||||
}
|
||||
|
||||
cpuValue, ok := cpu.AsInt64()
|
||||
if !ok {
|
||||
framework.Failf("Fail to fetch CPU capacity value as Int64.")
|
||||
}
|
||||
|
||||
memoryValue, ok := memory.AsInt64()
|
||||
if !ok {
|
||||
framework.Failf("Fail to fetch Memory capacity value as Int64.")
|
||||
}
|
||||
|
||||
image := node.Status.NodeInfo.OSImage
|
||||
if framework.TestContext.ImageDescription != "" {
|
||||
image = fmt.Sprintf("%s (%s)", image, framework.TestContext.ImageDescription)
|
||||
}
|
||||
return map[string]string{
|
||||
"node": nodeName,
|
||||
"test": testName,
|
||||
"image": image,
|
||||
"machine": fmt.Sprintf("cpu:%dcore,memory:%.1fGB", cpuValue, float32(memoryValue)/(1024*1024*1024)),
|
||||
"desc": testDesc,
|
||||
}
|
||||
}
|
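getCumulatedPodTimeSeries above flattens a map of per-pod timestamps into one sorted slice of UnixNano values, which is what logDensityTimeSeries records for the "create" and "running" phases. A minimal sketch, not part of the vendored file, with invented pod names and offsets:

	// Illustrative sketch: three pods observed at t, t+2s, and t+1s come back
	// as a sorted series of three UnixNano timestamps.
	now := metav1.Now()
	created := map[string]metav1.Time{
		"pod-a": now,
		"pod-b": metav1.NewTime(now.Add(2 * time.Second)),
		"pod-c": metav1.NewTime(now.Add(1 * time.Second)),
	}
	series := getCumulatedPodTimeSeries(created) // order: pod-a, pod-c, pod-b
	fmt.Println(len(series))                     // 3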
vendor/k8s.io/kubernetes/test/e2e_node/builder/BUILD (26 lines, generated, vendored, new file)
@@ -0,0 +1,26 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = ["build.go"],
    importpath = "k8s.io/kubernetes/test/e2e_node/builder",
    deps = ["//vendor/github.com/golang/glog:go_default_library"],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
vendor/k8s.io/kubernetes/test/e2e_node/builder/build.go (123 lines, generated, vendored, new file)
@@ -0,0 +1,123 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package builder

import (
	"flag"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strings"

	"github.com/golang/glog"
)

var k8sBinDir = flag.String("k8s-bin-dir", "", "Directory containing k8s kubelet binaries.")

var buildTargets = []string{
	"cmd/kubelet",
	"test/e2e_node/e2e_node.test",
	"vendor/github.com/onsi/ginkgo/ginkgo",
	"cluster/gce/gci/mounter",
}

func BuildGo() error {
	glog.Infof("Building k8s binaries...")
	k8sRoot, err := GetK8sRootDir()
	if err != nil {
		return fmt.Errorf("failed to locate kubernetes root directory %v.", err)
	}
	targets := strings.Join(buildTargets, " ")
	cmd := exec.Command("make", "-C", k8sRoot, fmt.Sprintf("WHAT=%s", targets))
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	err = cmd.Run()
	if err != nil {
		return fmt.Errorf("failed to build go packages %v\n", err)
	}
	return nil
}

func getK8sBin(bin string) (string, error) {
	// Use commandline specified path
	if *k8sBinDir != "" {
		absPath, err := filepath.Abs(*k8sBinDir)
		if err != nil {
			return "", err
		}
		if _, err := os.Stat(filepath.Join(*k8sBinDir, bin)); err != nil {
			return "", fmt.Errorf("Could not find %s under directory %s.", bin, absPath)
		}
		return filepath.Join(absPath, bin), nil
	}

	path, err := filepath.Abs(filepath.Dir(os.Args[0]))
	if err != nil {
		return "", fmt.Errorf("Could not find absolute path of directory containing the tests %s.", filepath.Dir(os.Args[0]))
	}
	if _, err := os.Stat(filepath.Join(path, bin)); err == nil {
		return filepath.Join(path, bin), nil
	}

	buildOutputDir, err := GetK8sBuildOutputDir()
	if err != nil {
		return "", err
	}
	if _, err := os.Stat(filepath.Join(buildOutputDir, bin)); err == nil {
		return filepath.Join(buildOutputDir, bin), nil
	}

	// Give up with error
	return "", fmt.Errorf("Unable to locate %s. Can be defined using --k8s-path.", bin)
}

// TODO: Dedup / merge this with comparable utilities in e2e/util.go
func GetK8sRootDir() (string, error) {
	// Get the directory of the current executable
	_, testExec, _, _ := runtime.Caller(0)
	path := filepath.Dir(testExec)

	// Look for the kubernetes source root directory
	if strings.Contains(path, "k8s.io/kubernetes") {
		splitPath := strings.Split(path, "k8s.io/kubernetes")
		return filepath.Join(splitPath[0], "k8s.io/kubernetes/"), nil
	}

	return "", fmt.Errorf("Could not find kubernetes source root directory.")
}

func GetK8sBuildOutputDir() (string, error) {
	k8sRoot, err := GetK8sRootDir()
	if err != nil {
		return "", err
	}
	buildOutputDir := filepath.Join(k8sRoot, "_output/local/go/bin")
	if _, err := os.Stat(buildOutputDir); err != nil {
		return "", err
	}
	return buildOutputDir, nil
}

func GetKubeletServerBin() string {
	bin, err := getK8sBin("kubelet")
	if err != nil {
		glog.Fatalf("Could not locate kubelet binary %v.", err)
	}
	return bin
}
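A short sketch of how a runner might call into this package, not part of the vendored file; it assumes the caller runs from inside a k8s.io/kubernetes checkout, since GetK8sRootDir derives the root from the compiled caller's path:

	// Illustrative sketch: build the node e2e targets, then resolve the kubelet
	// binary via --k8s-bin-dir, the test's own directory, or _output/local/go/bin.
	if err := builder.BuildGo(); err != nil {
		glog.Fatalf("failed to build node e2e targets: %v", err)
	}
	kubelet := builder.GetKubeletServerBin()
	glog.Infof("using kubelet binary at %s", kubelet)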
vendor/k8s.io/kubernetes/test/e2e_node/conformance/build/Dockerfile (49 lines, generated, vendored, new file)
@@ -0,0 +1,49 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

FROM BASEIMAGE

COPY ginkgo /usr/local/bin/
COPY e2e_node.test /usr/local/bin

# This is a placeholder that will be substituted in the Makefile.
COPY_SYSTEM_SPEC_FILE

# The following environment variables can be overridden when starting the container.
# FOCUS is a regex matching the tests to run. By default, run all conformance tests.
# SKIP is a regex matching the tests to skip. By default, skip flaky and serial tests.
# PARALLELISM is the number of processes the test will run in parallel.
# REPORT_PATH is the path in the container to save test results and logs.
# FLAKE_ATTEMPTS is the number of times to retry when a test fails. Default: 2.
# TEST_ARGS is the test arguments passed into the test.
ENV FOCUS="\[Conformance\]" \
    SKIP="\[Flaky\]|\[Serial\]" \
    PARALLELISM=8 \
    REPORT_PATH="/var/result" \
    FLAKE_ATTEMPTS=2 \
    TEST_ARGS=""

ENTRYPOINT ginkgo --focus="$FOCUS" \
    --skip="$SKIP" \
    --nodes=$PARALLELISM \
    --flakeAttempts=$FLAKE_ATTEMPTS \
    /usr/local/bin/e2e_node.test \
    -- --conformance=true \
    --prepull-images=false \
    --report-dir="$REPORT_PATH" \
    # This is a placeholder that will be substituted in the Makefile.
    --system-spec-name=SYSTEM_SPEC_NAME \
    # This is a placeholder that will be substituted in the Makefile.
    --system-spec-file=SYSTEM_SPEC_FILE_PATH \
    $TEST_ARGS
vendor/k8s.io/kubernetes/test/e2e_node/conformance/build/Makefile (85 lines, generated, vendored, new file)
@@ -0,0 +1,85 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Build the node-test image.
#
# Usage:
#   [ARCH=amd64] [REGISTRY="gcr.io/google_containers"] [BIN_DIR="../../../../_output/bin"] make (build|push) VERSION={some_version_number e.g. 0.1}

# SYSTEM_SPEC_NAME is the name of the system spec used for the node conformance
# test. The specs are expected to be in SYSTEM_SPEC_DIR.
SYSTEM_SPEC_NAME?=
SYSTEM_SPEC_DIR?=../../system/specs

# TODO(random-liu): Add this into release progress.
REGISTRY?=gcr.io/google_containers
ARCH?=amd64
# BIN_DIR is the directory to find binaries, overwrite with ../../../../_output/bin
# for local development.
BIN_DIR?=../../../../_output/dockerized/bin/linux/${ARCH}
TEMP_DIR:=$(shell mktemp -d)

BASEIMAGE_amd64=debian:jessie
BASEIMAGE_arm=arm32v7/debian:jessie
BASEIMAGE_arm64=arm64v8/debian:jessie
BASEIMAGE_ppc64le=ppc64le/debian:jessie

BASEIMAGE?=${BASEIMAGE_${ARCH}}

IMAGE_NAME:=${REGISTRY}/node-test
COPY_SYSTEM_SPEC_FILE=
SYSTEM_SPEC_FILE_PATH=
ifneq ($(strip $(SYSTEM_SPEC_NAME)),)
	IMAGE_NAME:=${IMAGE_NAME}-${SYSTEM_SPEC_NAME}
	COPY_SYSTEM_SPEC_FILE="'COPY system-spec.yaml /usr/local/etc/'"
	SYSTEM_SPEC_FILE_PATH="'/usr/local/etc/system-spec.yaml'"
endif

all: build

build:
ifndef VERSION
	$(error VERSION is undefined)
endif
	cp -r ./* ${TEMP_DIR}

	cp ${BIN_DIR}/ginkgo ${TEMP_DIR}
	cp ${BIN_DIR}/e2e_node.test ${TEMP_DIR}
ifneq ($(strip $(SYSTEM_SPEC_NAME)),)
	cp ${SYSTEM_SPEC_DIR}/${SYSTEM_SPEC_NAME}.yaml ${TEMP_DIR}/system-spec.yaml
endif

	cd ${TEMP_DIR} && sed -i.back \
		"s|BASEIMAGE|${BASEIMAGE}|g;\
		s|COPY_SYSTEM_SPEC_FILE|${COPY_SYSTEM_SPEC_FILE}|g;\
		s|SYSTEM_SPEC_NAME|${SYSTEM_SPEC_NAME}|g;\
		s|SYSTEM_SPEC_FILE_PATH|${SYSTEM_SPEC_FILE_PATH}|g" Dockerfile

	# Make scripts executable before they are copied into the Docker image. If we make them executable later, in another layer
	# they'll take up twice the space because the new executable binary differs from the old one, but everything is cached in layers.
	cd ${TEMP_DIR} && chmod a+rx \
		e2e_node.test \
		ginkgo

	docker build --pull -t ${IMAGE_NAME}-${ARCH}:${VERSION} ${TEMP_DIR}

push: build
	gcloud docker -- push ${IMAGE_NAME}-${ARCH}:${VERSION}
ifeq ($(ARCH),amd64)
	docker tag ${IMAGE_NAME}-${ARCH}:${VERSION} ${IMAGE_NAME}:${VERSION}
	gcloud docker -- push ${IMAGE_NAME}:${VERSION}
endif

.PHONY: all
vendor/k8s.io/kubernetes/test/e2e_node/conformance/run_test.sh (209 lines, generated, vendored, new executable file)
@ -0,0 +1,209 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2016 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# This script is only for demonstrating how to use the node test container. In
|
||||
# production environment, kubelet bootstrap will be more complicated, user
|
||||
# should configure the node test container accordingly.
|
||||
# In addition, this script will also be used in the node e2e test to let it use
|
||||
# the containerized test suite.
|
||||
|
||||
# TODO(random-liu): Use standard installer to install kubelet.
|
||||
# TODO(random-liu): Use standard tool to start kubelet in production way (such
|
||||
# as systemd, supervisord etc.)
|
||||
|
||||
# Refresh sudo credentials if not running on GCE.
|
||||
if ! ping -c 1 -q metadata.google.internal &> /dev/null; then
|
||||
sudo -v || exit 1
|
||||
fi
|
||||
|
||||
# FOCUS is ginkgo focus to select which tests to run. By default, FOCUS is
|
||||
# initialized as "\[Conformance\]" in the test container to run all conformance
|
||||
# test.
|
||||
FOCUS=${FOCUS:-""}
|
||||
|
||||
# SKIP is ginkgo skip to select which tests to skip. By default, SKIP is
|
||||
# initialized as "\[Flaky\]|\[Serial\]" in the test container skipping all
|
||||
# flaky and serial test.
|
||||
SKIP=${SKIP:-""}
|
||||
|
||||
# TEST_ARGS is the test arguments. It could be used to override default test
|
||||
# arguments in the container.
|
||||
TEST_ARGS=${TEST_ARGS:-""}
|
||||
|
||||
# REGISTRY is the image registry for node test image.
|
||||
REGISTRY=${REGISTRY:-"gcr.io/google_containers"}
|
||||
|
||||
# ARCH is the architecture of current machine, the script will use this to
|
||||
# select corresponding test container image.
|
||||
ARCH=${ARCH:-"amd64"}
|
||||
|
||||
# VERSION is the version of the test container image.
|
||||
VERSION=${VERSION:-"0.2"}
|
||||
|
||||
# KUBELET_BIN is the kubelet binary name. If it is not specified, use the
|
||||
# default binary name "kubelet".
|
||||
KUBELET_BIN=${KUBELET_BIN:-"kubelet"}
|
||||
|
||||
# KUBELET is the kubelet binary path. If it is not specified, assume kubelet is
|
||||
# in PATH.
|
||||
KUBELET=${KUBELET:-"`which $KUBELET_BIN`"}
|
||||
|
||||
# LOG_DIR is the absolute path of the directory where the test will collect all
|
||||
# logs to. By default, use the current directory.
|
||||
LOG_DIR=${LOG_DIR:-`pwd`}
|
||||
mkdir -p $LOG_DIR
|
||||
|
||||
# NETWORK_PLUGIN is the network plugin used by kubelet. Do not use network
|
||||
# plugin by default.
|
||||
NETWORK_PLUGIN=${NETWORK_PLUGIN:-""}
|
||||
|
||||
# CNI_CONF_DIR is the path to the network plugin config files.
CNI_CONF_DIR=${CNI_CONF_DIR:-""}

# CNI_BIN_DIR is the path to the network plugin binaries.
CNI_BIN_DIR=${CNI_BIN_DIR:-""}
|
||||
|
||||
# KUBELET_KUBECONFIG is the path to the kubelet's kubeconfig file.
KUBELET_KUBECONFIG=${KUBELET_KUBECONFIG:-"/var/lib/kubelet/kubeconfig"}
|
||||
|
||||
# Creates a kubeconfig file for the kubelet.
|
||||
# Args: address (e.g. "http://localhost:8080"), destination file path
|
||||
function create-kubelet-kubeconfig() {
|
||||
local api_addr="${1}"
|
||||
local dest="${2}"
|
||||
local dest_dir="$(dirname "${dest}")"
|
||||
mkdir -p "${dest_dir}" &>/dev/null || sudo mkdir -p "${dest_dir}"
|
||||
sudo=$(test -w "${dest_dir}" || echo "sudo -E")
|
||||
cat <<EOF | ${sudo} tee "${dest}" > /dev/null
|
||||
apiVersion: v1
|
||||
kind: Config
|
||||
clusters:
|
||||
- cluster:
|
||||
server: ${api_addr}
|
||||
name: local
|
||||
contexts:
|
||||
- context:
|
||||
cluster: local
|
||||
name: local
|
||||
current-context: local
|
||||
EOF
|
||||
}
|
||||
|
||||
# start_kubelet starts kubelet and redirect kubelet log to $LOG_DIR/kubelet.log.
|
||||
kubelet_log=kubelet.log
|
||||
start_kubelet() {
|
||||
echo "Creating kubelet.kubeconfig"
|
||||
create-kubelet-kubeconfig "http://localhost:8080" $KUBELET_KUBECONFIG
|
||||
echo "Starting kubelet..."
|
||||
sudo -b $KUBELET $@ &>$LOG_DIR/$kubelet_log
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Failed to start kubelet"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
# wait_kubelet retries for 10 times for kubelet to be ready by checking http://127.0.0.1:10255/healthz.
|
||||
wait_kubelet() {
|
||||
echo "Health checking kubelet..."
|
||||
healthCheckURL=http://127.0.0.1:10255/healthz
|
||||
local maxRetry=10
|
||||
local cur=1
|
||||
while [ $cur -le $maxRetry ]; do
|
||||
curl -s $healthCheckURL > /dev/null
|
||||
if [ $? -eq 0 ]; then
|
||||
echo "Kubelet is ready"
|
||||
break
|
||||
fi
|
||||
if [ $cur -eq $maxRetry ]; then
|
||||
echo "Health check exceeds max retry"
|
||||
exit 1
|
||||
fi
|
||||
echo "Kubelet is not ready"
|
||||
sleep 1
|
||||
((cur++))
|
||||
done
|
||||
}
|
||||
|
||||
# kill_kubelet kills kubelet.
|
||||
kill_kubelet() {
|
||||
echo "Stopping kubelet..."
|
||||
sudo pkill $KUBELET_BIN
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Failed to stop kubelet."
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
# run_test runs the node test container.
|
||||
run_test() {
|
||||
env=""
|
||||
if [ ! -z "$FOCUS" ]; then
|
||||
env="$env -e FOCUS=\"$FOCUS\""
|
||||
fi
|
||||
if [ ! -z "$SKIP" ]; then
|
||||
env="$env -e SKIP=\"$SKIP\""
|
||||
fi
|
||||
if [ ! -z "$TEST_ARGS" ]; then
|
||||
env="$env -e TEST_ARGS=\"$TEST_ARGS\""
|
||||
fi
|
||||
# The test assumes that inside the container:
|
||||
# * kubelet manifest path is mounted to the same path;
|
||||
# * log collect directory is mounted to /var/result;
|
||||
# * root file system is mounted to /rootfs.
|
||||
sudo sh -c "docker run -it --rm --privileged=true --net=host -v /:/rootfs \
|
||||
-v $config_dir:$config_dir -v $LOG_DIR:/var/result ${env} $REGISTRY/node-test-$ARCH:$VERSION"
|
||||
}
|
||||
|
||||
# Check whether kubelet is running. If kubelet is running, tell the user to stop
|
||||
# it before running the test.
|
||||
pid=`pidof $KUBELET_BIN`
|
||||
if [ ! -z $pid ]; then
|
||||
echo "Kubelet is running (pid=$pid), please stop it before running the test."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
volume_stats_agg_period=10s
|
||||
allow_privileged=true
|
||||
serialize_image_pulls=false
|
||||
config_dir=`mktemp -d`
|
||||
file_check_frequency=10s
|
||||
pod_cidr=10.100.0.0/24
|
||||
log_level=4
|
||||
start_kubelet --kubeconfig "${KUBELET_KUBECONFIG}" \
|
||||
--volume-stats-agg-period $volume_stats_agg_period \
|
||||
--allow-privileged=$allow_privileged \
|
||||
--serialize-image-pulls=$serialize_image_pulls \
|
||||
--pod-manifest-path $config_dir \
|
||||
--file-check-frequency $file_check_frequency \
|
||||
--pod-cidr=$pod_cidr \
|
||||
--runtime-cgroups=/docker-daemon \
|
||||
--kubelet-cgroups=/kubelet \
|
||||
--system-cgroups=/system \
|
||||
--cgroup-root=/ \
|
||||
--network-plugin=$NETWORK_PLUGIN \
|
||||
--cni-conf-dir=$CNI_CONF_DIR \
|
||||
--cni-bin-dir=$CNI_BIN_DIR \
|
||||
--v=$log_level \
|
||||
--logtostderr
|
||||
|
||||
wait_kubelet
|
||||
|
||||
run_test
|
||||
|
||||
kill_kubelet
|
||||
|
||||
# Clean up the kubelet config directory
|
||||
sudo rm -rf $config_dir
|
vendor/k8s.io/kubernetes/test/e2e_node/container.go (129 lines, generated, vendored, new file)
@ -0,0 +1,129 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
)
|
||||
|
||||
// One pod one container
|
||||
// TODO: This should be migrated to the e2e framework.
|
||||
type ConformanceContainer struct {
|
||||
Container v1.Container
|
||||
RestartPolicy v1.RestartPolicy
|
||||
Volumes []v1.Volume
|
||||
ImagePullSecrets []string
|
||||
|
||||
PodClient *framework.PodClient
|
||||
podName string
|
||||
PodSecurityContext *v1.PodSecurityContext
|
||||
}
|
||||
|
||||
func (cc *ConformanceContainer) Create() {
|
||||
cc.podName = cc.Container.Name + string(uuid.NewUUID())
|
||||
imagePullSecrets := []v1.LocalObjectReference{}
|
||||
for _, s := range cc.ImagePullSecrets {
|
||||
imagePullSecrets = append(imagePullSecrets, v1.LocalObjectReference{Name: s})
|
||||
}
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: cc.podName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
RestartPolicy: cc.RestartPolicy,
|
||||
Containers: []v1.Container{
|
||||
cc.Container,
|
||||
},
|
||||
SecurityContext: cc.PodSecurityContext,
|
||||
Volumes: cc.Volumes,
|
||||
ImagePullSecrets: imagePullSecrets,
|
||||
},
|
||||
}
|
||||
cc.PodClient.Create(pod)
|
||||
}
|
||||
|
||||
func (cc *ConformanceContainer) Delete() error {
|
||||
return cc.PodClient.Delete(cc.podName, metav1.NewDeleteOptions(0))
|
||||
}
|
||||
|
||||
func (cc *ConformanceContainer) IsReady() (bool, error) {
|
||||
pod, err := cc.PodClient.Get(cc.podName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return podutil.IsPodReady(pod), nil
|
||||
}
|
||||
|
||||
func (cc *ConformanceContainer) GetPhase() (v1.PodPhase, error) {
|
||||
pod, err := cc.PodClient.Get(cc.podName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return v1.PodUnknown, err
|
||||
}
|
||||
return pod.Status.Phase, nil
|
||||
}
|
||||
|
||||
func (cc *ConformanceContainer) GetStatus() (v1.ContainerStatus, error) {
|
||||
pod, err := cc.PodClient.Get(cc.podName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return v1.ContainerStatus{}, err
|
||||
}
|
||||
statuses := pod.Status.ContainerStatuses
|
||||
if len(statuses) != 1 || statuses[0].Name != cc.Container.Name {
|
||||
return v1.ContainerStatus{}, fmt.Errorf("unexpected container statuses %v", statuses)
|
||||
}
|
||||
return statuses[0], nil
|
||||
}
|
||||
|
||||
func (cc *ConformanceContainer) Present() (bool, error) {
|
||||
_, err := cc.PodClient.Get(cc.podName, metav1.GetOptions{})
|
||||
if err == nil {
|
||||
return true, nil
|
||||
}
|
||||
if errors.IsNotFound(err) {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
type ContainerState string
|
||||
|
||||
const (
|
||||
ContainerStateWaiting ContainerState = "Waiting"
|
||||
ContainerStateRunning ContainerState = "Running"
|
||||
ContainerStateTerminated ContainerState = "Terminated"
|
||||
ContainerStateUnknown ContainerState = "Unknown"
|
||||
)
|
||||
|
||||
func GetContainerState(state v1.ContainerState) ContainerState {
|
||||
if state.Waiting != nil {
|
||||
return ContainerStateWaiting
|
||||
}
|
||||
if state.Running != nil {
|
||||
return ContainerStateRunning
|
||||
}
|
||||
if state.Terminated != nil {
|
||||
return ContainerStateTerminated
|
||||
}
|
||||
return ContainerStateUnknown
|
||||
}
|
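ConformanceContainer wraps a single-container pod behind the framework's PodClient. A minimal sketch of its lifecycle in a test body, not part of the vendored file (the container spec is invented; f, busyboxImage, and the Ginkgo/Gomega dot-imports are assumed from the surrounding package):

	// Illustrative sketch: create the pod, wait for it to be running, clean up.
	cc := ConformanceContainer{
		PodClient:     f.PodClient(),
		Container:     v1.Container{Name: "cc-demo", Image: busyboxImage, Command: []string{"sleep", "3600"}},
		RestartPolicy: v1.RestartPolicyAlways,
	}
	cc.Create()
	defer cc.Delete()

	Eventually(func() (v1.PodPhase, error) {
		return cc.GetPhase() // polls the pod created above
	}, 2*time.Minute, 5*time.Second).Should(Equal(v1.PodRunning))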
vendor/k8s.io/kubernetes/test/e2e_node/container_manager_test.go (246 lines, generated, vendored, new file)
@ -0,0 +1,246 @@
|
||||
// +build linux
|
||||
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
|
||||
func getOOMScoreForPid(pid int) (int, error) {
|
||||
procfsPath := path.Join("/proc", strconv.Itoa(pid), "oom_score_adj")
|
||||
out, err := exec.Command("sudo", "cat", procfsPath).CombinedOutput()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return strconv.Atoi(strings.TrimSpace(string(out)))
|
||||
}
|
||||
|
||||
func validateOOMScoreAdjSetting(pid int, expectedOOMScoreAdj int) error {
|
||||
oomScore, err := getOOMScoreForPid(pid)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get oom_score_adj for %d: %v", pid, err)
|
||||
}
|
||||
if expectedOOMScoreAdj != oomScore {
|
||||
return fmt.Errorf("expected pid %d's oom_score_adj to be %d; found %d", pid, expectedOOMScoreAdj, oomScore)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateOOMScoreAdjSettingIsInRange(pid int, expectedMinOOMScoreAdj, expectedMaxOOMScoreAdj int) error {
|
||||
oomScore, err := getOOMScoreForPid(pid)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get oom_score_adj for %d", pid)
|
||||
}
|
||||
if oomScore < expectedMinOOMScoreAdj {
|
||||
return fmt.Errorf("expected pid %d's oom_score_adj to be >= %d; found %d", pid, expectedMinOOMScoreAdj, oomScore)
|
||||
}
|
||||
if oomScore >= expectedMaxOOMScoreAdj {
|
||||
return fmt.Errorf("expected pid %d's oom_score_adj to be < %d; found %d", pid, expectedMaxOOMScoreAdj, oomScore)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
|
||||
f := framework.NewDefaultFramework("kubelet-container-manager")
|
||||
Describe("Validate OOM score adjustments", func() {
|
||||
Context("once the node is setup", func() {
|
||||
It("docker daemon's oom-score-adj should be -999", func() {
|
||||
dockerPids, err := getPidsForProcess(dockerProcessName, dockerPidFile)
|
||||
Expect(err).To(BeNil(), "failed to get list of docker daemon pids")
|
||||
for _, pid := range dockerPids {
|
||||
Eventually(func() error {
|
||||
return validateOOMScoreAdjSetting(pid, -999)
|
||||
}, 5*time.Minute, 30*time.Second).Should(BeNil())
|
||||
}
|
||||
})
|
||||
It("Kubelet's oom-score-adj should be -999", func() {
|
||||
kubeletPids, err := getPidsForProcess(kubeletProcessName, "")
|
||||
Expect(err).To(BeNil(), "failed to get list of kubelet pids")
|
||||
Expect(len(kubeletPids)).To(Equal(1), "expected only one kubelet process; found %d", len(kubeletPids))
|
||||
Eventually(func() error {
|
||||
return validateOOMScoreAdjSetting(kubeletPids[0], -999)
|
||||
}, 5*time.Minute, 30*time.Second).Should(BeNil())
|
||||
})
|
||||
Context("", func() {
|
||||
It("pod infra containers oom-score-adj should be -998 and best effort container's should be 1000", func() {
|
||||
// Take a snapshot of existing pause processes. These were
|
||||
// created before this test, and may not be infra
|
||||
// containers. They should be excluded from the test.
|
||||
existingPausePIDs, err := getPidsForProcess("pause", "")
|
||||
Expect(err).To(BeNil(), "failed to list all pause processes on the node")
|
||||
existingPausePIDSet := sets.NewInt(existingPausePIDs...)
|
||||
|
||||
podClient := f.PodClient()
|
||||
podName := "besteffort" + string(uuid.NewUUID())
|
||||
podClient.Create(&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: framework.ServeHostnameImage,
|
||||
Name: podName,
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
var pausePids []int
|
||||
By("checking infra container's oom-score-adj")
|
||||
Eventually(func() error {
|
||||
pausePids, err = getPidsForProcess("pause", "")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get list of pause pids: %v", err)
|
||||
}
|
||||
for _, pid := range pausePids {
|
||||
if existingPausePIDSet.Has(pid) {
|
||||
// Not created by this test. Ignore it.
|
||||
continue
|
||||
}
|
||||
if err := validateOOMScoreAdjSetting(pid, -998); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}, 2*time.Minute, time.Second*4).Should(BeNil())
|
||||
var shPids []int
|
||||
By("checking besteffort container's oom-score-adj")
|
||||
Eventually(func() error {
|
||||
shPids, err = getPidsForProcess("serve_hostname", "")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get list of serve hostname process pids: %v", err)
|
||||
}
|
||||
if len(shPids) != 1 {
|
||||
return fmt.Errorf("expected only one serve_hostname process; found %d", len(shPids))
|
||||
}
|
||||
return validateOOMScoreAdjSetting(shPids[0], 1000)
|
||||
}, 2*time.Minute, time.Second*4).Should(BeNil())
|
||||
})
|
||||
// Log the running containers here to help debugging. Use `docker ps`
|
||||
// directly for now because the test is already docker specific.
|
||||
AfterEach(func() {
|
||||
if CurrentGinkgoTestDescription().Failed {
|
||||
By("Dump all running docker containers")
|
||||
output, err := exec.Command("docker", "ps").CombinedOutput()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.Logf("Running docker containers:\n%s", string(output))
|
||||
}
|
||||
})
|
||||
})
|
||||
It("guaranteed container's oom-score-adj should be -998", func() {
|
||||
podClient := f.PodClient()
|
||||
podName := "guaranteed" + string(uuid.NewUUID())
|
||||
podClient.Create(&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
|
||||
Name: podName,
|
||||
Resources: v1.ResourceRequirements{
|
||||
Limits: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("100m"),
|
||||
v1.ResourceMemory: resource.MustParse("50Mi"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
var (
|
||||
ngPids []int
|
||||
err error
|
||||
)
|
||||
Eventually(func() error {
|
||||
ngPids, err = getPidsForProcess("nginx", "")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get list of nginx process pids: %v", err)
|
||||
}
|
||||
for _, pid := range ngPids {
|
||||
if err := validateOOMScoreAdjSetting(pid, -998); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}, 2*time.Minute, time.Second*4).Should(BeNil())
|
||||
|
||||
})
|
||||
It("burstable container's oom-score-adj should be between [2, 1000)", func() {
|
||||
podClient := f.PodClient()
|
||||
podName := "burstable" + string(uuid.NewUUID())
|
||||
podClient.Create(&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: imageutils.GetE2EImage(imageutils.TestWebserver),
|
||||
Name: podName,
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("100m"),
|
||||
v1.ResourceMemory: resource.MustParse("50Mi"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
var (
|
||||
wsPids []int
|
||||
err error
|
||||
)
|
||||
Eventually(func() error {
|
||||
wsPids, err = getPidsForProcess("test-webserver", "")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get list of test-webserver process pids: %v", err)
|
||||
}
|
||||
for _, pid := range wsPids {
|
||||
if err := validateOOMScoreAdjSettingIsInRange(pid, 2, 1000); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}, 2*time.Minute, time.Second*4).Should(BeNil())
|
||||
|
||||
// TODO: Test the oom-score-adj logic for burstable more accurately.
|
||||
})
|
||||
})
|
||||
})
|
||||
})
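The assertions above rely on helpers such as getPidsForProcess and validateOOMScoreAdjSetting that are defined elsewhere in this package. As a rough, self-contained sketch of what such a check could look like, assuming only the standard /proc layout (the helper name, error wording, and the example pid are illustrative, not the package's actual implementation):

package main

import (
	"fmt"
	"io/ioutil"
	"strconv"
	"strings"
)

// checkOOMScoreAdj is a hypothetical stand-in for validateOOMScoreAdjSetting:
// it reads /proc/<pid>/oom_score_adj and compares it with the expected value.
func checkOOMScoreAdj(pid, expected int) error {
	data, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/oom_score_adj", pid))
	if err != nil {
		return fmt.Errorf("failed to read oom_score_adj for pid %d: %v", pid, err)
	}
	actual, err := strconv.Atoi(strings.TrimSpace(string(data)))
	if err != nil {
		return fmt.Errorf("failed to parse oom_score_adj %q: %v", string(data), err)
	}
	if actual != expected {
		return fmt.Errorf("expected oom_score_adj %d for pid %d, got %d", expected, pid, actual)
	}
	return nil
}

func main() {
	// The tests above expect -999 for the kubelet; pid 1 is used here only as an example.
	fmt.Println(checkOOMScoreAdj(1, -999))
}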
|
424
vendor/k8s.io/kubernetes/test/e2e_node/cpu_manager_test.go
generated
vendored
Normal file
@ -0,0 +1,424 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
// Helper for makeCPUManagerPod().
|
||||
type ctnAttribute struct {
|
||||
ctnName string
|
||||
cpuRequest string
|
||||
cpuLimit string
|
||||
}
|
||||
|
||||
// makeCPUManagerPod returns a pod with the provided ctnAttributes.
|
||||
func makeCPUManagerPod(podName string, ctnAttributes []ctnAttribute) *v1.Pod {
|
||||
var containers []v1.Container
|
||||
for _, ctnAttr := range ctnAttributes {
|
||||
cpusetCmd := "grep Cpus_allowed_list /proc/self/status | cut -f2 && sleep 1d"
|
||||
ctn := v1.Container{
|
||||
Name: ctnAttr.ctnName,
|
||||
Image: busyboxImage,
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceName(v1.ResourceCPU): resource.MustParse(ctnAttr.cpuRequest),
|
||||
v1.ResourceName(v1.ResourceMemory): resource.MustParse("100Mi"),
|
||||
},
|
||||
Limits: v1.ResourceList{
|
||||
v1.ResourceName(v1.ResourceCPU): resource.MustParse(ctnAttr.cpuLimit),
|
||||
v1.ResourceName(v1.ResourceMemory): resource.MustParse("100Mi"),
|
||||
},
|
||||
},
|
||||
Command: []string{"sh", "-c", cpusetCmd},
|
||||
}
|
||||
containers = append(containers, ctn)
|
||||
}
|
||||
|
||||
return &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Containers: containers,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func deletePods(f *framework.Framework, podNames []string) {
|
||||
for _, podName := range podNames {
|
||||
gp := int64(0)
|
||||
delOpts := metav1.DeleteOptions{
|
||||
GracePeriodSeconds: &gp,
|
||||
}
|
||||
f.PodClient().DeleteSync(podName, &delOpts, framework.DefaultPodDeletionTimeout)
|
||||
}
|
||||
}
|
||||
|
||||
func getLocalNodeCPUDetails(f *framework.Framework) (cpuCapVal int64, cpuAllocVal int64, cpuResVal int64) {
|
||||
localNodeCap := getLocalNode(f).Status.Capacity
|
||||
cpuCap := localNodeCap[v1.ResourceCPU]
|
||||
localNodeAlloc := getLocalNode(f).Status.Allocatable
|
||||
cpuAlloc := localNodeAlloc[v1.ResourceCPU]
|
||||
cpuRes := cpuCap.Copy()
|
||||
cpuRes.Sub(cpuAlloc)
|
||||
|
||||
// RoundUp reserved CPUs to get only integer cores.
|
||||
cpuRes.RoundUp(0)
|
||||
|
||||
return cpuCap.Value(), (cpuCap.Value() - cpuRes.Value()), cpuRes.Value()
|
||||
}
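// A worked example of the arithmetic above (illustrative numbers, not taken
// from any particular node): with capacity = 4 CPUs and allocatable = 3800m,
// the reserved quantity is 200m, which RoundUp(0) turns into 1 full core.
// The function then reports cpuCapVal=4, cpuAllocVal=3 (capacity minus the
// rounded reservation) and cpuResVal=1, so the static CPU Manager tests below
// only ever deal with whole cores.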
|
||||
|
||||
// TODO(balajismaniam): Make this func generic to all container runtimes.
|
||||
func waitForContainerRemoval(ctnPartName string) {
|
||||
Eventually(func() bool {
|
||||
err := exec.Command("/bin/sh", "-c", fmt.Sprintf("if [ -n \"$(docker ps -a | grep -i %s)\" ]; then exit 1; fi", ctnPartName)).Run()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}, 2*time.Minute, 1*time.Second).Should(BeTrue())
|
||||
}
|
||||
|
||||
func isHTEnabled() bool {
|
||||
outData, err := exec.Command("/bin/sh", "-c", "lscpu | grep \"Thread(s) per core:\" | cut -d \":\" -f 2").Output()
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
threadsPerCore, err := strconv.Atoi(strings.TrimSpace(string(outData)))
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
return threadsPerCore > 1
|
||||
}
|
||||
|
||||
func getCPUSiblingList(cpuRes int64) string {
|
||||
out, err := exec.Command("/bin/sh", "-c", fmt.Sprintf("cat /sys/devices/system/cpu/cpu%d/topology/thread_siblings_list | tr -d \"\n\r\"", cpuRes)).Output()
|
||||
framework.ExpectNoError(err)
|
||||
return string(out)
|
||||
}
|
||||
|
||||
func setOldKubeletConfig(f *framework.Framework, oldCfg *kubeletconfig.KubeletConfiguration) {
|
||||
if oldCfg != nil {
|
||||
framework.ExpectNoError(setKubeletConfiguration(f, oldCfg))
|
||||
}
|
||||
}
|
||||
|
||||
func enableCPUManagerInKubelet(f *framework.Framework) (oldCfg *kubeletconfig.KubeletConfiguration) {
|
||||
// Run only if the container runtime is Docker.
|
||||
// TODO(balajismaniam): Make this test generic to all container runtimes.
|
||||
framework.RunIfContainerRuntimeIs("docker")
|
||||
|
||||
// Enable CPU Manager in Kubelet with static policy.
|
||||
oldCfg, err := getCurrentKubeletConfig()
|
||||
framework.ExpectNoError(err)
|
||||
newCfg := oldCfg.DeepCopy()
|
||||
|
||||
// Enable CPU Manager using feature gate.
|
||||
newCfg.FeatureGates[string(features.CPUManager)] = true
|
||||
|
||||
// Set the CPU Manager policy to static.
|
||||
newCfg.CPUManagerPolicy = string(cpumanager.PolicyStatic)
|
||||
|
||||
// Set the CPU Manager reconcile period to 1 second.
|
||||
newCfg.CPUManagerReconcilePeriod = metav1.Duration{Duration: 1 * time.Second}
|
||||
|
||||
// The Kubelet panics if either kube-reserved or system-reserved is not set
|
||||
// when CPU Manager is enabled. Set cpu in kube-reserved > 0 so that
|
||||
// kubelet doesn't panic.
|
||||
if newCfg.KubeReserved == nil {
|
||||
newCfg.KubeReserved = map[string]string{}
|
||||
}
|
||||
|
||||
if _, ok := newCfg.KubeReserved["cpu"]; !ok {
|
||||
newCfg.KubeReserved["cpu"] = "200m"
|
||||
}
|
||||
// Update the Kubelet configuration.
|
||||
framework.ExpectNoError(setKubeletConfiguration(f, newCfg))
|
||||
|
||||
// Wait for the Kubelet to be ready.
|
||||
Eventually(func() bool {
|
||||
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
|
||||
return len(nodeList.Items) == 1
|
||||
}, time.Minute, time.Second).Should(BeTrue())
|
||||
|
||||
return oldCfg
|
||||
}
|
||||
|
||||
func runCPUManagerTests(f *framework.Framework) {
|
||||
var cpuCap, cpuAlloc, cpuRes int64
|
||||
var oldCfg *kubeletconfig.KubeletConfiguration
|
||||
var cpuListString, expAllowedCPUsListRegex string
|
||||
var cpuList []int
|
||||
var cpu1, cpu2 int
|
||||
var cset cpuset.CPUSet
|
||||
var err error
|
||||
var ctnAttrs []ctnAttribute
|
||||
var pod, pod1, pod2 *v1.Pod
|
||||
|
||||
It("should assign CPUs as expected based on the Pod spec", func() {
|
||||
cpuCap, cpuAlloc, cpuRes = getLocalNodeCPUDetails(f)
|
||||
|
||||
// Skip CPU Manager tests altogether if the CPU capacity < 2.
|
||||
if cpuCap < 2 {
|
||||
framework.Skipf("Skipping CPU Manager tests since the CPU capacity < 2")
|
||||
}
|
||||
|
||||
// Enable CPU Manager in the kubelet.
|
||||
oldCfg = enableCPUManagerInKubelet(f)
|
||||
|
||||
By("running a non-Gu pod")
|
||||
ctnAttrs = []ctnAttribute{
|
||||
{
|
||||
ctnName: "non-gu-container",
|
||||
cpuRequest: "100m",
|
||||
cpuLimit: "200m",
|
||||
},
|
||||
}
|
||||
pod = makeCPUManagerPod("non-gu-pod", ctnAttrs)
|
||||
pod = f.PodClient().CreateSync(pod)
|
||||
|
||||
By("checking if the expected cpuset was assigned")
|
||||
expAllowedCPUsListRegex = fmt.Sprintf("^0-%d\n$", cpuCap-1)
|
||||
err = f.PodClient().MatchContainerOutput(pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex)
|
||||
framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
|
||||
pod.Spec.Containers[0].Name, pod.Name)
|
||||
|
||||
By("by deleting the pods and waiting for container removal")
|
||||
deletePods(f, []string{pod.Name})
|
||||
waitForContainerRemoval(fmt.Sprintf("%s_%s", pod.Spec.Containers[0].Name, pod.Name))
|
||||
|
||||
By("running a Gu pod")
|
||||
ctnAttrs = []ctnAttribute{
|
||||
{
|
||||
ctnName: "gu-container",
|
||||
cpuRequest: "1000m",
|
||||
cpuLimit: "1000m",
|
||||
},
|
||||
}
|
||||
pod = makeCPUManagerPod("gu-pod", ctnAttrs)
|
||||
pod = f.PodClient().CreateSync(pod)
|
||||
|
||||
By("checking if the expected cpuset was assigned")
|
||||
cpu1 = 1
|
||||
if isHTEnabled() {
|
||||
cpuList = cpuset.MustParse(getCPUSiblingList(0)).ToSlice()
|
||||
cpu1 = cpuList[1]
|
||||
}
|
||||
expAllowedCPUsListRegex = fmt.Sprintf("^%d\n$", cpu1)
|
||||
err = f.PodClient().MatchContainerOutput(pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex)
|
||||
framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
|
||||
pod.Spec.Containers[0].Name, pod.Name)
|
||||
|
||||
By("by deleting the pods and waiting for container removal")
|
||||
deletePods(f, []string{pod.Name})
|
||||
waitForContainerRemoval(fmt.Sprintf("%s_%s", pod.Spec.Containers[0].Name, pod.Name))
|
||||
|
||||
By("running multiple Gu and non-Gu pods")
|
||||
ctnAttrs = []ctnAttribute{
|
||||
{
|
||||
ctnName: "gu-container",
|
||||
cpuRequest: "1000m",
|
||||
cpuLimit: "1000m",
|
||||
},
|
||||
}
|
||||
pod1 = makeCPUManagerPod("gu-pod", ctnAttrs)
|
||||
pod1 = f.PodClient().CreateSync(pod1)
|
||||
|
||||
ctnAttrs = []ctnAttribute{
|
||||
{
|
||||
ctnName: "non-gu-container",
|
||||
cpuRequest: "200m",
|
||||
cpuLimit: "300m",
|
||||
},
|
||||
}
|
||||
pod2 = makeCPUManagerPod("non-gu-pod", ctnAttrs)
|
||||
pod2 = f.PodClient().CreateSync(pod2)
|
||||
|
||||
By("checking if the expected cpuset was assigned")
|
||||
cpu1 = 1
|
||||
if isHTEnabled() {
|
||||
cpuList = cpuset.MustParse(getCPUSiblingList(0)).ToSlice()
|
||||
cpu1 = cpuList[1]
|
||||
}
|
||||
expAllowedCPUsListRegex = fmt.Sprintf("^%d\n$", cpu1)
|
||||
err = f.PodClient().MatchContainerOutput(pod1.Name, pod1.Spec.Containers[0].Name, expAllowedCPUsListRegex)
|
||||
framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
|
||||
pod1.Spec.Containers[0].Name, pod1.Name)
|
||||
|
||||
cpuListString = "0"
|
||||
if cpuAlloc > 2 {
|
||||
cset = cpuset.MustParse(fmt.Sprintf("0-%d", cpuCap-1))
|
||||
cpuListString = fmt.Sprintf("%s", cset.Difference(cpuset.NewCPUSet(cpu1)))
|
||||
}
|
||||
expAllowedCPUsListRegex = fmt.Sprintf("^%s\n$", cpuListString)
|
||||
err = f.PodClient().MatchContainerOutput(pod2.Name, pod2.Spec.Containers[0].Name, expAllowedCPUsListRegex)
|
||||
framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
|
||||
pod2.Spec.Containers[0].Name, pod2.Name)
|
||||
|
||||
By("by deleting the pods and waiting for container removal")
|
||||
deletePods(f, []string{pod1.Name, pod2.Name})
|
||||
waitForContainerRemoval(fmt.Sprintf("%s_%s", pod1.Spec.Containers[0].Name, pod1.Name))
|
||||
waitForContainerRemoval(fmt.Sprintf("%s_%s", pod2.Spec.Containers[0].Name, pod2.Name))
|
||||
|
||||
// Skip rest of the tests if CPU capacity < 3.
|
||||
if cpuCap < 3 {
|
||||
framework.Skipf("Skipping rest of the CPU Manager tests since CPU capacity < 3")
|
||||
}
|
||||
|
||||
By("running a Gu pod requesting multiple CPUs")
|
||||
ctnAttrs = []ctnAttribute{
|
||||
{
|
||||
ctnName: "gu-container",
|
||||
cpuRequest: "2000m",
|
||||
cpuLimit: "2000m",
|
||||
},
|
||||
}
|
||||
pod = makeCPUManagerPod("gu-pod", ctnAttrs)
|
||||
pod = f.PodClient().CreateSync(pod)
|
||||
|
||||
By("checking if the expected cpuset was assigned")
|
||||
cpuListString = "1-2"
|
||||
if isHTEnabled() {
|
||||
cpuListString = "2-3"
|
||||
cpuList = cpuset.MustParse(getCPUSiblingList(0)).ToSlice()
|
||||
if cpuList[1] != 1 {
|
||||
cset = cpuset.MustParse(getCPUSiblingList(1))
|
||||
cpuListString = fmt.Sprintf("%s", cset)
|
||||
}
|
||||
}
|
||||
expAllowedCPUsListRegex = fmt.Sprintf("^%s\n$", cpuListString)
|
||||
err = f.PodClient().MatchContainerOutput(pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex)
|
||||
framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
|
||||
pod.Spec.Containers[0].Name, pod.Name)
|
||||
|
||||
By("by deleting the pods and waiting for container removal")
|
||||
deletePods(f, []string{pod.Name})
|
||||
waitForContainerRemoval(fmt.Sprintf("%s_%s", pod.Spec.Containers[0].Name, pod.Name))
|
||||
|
||||
By("running a Gu pod with multiple containers requesting integer CPUs")
|
||||
ctnAttrs = []ctnAttribute{
|
||||
{
|
||||
ctnName: "gu-container1",
|
||||
cpuRequest: "1000m",
|
||||
cpuLimit: "1000m",
|
||||
},
|
||||
{
|
||||
ctnName: "gu-container2",
|
||||
cpuRequest: "1000m",
|
||||
cpuLimit: "1000m",
|
||||
},
|
||||
}
|
||||
pod = makeCPUManagerPod("gu-pod", ctnAttrs)
|
||||
pod = f.PodClient().CreateSync(pod)
|
||||
|
||||
By("checking if the expected cpuset was assigned")
|
||||
cpu1, cpu2 = 1, 2
|
||||
if isHTEnabled() {
|
||||
cpuList = cpuset.MustParse(getCPUSiblingList(0)).ToSlice()
|
||||
if cpuList[1] != 1 {
|
||||
cpu1, cpu2 = cpuList[1], 1
|
||||
}
|
||||
}
|
||||
|
||||
expAllowedCPUsListRegex = fmt.Sprintf("^%d|%d\n$", cpu1, cpu2)
|
||||
err = f.PodClient().MatchContainerOutput(pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex)
|
||||
framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
|
||||
pod.Spec.Containers[0].Name, pod.Name)
|
||||
|
||||
err = f.PodClient().MatchContainerOutput(pod.Name, pod.Spec.Containers[1].Name, expAllowedCPUsListRegex)
|
||||
framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
|
||||
pod.Spec.Containers[1].Name, pod.Name)
|
||||
|
||||
By("by deleting the pods and waiting for container removal")
|
||||
deletePods(f, []string{pod.Name})
|
||||
waitForContainerRemoval(fmt.Sprintf("%s_%s", pod.Spec.Containers[0].Name, pod.Name))
|
||||
waitForContainerRemoval(fmt.Sprintf("%s_%s", pod.Spec.Containers[1].Name, pod.Name))
|
||||
|
||||
By("running multiple Gu pods")
|
||||
ctnAttrs = []ctnAttribute{
|
||||
{
|
||||
ctnName: "gu-container1",
|
||||
cpuRequest: "1000m",
|
||||
cpuLimit: "1000m",
|
||||
},
|
||||
}
|
||||
pod1 = makeCPUManagerPod("gu-pod1", ctnAttrs)
|
||||
pod1 = f.PodClient().CreateSync(pod1)
|
||||
|
||||
ctnAttrs = []ctnAttribute{
|
||||
{
|
||||
ctnName: "gu-container2",
|
||||
cpuRequest: "1000m",
|
||||
cpuLimit: "1000m",
|
||||
},
|
||||
}
|
||||
pod2 = makeCPUManagerPod("gu-pod2", ctnAttrs)
|
||||
pod2 = f.PodClient().CreateSync(pod2)
|
||||
|
||||
By("checking if the expected cpuset was assigned")
|
||||
cpu1, cpu2 = 1, 2
|
||||
if isHTEnabled() {
|
||||
cpuList = cpuset.MustParse(getCPUSiblingList(0)).ToSlice()
|
||||
if cpuList[1] != 1 {
|
||||
cpu1, cpu2 = cpuList[1], 1
|
||||
}
|
||||
}
|
||||
|
||||
expAllowedCPUsListRegex = fmt.Sprintf("^%d\n$", cpu1)
|
||||
err = f.PodClient().MatchContainerOutput(pod1.Name, pod1.Spec.Containers[0].Name, expAllowedCPUsListRegex)
|
||||
framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
|
||||
pod1.Spec.Containers[0].Name, pod1.Name)
|
||||
|
||||
expAllowedCPUsListRegex = fmt.Sprintf("^%d\n$", cpu2)
|
||||
err = f.PodClient().MatchContainerOutput(pod2.Name, pod2.Spec.Containers[0].Name, expAllowedCPUsListRegex)
|
||||
framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
|
||||
pod2.Spec.Containers[0].Name, pod2.Name)
|
||||
|
||||
By("by deleting the pods and waiting for container removal")
|
||||
deletePods(f, []string{pod1.Name, pod2.Name})
|
||||
waitForContainerRemoval(fmt.Sprintf("%s_%s", pod1.Spec.Containers[0].Name, pod1.Name))
|
||||
waitForContainerRemoval(fmt.Sprintf("%s_%s", pod2.Spec.Containers[0].Name, pod2.Name))
|
||||
|
||||
setOldKubeletConfig(f, oldCfg)
|
||||
})
|
||||
}
|
||||
|
||||
// Serial because the test updates kubelet configuration.
|
||||
var _ = SIGDescribe("CPU Manager [Feature:CPUManager]", func() {
|
||||
f := framework.NewDefaultFramework("cpu-manager-test")
|
||||
|
||||
Context("With kubeconfig updated with static CPU Manager policy run the CPU Manager tests", func() {
|
||||
runCPUManagerTests(f)
|
||||
})
|
||||
})
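The expected cpuset strings checked above come from the kubelet's cpuset helper package. A minimal, self-contained sketch of how those helpers combine, assuming the vendored k8s.io/kubernetes/pkg/kubelet/cm/cpuset API used in this file (MustParse, NewCPUSet, Difference, String); the 4-CPU node and the exclusively assigned CPU 1 are illustrative:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
)

func main() {
	// All CPUs on a hypothetical 4-CPU node.
	all := cpuset.MustParse("0-3")
	// Assume CPU 1 has been assigned exclusively to a Gu container by the static policy.
	exclusive := cpuset.NewCPUSet(1)
	// The shared pool left for non-Gu containers prints as "0,2-3", which is the
	// kind of string the test's expAllowedCPUsListRegex is built from.
	shared := all.Difference(exclusive)
	fmt.Println(shared.String())
}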
|
149
vendor/k8s.io/kubernetes/test/e2e_node/critical_pod_test.go
generated
vendored
Normal file
@ -0,0 +1,149 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
kubeapi "k8s.io/kubernetes/pkg/apis/core"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
|
||||
kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
const (
|
||||
criticalPodName = "critical-pod"
|
||||
guaranteedPodName = "guaranteed"
|
||||
burstablePodName = "burstable"
|
||||
bestEffortPodName = "best-effort"
|
||||
)
|
||||
|
||||
var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive]", func() {
|
||||
f := framework.NewDefaultFramework("critical-pod-test")
|
||||
|
||||
Context("when we need to admit a critical pod", func() {
|
||||
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
|
||||
initialConfig.FeatureGates[string(features.ExperimentalCriticalPodAnnotation)] = true
|
||||
})
|
||||
|
||||
It("should be able to create and delete a critical pod", func() {
|
||||
configEnabled, err := isKubeletConfigEnabled(f)
|
||||
framework.ExpectNoError(err)
|
||||
if !configEnabled {
|
||||
framework.Skipf("unable to run test without dynamic kubelet config enabled.")
|
||||
}
|
||||
|
||||
// Define test pods
|
||||
nonCriticalGuaranteed := getTestPod(false, guaranteedPodName, v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("100m"),
|
||||
v1.ResourceMemory: resource.MustParse("100Mi"),
|
||||
},
|
||||
Limits: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("100m"),
|
||||
v1.ResourceMemory: resource.MustParse("100Mi"),
|
||||
},
|
||||
})
|
||||
nonCriticalBurstable := getTestPod(false, burstablePodName, v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("100m"),
|
||||
v1.ResourceMemory: resource.MustParse("100Mi"),
|
||||
},
|
||||
})
|
||||
nonCriticalBestEffort := getTestPod(false, bestEffortPodName, v1.ResourceRequirements{})
|
||||
criticalPod := getTestPod(true, criticalPodName, v1.ResourceRequirements{
|
||||
// request the entire resource capacity of the node, so that
// admitting this pod requires the other pods to be preempted
|
||||
Requests: getNodeCPUAndMemoryCapacity(f),
|
||||
})
|
||||
|
||||
// Create pods, starting with non-critical so that the critical preempts the other pods.
|
||||
f.PodClient().CreateBatch([]*v1.Pod{nonCriticalBestEffort, nonCriticalBurstable, nonCriticalGuaranteed})
|
||||
f.PodClientNS(kubeapi.NamespaceSystem).CreateSyncInNamespace(criticalPod, kubeapi.NamespaceSystem)
|
||||
|
||||
// Check that non-critical pods other than the besteffort have been evicted
|
||||
updatedPodList, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
for _, p := range updatedPodList.Items {
|
||||
if p.Name == nonCriticalBestEffort.Name {
|
||||
Expect(p.Status.Phase).NotTo(Equal(v1.PodFailed), fmt.Sprintf("pod: %v should not be preempted", p.Name))
|
||||
} else {
|
||||
Expect(p.Status.Phase).To(Equal(v1.PodFailed), fmt.Sprintf("pod: %v should be preempted", p.Name))
|
||||
}
|
||||
}
|
||||
})
|
||||
AfterEach(func() {
|
||||
// Delete Pods
|
||||
f.PodClient().DeleteSync(guaranteedPodName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
|
||||
f.PodClient().DeleteSync(burstablePodName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
|
||||
f.PodClient().DeleteSync(bestEffortPodName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
|
||||
f.PodClientNS(kubeapi.NamespaceSystem).DeleteSyncInNamespace(criticalPodName, kubeapi.NamespaceSystem, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
|
||||
// Log Events
|
||||
logPodEvents(f)
|
||||
logNodeEvents(f)
|
||||
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
func getNodeCPUAndMemoryCapacity(f *framework.Framework) v1.ResourceList {
|
||||
nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
// Assuming that there is only one node, because this is a node e2e test.
|
||||
Expect(len(nodeList.Items)).To(Equal(1))
|
||||
capacity := nodeList.Items[0].Status.Allocatable
|
||||
return v1.ResourceList{
|
||||
v1.ResourceCPU: capacity[v1.ResourceCPU],
|
||||
v1.ResourceMemory: capacity[v1.ResourceMemory],
|
||||
}
|
||||
}
|
||||
|
||||
func getTestPod(critical bool, name string, resources v1.ResourceRequirements) *v1.Pod {
|
||||
pod := &v1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Pod",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: name},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "container",
|
||||
Image: framework.GetPauseImageNameForHostArch(),
|
||||
Resources: resources,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
if critical {
|
||||
pod.ObjectMeta.Namespace = kubeapi.NamespaceSystem
|
||||
pod.ObjectMeta.Annotations = map[string]string{
|
||||
kubelettypes.CriticalPodAnnotationKey: "",
|
||||
}
|
||||
Expect(kubelettypes.IsCriticalPod(pod)).To(BeTrue(), "pod should be a critical pod")
|
||||
} else {
|
||||
Expect(kubelettypes.IsCriticalPod(pod)).To(BeFalse(), "pod should not be a critical pod")
|
||||
}
|
||||
return pod
|
||||
}
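getTestPod marks a pod as critical by placing it in the kube-system namespace and adding the critical-pod annotation. A minimal sketch of just that marking step, reusing identifiers already imported in this file (the helper name markCritical is hypothetical):

// markCritical shows only the marking step that getTestPod performs for
// critical pods: move the pod to kube-system and add the annotation that
// kubelettypes.IsCriticalPod checks for.
func markCritical(pod *v1.Pod) {
	pod.ObjectMeta.Namespace = kubeapi.NamespaceSystem
	if pod.ObjectMeta.Annotations == nil {
		pod.ObjectMeta.Annotations = map[string]string{}
	}
	pod.ObjectMeta.Annotations[kubelettypes.CriticalPodAnnotationKey] = ""
}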
|
602
vendor/k8s.io/kubernetes/test/e2e_node/density_test.go
generated
vendored
Normal file
@ -0,0 +1,602 @@
|
||||
// +build linux
|
||||
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
|
||||
kubemetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/framework/metrics"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
const (
|
||||
kubeletAddr = "localhost:10255"
|
||||
)
|
||||
|
||||
var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
|
||||
const (
|
||||
// The data collection times of the resource collector and the standalone cadvisor
// are not synchronized, so the resource collector may miss data or
|
||||
// collect duplicated data
|
||||
containerStatsPollingPeriod = 500 * time.Millisecond
|
||||
)
|
||||
|
||||
var (
|
||||
rc *ResourceCollector
|
||||
)
|
||||
|
||||
f := framework.NewDefaultFramework("density-test")
|
||||
|
||||
BeforeEach(func() {
|
||||
// Start a standalone cadvisor pod using 'CreateSync'; the pod is running when it returns.
|
||||
f.PodClient().CreateSync(getCadvisorPod())
|
||||
// The resource collector monitors fine-grained CPU/memory usage via a standalone cadvisor with
// a 1s housekeeping interval.
|
||||
rc = NewResourceCollector(containerStatsPollingPeriod)
|
||||
})
|
||||
|
||||
Context("create a batch of pods", func() {
|
||||
// TODO(coufon): the values are generous, set more precise limits with benchmark data
|
||||
// and add more tests
|
||||
dTests := []densityTest{
|
||||
{
|
||||
podsNr: 10,
|
||||
interval: 0 * time.Millisecond,
|
||||
cpuLimits: framework.ContainersCPUSummary{
|
||||
stats.SystemContainerKubelet: {0.50: 0.30, 0.95: 0.50},
|
||||
stats.SystemContainerRuntime: {0.50: 0.40, 0.95: 0.60},
|
||||
},
|
||||
memLimits: framework.ResourceUsagePerContainer{
|
||||
stats.SystemContainerKubelet: &framework.ContainerResourceUsage{MemoryRSSInBytes: 100 * 1024 * 1024},
|
||||
stats.SystemContainerRuntime: &framework.ContainerResourceUsage{MemoryRSSInBytes: 500 * 1024 * 1024},
|
||||
},
|
||||
// percentile limit of single pod startup latency
|
||||
podStartupLimits: framework.LatencyMetric{
|
||||
Perc50: 16 * time.Second,
|
||||
Perc90: 18 * time.Second,
|
||||
Perc99: 20 * time.Second,
|
||||
},
|
||||
// upper bound on the startup latency of a batch of pods
|
||||
podBatchStartupLimit: 25 * time.Second,
|
||||
},
|
||||
}
|
||||
|
||||
for _, testArg := range dTests {
|
||||
itArg := testArg
|
||||
desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval", itArg.podsNr, itArg.interval)
|
||||
It(desc, func() {
|
||||
itArg.createMethod = "batch"
|
||||
testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)
|
||||
|
||||
batchLag, e2eLags := runDensityBatchTest(f, rc, itArg, testInfo, false)
|
||||
|
||||
By("Verifying latency")
|
||||
logAndVerifyLatency(batchLag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testInfo, true)
|
||||
|
||||
By("Verifying resource")
|
||||
logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, true)
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
Context("create a batch of pods", func() {
|
||||
dTests := []densityTest{
|
||||
{
|
||||
podsNr: 10,
|
||||
interval: 0 * time.Millisecond,
|
||||
},
|
||||
{
|
||||
podsNr: 35,
|
||||
interval: 0 * time.Millisecond,
|
||||
},
|
||||
{
|
||||
podsNr: 105,
|
||||
interval: 0 * time.Millisecond,
|
||||
},
|
||||
{
|
||||
podsNr: 10,
|
||||
interval: 100 * time.Millisecond,
|
||||
},
|
||||
{
|
||||
podsNr: 35,
|
||||
interval: 100 * time.Millisecond,
|
||||
},
|
||||
{
|
||||
podsNr: 105,
|
||||
interval: 100 * time.Millisecond,
|
||||
},
|
||||
{
|
||||
podsNr: 10,
|
||||
interval: 300 * time.Millisecond,
|
||||
},
|
||||
{
|
||||
podsNr: 35,
|
||||
interval: 300 * time.Millisecond,
|
||||
},
|
||||
{
|
||||
podsNr: 105,
|
||||
interval: 300 * time.Millisecond,
|
||||
},
|
||||
}
|
||||
|
||||
for _, testArg := range dTests {
|
||||
itArg := testArg
|
||||
desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval [Benchmark]", itArg.podsNr, itArg.interval)
|
||||
It(desc, func() {
|
||||
itArg.createMethod = "batch"
|
||||
testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)
|
||||
|
||||
batchLag, e2eLags := runDensityBatchTest(f, rc, itArg, testInfo, true)
|
||||
|
||||
By("Verifying latency")
|
||||
logAndVerifyLatency(batchLag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testInfo, false)
|
||||
|
||||
By("Verifying resource")
|
||||
logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, false)
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
Context("create a batch of pods with higher API QPS", func() {
|
||||
dTests := []densityTest{
|
||||
{
|
||||
podsNr: 105,
|
||||
interval: 0 * time.Millisecond,
|
||||
APIQPSLimit: 60,
|
||||
},
|
||||
{
|
||||
podsNr: 105,
|
||||
interval: 100 * time.Millisecond,
|
||||
APIQPSLimit: 60,
|
||||
},
|
||||
{
|
||||
podsNr: 105,
|
||||
interval: 300 * time.Millisecond,
|
||||
APIQPSLimit: 60,
|
||||
},
|
||||
}
|
||||
|
||||
for _, testArg := range dTests {
|
||||
itArg := testArg
|
||||
desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval (QPS %d) [Benchmark]", itArg.podsNr, itArg.interval, itArg.APIQPSLimit)
|
||||
It(desc, func() {
|
||||
itArg.createMethod = "batch"
|
||||
testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)
|
||||
// The latency caused by the API QPS limit takes a large portion (up to ~33%) of the e2e latency.
// It causes the kubelet's pod startup latency (and creation throughput) to be under-estimated.
// Here we raise the API QPS limit from the default of 5 to 60 in order to test real kubelet performance.
// Note that this will cause higher resource usage.
|
||||
setKubeletAPIQPSLimit(f, int32(itArg.APIQPSLimit))
|
||||
batchLag, e2eLags := runDensityBatchTest(f, rc, itArg, testInfo, true)
|
||||
|
||||
By("Verifying latency")
|
||||
logAndVerifyLatency(batchLag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testInfo, false)
|
||||
|
||||
By("Verifying resource")
|
||||
logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, false)
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
Context("create a sequence of pods", func() {
|
||||
dTests := []densityTest{
|
||||
{
|
||||
podsNr: 10,
|
||||
bgPodsNr: 50,
|
||||
cpuLimits: framework.ContainersCPUSummary{
|
||||
stats.SystemContainerKubelet: {0.50: 0.30, 0.95: 0.50},
|
||||
stats.SystemContainerRuntime: {0.50: 0.40, 0.95: 0.60},
|
||||
},
|
||||
memLimits: framework.ResourceUsagePerContainer{
|
||||
stats.SystemContainerKubelet: &framework.ContainerResourceUsage{MemoryRSSInBytes: 100 * 1024 * 1024},
|
||||
stats.SystemContainerRuntime: &framework.ContainerResourceUsage{MemoryRSSInBytes: 500 * 1024 * 1024},
|
||||
},
|
||||
podStartupLimits: framework.LatencyMetric{
|
||||
Perc50: 5000 * time.Millisecond,
|
||||
Perc90: 9000 * time.Millisecond,
|
||||
Perc99: 10000 * time.Millisecond,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, testArg := range dTests {
|
||||
itArg := testArg
|
||||
desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %d background pods", itArg.podsNr, itArg.bgPodsNr)
|
||||
It(desc, func() {
|
||||
itArg.createMethod = "sequence"
|
||||
testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)
|
||||
batchlag, e2eLags := runDensitySeqTest(f, rc, itArg, testInfo)
|
||||
|
||||
By("Verifying latency")
|
||||
logAndVerifyLatency(batchlag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testInfo, true)
|
||||
|
||||
By("Verifying resource")
|
||||
logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, true)
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
Context("create a sequence of pods", func() {
|
||||
dTests := []densityTest{
|
||||
{
|
||||
podsNr: 10,
|
||||
bgPodsNr: 50,
|
||||
},
|
||||
{
|
||||
podsNr: 30,
|
||||
bgPodsNr: 50,
|
||||
},
|
||||
{
|
||||
podsNr: 50,
|
||||
bgPodsNr: 50,
|
||||
},
|
||||
}
|
||||
|
||||
for _, testArg := range dTests {
|
||||
itArg := testArg
|
||||
desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %d background pods [Benchmark]", itArg.podsNr, itArg.bgPodsNr)
|
||||
It(desc, func() {
|
||||
itArg.createMethod = "sequence"
|
||||
testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)
|
||||
batchlag, e2eLags := runDensitySeqTest(f, rc, itArg, testInfo)
|
||||
|
||||
By("Verifying latency")
|
||||
logAndVerifyLatency(batchlag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testInfo, false)
|
||||
|
||||
By("Verifying resource")
|
||||
logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, false)
|
||||
})
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
type densityTest struct {
|
||||
// number of pods
|
||||
podsNr int
|
||||
// number of background pods
|
||||
bgPodsNr int
|
||||
// interval between creating pod (rate control)
|
||||
interval time.Duration
|
||||
// create pods in 'batch' or 'sequence'
|
||||
createMethod string
|
||||
// API QPS limit
|
||||
APIQPSLimit int
|
||||
// performance limits
|
||||
cpuLimits framework.ContainersCPUSummary
|
||||
memLimits framework.ResourceUsagePerContainer
|
||||
podStartupLimits framework.LatencyMetric
|
||||
podBatchStartupLimit time.Duration
|
||||
}
|
||||
|
||||
func (dt *densityTest) getTestName() string {
|
||||
// The current default API QPS limit is 5
|
||||
// TODO(coufon): is there any way to not hard code this?
|
||||
APIQPSLimit := 5
|
||||
if dt.APIQPSLimit > 0 {
|
||||
APIQPSLimit = dt.APIQPSLimit
|
||||
}
|
||||
return fmt.Sprintf("density_create_%s_%d_%d_%d_%d", dt.createMethod, dt.podsNr, dt.bgPodsNr,
|
||||
dt.interval.Nanoseconds()/1000000, APIQPSLimit)
|
||||
}
|
||||
|
||||
// runDensityBatchTest runs the density batch pod creation test
|
||||
func runDensityBatchTest(f *framework.Framework, rc *ResourceCollector, testArg densityTest, testInfo map[string]string,
|
||||
isLogTimeSeries bool) (time.Duration, []framework.PodLatencyData) {
|
||||
const (
|
||||
podType = "density_test_pod"
|
||||
sleepBeforeCreatePods = 30 * time.Second
|
||||
)
|
||||
var (
|
||||
mutex = &sync.Mutex{}
|
||||
watchTimes = make(map[string]metav1.Time, 0)
|
||||
stopCh = make(chan struct{})
|
||||
)
|
||||
|
||||
// create test pod data structure
|
||||
pods := newTestPods(testArg.podsNr, true, framework.GetPauseImageNameForHostArch(), podType)
|
||||
|
||||
// the controller watches the change of pod status
|
||||
controller := newInformerWatchPod(f, mutex, watchTimes, podType)
|
||||
go controller.Run(stopCh)
|
||||
defer close(stopCh)
|
||||
|
||||
// TODO(coufon): in testing we found that the kubelet starts while it is busy with something, and as a result 'syncLoop'
// does not respond to pod creation immediately. Creating the first pod has a delay of around 5s.
// The node status has already been 'ready', so waiting for and checking node readiness does not help here.
// Wait here for a grace period to let 'syncLoop' become ready.
|
||||
time.Sleep(sleepBeforeCreatePods)
|
||||
|
||||
rc.Start()
|
||||
|
||||
By("Creating a batch of pods")
|
||||
// It returns a map from pod name to creation time, containing the creation timestamps.
|
||||
createTimes := createBatchPodWithRateControl(f, pods, testArg.interval)
|
||||
|
||||
By("Waiting for all Pods to be observed by the watch...")
|
||||
|
||||
Eventually(func() bool {
|
||||
return len(watchTimes) == testArg.podsNr
|
||||
}, 10*time.Minute, 10*time.Second).Should(BeTrue())
|
||||
|
||||
if len(watchTimes) < testArg.podsNr {
|
||||
framework.Failf("Timeout reached waiting for all Pods to be observed by the watch.")
|
||||
}
|
||||
|
||||
// Analyze results
|
||||
var (
|
||||
firstCreate metav1.Time
|
||||
lastRunning metav1.Time
|
||||
init = true
|
||||
e2eLags = make([]framework.PodLatencyData, 0)
|
||||
)
|
||||
|
||||
for name, create := range createTimes {
|
||||
watch, ok := watchTimes[name]
|
||||
Expect(ok).To(Equal(true))
|
||||
|
||||
e2eLags = append(e2eLags,
|
||||
framework.PodLatencyData{Name: name, Latency: watch.Time.Sub(create.Time)})
|
||||
|
||||
if !init {
|
||||
if firstCreate.Time.After(create.Time) {
|
||||
firstCreate = create
|
||||
}
|
||||
if lastRunning.Time.Before(watch.Time) {
|
||||
lastRunning = watch
|
||||
}
|
||||
} else {
|
||||
init = false
|
||||
firstCreate, lastRunning = create, watch
|
||||
}
|
||||
}
|
||||
|
||||
sort.Sort(framework.LatencySlice(e2eLags))
|
||||
batchLag := lastRunning.Time.Sub(firstCreate.Time)
|
||||
|
||||
rc.Stop()
|
||||
deletePodsSync(f, pods)
|
||||
|
||||
// Log time series data.
|
||||
if isLogTimeSeries {
|
||||
logDensityTimeSeries(rc, createTimes, watchTimes, testInfo)
|
||||
}
|
||||
// Log throughput data.
|
||||
logPodCreateThroughput(batchLag, e2eLags, testArg.podsNr, testInfo)
|
||||
|
||||
deletePodsSync(f, []*v1.Pod{getCadvisorPod()})
|
||||
|
||||
return batchLag, e2eLags
|
||||
}
|
||||
|
||||
// runDensitySeqTest runs the density sequential pod creation test
|
||||
func runDensitySeqTest(f *framework.Framework, rc *ResourceCollector, testArg densityTest, testInfo map[string]string) (time.Duration, []framework.PodLatencyData) {
|
||||
const (
|
||||
podType = "density_test_pod"
|
||||
sleepBeforeCreatePods = 30 * time.Second
|
||||
)
|
||||
bgPods := newTestPods(testArg.bgPodsNr, true, framework.GetPauseImageNameForHostArch(), "background_pod")
|
||||
testPods := newTestPods(testArg.podsNr, true, framework.GetPauseImageNameForHostArch(), podType)
|
||||
|
||||
By("Creating a batch of background pods")
|
||||
|
||||
// CreateBatch is synchronized; all pods are running when it returns.
|
||||
f.PodClient().CreateBatch(bgPods)
|
||||
|
||||
time.Sleep(sleepBeforeCreatePods)
|
||||
|
||||
rc.Start()
|
||||
|
||||
// Create pods sequentially (back-to-back). e2eLags have been sorted.
|
||||
batchlag, e2eLags := createBatchPodSequential(f, testPods)
|
||||
|
||||
rc.Stop()
|
||||
deletePodsSync(f, append(bgPods, testPods...))
|
||||
|
||||
// Log throughput data.
|
||||
logPodCreateThroughput(batchlag, e2eLags, testArg.podsNr, testInfo)
|
||||
|
||||
deletePodsSync(f, []*v1.Pod{getCadvisorPod()})
|
||||
|
||||
return batchlag, e2eLags
|
||||
}
|
||||
|
||||
// createBatchPodWithRateControl creates a batch of pods concurrently, using one goroutine for each creation;
// between creations there is an interval for throughput control.
|
||||
func createBatchPodWithRateControl(f *framework.Framework, pods []*v1.Pod, interval time.Duration) map[string]metav1.Time {
|
||||
createTimes := make(map[string]metav1.Time)
|
||||
for _, pod := range pods {
|
||||
createTimes[pod.ObjectMeta.Name] = metav1.Now()
|
||||
go f.PodClient().Create(pod)
|
||||
time.Sleep(interval)
|
||||
}
|
||||
return createTimes
|
||||
}
|
||||
|
||||
// getPodStartLatency gets prometheus metric 'pod start latency' from kubelet
|
||||
func getPodStartLatency(node string) (framework.KubeletLatencyMetrics, error) {
|
||||
latencyMetrics := framework.KubeletLatencyMetrics{}
|
||||
ms, err := metrics.GrabKubeletMetricsWithoutProxy(node)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
for _, samples := range ms {
|
||||
for _, sample := range samples {
|
||||
if sample.Metric["__name__"] == kubemetrics.KubeletSubsystem+"_"+kubemetrics.PodStartLatencyKey {
|
||||
quantile, _ := strconv.ParseFloat(string(sample.Metric["quantile"]), 64)
|
||||
latencyMetrics = append(latencyMetrics,
|
||||
framework.KubeletLatencyMetric{
|
||||
Quantile: quantile,
|
||||
Method: kubemetrics.PodStartLatencyKey,
|
||||
Latency: time.Duration(int(sample.Value)) * time.Microsecond})
|
||||
}
|
||||
}
|
||||
}
|
||||
return latencyMetrics, nil
|
||||
}
|
||||
|
||||
// verifyPodStartupLatency verifies whether 50, 90 and 99th percentiles of PodStartupLatency are
|
||||
// within the threshold.
|
||||
func verifyPodStartupLatency(expect, actual framework.LatencyMetric) error {
|
||||
if actual.Perc50 > expect.Perc50 {
|
||||
return fmt.Errorf("too high pod startup latency 50th percentile: %v", actual.Perc50)
|
||||
}
|
||||
if actual.Perc90 > expect.Perc90 {
|
||||
return fmt.Errorf("too high pod startup latency 90th percentile: %v", actual.Perc90)
|
||||
}
|
||||
if actual.Perc99 > expect.Perc99 {
|
||||
return fmt.Errorf("too high pod startup latency 99th percentile: %v", actual.Perc99)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// newInformerWatchPod creates an informer to check whether all pods are running.
|
||||
func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes map[string]metav1.Time, podType string) cache.Controller {
|
||||
ns := f.Namespace.Name
|
||||
checkPodRunning := func(p *v1.Pod) {
|
||||
mutex.Lock()
|
||||
defer mutex.Unlock()
|
||||
defer GinkgoRecover()
|
||||
|
||||
if p.Status.Phase == v1.PodRunning {
|
||||
if _, found := watchTimes[p.Name]; !found {
|
||||
watchTimes[p.Name] = metav1.Now()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
_, controller := cache.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
|
||||
options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": podType}).String()
|
||||
obj, err := f.ClientSet.CoreV1().Pods(ns).List(options)
|
||||
return runtime.Object(obj), err
|
||||
},
|
||||
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
|
||||
options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": podType}).String()
|
||||
return f.ClientSet.CoreV1().Pods(ns).Watch(options)
|
||||
},
|
||||
},
|
||||
&v1.Pod{},
|
||||
0,
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: func(obj interface{}) {
|
||||
p, ok := obj.(*v1.Pod)
|
||||
Expect(ok).To(Equal(true))
|
||||
go checkPodRunning(p)
|
||||
},
|
||||
UpdateFunc: func(oldObj, newObj interface{}) {
|
||||
p, ok := newObj.(*v1.Pod)
|
||||
Expect(ok).To(Equal(true))
|
||||
go checkPodRunning(p)
|
||||
},
|
||||
},
|
||||
)
|
||||
return controller
|
||||
}
|
||||
|
||||
// createBatchPodSequential creates pods back-to-back in sequence.
|
||||
func createBatchPodSequential(f *framework.Framework, pods []*v1.Pod) (time.Duration, []framework.PodLatencyData) {
|
||||
batchStartTime := metav1.Now()
|
||||
e2eLags := make([]framework.PodLatencyData, 0)
|
||||
for _, pod := range pods {
|
||||
create := metav1.Now()
|
||||
f.PodClient().CreateSync(pod)
|
||||
e2eLags = append(e2eLags,
|
||||
framework.PodLatencyData{Name: pod.Name, Latency: metav1.Now().Time.Sub(create.Time)})
|
||||
}
|
||||
batchLag := metav1.Now().Time.Sub(batchStartTime.Time)
|
||||
sort.Sort(framework.LatencySlice(e2eLags))
|
||||
return batchLag, e2eLags
|
||||
}
|
||||
|
||||
// logAndVerifyLatency verifies whether pod creation latency satisfies the limit.
|
||||
func logAndVerifyLatency(batchLag time.Duration, e2eLags []framework.PodLatencyData, podStartupLimits framework.LatencyMetric,
|
||||
podBatchStartupLimit time.Duration, testInfo map[string]string, isVerify bool) {
|
||||
framework.PrintLatencies(e2eLags, "worst client e2e total latencies")
|
||||
|
||||
// TODO(coufon): do not trust 'kubelet' metrics since they are not reset!
|
||||
latencyMetrics, _ := getPodStartLatency(kubeletAddr)
|
||||
framework.Logf("Kubelet Prometheus metrics (not reset):\n%s", framework.PrettyPrintJSON(latencyMetrics))
|
||||
|
||||
podCreateLatency := framework.PodStartupLatency{Latency: framework.ExtractLatencyMetrics(e2eLags)}
|
||||
|
||||
// log latency perf data
|
||||
logPerfData(getLatencyPerfData(podCreateLatency.Latency, testInfo), "latency")
|
||||
|
||||
if isVerify {
|
||||
// check whether e2e pod startup time is acceptable.
|
||||
framework.ExpectNoError(verifyPodStartupLatency(podStartupLimits, podCreateLatency.Latency))
|
||||
|
||||
// check batch pod creation latency
|
||||
if podBatchStartupLimit > 0 {
|
||||
Expect(batchLag <= podBatchStartupLimit).To(Equal(true), "Batch creation startup time %v exceeds limit %v",
|
||||
batchLag, podBatchStartupLimit)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// logPodCreateThroughput calculates and logs pod creation throughput.
|
||||
func logPodCreateThroughput(batchLag time.Duration, e2eLags []framework.PodLatencyData, podsNr int, testInfo map[string]string) {
|
||||
logPerfData(getThroughputPerfData(batchLag, e2eLags, podsNr, testInfo), "throughput")
|
||||
}
|
||||
|
||||
// setKubeletAPIQPSLimit sets Kubelet API QPS via ConfigMap. Kubelet will restart with the new QPS.
|
||||
func setKubeletAPIQPSLimit(f *framework.Framework, newAPIQPS int32) {
|
||||
const restartGap = 40 * time.Second
|
||||
|
||||
resp := pollConfigz(2*time.Minute, 5*time.Second)
|
||||
kubeCfg, err := decodeConfigz(resp)
|
||||
framework.ExpectNoError(err)
|
||||
framework.Logf("Old QPS limit is: %d\n", kubeCfg.KubeAPIQPS)
|
||||
|
||||
// Set new API QPS limit
|
||||
kubeCfg.KubeAPIQPS = newAPIQPS
|
||||
// TODO(coufon): createConfigMap should first check whether the configmap already exists and, if so, use updateConfigMap.
// Calling createConfigMap twice will result in an error. This is fine for the benchmark test because we only run one test on a new node.
|
||||
_, err = createConfigMap(f, kubeCfg)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Wait for Kubelet to restart
|
||||
time.Sleep(restartGap)
|
||||
|
||||
// Check new QPS has been set
|
||||
resp = pollConfigz(2*time.Minute, 5*time.Second)
|
||||
kubeCfg, err = decodeConfigz(resp)
|
||||
framework.ExpectNoError(err)
|
||||
framework.Logf("New QPS limit is: %d\n", kubeCfg.KubeAPIQPS)
|
||||
|
||||
// TODO(coufon): check test result to see if we need to retry here
|
||||
if kubeCfg.KubeAPIQPS != newAPIQPS {
|
||||
framework.Failf("Fail to set new kubelet API QPS limit.")
|
||||
}
|
||||
}
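The latency verification above compares the 50th/90th/99th percentiles produced by framework.ExtractLatencyMetrics against the configured limits. A self-contained sketch of that percentile computation, using hypothetical local types rather than the framework's (the nearest-rank formula here is an assumption and may differ in detail from the framework's implementation):

package main

import (
	"fmt"
	"sort"
	"time"
)

type podLatency struct {
	Name    string
	Latency time.Duration
}

// percentile returns the nearest-rank percentile from latencies sorted ascending.
func percentile(sorted []podLatency, p int) time.Duration {
	if len(sorted) == 0 {
		return 0
	}
	idx := (len(sorted)*p+99)/100 - 1
	if idx < 0 {
		idx = 0
	}
	return sorted[idx].Latency
}

func main() {
	lags := []podLatency{
		{"pod-a", 3 * time.Second},
		{"pod-b", 5 * time.Second},
		{"pod-c", 9 * time.Second},
		{"pod-d", 12 * time.Second},
	}
	sort.Slice(lags, func(i, j int) bool { return lags[i].Latency < lags[j].Latency })
	// Prints the 50th, 90th and 99th percentile latencies for the sample batch.
	fmt.Println(percentile(lags, 50), percentile(lags, 90), percentile(lags, 99))
}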
|
19
vendor/k8s.io/kubernetes/test/e2e_node/doc.go
generated
vendored
Normal file
@ -0,0 +1,19 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// e2e_node contains e2e tests specific to the node
|
||||
// TODO: rename this package e2e-node
|
||||
package e2e_node // import "k8s.io/kubernetes/test/e2e_node"
|
178
vendor/k8s.io/kubernetes/test/e2e_node/docker_test.go
generated
vendored
Normal file
@ -0,0 +1,178 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = framework.KubeDescribe("Docker features [Feature:Docker]", func() {
|
||||
f := framework.NewDefaultFramework("docker-feature-test")
|
||||
|
||||
BeforeEach(func() {
|
||||
framework.RunIfContainerRuntimeIs("docker")
|
||||
})
|
||||
|
||||
Context("when shared PID namespace is enabled", func() {
|
||||
It("processes in different containers of the same pod should be able to see each other", func() {
|
||||
// TODO(yguo0905): Change this test to run unless the runtime is
|
||||
// Docker and its version is <1.13.
|
||||
By("Check whether shared PID namespace is supported.")
|
||||
isEnabled, err := isSharedPIDNamespaceSupported()
|
||||
framework.ExpectNoError(err)
|
||||
if !isEnabled {
|
||||
framework.Skipf("Skipped because shared PID namespace is not supported by this docker version.")
|
||||
}
|
||||
|
||||
By("Create a pod with two containers.")
|
||||
f.PodClient().CreateSync(&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "shared-pid-ns-test-pod"},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "test-container-1",
|
||||
Image: "busybox",
|
||||
Command: []string{"/bin/top"},
|
||||
},
|
||||
{
|
||||
Name: "test-container-2",
|
||||
Image: "busybox",
|
||||
Command: []string{"/bin/sleep"},
|
||||
Args: []string{"10000"},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
By("Check if the process in one container is visible to the process in the other.")
|
||||
pid1 := f.ExecCommandInContainer("shared-pid-ns-test-pod", "test-container-1", "/bin/pidof", "top")
|
||||
pid2 := f.ExecCommandInContainer("shared-pid-ns-test-pod", "test-container-2", "/bin/pidof", "top")
|
||||
if pid1 != pid2 {
|
||||
framework.Failf("PIDs are not the same in different containers: test-container-1=%v, test-container-2=%v", pid1, pid2)
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
Context("when live-restore is enabled [Serial] [Slow] [Disruptive]", func() {
|
||||
It("containers should not be disrupted when the daemon shuts down and restarts", func() {
|
||||
const (
|
||||
podName = "live-restore-test-pod"
|
||||
containerName = "live-restore-test-container"
|
||||
)
|
||||
|
||||
isSupported, err := isDockerLiveRestoreSupported()
|
||||
framework.ExpectNoError(err)
|
||||
if !isSupported {
|
||||
framework.Skipf("Docker live-restore is not supported.")
|
||||
}
|
||||
isEnabled, err := isDockerLiveRestoreEnabled()
|
||||
framework.ExpectNoError(err)
|
||||
if !isEnabled {
|
||||
framework.Skipf("Docker live-restore is not enabled.")
|
||||
}
|
||||
|
||||
By("Create the test pod.")
|
||||
pod := f.PodClient().CreateSync(&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: podName},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{
|
||||
Name: containerName,
|
||||
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
|
||||
}},
|
||||
},
|
||||
})
|
||||
|
||||
By("Ensure that the container is running before Docker is down.")
|
||||
Eventually(func() bool {
|
||||
return isContainerRunning(pod.Status.PodIP)
|
||||
}).Should(BeTrue())
|
||||
|
||||
startTime1, err := getContainerStartTime(f, podName, containerName)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("Stop Docker daemon.")
|
||||
framework.ExpectNoError(stopDockerDaemon())
|
||||
isDockerDown := true
|
||||
defer func() {
|
||||
if isDockerDown {
|
||||
By("Start Docker daemon.")
|
||||
framework.ExpectNoError(startDockerDaemon())
|
||||
}
|
||||
}()
|
||||
|
||||
By("Ensure that the container is running after Docker is down.")
|
||||
Consistently(func() bool {
|
||||
return isContainerRunning(pod.Status.PodIP)
|
||||
}).Should(BeTrue())
|
||||
|
||||
By("Start Docker daemon.")
|
||||
framework.ExpectNoError(startDockerDaemon())
|
||||
isDockerDown = false
|
||||
|
||||
By("Ensure that the container is running after Docker has restarted.")
|
||||
Consistently(func() bool {
|
||||
return isContainerRunning(pod.Status.PodIP)
|
||||
}).Should(BeTrue())
|
||||
|
||||
By("Ensure that the container has not been restarted after Docker is restarted.")
|
||||
Consistently(func() bool {
|
||||
startTime2, err := getContainerStartTime(f, podName, containerName)
|
||||
framework.ExpectNoError(err)
|
||||
return startTime1 == startTime2
|
||||
}, 3*time.Second, time.Second).Should(BeTrue())
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
// isContainerRunning returns true if the container is running by checking
|
||||
// whether the server is responding, and false otherwise.
|
||||
func isContainerRunning(podIP string) bool {
|
||||
output, err := runCommand("curl", podIP)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return strings.Contains(output, "Welcome to nginx!")
|
||||
}
|
||||
|
||||
// getContainerStartTime returns the start time of the container named
// containerName in the pod named podName.
|
||||
func getContainerStartTime(f *framework.Framework, podName, containerName string) (time.Time, error) {
|
||||
pod, err := f.PodClient().Get(podName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return time.Time{}, fmt.Errorf("failed to get pod %q: %v", podName, err)
|
||||
}
|
||||
for _, status := range pod.Status.ContainerStatuses {
|
||||
if status.Name != containerName {
|
||||
continue
|
||||
}
|
||||
if status.State.Running == nil {
|
||||
return time.Time{}, fmt.Errorf("%v/%v is not running", podName, containerName)
|
||||
}
|
||||
return status.State.Running.StartedAt.Time, nil
|
||||
}
|
||||
return time.Time{}, fmt.Errorf("failed to find %v/%v", podName, containerName)
|
||||
}
|
112
vendor/k8s.io/kubernetes/test/e2e_node/docker_util.go
generated
vendored
Normal file
@ -0,0 +1,112 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/blang/semver"
|
||||
systemdutil "github.com/coreos/go-systemd/util"
|
||||
"github.com/docker/docker/client"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultDockerEndpoint = "unix:///var/run/docker.sock"
|
||||
dockerDaemonConfigName = "/etc/docker/daemon.json"
|
||||
)
|
||||
|
||||
// getDockerAPIVersion returns the Docker's API version.
|
||||
func getDockerAPIVersion() (semver.Version, error) {
|
||||
c, err := client.NewClient(defaultDockerEndpoint, "", nil, nil)
|
||||
if err != nil {
|
||||
return semver.Version{}, fmt.Errorf("failed to create docker client: %v", err)
|
||||
}
|
||||
version, err := c.ServerVersion(context.Background())
|
||||
if err != nil {
|
||||
return semver.Version{}, fmt.Errorf("failed to get docker server version: %v", err)
|
||||
}
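// Docker reports its API version as MAJOR.MINOR (e.g. "1.26"); appending ".0"
// turns it into a full semver string so it can be parsed and compared below.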
|
||||
return semver.MustParse(version.APIVersion + ".0"), nil
|
||||
}
|
||||
|
||||
// isSharedPIDNamespaceSupported returns true if the Docker version is 1.13.1+
|
||||
// (API version 1.26+), and false otherwise.
|
||||
func isSharedPIDNamespaceSupported() (bool, error) {
|
||||
version, err := getDockerAPIVersion()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return version.GTE(semver.MustParse("1.26.0")), nil
|
||||
}
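// Illustrative usage (a sketch, not part of the upstream file): a test that needs a
// shared PID namespace could gate itself on this helper. framework.Skipf is the
// standard e2e skip helper; the message text is an assumption for demonstration.
//
//	if supported, err := isSharedPIDNamespaceSupported(); err != nil || !supported {
//		framework.Skipf("shared PID namespace requires Docker 1.13.1+ (API 1.26+): %v", err)
//	}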
|
||||
|
||||
// isDockerNoNewPrivilegesSupported returns true if Docker version is 1.11+
|
||||
// (API version 1.23+), and false otherwise.
|
||||
func isDockerNoNewPrivilegesSupported() (bool, error) {
|
||||
version, err := getDockerAPIVersion()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return version.GTE(semver.MustParse("1.23.0")), nil
|
||||
}
|
||||
|
||||
// isDockerLiveRestoreSupported returns true if live-restore is supported in
|
||||
// the current Docker version.
|
||||
func isDockerLiveRestoreSupported() (bool, error) {
|
||||
version, err := getDockerAPIVersion()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return version.GTE(semver.MustParse("1.26.0")), nil
|
||||
}
|
||||
|
||||
// isDockerLiveRestoreEnabled returns true if live-restore is enabled in the
// Docker daemon.
|
||||
func isDockerLiveRestoreEnabled() (bool, error) {
|
||||
c, err := client.NewClient(defaultDockerEndpoint, "", nil, nil)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to create docker client: %v", err)
|
||||
}
|
||||
info, err := c.Info(context.Background())
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to get docker info: %v", err)
|
||||
}
|
||||
return info.LiveRestoreEnabled, nil
|
||||
}
|
||||
|
||||
// startDockerDaemon starts the Docker daemon.
|
||||
func startDockerDaemon() error {
|
||||
switch {
|
||||
case systemdutil.IsRunningSystemd():
|
||||
_, err := runCommand("systemctl", "start", "docker")
|
||||
return err
|
||||
default:
|
||||
_, err := runCommand("service", "docker", "start")
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// stopDockerDaemon stops the Docker daemon.
|
||||
func stopDockerDaemon() error {
|
||||
switch {
|
||||
case systemdutil.IsRunningSystemd():
|
||||
_, err := runCommand("systemctl", "stop", "docker")
|
||||
return err
|
||||
default:
|
||||
_, err := runCommand("service", "docker", "stop")
|
||||
return err
|
||||
}
|
||||
}
|
224
vendor/k8s.io/kubernetes/test/e2e_node/dockershim_checkpoint_test.go
generated
vendored
Normal file
@ -0,0 +1,224 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
)
|
||||
|
||||
const (
|
||||
testCheckpoint = "checkpoint-test"
|
||||
// Container GC Period is 1 minute
|
||||
gcTimeout = 3 * time.Minute
|
||||
testCheckpointContent = `{"version":"v1","name":"fluentd-gcp-v2.0-vmnqx","namespace":"kube-system","data":{},"checksum":1799154314}`
|
||||
)
|
||||
|
||||
var _ = SIGDescribe("Dockershim [Serial] [Disruptive] [Feature:Docker]", func() {
|
||||
f := framework.NewDefaultFramework("dockershim-checkpoint-test")
|
||||
|
||||
It("should clean up pod sandbox checkpoint after pod deletion", func() {
|
||||
podName := "pod-checkpoint-no-disrupt"
|
||||
runPodCheckpointTest(f, podName, func() {
|
||||
checkpoints := findCheckpoints(podName)
|
||||
if len(checkpoints) == 0 {
|
||||
framework.Failf("No checkpoint for the pod was found")
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
It("should remove dangling checkpoint file", func() {
|
||||
filename := fmt.Sprintf("%x", md5.Sum([]byte(fmt.Sprintf("%s/%s", testCheckpoint, f.Namespace.Name))))
|
||||
fullpath := path.Join(framework.TestContext.DockershimCheckpointDir, filename)
|
||||
|
||||
By(fmt.Sprintf("Write a file at %q", fullpath))
|
||||
err := writeFileAndSync(fullpath, []byte(testCheckpointContent))
|
||||
framework.ExpectNoError(err, "Failed to create file %q", fullpath)
|
||||
|
||||
By("Check if file is removed")
|
||||
Eventually(func() bool {
|
||||
if _, err := os.Stat(fullpath); os.IsNotExist(err) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}, gcTimeout, 10*time.Second).Should(BeTrue())
|
||||
|
||||
})
|
||||
|
||||
Context("When pod sandbox checkpoint is missing", func() {
|
||||
It("should complete pod sandbox clean up", func() {
|
||||
podName := "pod-checkpoint-missing"
|
||||
runPodCheckpointTest(f, podName, func() {
|
||||
checkpoints := findCheckpoints(podName)
|
||||
if len(checkpoints) == 0 {
|
||||
framework.Failf("No checkpoint for the pod was found")
|
||||
}
|
||||
By("Removing checkpoint of test pod")
|
||||
for _, filename := range checkpoints {
|
||||
if len(filename) == 0 {
|
||||
continue
|
||||
}
|
||||
framework.Logf("Removing checkpoint %q", filename)
|
||||
_, err := exec.Command("sudo", "rm", filename).CombinedOutput()
|
||||
framework.ExpectNoError(err, "Failed to remove checkpoint file %q: %v", string(filename), err)
|
||||
}
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
Context("When all containers in pod are missing", func() {
|
||||
It("should complete pod sandbox clean up based on the information in sandbox checkpoint", func() {
|
||||
runPodCheckpointTest(f, "pod-containers-missing", func() {
|
||||
By("Gathering pod container ids")
|
||||
stdout, err := exec.Command("sudo", "docker", "ps", "-q", "-f",
|
||||
fmt.Sprintf("name=%s", f.Namespace.Name)).CombinedOutput()
|
||||
framework.ExpectNoError(err, "Failed to run docker ps: %v", err)
|
||||
lines := strings.Split(string(stdout), "\n")
|
||||
ids := []string{}
|
||||
for _, id := range lines {
|
||||
id = cleanString(id)
|
||||
if len(id) > 0 {
|
||||
ids = append(ids, id)
|
||||
}
|
||||
}
|
||||
|
||||
By("Stop and remove pod containers")
|
||||
dockerStopCmd := append([]string{"docker", "stop"}, ids...)
|
||||
_, err = exec.Command("sudo", dockerStopCmd...).CombinedOutput()
|
||||
framework.ExpectNoError(err, "Failed to run command %v: %v", dockerStopCmd, err)
|
||||
dockerRmCmd := append([]string{"docker", "rm"}, ids...)
|
||||
_, err = exec.Command("sudo", dockerRmCmd...).CombinedOutput()
|
||||
framework.ExpectNoError(err, "Failed to run command %v: %v", dockerRmCmd, err)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
Context("When checkpoint file is corrupted", func() {
|
||||
It("should complete pod sandbox clean up", func() {
|
||||
podName := "pod-checkpoint-corrupted"
|
||||
runPodCheckpointTest(f, podName, func() {
|
||||
By("Corrupt checkpoint file")
|
||||
checkpoints := findCheckpoints(podName)
|
||||
if len(checkpoints) == 0 {
|
||||
framework.Failf("No checkpoint for the pod was found")
|
||||
}
|
||||
for _, file := range checkpoints {
|
||||
f, err := os.OpenFile(file, os.O_WRONLY|os.O_APPEND, 0644)
|
||||
framework.ExpectNoError(err, "Failed to open file %q", file)
|
||||
_, err = f.WriteString("blabblab")
|
||||
framework.ExpectNoError(err, "Failed to write to file %q", file)
|
||||
f.Sync()
|
||||
f.Close()
|
||||
}
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
func runPodCheckpointTest(f *framework.Framework, podName string, twist func()) {
|
||||
podName = podName + string(uuid.NewUUID())
|
||||
By(fmt.Sprintf("Creating test pod: %s", podName))
|
||||
f.PodClient().CreateSync(&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: podName},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: framework.GetPauseImageName(f.ClientSet),
|
||||
Name: "pause-container",
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
By("Performing disruptive operations")
|
||||
twist()
|
||||
|
||||
By("Remove test pod")
|
||||
f.PodClient().DeleteSync(podName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
|
||||
|
||||
By("Waiting for checkpoint to be removed")
|
||||
if err := wait.PollImmediate(10*time.Second, gcTimeout, func() (bool, error) {
|
||||
checkpoints := findCheckpoints(podName)
|
||||
if len(checkpoints) == 0 {
|
||||
return true, nil
|
||||
}
|
||||
framework.Logf("Checkpoint of %q still exists: %v", podName, checkpoints)
|
||||
return false, nil
|
||||
}); err != nil {
|
||||
framework.Failf("Failed to observe checkpoint being removed within timeout: %v", err)
|
||||
}
|
||||
}
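// Illustrative sketch (not upstream code): further disruption scenarios can reuse
// runPodCheckpointTest by supplying their own "twist" closure, e.g. truncating the
// checkpoint files instead of corrupting them. The os.Truncate call below is an
// assumption used for demonstration only.
//
//	podName := "pod-checkpoint-truncated"
//	runPodCheckpointTest(f, podName, func() {
//		for _, file := range findCheckpoints(podName) {
//			framework.ExpectNoError(os.Truncate(file, 0), "Failed to truncate %q", file)
//		}
//	})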
|
||||
|
||||
// cleanString trims any trailing spaces and newline characters from the input string
|
||||
func cleanString(output string) string {
|
||||
processed := strings.TrimSpace(string(output))
|
||||
regex := regexp.MustCompile(`\r?\n`)
|
||||
processed = regex.ReplaceAllString(processed, "")
|
||||
return processed
|
||||
}
|
||||
|
||||
func writeFileAndSync(path string, data []byte) error {
|
||||
f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = f.Write(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.Sync()
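// Keep the first error: if the Write succeeded, surface any error returned by Close.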
|
||||
if err1 := f.Close(); err == nil {
|
||||
err = err1
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// findCheckpoints returns all checkpoint files containing input string
|
||||
func findCheckpoints(match string) []string {
|
||||
By(fmt.Sprintf("Search checkpoints containing %q", match))
|
||||
checkpoints := []string{}
|
||||
stdout, err := exec.Command("sudo", "grep", "-rl", match, framework.TestContext.DockershimCheckpointDir).CombinedOutput()
|
||||
if err != nil {
|
||||
framework.Logf("grep from dockershim checkpoint directory returns error: %v", err)
|
||||
}
|
||||
if stdout == nil {
|
||||
return checkpoints
|
||||
}
|
||||
files := strings.Split(string(stdout), "\n")
|
||||
for _, file := range files {
|
||||
cleaned := cleanString(file)
|
||||
if len(cleaned) == 0 {
|
||||
continue
|
||||
}
|
||||
checkpoints = append(checkpoints, cleaned)
|
||||
}
|
||||
return checkpoints
|
||||
}
|
406
vendor/k8s.io/kubernetes/test/e2e_node/dynamic_kubelet_config_test.go
generated
vendored
Normal file
@ -0,0 +1,406 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
|
||||
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status"
|
||||
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
type configState struct {
|
||||
desc string
|
||||
configSource *apiv1.NodeConfigSource
|
||||
expectConfigOK *apiv1.NodeCondition
|
||||
expectConfig *kubeletconfig.KubeletConfiguration
|
||||
}
|
||||
|
||||
// This test is marked [Disruptive] because the Kubelet restarts several times during this test.
|
||||
var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKubeletConfig] [Serial] [Disruptive]", func() {
|
||||
f := framework.NewDefaultFramework("dynamic-kubelet-configuration-test")
|
||||
var originalKC *kubeletconfig.KubeletConfiguration
|
||||
var originalConfigMap *apiv1.ConfigMap
|
||||
|
||||
// Dummy context to prevent framework's AfterEach from cleaning up before this test's AfterEach can run
|
||||
Context("", func() {
|
||||
BeforeEach(func() {
|
||||
var err error
|
||||
if originalConfigMap == nil {
|
||||
originalKC, err = getCurrentKubeletConfig()
|
||||
framework.ExpectNoError(err)
|
||||
originalConfigMap = newKubeletConfigMap("original-values", originalKC)
|
||||
originalConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(originalConfigMap)
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
// make sure Dynamic Kubelet Configuration feature is enabled on the Kubelet we are about to test
|
||||
enabled, err := isKubeletConfigEnabled(f)
|
||||
framework.ExpectNoError(err)
|
||||
if !enabled {
|
||||
framework.ExpectNoError(fmt.Errorf("The Dynamic Kubelet Configuration feature is not enabled.\n" +
|
||||
"Pass --feature-gates=DynamicKubeletConfig=true to the Kubelet to enable this feature.\n" +
|
||||
"For `make test-e2e-node`, you can set `TEST_ARGS='--feature-gates=DynamicKubeletConfig=true'`."))
|
||||
}
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
// Set the config back to the original values before moving on.
|
||||
// We care that the values are the same, not where they come from, so it
|
||||
// should be fine to reset the values using a remote config, even if they
|
||||
// were initially set via the locally provisioned configuration.
|
||||
// This is the same strategy several other e2e node tests use.
|
||||
setAndTestKubeletConfigState(f, &configState{desc: "reset to original values",
|
||||
configSource: &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{
|
||||
UID: originalConfigMap.UID,
|
||||
Namespace: originalConfigMap.Namespace,
|
||||
Name: originalConfigMap.Name}},
|
||||
expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionTrue,
|
||||
Message: fmt.Sprintf(status.CurRemoteMessageFmt, originalConfigMap.UID),
|
||||
Reason: status.CurRemoteOKReason},
|
||||
expectConfig: originalKC})
|
||||
})
|
||||
|
||||
Context("When setting new NodeConfigSources that cause transitions between ConfigOK conditions", func() {
|
||||
It("the Kubelet should report the appropriate status and configz", func() {
|
||||
var err error
|
||||
// we base the "correct" configmap off of the current configuration,
|
||||
// but we also set the trial duration very high to prevent changing the last-known-good
|
||||
correctKC := originalKC.DeepCopy()
|
||||
correctKC.ConfigTrialDuration = &metav1.Duration{Duration: time.Hour}
|
||||
correctConfigMap := newKubeletConfigMap("dynamic-kubelet-config-test-correct", correctKC)
|
||||
correctConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(correctConfigMap)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// fail to parse, we insert some bogus stuff into the configMap
|
||||
failParseConfigMap := &apiv1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "dynamic-kubelet-config-test-fail-parse"},
|
||||
Data: map[string]string{
|
||||
"kubelet": "{0xdeadbeef}",
|
||||
},
|
||||
}
|
||||
failParseConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(failParseConfigMap)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// fail to validate, we make a copy and set an invalid KubeAPIQPS on kc before serializing
|
||||
invalidKC := correctKC.DeepCopy()
|
||||
|
||||
invalidKC.KubeAPIQPS = -1
|
||||
failValidateConfigMap := newKubeletConfigMap("dynamic-kubelet-config-test-fail-validate", invalidKC)
|
||||
failValidateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(failValidateConfigMap)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
states := []configState{
|
||||
// Node.Spec.ConfigSource is nil
|
||||
{desc: "Node.Spec.ConfigSource is nil",
|
||||
configSource: nil,
|
||||
expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionTrue,
|
||||
Message: status.CurDefaultMessage,
|
||||
Reason: status.CurDefaultOKReason},
|
||||
expectConfig: nil},
|
||||
|
||||
// Node.Spec.ConfigSource has all nil subfields
|
||||
{desc: "Node.Spec.ConfigSource has all nil subfields",
|
||||
configSource: &apiv1.NodeConfigSource{ConfigMapRef: nil},
|
||||
expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionFalse,
|
||||
Message: "",
|
||||
Reason: fmt.Sprintf(status.FailSyncReasonFmt, status.FailSyncReasonAllNilSubfields)},
|
||||
expectConfig: nil},
|
||||
|
||||
// Node.Spec.ConfigSource.ConfigMapRef is partial
|
||||
{desc: "Node.Spec.ConfigSource.ConfigMapRef is partial",
|
||||
// TODO(mtaufen): check the other 7 partials in a unit test
|
||||
configSource: &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{
|
||||
UID: "foo",
|
||||
Name: "bar"}}, // missing Namespace
|
||||
expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionFalse,
|
||||
Message: "",
|
||||
Reason: fmt.Sprintf(status.FailSyncReasonFmt, status.FailSyncReasonPartialObjectReference)},
|
||||
expectConfig: nil},
|
||||
|
||||
// Node.Spec.ConfigSource's UID does not align with namespace/name
|
||||
{desc: "Node.Spec.ConfigSource's UID does not align with namespace/name",
|
||||
configSource: &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{UID: "foo",
|
||||
Namespace: correctConfigMap.Namespace,
|
||||
Name: correctConfigMap.Name}},
|
||||
expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionFalse,
|
||||
Message: "",
|
||||
Reason: fmt.Sprintf(status.FailSyncReasonFmt, fmt.Sprintf(status.FailSyncReasonUIDMismatchFmt, "foo", correctConfigMap.UID))},
|
||||
expectConfig: nil},
|
||||
|
||||
// correct
|
||||
{desc: "correct",
|
||||
configSource: &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{
|
||||
UID: correctConfigMap.UID,
|
||||
Namespace: correctConfigMap.Namespace,
|
||||
Name: correctConfigMap.Name}},
|
||||
expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionTrue,
|
||||
Message: fmt.Sprintf(status.CurRemoteMessageFmt, correctConfigMap.UID),
|
||||
Reason: status.CurRemoteOKReason},
|
||||
expectConfig: correctKC},
|
||||
|
||||
// fail-parse
|
||||
{desc: "fail-parse",
|
||||
configSource: &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{
|
||||
UID: failParseConfigMap.UID,
|
||||
Namespace: failParseConfigMap.Namespace,
|
||||
Name: failParseConfigMap.Name}},
|
||||
expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionFalse,
|
||||
Message: status.LkgDefaultMessage,
|
||||
Reason: fmt.Sprintf(status.CurFailParseReasonFmt, failParseConfigMap.UID)},
|
||||
expectConfig: nil},
|
||||
|
||||
// fail-validate
|
||||
{desc: "fail-validate",
|
||||
configSource: &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{
|
||||
UID: failValidateConfigMap.UID,
|
||||
Namespace: failValidateConfigMap.Namespace,
|
||||
Name: failValidateConfigMap.Name}},
|
||||
expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionFalse,
|
||||
Message: status.LkgDefaultMessage,
|
||||
Reason: fmt.Sprintf(status.CurFailValidateReasonFmt, failValidateConfigMap.UID)},
|
||||
expectConfig: nil},
|
||||
}
|
||||
|
||||
L := len(states)
|
||||
for i := 1; i <= L; i++ { // need one less iteration than the number of states
|
||||
testBothDirections(f, &states[i-1 : i][0], states[i:L])
|
||||
}
|
||||
|
||||
})
|
||||
})
|
||||
|
||||
Context("When a remote config becomes the new last-known-good before the Kubelet is updated to use a new, bad config", func() {
|
||||
It("it should report a status and configz indicating that it rolled back to the new last-known-good", func() {
|
||||
var err error
|
||||
// we base the "lkg" configmap off of the current configuration, but set the trial
|
||||
// duration very low so that it quickly becomes the last-known-good
|
||||
lkgKC := originalKC.DeepCopy()
|
||||
lkgKC.ConfigTrialDuration = &metav1.Duration{Duration: time.Nanosecond}
|
||||
lkgConfigMap := newKubeletConfigMap("dynamic-kubelet-config-test-intended-lkg", lkgKC)
|
||||
lkgConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(lkgConfigMap)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// bad config map, we insert some bogus stuff into the configMap
|
||||
badConfigMap := &apiv1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "dynamic-kubelet-config-test-bad"},
|
||||
Data: map[string]string{
|
||||
"kubelet": "{0xdeadbeef}",
|
||||
},
|
||||
}
|
||||
badConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(badConfigMap)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
states := []configState{
|
||||
// intended lkg
|
||||
{desc: "intended last-known-good",
|
||||
configSource: &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{
|
||||
UID: lkgConfigMap.UID,
|
||||
Namespace: lkgConfigMap.Namespace,
|
||||
Name: lkgConfigMap.Name}},
|
||||
expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionTrue,
|
||||
Message: fmt.Sprintf(status.CurRemoteMessageFmt, lkgConfigMap.UID),
|
||||
Reason: status.CurRemoteOKReason},
|
||||
expectConfig: lkgKC},
|
||||
|
||||
// bad config
|
||||
{desc: "bad config",
|
||||
configSource: &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{
|
||||
UID: badConfigMap.UID,
|
||||
Namespace: badConfigMap.Namespace,
|
||||
Name: badConfigMap.Name}},
|
||||
expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionFalse,
|
||||
Message: fmt.Sprintf(status.LkgRemoteMessageFmt, lkgConfigMap.UID),
|
||||
Reason: fmt.Sprintf(status.CurFailParseReasonFmt, badConfigMap.UID)},
|
||||
expectConfig: lkgKC},
|
||||
}
|
||||
|
||||
testBothDirections(f, &states[0], states[1:])
|
||||
})
|
||||
})
|
||||
|
||||
// This stress test will help turn up resource leaks across kubelet restarts that can, over time,
|
||||
// break our ability to dynamically update kubelet config
|
||||
Context("When changing the configuration 100 times", func() {
|
||||
It("the Kubelet should report the appropriate status and configz", func() {
|
||||
var err error
|
||||
|
||||
// we just create two configmaps with the same config but different names and toggle between them
|
||||
kc1 := originalKC.DeepCopy()
|
||||
cm1 := newKubeletConfigMap("dynamic-kubelet-config-test-cm1", kc1)
|
||||
cm1, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(cm1)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// slightly change the config
|
||||
kc2 := kc1.DeepCopy()
|
||||
kc2.EventRecordQPS = kc1.EventRecordQPS + 1
|
||||
cm2 := newKubeletConfigMap("dynamic-kubelet-config-test-cm2", kc2)
|
||||
cm2, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(cm2)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
states := []configState{
|
||||
{desc: "cm1",
|
||||
configSource: &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{
|
||||
UID: cm1.UID,
|
||||
Namespace: cm1.Namespace,
|
||||
Name: cm1.Name}},
|
||||
expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionTrue,
|
||||
Message: fmt.Sprintf(status.CurRemoteMessageFmt, cm1.UID),
|
||||
Reason: status.CurRemoteOKReason},
|
||||
expectConfig: kc1},
|
||||
{desc: "cm2",
|
||||
configSource: &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{
|
||||
UID: cm2.UID,
|
||||
Namespace: cm2.Namespace,
|
||||
Name: cm2.Name}},
|
||||
expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionTrue,
|
||||
Message: fmt.Sprintf(status.CurRemoteMessageFmt, cm2.UID),
|
||||
Reason: status.CurRemoteOKReason},
|
||||
expectConfig: kc2},
|
||||
}
|
||||
|
||||
for i := 0; i < 50; i++ { // change the config 101 times (changes 3 times in the first iteration, 2 times in each subsequent iteration)
|
||||
testBothDirections(f, &states[0], states[1:])
|
||||
}
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
// testBothDirections tests the state change represented by each edge, where each state is a vertex,
|
||||
// and there are edges in each direction between first and each of the states.
|
||||
func testBothDirections(f *framework.Framework, first *configState, states []configState) {
|
||||
// set to first and check that everything got set up properly
|
||||
By(fmt.Sprintf("setting configSource to state %q", first.desc))
|
||||
setAndTestKubeletConfigState(f, first)
|
||||
|
||||
// for each state, set to that state, check condition and configz, then reset to first and check again
|
||||
for i := range states {
|
||||
By(fmt.Sprintf("from %q to %q", first.desc, states[i].desc))
|
||||
setAndTestKubeletConfigState(f, &states[i])
|
||||
|
||||
By(fmt.Sprintf("back to %q from %q", first.desc, states[i].desc))
|
||||
setAndTestKubeletConfigState(f, first)
|
||||
}
|
||||
}
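// For example (illustrative, not upstream text): with first = A and states = [B, C],
// the calls above exercise A, A->B, B->A, A->C, C->A, i.e. both directions of every
// edge between the first state and each of the other states.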
|
||||
|
||||
// setAndTestKubeletConfigState tests that after setting the config source, the ConfigOK condition
|
||||
// and (if appropriate) configuration exposed via configz are as expected.
|
||||
// The configuration will be converted to the internal type prior to comparison.
|
||||
func setAndTestKubeletConfigState(f *framework.Framework, state *configState) {
|
||||
// set the desired state, retry a few times in case we are competing with other editors
|
||||
Eventually(func() error {
|
||||
if err := setNodeConfigSource(f, state.configSource); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}, time.Minute, time.Second).Should(BeNil())
|
||||
// check that config source actually got set to what we expect
|
||||
checkNodeConfigSource(f, state.configSource)
|
||||
// check condition
|
||||
checkConfigOKCondition(f, state.expectConfigOK)
|
||||
// check expectConfig
|
||||
if state.expectConfig != nil {
|
||||
checkConfig(f, state.expectConfig)
|
||||
}
|
||||
}
|
||||
|
||||
// make sure the node's config source matches what we expect, after setting it
|
||||
func checkNodeConfigSource(f *framework.Framework, expect *apiv1.NodeConfigSource) {
|
||||
const (
|
||||
timeout = time.Minute
|
||||
interval = time.Second
|
||||
)
|
||||
|
||||
Eventually(func() error {
|
||||
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
actual := node.Spec.ConfigSource
|
||||
if !reflect.DeepEqual(expect, actual) {
|
||||
return fmt.Errorf(spew.Sprintf("expected %#v but got %#v", expect, actual))
|
||||
}
|
||||
return nil
|
||||
}, timeout, interval).Should(BeNil())
|
||||
}
|
||||
|
||||
// make sure the ConfigOK node condition eventually matches what we expect
|
||||
func checkConfigOKCondition(f *framework.Framework, expect *apiv1.NodeCondition) {
|
||||
const (
|
||||
timeout = time.Minute
|
||||
interval = time.Second
|
||||
)
|
||||
|
||||
Eventually(func() error {
|
||||
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
actual := getConfigOKCondition(node.Status.Conditions)
|
||||
if actual == nil {
|
||||
return fmt.Errorf("ConfigOK condition not found on node %q", framework.TestContext.NodeName)
|
||||
}
|
||||
if err := expectConfigOK(expect, actual); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}, timeout, interval).Should(BeNil())
|
||||
}
|
||||
|
||||
// if the actual matches the expect, return nil, else error explaining the mismatch
|
||||
// if a subfield of the expect is the empty string, that check is skipped
|
||||
func expectConfigOK(expect, actual *apiv1.NodeCondition) error {
|
||||
if expect.Status != actual.Status {
|
||||
return fmt.Errorf("expected condition Status %q but got %q", expect.Status, actual.Status)
|
||||
}
|
||||
if len(expect.Message) > 0 && expect.Message != actual.Message {
|
||||
return fmt.Errorf("expected condition Message %q but got %q", expect.Message, actual.Message)
|
||||
}
|
||||
if len(expect.Reason) > 0 && expect.Reason != actual.Reason {
|
||||
return fmt.Errorf("expected condition Reason %q but got %q", expect.Reason, actual.Reason)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// make sure config exposed on configz matches what we expect
|
||||
func checkConfig(f *framework.Framework, expect *kubeletconfig.KubeletConfiguration) {
|
||||
const (
|
||||
timeout = time.Minute
|
||||
interval = time.Second
|
||||
)
|
||||
Eventually(func() error {
|
||||
actual, err := getCurrentKubeletConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !reflect.DeepEqual(expect, actual) {
|
||||
return fmt.Errorf(spew.Sprintf("expected %#v but got %#v", expect, actual))
|
||||
}
|
||||
return nil
|
||||
}, timeout, interval).Should(BeNil())
|
||||
}
|
309
vendor/k8s.io/kubernetes/test/e2e_node/e2e_node_suite_test.go
generated
vendored
Normal file
@ -0,0 +1,309 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// To run tests in this suite
|
||||
// NOTE: This test suite requires password-less sudo capabilities to run the kubelet and kube-apiserver.
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"syscall"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
utilyaml "k8s.io/apimachinery/pkg/util/yaml"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
nodeutil "k8s.io/kubernetes/pkg/api/v1/node"
|
||||
commontest "k8s.io/kubernetes/test/e2e/common"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e_node/services"
|
||||
"k8s.io/kubernetes/test/e2e_node/system"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/kardianos/osext"
|
||||
. "github.com/onsi/ginkgo"
|
||||
"github.com/onsi/ginkgo/config"
|
||||
morereporters "github.com/onsi/ginkgo/reporters"
|
||||
. "github.com/onsi/gomega"
|
||||
"github.com/spf13/pflag"
|
||||
)
|
||||
|
||||
var e2es *services.E2EServices
|
||||
|
||||
// TODO(random-liu): Change the following modes to sub-command.
|
||||
var runServicesMode = flag.Bool("run-services-mode", false, "If true, only run services (etcd, apiserver) in current process, and not run test.")
|
||||
var runKubeletMode = flag.Bool("run-kubelet-mode", false, "If true, only start kubelet, and not run test.")
|
||||
var systemValidateMode = flag.Bool("system-validate-mode", false, "If true, only run system validation in current process, and not run test.")
|
||||
var systemSpecFile = flag.String("system-spec-file", "", "The name of the system spec file that will be used for node conformance test. If it's unspecified or empty, the default system spec (system.DefaultSysSpec) will be used.")
|
||||
|
||||
func init() {
|
||||
framework.RegisterCommonFlags()
|
||||
framework.RegisterNodeFlags()
|
||||
pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
|
||||
// Mark the run-services-mode flag as hidden to prevent user from using it.
|
||||
pflag.CommandLine.MarkHidden("run-services-mode")
|
||||
// It's weird that if I directly use pflag in TestContext, it will report error.
|
||||
// It seems that someone is using flag.Parse() after init() and TestMain().
|
||||
// TODO(random-liu): Find who is using flag.Parse() and cause errors and move the following logic
|
||||
// into TestContext.
|
||||
}
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
pflag.Parse()
|
||||
framework.AfterReadingAllFlags(&framework.TestContext)
|
||||
os.Exit(m.Run())
|
||||
}
|
||||
|
||||
// When running the containerized conformance test, we'll mount the
|
||||
// host root filesystem as readonly to /rootfs.
|
||||
const rootfs = "/rootfs"
|
||||
|
||||
func TestE2eNode(t *testing.T) {
|
||||
if *runServicesMode {
|
||||
// If run-services-mode is specified, only run services in current process.
|
||||
services.RunE2EServices()
|
||||
return
|
||||
}
|
||||
if *runKubeletMode {
|
||||
// If run-kubelet-mode is specified, only start kubelet.
|
||||
services.RunKubelet()
|
||||
return
|
||||
}
|
||||
if *systemValidateMode {
|
||||
// If system-validate-mode is specified, only run system validation in current process.
|
||||
spec := &system.DefaultSysSpec
|
||||
if *systemSpecFile != "" {
|
||||
var err error
|
||||
spec, err = loadSystemSpecFromFile(*systemSpecFile)
|
||||
if err != nil {
|
||||
glog.Exitf("Failed to load system spec: %v", err)
|
||||
}
|
||||
}
|
||||
if framework.TestContext.NodeConformance {
|
||||
// Chroot to /rootfs so that system validation can check the system
|
||||
// as in the root filesystem.
|
||||
// TODO(random-liu): Consider to chroot the whole test process to make writing
|
||||
// test easier.
|
||||
if err := syscall.Chroot(rootfs); err != nil {
|
||||
glog.Exitf("chroot %q failed: %v", rootfs, err)
|
||||
}
|
||||
}
|
||||
if _, err := system.ValidateSpec(*spec, framework.TestContext.ContainerRuntime); err != nil {
|
||||
glog.Exitf("system validation failed: %v", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
// If run-services-mode is not specified, run test.
|
||||
rand.Seed(time.Now().UTC().UnixNano())
|
||||
RegisterFailHandler(Fail)
|
||||
reporters := []Reporter{}
|
||||
reportDir := framework.TestContext.ReportDir
|
||||
if reportDir != "" {
|
||||
// Create the directory if it doesn't already exist
|
||||
if err := os.MkdirAll(reportDir, 0755); err != nil {
|
||||
glog.Errorf("Failed creating report directory: %v", err)
|
||||
} else {
|
||||
// Configure a junit reporter to write to the directory
|
||||
junitFile := fmt.Sprintf("junit_%s_%02d.xml", framework.TestContext.ReportPrefix, config.GinkgoConfig.ParallelNode)
|
||||
junitPath := path.Join(reportDir, junitFile)
|
||||
reporters = append(reporters, morereporters.NewJUnitReporter(junitPath))
|
||||
}
|
||||
}
|
||||
RunSpecsWithDefaultAndCustomReporters(t, "E2eNode Suite", reporters)
|
||||
}
|
||||
|
||||
// Setup the kubelet on the node
|
||||
var _ = SynchronizedBeforeSuite(func() []byte {
|
||||
// Run system validation test.
|
||||
Expect(validateSystem()).To(Succeed(), "system validation")
|
||||
|
||||
// Pre-pull the images tests depend on so we can fail immediately if there is an image pull issue
|
||||
// This helps with debugging test flakes since it is hard to tell when a test failure is due to image pulling.
|
||||
if framework.TestContext.PrepullImages {
|
||||
glog.Infof("Pre-pulling images so that they are cached for the tests.")
|
||||
err := PrePullAllImages()
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
}
|
||||
|
||||
// TODO(yifan): Temporary workaround to disable coreos from auto restart
|
||||
// by masking the locksmithd.
|
||||
// We should mask locksmithd when provisioning the machine.
|
||||
maskLocksmithdOnCoreos()
|
||||
|
||||
if *startServices {
|
||||
// If the services are expected to stop after test, they should monitor the test process.
|
||||
// If the services are expected to keep running after test, they should not monitor the test process.
|
||||
e2es = services.NewE2EServices(*stopServices)
|
||||
Expect(e2es.Start()).To(Succeed(), "should be able to start node services.")
|
||||
glog.Infof("Node services started. Running tests...")
|
||||
} else {
|
||||
glog.Infof("Running tests without starting services.")
|
||||
}
|
||||
|
||||
glog.Infof("Wait for the node to be ready")
|
||||
waitForNodeReady()
|
||||
|
||||
// Reference common test to make the import valid.
|
||||
commontest.CurrentSuite = commontest.NodeE2E
|
||||
|
||||
return nil
|
||||
}, func([]byte) {
|
||||
// update test context with node configuration.
|
||||
Expect(updateTestContext()).To(Succeed(), "update test context with node config.")
|
||||
})
|
||||
|
||||
// Tear down the kubelet on the node
|
||||
var _ = SynchronizedAfterSuite(func() {}, func() {
|
||||
if e2es != nil {
|
||||
if *startServices && *stopServices {
|
||||
glog.Infof("Stopping node services...")
|
||||
e2es.Stop()
|
||||
}
|
||||
}
|
||||
|
||||
glog.Infof("Tests Finished")
|
||||
})
|
||||
|
||||
// validateSystem runs system validation in a separate process and returns error if validation fails.
|
||||
func validateSystem() error {
|
||||
testBin, err := osext.Executable()
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't get current binary: %v", err)
|
||||
}
|
||||
// Pass all flags into the child process, so that it will see the same flag set.
|
||||
output, err := exec.Command(testBin, append([]string{"--system-validate-mode"}, os.Args[1:]...)...).CombinedOutput()
|
||||
// The output of system validation should have been formatted, directly print here.
|
||||
fmt.Print(string(output))
|
||||
if err != nil {
|
||||
return fmt.Errorf("system validation failed: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func maskLocksmithdOnCoreos() {
|
||||
data, err := ioutil.ReadFile("/etc/os-release")
|
||||
if err != nil {
|
||||
// Not all distros contain this file.
|
||||
glog.Infof("Could not read /etc/os-release: %v", err)
|
||||
return
|
||||
}
|
||||
if bytes.Contains(data, []byte("ID=coreos")) {
|
||||
output, err := exec.Command("systemctl", "mask", "--now", "locksmithd").CombinedOutput()
|
||||
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("should be able to mask locksmithd - output: %q", string(output)))
|
||||
glog.Infof("Locksmithd is masked successfully")
|
||||
}
|
||||
}
|
||||
|
||||
func waitForNodeReady() {
|
||||
const (
|
||||
// nodeReadyTimeout is the time to wait for node to become ready.
|
||||
nodeReadyTimeout = 2 * time.Minute
|
||||
// nodeReadyPollInterval is the interval to check node ready.
|
||||
nodeReadyPollInterval = 1 * time.Second
|
||||
)
|
||||
client, err := getAPIServerClient()
|
||||
Expect(err).NotTo(HaveOccurred(), "should be able to get apiserver client.")
|
||||
Eventually(func() error {
|
||||
node, err := getNode(client)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get node: %v", err)
|
||||
}
|
||||
if !nodeutil.IsNodeReady(node) {
|
||||
return fmt.Errorf("node is not ready: %+v", node)
|
||||
}
|
||||
return nil
|
||||
}, nodeReadyTimeout, nodeReadyPollInterval).Should(Succeed())
|
||||
}
|
||||
|
||||
// updateTestContext updates the test context with the node name.
|
||||
// TODO(random-liu): Using dynamic kubelet configuration feature to
|
||||
// update test context with node configuration.
|
||||
func updateTestContext() error {
|
||||
client, err := getAPIServerClient()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get apiserver client: %v", err)
|
||||
}
|
||||
// Update test context with current node object.
|
||||
node, err := getNode(client)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get node: %v", err)
|
||||
}
|
||||
framework.TestContext.NodeName = node.Name // Set node name.
|
||||
// Update test context with current kubelet configuration.
|
||||
// This assumes all tests which dynamically change kubelet configuration
|
||||
// must: 1) run in serial; 2) restore kubelet configuration after test.
|
||||
kubeletCfg, err := getCurrentKubeletConfig()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get kubelet configuration: %v", err)
|
||||
}
|
||||
framework.TestContext.KubeletConfig = *kubeletCfg // Set kubelet config.
|
||||
return nil
|
||||
}
|
||||
|
||||
// getNode gets node object from the apiserver.
|
||||
func getNode(c *clientset.Clientset) (*v1.Node, error) {
|
||||
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
|
||||
Expect(err).NotTo(HaveOccurred(), "should be able to list nodes.")
|
||||
if nodes == nil {
|
||||
return nil, fmt.Errorf("the node list is nil.")
|
||||
}
|
||||
Expect(len(nodes.Items) > 1).NotTo(BeTrue(), "should not be more than 1 node.")
|
||||
if len(nodes.Items) == 0 {
|
||||
return nil, fmt.Errorf("empty node list: %+v", nodes)
|
||||
}
|
||||
return &nodes.Items[0], nil
|
||||
}
|
||||
|
||||
// getAPIServerClient gets an apiserver client.
|
||||
func getAPIServerClient() (*clientset.Clientset, error) {
|
||||
config, err := framework.LoadConfig()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to load config: %v", err)
|
||||
}
|
||||
client, err := clientset.NewForConfig(config)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create client: %v", err)
|
||||
}
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// loadSystemSpecFromFile returns the system spec from the file with the
|
||||
// filename.
|
||||
func loadSystemSpecFromFile(filename string) (*system.SysSpec, error) {
|
||||
b, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
data, err := utilyaml.ToJSON(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
spec := new(system.SysSpec)
|
||||
if err := json.Unmarshal(data, spec); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return spec, nil
|
||||
}
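// Illustrative usage (a sketch, not upstream code): the spec file is YAML or JSON
// that unmarshals into system.SysSpec, typically supplied via --system-spec-file.
// The path below is hypothetical.
//
//	spec, err := loadSystemSpecFromFile("/tmp/node-system-spec.yaml")
//	if err != nil {
//		glog.Exitf("Failed to load system spec: %v", err)
//	}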
|
33
vendor/k8s.io/kubernetes/test/e2e_node/environment/BUILD
generated
vendored
Normal file
@ -0,0 +1,33 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_binary",
|
||||
"go_library",
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "environment",
|
||||
importpath = "k8s.io/kubernetes/test/e2e_node/environment",
|
||||
library = ":go_default_library",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["conformance.go"],
|
||||
importpath = "k8s.io/kubernetes/test/e2e_node/environment",
|
||||
deps = ["//pkg/kubelet/cadvisor:go_default_library"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
259
vendor/k8s.io/kubernetes/test/e2e_node/environment/conformance.go
generated
vendored
Normal file
@ -0,0 +1,259 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Build the binary with `go build conformance.go`, then run the conformance binary on a node candidate. If compiled
|
||||
// on a non-linux machine, must be cross compiled for the host.
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"errors"
|
||||
"os"
|
||||
|
||||
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
|
||||
)
|
||||
|
||||
const success = "\033[0;32mSUCCESS\033[0m"
|
||||
const failed = "\033[0;31mFAILED\033[0m"
|
||||
const notConfigured = "\033[0;34mNOT CONFIGURED\033[0m"
|
||||
const skipped = "\033[0;34mSKIPPED\033[0m"
|
||||
|
||||
var checkFlag = flag.String(
|
||||
"check", "all", "what to check for conformance. One or more of all,container-runtime,daemons,dns,firewall,kernel")
|
||||
|
||||
func init() {
|
||||
// Set this to false to undo util/logs.go setting it to true. Prevents cadvisor log spam.
|
||||
// Remove this once util/logs.go stops setting the flag to true.
|
||||
flag.Set("logtostderr", "false")
|
||||
flag.Parse()
|
||||
}
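// Example invocation (illustrative, not upstream documentation): after building with
// `go build conformance.go`, a subset of checks can be selected via the -check flag,
// e.g. ./conformance -check=kernel,daemons,firewall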
|
||||
|
||||
// TODO: Should we write an e2e test for this?
|
||||
func main() {
|
||||
o := strings.Split(*checkFlag, ",")
|
||||
errs := check(o...)
|
||||
if len(errs) > 0 {
|
||||
os.Exit(1)
|
||||
} else {
|
||||
os.Exit(0)
|
||||
}
|
||||
}
|
||||
|
||||
// check returns errors found while checking the provided components. Will print errors to stdout.
|
||||
func check(options ...string) []error {
|
||||
errs := []error{}
|
||||
for _, c := range options {
|
||||
switch c {
|
||||
case "all":
|
||||
errs = appendNotNil(errs, kernel())
|
||||
errs = appendNotNil(errs, containerRuntime())
|
||||
errs = appendNotNil(errs, daemons())
|
||||
errs = appendNotNil(errs, firewall())
|
||||
errs = appendNotNil(errs, dns())
|
||||
case "containerruntime":
|
||||
errs = appendNotNil(errs, containerRuntime())
|
||||
case "daemons":
|
||||
errs = appendNotNil(errs, daemons())
|
||||
case "dns":
|
||||
errs = appendNotNil(errs, dns())
|
||||
case "firewall":
|
||||
errs = appendNotNil(errs, firewall())
|
||||
case "kernel":
|
||||
errs = appendNotNil(errs, kernel())
|
||||
default:
|
||||
fmt.Printf("Unrecognized option %s", c)
|
||||
errs = append(errs, fmt.Errorf("Unrecognized option %s", c))
|
||||
}
|
||||
}
|
||||
return errs
|
||||
}
|
||||
|
||||
const dockerVersionRegex = `1\.[7-9]\.[0-9]+`
|
||||
|
||||
// containerRuntime checks that a suitable container runtime is installed and recognized by cadvisor: docker 1.7-1.9
|
||||
func containerRuntime() error {
|
||||
dockerRegex, err := regexp.Compile(dockerVersionRegex)
|
||||
if err != nil {
|
||||
// This should never happen and can only be fixed by changing the code
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Setup cadvisor to check the container environment
|
||||
c, err := cadvisor.New("", 0 /*don't start the http server*/, cadvisor.NewImageFsInfoProvider("docker", ""), "/var/lib/kubelet", false)
|
||||
if err != nil {
|
||||
return printError("Container Runtime Check: %s Could not start cadvisor %v", failed, err)
|
||||
}
|
||||
|
||||
vi, err := c.VersionInfo()
|
||||
if err != nil {
|
||||
return printError("Container Runtime Check: %s Could not get VersionInfo %v", failed, err)
|
||||
}
|
||||
|
||||
d := vi.DockerVersion
|
||||
if !dockerRegex.Match([]byte(d)) {
|
||||
return printError(
"Container Runtime Check: %s Docker version %s does not match %s. You may need to run as root or the "+
"user the kubelet will run under.", failed, d, dockerVersionRegex)
|
||||
}
|
||||
|
||||
return printSuccess("Container Runtime Check: %s", success)
|
||||
}
|
||||
|
||||
const kubeletClusterDnsRegexStr = `\/kubelet.*--cluster-dns=(\S+) `
|
||||
const kubeletClusterDomainRegexStr = `\/kubelet.*--cluster-domain=(\S+)`
|
||||
|
||||
// dns checks that cluster dns has been properly configured and can resolve the kubernetes.default service
|
||||
func dns() error {
|
||||
dnsRegex, err := regexp.Compile(kubeletClusterDnsRegexStr)
|
||||
if err != nil {
|
||||
// This should never happen and can only be fixed by changing the code
|
||||
panic(err)
|
||||
}
|
||||
domainRegex, err := regexp.Compile(kubeletClusterDomainRegexStr)
|
||||
if err != nil {
|
||||
// This should never happen and can only be fixed by changing the code
|
||||
panic(err)
|
||||
}
|
||||
|
||||
h, err := net.LookupHost("kubernetes.default")
|
||||
if err == nil {
|
||||
return printSuccess("Dns Check (Optional): %s", success)
|
||||
}
|
||||
if len(h) > 0 {
|
||||
return printSuccess("Dns Check (Optional): %s", success)
|
||||
}
|
||||
|
||||
kubecmd, err := exec.Command("ps", "aux").CombinedOutput()
|
||||
|
||||
// look for the dns flag and parse the value
|
||||
dns := dnsRegex.FindStringSubmatch(string(kubecmd))
|
||||
if len(dns) < 2 {
|
||||
return printSuccess(
|
||||
"Dns Check (Optional): %s No hosts resolve to kubernetes.default. kubelet will need to set "+
|
||||
"--cluster-dns and --cluster-domain when run", notConfigured)
|
||||
}
|
||||
|
||||
// look for the domain flag and parse the value
|
||||
domain := domainRegex.FindStringSubmatch(string(kubecmd))
|
||||
if len(domain) < 2 {
|
||||
return printSuccess(
|
||||
"Dns Check (Optional): %s No hosts resolve to kubernetes.default. kubelet will need to set "+
|
||||
"--cluster-dns and --cluster-domain when run", notConfigured)
|
||||
}
|
||||
|
||||
// do a lookup with the flags the kubelet is running with
|
||||
nsArgs := []string{"-q=a", fmt.Sprintf("kubernetes.default.%s", domain[1]), dns[1]}
|
||||
if err = exec.Command("nslookup", nsArgs...).Run(); err != nil {
|
||||
// Mark this as failed since there was a clear intention to set it up, but it is done so improperly
|
||||
return printError(
|
||||
"Dns Check (Optional): %s No hosts resolve to kubernetes.default kubelet found, but cannot resolve "+
|
||||
"kubernetes.default using nslookup %s error: %v", failed, strings.Join(nsArgs, " "), err)
|
||||
}
|
||||
|
||||
// Can resolve kubernetes.default using the kubelet dns and domain values
|
||||
return printSuccess("Dns Check (Optional): %s", success)
|
||||
}
|
||||
|
||||
const cmdlineCGroupMemory = `cgroup_enable=memory`
|
||||
|
||||
// kernel checks that the kernel has been configured correctly to support the required cgroup features
|
||||
func kernel() error {
|
||||
cmdline, err := ioutil.ReadFile("/proc/cmdline")
|
||||
if err != nil {
|
||||
return printError("Kernel Command Line Check %s: Could not check /proc/cmdline", failed)
|
||||
}
|
||||
if !strings.Contains(string(cmdline), cmdlineCGroupMemory) {
|
||||
return printError("Kernel Command Line Check %s: cgroup_enable=memory not enabled in /proc/cmdline", failed)
|
||||
}
|
||||
return printSuccess("Kernel Command Line %s", success)
|
||||
}
|
||||
|
||||
const iptablesInputRegexStr = `Chain INPUT \(policy DROP\)`
|
||||
const iptablesForwardRegexStr = `Chain FORWARD \(policy DROP\)`
|
||||
|
||||
// firewall checks that iptables does not have common firewall rules setup that would disrupt traffic
|
||||
func firewall() error {
|
||||
out, err := exec.Command("iptables", "-L", "INPUT").CombinedOutput()
|
||||
if err != nil {
|
||||
return printSuccess("Firewall IPTables Check %s: Could not run iptables", skipped)
|
||||
}
|
||||
inputRegex, err := regexp.Compile(iptablesInputRegexStr)
|
||||
if err != nil {
|
||||
// This should never happen and can only be fixed by changing the code
|
||||
panic(err)
|
||||
}
|
||||
if inputRegex.Match(out) {
|
||||
return printError("Firewall IPTables Check %s: Found INPUT rule matching %s", failed, iptablesInputRegexStr)
|
||||
}
|
||||
|
||||
// Check GCE forward rules
|
||||
out, err = exec.Command("iptables", "-L", "FORWARD").CombinedOutput()
|
||||
if err != nil {
|
||||
return printSuccess("Firewall IPTables Check %s: Could not run iptables", skipped)
|
||||
}
|
||||
forwardRegex, err := regexp.Compile(iptablesForwardRegexStr)
|
||||
if err != nil {
|
||||
// This should never happen and can only be fixed by changing the code
|
||||
panic(err)
|
||||
}
|
||||
if forwardRegex.Match(out) {
|
||||
return printError("Firewall IPTables Check %s: Found FORWARD rule matching %s", failed, iptablesForwardRegexStr)
|
||||
}
|
||||
|
||||
return printSuccess("Firewall IPTables Check %s", success)
|
||||
}
|
||||
|
||||
// daemons checks that the required node programs are running: kubelet, kube-proxy, and docker
|
||||
func daemons() error {
|
||||
if exec.Command("pgrep", "-f", "kubelet").Run() != nil {
|
||||
return printError("Daemon Check %s: kubelet process not found", failed)
|
||||
}
|
||||
|
||||
if exec.Command("pgrep", "-f", "kube-proxy").Run() != nil {
|
||||
return printError("Daemon Check %s: kube-proxy process not found", failed)
|
||||
}
|
||||
|
||||
return printSuccess("Daemon Check %s", success)
|
||||
}
|
||||
|
||||
// printError provides its arguments to print a format string to the console (newline terminated) and returns an
|
||||
// error with the same string
|
||||
func printError(s string, args ...interface{}) error {
|
||||
es := fmt.Sprintf(s, args...)
|
||||
fmt.Println(es)
|
||||
return errors.New(es)
|
||||
}
|
||||
|
||||
// printSuccess provides its arguments to print a format string to the console (newline terminated) and returns nil
|
||||
func printSuccess(s string, args ...interface{}) error {
|
||||
fmt.Println(fmt.Sprintf(s, args...))
|
||||
return nil
|
||||
}
|
||||
|
||||
// appendNotNil appends err to errs iff err is not nil
|
||||
func appendNotNil(errs []error, err error) []error {
|
||||
if err != nil {
|
||||
return append(errs, err)
|
||||
}
|
||||
return errs
|
||||
}
|
110
vendor/k8s.io/kubernetes/test/e2e_node/environment/setup_host.sh
generated
vendored
Executable file
@ -0,0 +1,110 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2016 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Script used to configure node e2e test hosts from gce base images.
|
||||
# DISCLAIMER: This script is not actively tested or maintained. No guarantees that this will work
|
||||
# on any host environment. Contributions encouraged! Send PRs to pwittrock (github.com).
|
||||
#
|
||||
# At some point this has successfully configured the following distros:
|
||||
# - ubuntu trusty
|
||||
# - containervm (no-op)
|
||||
# - rhel 7
|
||||
# - centos 7
|
||||
# - debian jessie
|
||||
|
||||
# RHEL os detection
|
||||
cat /etc/*-release | grep "ID=\"rhel\""
|
||||
OS_RHEL=$?
|
||||
|
||||
# On a systemd environment, enable cpu and memory accounting for all processes by default.
|
||||
if [ -d /etc/systemd ]; then
|
||||
cat <<EOF >kubernetes-accounting.conf
|
||||
[Manager]
|
||||
DefaultCPUAccounting=yes
|
||||
DefaultMemoryAccounting=yes
|
||||
EOF
|
||||
sudo mkdir -p /etc/systemd/system.conf.d/
|
||||
sudo cp kubernetes-accounting.conf /etc/systemd/system.conf.d
|
||||
sudo systemctl daemon-reload
|
||||
fi
|
||||
|
||||
# For coreos, disable updates
|
||||
if $(sudo systemctl status update-engine &>/dev/null); then
|
||||
sudo systemctl mask update-engine locksmithd
|
||||
fi
|
||||
|
||||
# Fixup sudoers require tty
|
||||
sudo grep -q "# Defaults requiretty" /etc/sudoers
|
||||
if [ $? -ne 0 ] ; then
|
||||
sudo sed -i 's/Defaults requiretty/# Defaults requiretty/' /etc/sudoers
|
||||
fi
|
||||
|
||||
# Install nsenter for ubuntu images
|
||||
cat /etc/*-release | grep "ID=ubuntu"
|
||||
if [ $? -eq 0 ]; then
|
||||
if ! which nsenter > /dev/null; then
|
||||
echo "Do not find nsenter. Install it."
|
||||
mkdir -p /tmp/nsenter-install
|
||||
cd /tmp/nsenter-install
|
||||
curl https://www.kernel.org/pub/linux/utils/util-linux/v2.24/util-linux-2.24.tar.gz | tar -zxf-
|
||||
sudo apt-get update
|
||||
sudo apt-get --yes install make
|
||||
sudo apt-get --yes install gcc
|
||||
cd util-linux-2.24
|
||||
./configure --without-ncurses
|
||||
make nsenter
|
||||
sudo cp nsenter /usr/local/bin
|
||||
rm -rf /tmp/nsenter-install
|
||||
fi
|
||||
fi
|
||||
|
||||
# Install docker
|
||||
hash docker 2>/dev/null
|
||||
if [ $? -ne 0 ]; then
|
||||
# RHEL platforms should always install from RHEL repository
|
||||
# This will install the latest supported stable docker platform on RHEL
|
||||
if [ $OS_RHEL -eq 0 ]; then
|
||||
sudo yum install -y docker-latest
|
||||
sudo groupadd docker
|
||||
sudo systemctl enable docker-latest.service
|
||||
sudo systemctl start docker-latest.service
|
||||
else
|
||||
curl -fsSL https://get.docker.com/ | sh
|
||||
sudo service docker start
|
||||
sudo systemctl enable docker.service
|
||||
fi
|
||||
fi
|
||||
|
||||
# Allow jenkins access to docker
|
||||
id jenkins || sudo useradd jenkins -m
|
||||
sudo usermod -a -G docker jenkins
|
||||
|
||||
# install lxc
|
||||
cat /etc/*-release | grep "ID=debian"
|
||||
if [ $? -ne 0 ]; then
|
||||
hash apt-get 2>/dev/null
|
||||
if [ $? -ne 1 ]; then
|
||||
sudo apt-get install lxc -y
|
||||
lxc-checkconfig
|
||||
sudo sed -i 's/GRUB_CMDLINE_LINUX="\(.*\)"/GRUB_CMDLINE_LINUX="\1 cgroup_enable=memory"/' /etc/default/grub
|
||||
sudo update-grub
|
||||
fi
|
||||
fi
|
||||
|
||||
# delete init kubelet from containervm so that is doesn't startup
|
||||
if [ -f /etc/init.d/kubelet ]; then
|
||||
sudo rm /etc/init.d/kubelet
|
||||
fi
|
670
vendor/k8s.io/kubernetes/test/e2e_node/eviction_test.go
generated
vendored
Normal file
@ -0,0 +1,670 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
nodeutil "k8s.io/kubernetes/pkg/api/v1/node"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
|
||||
stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm"
|
||||
kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
// Eviction Policy is described here:
|
||||
// https://github.com/kubernetes/community/blob/master/contributors/design-proposals/kubelet-eviction.md
|
||||
|
||||
const (
|
||||
postTestConditionMonitoringPeriod = 1 * time.Minute
|
||||
evictionPollInterval = 2 * time.Second
|
||||
pressureDissapearTimeout = 1 * time.Minute
|
||||
longPodDeletionTimeout = 10 * time.Minute
|
||||
// pressure conditions often surface after evictions because the kubelet only updates
|
||||
// node conditions periodically.
|
||||
// we wait this period after evictions to make sure that we wait out this delay
|
||||
pressureDelay = 20 * time.Second
|
||||
testContextFmt = "when we run containers that should cause %s"
|
||||
noPressure = v1.NodeConditionType("NoPressure")
|
||||
lotsOfDisk = 10240 // 10 GB, expressed in MB
|
||||
)
|
||||
|
||||
// InodeEviction tests that the node responds to node disk pressure by evicting only responsible pods.
|
||||
// Node disk pressure is induced by consuming all inodes on the node.
|
||||
var _ = framework.KubeDescribe("InodeEviction [Slow] [Serial] [Disruptive] [Flaky]", func() {
|
||||
f := framework.NewDefaultFramework("inode-eviction-test")
|
||||
expectedNodeCondition := v1.NodeDiskPressure
|
||||
pressureTimeout := 15 * time.Minute
|
||||
inodesConsumed := uint64(200000)
|
||||
Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
|
||||
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
|
||||
// Set the eviction threshold to inodesFree - inodesConsumed, so that using inodesConsumed causes an eviction.
|
||||
summary := eventuallyGetSummary()
|
||||
inodesFree := *summary.Node.Fs.InodesFree
|
||||
if inodesFree <= inodesConsumed {
|
||||
framework.Skipf("Too few inodes free on the host for the InodeEviction test to run")
|
||||
}
|
||||
initialConfig.EvictionHard = map[string]string{"nodefs.inodesFree": fmt.Sprintf("%d", inodesFree-inodesConsumed)}
|
||||
initialConfig.EvictionMinimumReclaim = map[string]string{}
|
||||
})
|
||||
runEvictionTest(f, pressureTimeout, expectedNodeCondition, logInodeMetrics, []podEvictSpec{
|
||||
{
|
||||
evictionPriority: 1,
|
||||
pod: inodeConsumingPod("container-inode-hog", nil),
|
||||
},
|
||||
{
|
||||
evictionPriority: 1,
|
||||
pod: inodeConsumingPod("volume-inode-hog", &v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}),
|
||||
},
|
||||
{
|
||||
evictionPriority: 0,
|
||||
pod: innocentPod(),
|
||||
},
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
// MemoryAllocatableEviction tests that the node responds to node memory pressure by evicting only responsible pods.
|
||||
// Node memory pressure is only encountered because we reserve the majority of the node's capacity via kube-reserved.
|
||||
var _ = framework.KubeDescribe("MemoryAllocatableEviction [Slow] [Serial] [Disruptive] [Flaky]", func() {
|
||||
f := framework.NewDefaultFramework("memory-allocatable-eviction-test")
|
||||
expectedNodeCondition := v1.NodeMemoryPressure
|
||||
pressureTimeout := 10 * time.Minute
|
||||
Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
|
||||
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
|
||||
// Set large system and kube reserved values to trigger allocatable thresholds far before hard eviction thresholds.
|
||||
kubeReserved := getNodeCPUAndMemoryCapacity(f)[v1.ResourceMemory]
|
||||
// The default hard eviction threshold is 250Mb, so Allocatable = Capacity - Reserved - 250Mb
|
||||
// We want Allocatable = 50Mb, so set Reserved = Capacity - Allocatable - 250Mb = Capacity - 300Mb
|
||||
kubeReserved.Sub(resource.MustParse("300Mi"))
|
||||
initialConfig.KubeReserved = map[string]string{
|
||||
string(v1.ResourceMemory): kubeReserved.String(),
|
||||
}
|
||||
initialConfig.EnforceNodeAllocatable = []string{cm.NodeAllocatableEnforcementKey}
|
||||
initialConfig.CgroupsPerQOS = true
|
||||
})
|
||||
runEvictionTest(f, pressureTimeout, expectedNodeCondition, logMemoryMetrics, []podEvictSpec{
|
||||
{
|
||||
evictionPriority: 1,
|
||||
pod: getMemhogPod("memory-hog-pod", "memory-hog", v1.ResourceRequirements{}),
|
||||
},
|
||||
{
|
||||
evictionPriority: 0,
|
||||
pod: innocentPod(),
|
||||
},
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
// LocalStorageEviction tests that the node responds to node disk pressure by evicting only responsible pods
|
||||
// Disk pressure is induced by running pods which consume disk space.
|
||||
var _ = framework.KubeDescribe("LocalStorageEviction [Slow] [Serial] [Disruptive] [Flaky]", func() {
|
||||
f := framework.NewDefaultFramework("localstorage-eviction-test")
|
||||
pressureTimeout := 10 * time.Minute
|
||||
expectedNodeCondition := v1.NodeDiskPressure
|
||||
Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
|
||||
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
|
||||
diskConsumed := resource.MustParse("100Mi")
|
||||
summary := eventuallyGetSummary()
|
||||
availableBytes := *(summary.Node.Fs.AvailableBytes)
|
||||
initialConfig.EvictionHard = map[string]string{"nodefs.available": fmt.Sprintf("%d", availableBytes-uint64(diskConsumed.Value()))}
|
||||
initialConfig.EvictionMinimumReclaim = map[string]string{}
|
||||
})
|
||||
runEvictionTest(f, pressureTimeout, expectedNodeCondition, logDiskMetrics, []podEvictSpec{
|
||||
{
|
||||
evictionPriority: 1,
|
||||
pod: diskConsumingPod("container-disk-hog", lotsOfDisk, nil, v1.ResourceRequirements{}),
|
||||
},
|
||||
{
|
||||
evictionPriority: 0,
|
||||
pod: innocentPod(),
|
||||
},
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
// LocalStorageEviction tests that the node responds to node disk pressure by evicting only responsible pods
|
||||
// Disk pressure is induced by running pods which consume disk space, which exceed the soft eviction threshold.
|
||||
// Note: This test's purpose is to test Soft Evictions. Local storage was chosen since it is the least costly to run.
|
||||
var _ = framework.KubeDescribe("LocalStorageSoftEviction [Slow] [Serial] [Disruptive] [Flaky]", func() {
|
||||
f := framework.NewDefaultFramework("localstorage-eviction-test")
|
||||
pressureTimeout := 10 * time.Minute
|
||||
expectedNodeCondition := v1.NodeDiskPressure
|
||||
Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
|
||||
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
|
||||
diskConsumed := resource.MustParse("100Mi")
|
||||
summary := eventuallyGetSummary()
|
||||
availableBytes := *(summary.Node.Fs.AvailableBytes)
|
||||
if availableBytes <= uint64(diskConsumed.Value()) {
|
||||
framework.Skipf("Too little disk free on the host for the LocalStorageSoftEviction test to run")
|
||||
}
|
||||
initialConfig.EvictionSoft = map[string]string{"nodefs.available": fmt.Sprintf("%d", availableBytes-uint64(diskConsumed.Value()))}
|
||||
initialConfig.EvictionSoftGracePeriod = map[string]string{"nodefs.available": "1m"}
|
||||
// Defer to the pod default grace period
|
||||
initialConfig.EvictionMaxPodGracePeriod = 30
|
||||
initialConfig.EvictionMinimumReclaim = map[string]string{}
|
||||
// Ensure that pods are not evicted because of the eviction-hard threshold
|
||||
initialConfig.EvictionHard = map[string]string{}
|
||||
})
|
||||
runEvictionTest(f, pressureTimeout, expectedNodeCondition, logDiskMetrics, []podEvictSpec{
|
||||
{
|
||||
evictionPriority: 1,
|
||||
pod: diskConsumingPod("container-disk-hog", lotsOfDisk, nil, v1.ResourceRequirements{}),
|
||||
},
|
||||
{
|
||||
evictionPriority: 0,
|
||||
pod: innocentPod(),
|
||||
},
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
// LocalStorageCapacityIsolationEviction tests that container and volume local storage limits are enforced through evictions
|
||||
var _ = framework.KubeDescribe("LocalStorageCapacityIsolationEviction [Slow] [Serial] [Disruptive] [Flaky] [Feature:LocalStorageCapacityIsolation]", func() {
|
||||
f := framework.NewDefaultFramework("localstorage-eviction-test")
|
||||
evictionTestTimeout := 10 * time.Minute
|
||||
Context(fmt.Sprintf(testContextFmt, "evictions due to pod local storage violations"), func() {
|
||||
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
|
||||
initialConfig.FeatureGates[string(features.LocalStorageCapacityIsolation)] = true
|
||||
initialConfig.EvictionHard = map[string]string{}
|
||||
})
|
||||
sizeLimit := resource.MustParse("100Mi")
|
||||
useOverLimit := 101 /* Mb */
|
||||
useUnderLimit := 99 /* Mb */
|
||||
containerLimit := v1.ResourceList{v1.ResourceEphemeralStorage: sizeLimit}
|
||||
|
||||
runEvictionTest(f, evictionTestTimeout, noPressure, logDiskMetrics, []podEvictSpec{
|
||||
{
|
||||
evictionPriority: 1, // This pod should be evicted because emptyDir (default storage type) usage violation
|
||||
pod: diskConsumingPod("emptydir-disk-sizelimit", useOverLimit, &v1.VolumeSource{
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{SizeLimit: &sizeLimit},
|
||||
}, v1.ResourceRequirements{}),
|
||||
},
|
||||
{
|
||||
evictionPriority: 1, // This pod should be evicted because of memory emptyDir usage violation
|
||||
pod: diskConsumingPod("emptydir-memory-sizelimit", useOverLimit, &v1.VolumeSource{
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{Medium: "Memory", SizeLimit: &sizeLimit},
|
||||
}, v1.ResourceRequirements{}),
|
||||
},
|
||||
{
|
||||
evictionPriority: 1, // This pod should cross the container limit by writing to its writable layer.
|
||||
pod: diskConsumingPod("container-disk-limit", useOverLimit, nil, v1.ResourceRequirements{Limits: containerLimit}),
|
||||
},
|
||||
{
|
||||
evictionPriority: 1, // This pod should hit the container limit by writing to an emptydir
|
||||
pod: diskConsumingPod("container-emptydir-disk-limit", useOverLimit, &v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}},
|
||||
v1.ResourceRequirements{Limits: containerLimit}),
|
||||
},
|
||||
{
|
||||
evictionPriority: 0, // This pod should not be evicted because it uses less than its limit
|
||||
pod: diskConsumingPod("emptydir-disk-below-sizelimit", useUnderLimit, &v1.VolumeSource{
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{SizeLimit: &sizeLimit},
|
||||
}, v1.ResourceRequirements{}),
|
||||
},
|
||||
{
|
||||
evictionPriority: 0, // This pod should not be evicted because it uses less than its limit
|
||||
pod: diskConsumingPod("container-disk-below-sizelimit", useUnderLimit, nil, v1.ResourceRequirements{Limits: containerLimit}),
|
||||
},
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
// PriorityMemoryEvictionOrdering tests that the node responds to node memory pressure by evicting pods.
|
||||
// It also verifies that the guaranteed pod is never evicted, and that the lower-priority pod is evicted before
|
||||
// the higher priority pod.
|
||||
var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [Disruptive] [Flaky]", func() {
|
||||
f := framework.NewDefaultFramework("priority-memory-eviction-ordering-test")
|
||||
expectedNodeCondition := v1.NodeMemoryPressure
|
||||
pressureTimeout := 10 * time.Minute
|
||||
Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
|
||||
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
|
||||
initialConfig.FeatureGates[string(features.PodPriority)] = true
|
||||
memoryConsumed := resource.MustParse("600Mi")
|
||||
summary := eventuallyGetSummary()
|
||||
availableBytes := *(summary.Node.Memory.AvailableBytes)
|
||||
if availableBytes <= uint64(memoryConsumed.Value()) {
|
||||
framework.Skipf("Too little memory free on the host for the PriorityMemoryEvictionOrdering test to run")
|
||||
}
|
||||
initialConfig.EvictionHard = map[string]string{"memory.available": fmt.Sprintf("%d", availableBytes-uint64(memoryConsumed.Value()))}
|
||||
initialConfig.EvictionMinimumReclaim = map[string]string{}
|
||||
})
|
||||
specs := []podEvictSpec{
|
||||
{
|
||||
evictionPriority: 2,
|
||||
pod: getMemhogPod("memory-hog-pod", "memory-hog", v1.ResourceRequirements{}),
|
||||
},
|
||||
{
|
||||
evictionPriority: 1,
|
||||
pod: getMemhogPod("high-priority-memory-hog-pod", "high-priority-memory-hog", v1.ResourceRequirements{}),
|
||||
},
|
||||
{
|
||||
evictionPriority: 0,
|
||||
pod: getMemhogPod("guaranteed-pod", "guaranteed-pod", v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceMemory: resource.MustParse("300Mi"),
|
||||
},
|
||||
Limits: v1.ResourceList{
|
||||
v1.ResourceMemory: resource.MustParse("300Mi"),
|
||||
},
|
||||
}),
|
||||
},
|
||||
}
|
||||
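// Give the high-priority memory hog (specs[1]) the maximum int32 priority so that, under the PodPriority
// feature gate enabled above, it outranks the best-effort hog and is expected to be evicted after it.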
systemPriority := int32(2147483647)
|
||||
specs[1].pod.Spec.Priority = &systemPriority
|
||||
runEvictionTest(f, pressureTimeout, expectedNodeCondition, logMemoryMetrics, specs)
|
||||
})
|
||||
})
|
||||
|
||||
// PriorityLocalStorageEvictionOrdering tests that the node responds to node disk pressure by evicting pods.
|
||||
// It also verifies that the guaranteed pod is never evicted, and that the lower-priority pod is evicted before
|
||||
// the higher priority pod.
|
||||
var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Serial] [Disruptive] [Flaky]", func() {
|
||||
f := framework.NewDefaultFramework("priority-disk-eviction-ordering-test")
|
||||
expectedNodeCondition := v1.NodeDiskPressure
|
||||
pressureTimeout := 10 * time.Minute
|
||||
Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
|
||||
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
|
||||
initialConfig.FeatureGates[string(features.PodPriority)] = true
|
||||
initialConfig.FeatureGates[string(features.LocalStorageCapacityIsolation)] = true
|
||||
diskConsumed := resource.MustParse("350Mi")
|
||||
summary := eventuallyGetSummary()
|
||||
availableBytes := *(summary.Node.Fs.AvailableBytes)
|
||||
if availableBytes <= uint64(diskConsumed.Value()) {
|
||||
framework.Skipf("Too little disk free on the host for the PriorityLocalStorageEvictionOrdering test to run")
|
||||
}
|
||||
initialConfig.EvictionHard = map[string]string{"nodefs.available": fmt.Sprintf("%d", availableBytes-uint64(diskConsumed.Value()))}
|
||||
initialConfig.EvictionMinimumReclaim = map[string]string{}
|
||||
})
|
||||
specs := []podEvictSpec{
|
||||
{
|
||||
evictionPriority: 2,
|
||||
pod: diskConsumingPod("best-effort-disk", lotsOfDisk, nil, v1.ResourceRequirements{}),
|
||||
},
|
||||
{
|
||||
evictionPriority: 1,
|
||||
pod: diskConsumingPod("high-priority-disk", lotsOfDisk, nil, v1.ResourceRequirements{}),
|
||||
},
|
||||
{
|
||||
evictionPriority: 0,
|
||||
// Only require 99% accuracy (297/300 Mb) because on some OS distributions, the file itself (excluding contents) consumes disk space.
|
||||
pod: diskConsumingPod("guaranteed-disk", 297 /* Mb */, nil, v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceEphemeralStorage: resource.MustParse("300Mi"),
|
||||
},
|
||||
Limits: v1.ResourceList{
|
||||
v1.ResourceEphemeralStorage: resource.MustParse("300Mi"),
|
||||
},
|
||||
}),
|
||||
},
|
||||
}
|
||||
systemPriority := int32(2147483647)
|
||||
specs[1].pod.Spec.Priority = &systemPriority
|
||||
runEvictionTest(f, pressureTimeout, expectedNodeCondition, logDiskMetrics, specs)
|
||||
})
|
||||
})
|
||||
|
||||
// Struct used by runEvictionTest that specifies the pod, and when that pod should be evicted, relative to other pods
|
||||
type podEvictSpec struct {
|
||||
// P0 pods should never be evicted; a P1 pod should not be evicted before a P2 pod, etc.
|
||||
// If two are ranked at P1, either is permitted to fail before the other.
|
||||
// The test ends when all pods other than p0 have been evicted
|
||||
evictionPriority int
|
||||
pod *v1.Pod
|
||||
}
|
||||
|
||||
// runEvictionTest sets up a testing environment given the provided pods, and checks a few things:
|
||||
// It ensures that the desired expectedNodeCondition is actually triggered.
|
||||
// It ensures that evictionPriority 0 pods are not evicted
|
||||
// It ensures that pods with a higher evictionPriority value are always evicted before pods with a lower value (2 evicted before 1, etc.)
|
||||
// It ensures that all pods with non-zero evictionPriority are eventually evicted.
|
||||
// runEvictionTest then cleans up the testing environment by deleting provided pods, and ensures that expectedNodeCondition no longer exists
|
||||
func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expectedNodeCondition v1.NodeConditionType, logFunc func(), testSpecs []podEvictSpec) {
|
||||
// Place the remainder of the test within a context so that the kubelet config is set before and after the test.
|
||||
Context("", func() {
|
||||
BeforeEach(func() {
|
||||
// Nodes do not immediately report local storage capacity
|
||||
// Sleep so that pods requesting local storage do not fail to schedule
|
||||
time.Sleep(30 * time.Second)
|
||||
By("seting up pods to be used by tests")
|
||||
for _, spec := range testSpecs {
|
||||
By(fmt.Sprintf("creating pod with container: %s", spec.pod.Name))
|
||||
f.PodClient().CreateSync(spec.pod)
|
||||
}
|
||||
})
|
||||
|
||||
It("should eventually evict all of the correct pods", func() {
|
||||
By(fmt.Sprintf("Waiting for node to have NodeCondition: %s", expectedNodeCondition))
|
||||
Eventually(func() error {
|
||||
logFunc()
|
||||
if expectedNodeCondition == noPressure || hasNodeCondition(f, expectedNodeCondition) {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("NodeCondition: %s not encountered", expectedNodeCondition)
|
||||
}, pressureTimeout, evictionPollInterval).Should(BeNil())
|
||||
|
||||
By("Waiting for evictions to occur")
|
||||
Eventually(func() error {
|
||||
if expectedNodeCondition != noPressure {
|
||||
if hasNodeCondition(f, expectedNodeCondition) {
|
||||
framework.Logf("Node has %s", expectedNodeCondition)
|
||||
} else {
|
||||
framework.Logf("Node does NOT have %s", expectedNodeCondition)
|
||||
}
|
||||
}
|
||||
logKubeletMetrics(kubeletmetrics.EvictionStatsAgeKey)
|
||||
logFunc()
|
||||
return verifyEvictionOrdering(f, testSpecs)
|
||||
}, pressureTimeout, evictionPollInterval).Should(BeNil())
|
||||
|
||||
// We observe pressure from the API server. The eviction manager observes pressure from the kubelet internal stats.
|
||||
// This means the eviction manager will observe pressure before we will, creating a delay between when the eviction manager
|
||||
// evicts a pod, and when we observe the pressure by querying the API server. Add a delay here to account for this delay
|
||||
By("making sure pressure from test has surfaced before continuing")
|
||||
time.Sleep(pressureDelay)
|
||||
|
||||
By(fmt.Sprintf("Waiting for NodeCondition: %s to no longer exist on the node", expectedNodeCondition))
|
||||
Eventually(func() error {
|
||||
logFunc()
|
||||
logKubeletMetrics(kubeletmetrics.EvictionStatsAgeKey)
|
||||
if expectedNodeCondition != noPressure && hasNodeCondition(f, expectedNodeCondition) {
|
||||
return fmt.Errorf("Conditions havent returned to normal, node still has %s", expectedNodeCondition)
|
||||
}
|
||||
return nil
|
||||
}, pressureDissapearTimeout, evictionPollInterval).Should(BeNil())
|
||||
|
||||
By("checking for stable, pressure-free condition without unexpected pod failures")
|
||||
Consistently(func() error {
|
||||
if expectedNodeCondition != noPressure && hasNodeCondition(f, expectedNodeCondition) {
|
||||
return fmt.Errorf("%s dissappeared and then reappeared", expectedNodeCondition)
|
||||
}
|
||||
logFunc()
|
||||
logKubeletMetrics(kubeletmetrics.EvictionStatsAgeKey)
|
||||
return verifyEvictionOrdering(f, testSpecs)
|
||||
}, postTestConditionMonitoringPeriod, evictionPollInterval).Should(BeNil())
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
By("deleting pods")
|
||||
for _, spec := range testSpecs {
|
||||
By(fmt.Sprintf("deleting pod: %s", spec.pod.Name))
|
||||
f.PodClient().DeleteSync(spec.pod.Name, &metav1.DeleteOptions{}, 10*time.Minute)
|
||||
}
|
||||
if expectedNodeCondition == v1.NodeDiskPressure && framework.TestContext.PrepullImages {
|
||||
// The disk eviction test may cause the prepulled images to be evicted,
|
||||
// prepull those images again to ensure this test not affect following tests.
|
||||
PrePullAllImages()
|
||||
}
|
||||
By("making sure we can start a new pod after the test")
|
||||
podName := "test-admit-pod"
|
||||
f.PodClient().CreateSync(&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: framework.GetPauseImageNameForHostArch(),
|
||||
Name: podName,
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
if CurrentGinkgoTestDescription().Failed {
|
||||
if framework.TestContext.DumpLogsOnFailure {
|
||||
logPodEvents(f)
|
||||
logNodeEvents(f)
|
||||
}
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// verifyEvictionOrdering returns an error if all non-zero priority pods have not been evicted, nil otherwise
|
||||
// This function panics (via Expect) if eviction ordering is violated, or if a priority-zero pod fails.
|
||||
func verifyEvictionOrdering(f *framework.Framework, testSpecs []podEvictSpec) error {
|
||||
// Gather current information
|
||||
updatedPodList, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
updatedPods := updatedPodList.Items
|
||||
for _, p := range updatedPods {
|
||||
framework.Logf("fetching pod %s; phase= %v", p.Name, p.Status.Phase)
|
||||
}
|
||||
|
||||
By("checking eviction ordering and ensuring important pods dont fail")
|
||||
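// done is flipped to false below if any pod that should be evicted (non-zero evictionPriority) is still running.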
done := true
|
||||
for _, priorityPodSpec := range testSpecs {
|
||||
var priorityPod v1.Pod
|
||||
for _, p := range updatedPods {
|
||||
if p.Name == priorityPodSpec.pod.Name {
|
||||
priorityPod = p
|
||||
}
|
||||
}
|
||||
Expect(priorityPod).NotTo(BeNil())
|
||||
|
||||
// Check eviction ordering.
|
||||
// Note: it is alright for a priority 1 and priority 2 pod (for example) to fail in the same round,
|
||||
// but never alright for a priority 1 pod to fail while the priority 2 pod is still running
|
||||
for _, lowPriorityPodSpec := range testSpecs {
|
||||
var lowPriorityPod v1.Pod
|
||||
for _, p := range updatedPods {
|
||||
if p.Name == lowPriorityPodSpec.pod.Name {
|
||||
lowPriorityPod = p
|
||||
}
|
||||
}
|
||||
Expect(lowPriorityPod).NotTo(BeNil())
|
||||
if priorityPodSpec.evictionPriority < lowPriorityPodSpec.evictionPriority && lowPriorityPod.Status.Phase == v1.PodRunning {
|
||||
Expect(priorityPod.Status.Phase).NotTo(Equal(v1.PodFailed),
|
||||
fmt.Sprintf("priority %d pod: %s failed before priority %d pod: %s",
|
||||
priorityPodSpec.evictionPriority, priorityPodSpec.pod.Name, lowPriorityPodSpec.evictionPriority, lowPriorityPodSpec.pod.Name))
|
||||
}
|
||||
}
|
||||
|
||||
// EvictionPriority 0 pods should not fail
|
||||
if priorityPodSpec.evictionPriority == 0 {
|
||||
Expect(priorityPod.Status.Phase).NotTo(Equal(v1.PodFailed),
|
||||
fmt.Sprintf("priority 0 pod: %s failed", priorityPod.Name))
|
||||
}
|
||||
|
||||
// If a pod that is not evictionPriority 0 has not been evicted, we are not done
|
||||
if priorityPodSpec.evictionPriority != 0 && priorityPod.Status.Phase != v1.PodFailed {
|
||||
done = false
|
||||
}
|
||||
}
|
||||
if done {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("pods that should be evicted are still running")
|
||||
}
|
||||
|
||||
// Returns TRUE if the node has the node condition, FALSE otherwise
|
||||
func hasNodeCondition(f *framework.Framework, expectedNodeCondition v1.NodeConditionType) bool {
|
||||
localNodeStatus := getLocalNode(f).Status
|
||||
_, actualNodeCondition := nodeutil.GetNodeCondition(&localNodeStatus, expectedNodeCondition)
|
||||
Expect(actualNodeCondition).NotTo(BeNil())
|
||||
return actualNodeCondition.Status == v1.ConditionTrue
|
||||
}
|
||||
|
||||
func logInodeMetrics() {
|
||||
summary, err := getNodeSummary()
|
||||
if err != nil {
|
||||
framework.Logf("Error getting summary: %v", err)
|
||||
return
|
||||
}
|
||||
if summary.Node.Runtime != nil && summary.Node.Runtime.ImageFs != nil && summary.Node.Runtime.ImageFs.Inodes != nil && summary.Node.Runtime.ImageFs.InodesFree != nil {
|
||||
framework.Logf("imageFsInfo.Inodes: %d, imageFsInfo.InodesFree: %d", *summary.Node.Runtime.ImageFs.Inodes, *summary.Node.Runtime.ImageFs.InodesFree)
|
||||
}
|
||||
if summary.Node.Fs != nil && summary.Node.Fs.Inodes != nil && summary.Node.Fs.InodesFree != nil {
|
||||
framework.Logf("rootFsInfo.Inodes: %d, rootFsInfo.InodesFree: %d", *summary.Node.Fs.Inodes, *summary.Node.Fs.InodesFree)
|
||||
}
|
||||
for _, pod := range summary.Pods {
|
||||
framework.Logf("Pod: %s", pod.PodRef.Name)
|
||||
for _, container := range pod.Containers {
|
||||
if container.Rootfs != nil && container.Rootfs.InodesUsed != nil {
|
||||
framework.Logf("--- summary Container: %s inodeUsage: %d", container.Name, *container.Rootfs.InodesUsed)
|
||||
}
|
||||
}
|
||||
for _, volume := range pod.VolumeStats {
|
||||
if volume.FsStats.InodesUsed != nil {
|
||||
framework.Logf("--- summary Volume: %s inodeUsage: %d", volume.Name, *volume.FsStats.InodesUsed)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func logDiskMetrics() {
|
||||
summary, err := getNodeSummary()
|
||||
if err != nil {
|
||||
framework.Logf("Error getting summary: %v", err)
|
||||
return
|
||||
}
|
||||
if summary.Node.Runtime != nil && summary.Node.Runtime.ImageFs != nil && summary.Node.Runtime.ImageFs.CapacityBytes != nil && summary.Node.Runtime.ImageFs.AvailableBytes != nil {
|
||||
framework.Logf("imageFsInfo.CapacityBytes: %d, imageFsInfo.AvailableBytes: %d", *summary.Node.Runtime.ImageFs.CapacityBytes, *summary.Node.Runtime.ImageFs.AvailableBytes)
|
||||
}
|
||||
if summary.Node.Fs != nil && summary.Node.Fs.CapacityBytes != nil && summary.Node.Fs.AvailableBytes != nil {
|
||||
framework.Logf("rootFsInfo.CapacityBytes: %d, rootFsInfo.AvailableBytes: %d", *summary.Node.Fs.CapacityBytes, *summary.Node.Fs.AvailableBytes)
|
||||
}
|
||||
for _, pod := range summary.Pods {
|
||||
framework.Logf("Pod: %s", pod.PodRef.Name)
|
||||
for _, container := range pod.Containers {
|
||||
if container.Rootfs != nil && container.Rootfs.UsedBytes != nil {
|
||||
framework.Logf("--- summary Container: %s UsedBytes: %d", container.Name, *container.Rootfs.UsedBytes)
|
||||
}
|
||||
}
|
||||
for _, volume := range pod.VolumeStats {
|
||||
if volume.FsStats.UsedBytes != nil {
|
||||
framework.Logf("--- summary Volume: %s UsedBytes: %d", volume.Name, *volume.FsStats.UsedBytes)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func logMemoryMetrics() {
|
||||
summary, err := getNodeSummary()
|
||||
if err != nil {
|
||||
framework.Logf("Error getting summary: %v", err)
|
||||
return
|
||||
}
|
||||
if summary.Node.Memory != nil && summary.Node.Memory.WorkingSetBytes != nil && summary.Node.Memory.AvailableBytes != nil {
|
||||
framework.Logf("Node.Memory.WorkingSetBytes: %d, summary.Node.Memory.AvailableBytes: %d", *summary.Node.Memory.WorkingSetBytes, *summary.Node.Memory.AvailableBytes)
|
||||
}
|
||||
for _, pod := range summary.Pods {
|
||||
framework.Logf("Pod: %s", pod.PodRef.Name)
|
||||
for _, container := range pod.Containers {
|
||||
if container.Memory != nil && container.Memory.WorkingSetBytes != nil {
|
||||
framework.Logf("--- summary Container: %s WorkingSetBytes: %d", container.Name, *container.Memory.WorkingSetBytes)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
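// eventuallyGetSummary polls the node summary endpoint until it returns non-nil filesystem stats,
// failing the test if that does not happen within a minute.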
func eventuallyGetSummary() (s *stats.Summary) {
|
||||
Eventually(func() error {
|
||||
summary, err := getNodeSummary()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if summary == nil || summary.Node.Fs == nil || summary.Node.Fs.InodesFree == nil || summary.Node.Fs.AvailableBytes == nil {
|
||||
return fmt.Errorf("some part of data is nil")
|
||||
}
|
||||
s = summary
|
||||
return nil
|
||||
}, time.Minute, evictionPollInterval).Should(BeNil())
|
||||
return
|
||||
}
|
||||
|
||||
// returns a pod that does not use any resources
|
||||
func innocentPod() *v1.Pod {
|
||||
return &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "innocent-pod"},
|
||||
Spec: v1.PodSpec{
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: busyboxImage,
|
||||
Name: "innocent-container",
|
||||
Command: []string{
|
||||
"sh",
|
||||
"-c",
|
||||
"while true; do sleep 5; done",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
volumeMountPath = "/test-mnt"
|
||||
volumeName = "test-volume"
|
||||
)
|
||||
|
||||
func inodeConsumingPod(name string, volumeSource *v1.VolumeSource) *v1.Pod {
|
||||
// Each iteration creates an empty file
|
||||
return podWithCommand(volumeSource, v1.ResourceRequirements{}, name, "i=0; while true; do touch %s${i}.txt; sleep 0.001; i=$((i+=1)); done;")
|
||||
}
|
||||
|
||||
func diskConsumingPod(name string, diskConsumedMB int, volumeSource *v1.VolumeSource, resources v1.ResourceRequirements) *v1.Pod {
|
||||
// Each iteration writes 1 Mb, so do diskConsumedMB iterations.
|
||||
return podWithCommand(volumeSource, resources, name, fmt.Sprintf("i=0; while [ $i -lt %d ];", diskConsumedMB)+" do dd if=/dev/urandom of=%s${i} bs=1048576 count=1 2>/dev/null ; i=$(($i+1)); done; while true; do sleep 5; done")
|
||||
}
|
||||
|
||||
// podWithCommand returns a pod with the provided volumeSource and resourceRequirements.
|
||||
// If a volumeSource is provided, then the volumeMountPath to the volume is inserted into the provided command.
|
||||
func podWithCommand(volumeSource *v1.VolumeSource, resources v1.ResourceRequirements, name, command string) *v1.Pod {
|
||||
path := ""
|
||||
volumeMounts := []v1.VolumeMount{}
|
||||
volumes := []v1.Volume{}
|
||||
if volumeSource != nil {
|
||||
path = volumeMountPath
|
||||
volumeMounts = []v1.VolumeMount{{MountPath: volumeMountPath, Name: volumeName}}
|
||||
volumes = []v1.Volume{{Name: volumeName, VolumeSource: *volumeSource}}
|
||||
}
|
||||
return &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("%s-pod", name)},
|
||||
Spec: v1.PodSpec{
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: busyboxImage,
|
||||
Name: fmt.Sprintf("%s-container", name),
|
||||
Command: []string{
|
||||
"sh",
|
||||
"-c",
|
||||
fmt.Sprintf(command, filepath.Join(path, "file")),
|
||||
},
|
||||
Resources: resources,
|
||||
VolumeMounts: volumeMounts,
|
||||
},
|
||||
},
|
||||
Volumes: volumes,
|
||||
},
|
||||
}
|
||||
}
|
23
vendor/k8s.io/kubernetes/test/e2e_node/framework.go
generated
vendored
Normal file
@ -0,0 +1,23 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e_node

import "github.com/onsi/ginkgo"

func SIGDescribe(text string, body func()) bool {
	return ginkgo.Describe("[sig-node] "+text, body)
}
340
vendor/k8s.io/kubernetes/test/e2e_node/garbage_collector_test.go
generated
vendored
Normal file
@ -0,0 +1,340 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
const (
|
||||
//TODO (dashpole): Once dynamic config is possible, test different values for maxPerPodContainer and maxContainers
|
||||
// Currently using default values for maxPerPodContainer and maxTotalContainers
|
||||
maxPerPodContainer = 1
|
||||
maxTotalContainers = -1
|
||||
|
||||
defaultRuntimeRequestTimeoutDuration = 1 * time.Minute
|
||||
defaultImagePullProgressDeadline = 1 * time.Minute
|
||||
garbageCollectDuration = 3 * time.Minute
|
||||
setupDuration = 10 * time.Minute
|
||||
runtimePollInterval = 10 * time.Second
|
||||
)
|
||||
|
||||
type testPodSpec struct {
|
||||
podName string
|
||||
// containerPrefix must be unique for each pod, and cannot end in a number.
|
||||
// containerPrefix is used to identify which containers belong to which pod in the test.
|
||||
containerPrefix string
|
||||
// the number of times each container should restart
|
||||
restartCount int32
|
||||
// the number of containers in the test pod
|
||||
numContainers int
|
||||
// a function that returns the number of containers currently on the node (including dead containers).
|
||||
getContainerNames func() ([]string, error)
|
||||
}
|
||||
|
||||
func (pod *testPodSpec) getContainerName(containerNumber int) string {
|
||||
return fmt.Sprintf("%s%d", pod.containerPrefix, containerNumber)
|
||||
}
|
||||
|
||||
type testRun struct {
|
||||
// Name for logging purposes
|
||||
testName string
|
||||
// Pod specs for the test
|
||||
testPods []*testPodSpec
|
||||
}
|
||||
|
||||
// GarbageCollect tests that the Kubelet conforms to the Kubelet Garbage Collection Policy, found here:
|
||||
// http://kubernetes.io/docs/admin/garbage-collection/
|
||||
var _ = framework.KubeDescribe("GarbageCollect [Serial]", func() {
|
||||
f := framework.NewDefaultFramework("garbage-collect-test")
|
||||
containerNamePrefix := "gc-test-container-"
|
||||
podNamePrefix := "gc-test-pod-"
|
||||
|
||||
// These suffixes are appended to pod and container names.
|
||||
// They differentiate pods from one another, and allow filtering
|
||||
// by names to identify which containers belong to which pods
|
||||
// They must be unique, and must not end in a number
|
||||
first_suffix := "one-container-no-restarts"
|
||||
second_suffix := "many-containers-many-restarts-one-pod"
|
||||
third_suffix := "many-containers-many-restarts-"
|
||||
tests := []testRun{
|
||||
{
|
||||
testName: "One Non-restarting Container",
|
||||
testPods: []*testPodSpec{
|
||||
{
|
||||
podName: podNamePrefix + first_suffix,
|
||||
containerPrefix: containerNamePrefix + first_suffix,
|
||||
restartCount: 0,
|
||||
numContainers: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
testName: "Many Restarting Containers",
|
||||
testPods: []*testPodSpec{
|
||||
{
|
||||
podName: podNamePrefix + second_suffix,
|
||||
containerPrefix: containerNamePrefix + second_suffix,
|
||||
restartCount: 4,
|
||||
numContainers: 4,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
testName: "Many Pods with Many Restarting Containers",
|
||||
testPods: []*testPodSpec{
|
||||
{
|
||||
podName: podNamePrefix + third_suffix + "one",
|
||||
containerPrefix: containerNamePrefix + third_suffix + "one",
|
||||
restartCount: 3,
|
||||
numContainers: 4,
|
||||
},
|
||||
{
|
||||
podName: podNamePrefix + third_suffix + "two",
|
||||
containerPrefix: containerNamePrefix + third_suffix + "two",
|
||||
restartCount: 2,
|
||||
numContainers: 6,
|
||||
},
|
||||
{
|
||||
podName: podNamePrefix + third_suffix + "three",
|
||||
containerPrefix: containerNamePrefix + third_suffix + "three",
|
||||
restartCount: 3,
|
||||
numContainers: 5,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
// TODO (dashpole): Once the Container Runtime Interface (CRI) is complete, generalize this test to run on runtimes other than docker
|
||||
dockerContainerGCTest(f, test)
|
||||
}
|
||||
})
|
||||
|
||||
// Tests the following:
|
||||
// pods are created, and all containers restart the specified number of times
|
||||
// while containers are running, the number of copies of a single container does not exceed maxPerPodContainer
|
||||
// while containers are running, the total number of containers does not exceed maxTotalContainers
|
||||
// while containers are running, if not constrained by maxPerPodContainer or maxTotalContainers, keep an extra copy of each container
|
||||
// once pods are killed, all containers are eventually cleaned up
|
||||
func containerGCTest(f *framework.Framework, test testRun) {
|
||||
Context(fmt.Sprintf("Garbage Collection Test: %s", test.testName), func() {
|
||||
BeforeEach(func() {
|
||||
realPods := getPods(test.testPods)
|
||||
f.PodClient().CreateBatch(realPods)
|
||||
By("Making sure all containers restart the specified number of times")
|
||||
Eventually(func() error {
|
||||
for _, podSpec := range test.testPods {
|
||||
err := verifyPodRestartCount(f, podSpec.podName, podSpec.numContainers, podSpec.restartCount)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}, setupDuration, runtimePollInterval).Should(BeNil())
|
||||
})
|
||||
|
||||
It("Should eventually garbage collect containers when we exceed the number of dead containers per container", func() {
|
||||
totalContainers := 0
|
||||
for _, pod := range test.testPods {
|
||||
totalContainers += pod.numContainers*2 + 1
|
||||
}
|
||||
Eventually(func() error {
|
||||
total := 0
|
||||
for _, pod := range test.testPods {
|
||||
containerNames, err := pod.getContainerNames()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
total += len(containerNames)
|
||||
// Check maxPerPodContainer for each container in the pod
|
||||
for i := 0; i < pod.numContainers; i++ {
|
||||
containerCount := 0
|
||||
for _, containerName := range containerNames {
|
||||
if strings.Contains(containerName, pod.getContainerName(i)) {
|
||||
containerCount += 1
|
||||
}
|
||||
}
|
||||
if containerCount > maxPerPodContainer+1 {
|
||||
return fmt.Errorf("expected number of copies of container: %s, to be <= maxPerPodContainer: %d; list of containers: %v",
|
||||
pod.getContainerName(i), maxPerPodContainer, containerNames)
|
||||
}
|
||||
}
|
||||
}
|
||||
// Check maxTotalContainers. Currently the default is -1, so this will never happen until we can configure maxTotalContainers
|
||||
if maxTotalContainers > 0 && totalContainers <= maxTotalContainers && total > maxTotalContainers {
|
||||
return fmt.Errorf("expected total number of containers: %v, to be <= maxTotalContainers: %v", total, maxTotalContainers)
|
||||
}
|
||||
return nil
|
||||
}, garbageCollectDuration, runtimePollInterval).Should(BeNil())
|
||||
|
||||
if maxPerPodContainer >= 2 && maxTotalContainers < 0 { // make sure constraints wouldn't make us gc old containers
|
||||
By("Making sure the kubelet consistently keeps around an extra copy of each container.")
|
||||
Consistently(func() error {
|
||||
for _, pod := range test.testPods {
|
||||
containerNames, err := pod.getContainerNames()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for i := 0; i < pod.numContainers; i++ {
|
||||
containerCount := 0
|
||||
for _, containerName := range containerNames {
|
||||
if strings.Contains(containerName, pod.getContainerName(i)) {
|
||||
containerCount += 1
|
||||
}
|
||||
}
|
||||
if pod.restartCount > 0 && containerCount < maxPerPodContainer+1 {
|
||||
return fmt.Errorf("expected pod %v to have extra copies of old containers", pod.podName)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}, garbageCollectDuration, runtimePollInterval).Should(BeNil())
|
||||
}
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
for _, pod := range test.testPods {
|
||||
By(fmt.Sprintf("Deleting Pod %v", pod.podName))
|
||||
f.PodClient().DeleteSync(pod.podName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
|
||||
}
|
||||
|
||||
By("Making sure all containers get cleaned up")
|
||||
Eventually(func() error {
|
||||
for _, pod := range test.testPods {
|
||||
containerNames, err := pod.getContainerNames()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(containerNames) > 0 {
|
||||
return fmt.Errorf("%v containers still remain", containerNames)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}, garbageCollectDuration, runtimePollInterval).Should(BeNil())
|
||||
|
||||
if CurrentGinkgoTestDescription().Failed && framework.TestContext.DumpLogsOnFailure {
|
||||
logNodeEvents(f)
|
||||
logPodEvents(f)
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// Runs containerGCTest using the docker runtime.
|
||||
func dockerContainerGCTest(f *framework.Framework, test testRun) {
|
||||
var runtime libdocker.Interface
|
||||
BeforeEach(func() {
|
||||
runtime = libdocker.ConnectToDockerOrDie(
|
||||
defaultDockerEndpoint,
|
||||
defaultRuntimeRequestTimeoutDuration,
|
||||
defaultImagePullProgressDeadline,
|
||||
false,
|
||||
false,
|
||||
)
|
||||
})
|
||||
for _, pod := range test.testPods {
|
||||
// Initialize the getContainerNames function to use the libdocker api
|
||||
thisPrefix := pod.containerPrefix
|
||||
pod.getContainerNames = func() ([]string, error) {
|
||||
relevantContainers := []string{}
|
||||
dockerContainers, err := libdocker.GetKubeletDockerContainers(runtime, true)
|
||||
if err != nil {
|
||||
return relevantContainers, err
|
||||
}
|
||||
for _, container := range dockerContainers {
|
||||
// only look for containers from this testspec
|
||||
if strings.Contains(container.Names[0], thisPrefix) {
|
||||
relevantContainers = append(relevantContainers, container.Names[0])
|
||||
}
|
||||
}
|
||||
return relevantContainers, nil
|
||||
}
|
||||
}
|
||||
containerGCTest(f, test)
|
||||
}
|
||||
|
||||
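// getPods builds the pods described by specs; each container runs the restart-counting command
// against a shared emptyDir volume mounted at /test-empty-dir-mnt.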
func getPods(specs []*testPodSpec) (pods []*v1.Pod) {
|
||||
for _, spec := range specs {
|
||||
By(fmt.Sprintf("Creating %v containers with restartCount: %v", spec.numContainers, spec.restartCount))
|
||||
containers := []v1.Container{}
|
||||
for i := 0; i < spec.numContainers; i++ {
|
||||
containers = append(containers, v1.Container{
|
||||
Image: busyboxImage,
|
||||
Name: spec.getContainerName(i),
|
||||
Command: getRestartingContainerCommand("/test-empty-dir-mnt", i, spec.restartCount, ""),
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{MountPath: "/test-empty-dir-mnt", Name: "test-empty-dir"},
|
||||
},
|
||||
})
|
||||
}
|
||||
pods = append(pods, &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: spec.podName},
|
||||
Spec: v1.PodSpec{
|
||||
RestartPolicy: v1.RestartPolicyAlways,
|
||||
Containers: containers,
|
||||
Volumes: []v1.Volume{
|
||||
{Name: "test-empty-dir", VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}},
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
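// getRestartingContainerCommand returns a shell command that appends to a per-container count file on each run
// and exits (triggering a restart under RestartPolicyAlways) until the container has started restarts+1 times,
// after which it loops forever.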
func getRestartingContainerCommand(path string, containerNum int, restarts int32, loopingCommand string) []string {
|
||||
return []string{
|
||||
"sh",
|
||||
"-c",
|
||||
fmt.Sprintf(`
|
||||
f=%s/countfile%s
|
||||
count=$(echo 'hello' >> $f ; wc -l $f | awk {'print $1'})
|
||||
if [ $count -lt %d ]; then
|
||||
exit 0
|
||||
fi
|
||||
while true; do %s sleep 1; done`,
|
||||
path, strconv.Itoa(containerNum), restarts+1, loopingCommand),
|
||||
}
|
||||
}
|
||||
|
||||
func verifyPodRestartCount(f *framework.Framework, podName string, expectedNumContainers int, expectedRestartCount int32) error {
|
||||
updatedPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(podName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(updatedPod.Status.ContainerStatuses) != expectedNumContainers {
|
||||
return fmt.Errorf("expected pod %s to have %d containers, actual: %d",
|
||||
updatedPod.Name, expectedNumContainers, len(updatedPod.Status.ContainerStatuses))
|
||||
}
|
||||
for _, containerStatus := range updatedPod.Status.ContainerStatuses {
|
||||
if containerStatus.RestartCount != expectedRestartCount {
|
||||
return fmt.Errorf("pod %s had container with restartcount %d. Should have been at least %d",
|
||||
updatedPod.Name, containerStatus.RestartCount, expectedRestartCount)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
430
vendor/k8s.io/kubernetes/test/e2e_node/gke_environment_test.go
generated
vendored
Normal file
@ -0,0 +1,430 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
"github.com/blang/semver"
|
||||
. "github.com/onsi/ginkgo"
|
||||
)
|
||||
|
||||
// checkProcess checks whether there's a process whose command line contains
|
||||
// the specified pattern and whose parent process id is ppid using the
|
||||
// pre-built information in cmdToProcessMap.
|
||||
func checkProcess(pattern string, ppid int, cmdToProcessMap map[string][]process) error {
|
||||
for cmd, processes := range cmdToProcessMap {
|
||||
if !strings.Contains(cmd, pattern) {
|
||||
continue
|
||||
}
|
||||
for _, p := range processes {
|
||||
if p.ppid == ppid {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("failed to find the process whose cmdline contains %q with ppid = %d", pattern, ppid)
|
||||
}
|
||||
|
||||
// checkIPTables checks whether the functionality required by kube-proxy works
|
||||
// in iptables.
|
||||
func checkIPTables() (err error) {
|
||||
cmds := [][]string{
|
||||
{"iptables", "-N", "KUBE-PORTALS-HOST", "-t", "nat"},
|
||||
{"iptables", "-I", "OUTPUT", "-t", "nat", "-m", "comment", "--comment", "ClusterIPs", "-j", "KUBE-PORTALS-HOST"},
|
||||
{"iptables", "-A", "KUBE-PORTALS-HOST", "-t", "nat", "-m", "comment", "--comment", "test-1:", "-p", "tcp", "-m", "tcp", "--dport", "443", "-d", "10.0.0.1/32", "-j", "DNAT", "--to-destination", "10.240.0.1:11111"},
|
||||
{"iptables", "-C", "KUBE-PORTALS-HOST", "-t", "nat", "-m", "comment", "--comment", "test-1:", "-p", "tcp", "-m", "tcp", "--dport", "443", "-d", "10.0.0.1/32", "-j", "DNAT", "--to-destination", "10.240.0.1:11111"},
|
||||
{"iptables", "-A", "KUBE-PORTALS-HOST", "-t", "nat", "-m", "comment", "--comment", "test-2:", "-p", "tcp", "-m", "tcp", "--dport", "80", "-d", "10.0.0.1/32", "-j", "REDIRECT", "--to-ports", "22222"},
|
||||
{"iptables", "-C", "KUBE-PORTALS-HOST", "-t", "nat", "-m", "comment", "--comment", "test-2:", "-p", "tcp", "-m", "tcp", "--dport", "80", "-d", "10.0.0.1/32", "-j", "REDIRECT", "--to-ports", "22222"},
|
||||
}
|
||||
cleanupCmds := [][]string{
|
||||
{"iptables", "-F", "KUBE-PORTALS-HOST", "-t", "nat"},
|
||||
{"iptables", "-D", "OUTPUT", "-t", "nat", "-m", "comment", "--comment", "ClusterIPs", "-j", "KUBE-PORTALS-HOST"},
|
||||
{"iptables", "-X", "KUBE-PORTALS-HOST", "-t", "nat"},
|
||||
}
|
||||
defer func() {
|
||||
for _, cmd := range cleanupCmds {
|
||||
if _, cleanupErr := runCommand(cmd...); cleanupErr != nil && err == nil {
|
||||
err = cleanupErr
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
for _, cmd := range cmds {
|
||||
if _, err := runCommand(cmd...); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// checkPublicGCR checks the access to the public Google Container Registry by
|
||||
// pulling the busybox image.
|
||||
func checkPublicGCR() error {
|
||||
const image = "gcr.io/google-containers/busybox"
|
||||
output, err := runCommand("docker", "images", "-q", image)
|
||||
if len(output) != 0 {
|
||||
if _, err := runCommand("docker", "rmi", "-f", image); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
output, err = runCommand("docker", "pull", image)
|
||||
if len(output) == 0 {
|
||||
return fmt.Errorf("failed to pull %s", image)
|
||||
}
|
||||
if _, err = runCommand("docker", "rmi", "-f", image); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkDockerConfig runs docker's check-config.sh script and ensures that all
|
||||
// expected kernel configs are enabled.
|
||||
func checkDockerConfig() error {
|
||||
var (
|
||||
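// re matches ANSI terminal escape sequences so they can be stripped from the script output before parsing.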
re = regexp.MustCompile("\x1B\\[([0-9]{1,2}(;[0-9]{1,2})?)?[mGK]")
|
||||
bins = []string{
|
||||
"/usr/share/docker.io/contrib/check-config.sh",
|
||||
"/usr/share/docker/contrib/check-config.sh",
|
||||
}
|
||||
whitelist = map[string]bool{
|
||||
"CONFIG_MEMCG_SWAP_ENABLED": true,
|
||||
"CONFIG_RT_GROUP_SCHED": true,
|
||||
"CONFIG_EXT3_FS": true,
|
||||
"CONFIG_EXT3_FS_XATTR": true,
|
||||
"CONFIG_EXT3_FS_POSIX_ACL": true,
|
||||
"CONFIG_EXT3_FS_SECURITY": true,
|
||||
"/dev/zfs": true,
|
||||
"zfs command": true,
|
||||
"zpool command": true,
|
||||
}
|
||||
missing = map[string]bool{}
|
||||
)
|
||||
|
||||
// Whitelists CONFIG_DEVPTS_MULTIPLE_INSTANCES (meaning allowing it to be
|
||||
// absent) if the kernel version is >= 4.8, because this option has been
|
||||
// removed from the 4.8 kernel.
|
||||
kernelVersion, err := getKernelVersion()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if kernelVersion.GTE(semver.MustParse("4.8.0")) {
|
||||
whitelist["CONFIG_DEVPTS_MULTIPLE_INSTANCES"] = true
|
||||
}
|
||||
|
||||
for _, bin := range bins {
|
||||
if _, err := os.Stat(bin); os.IsNotExist(err) {
|
||||
continue
|
||||
}
|
||||
// We don't check the return code because it's OK if the script returns
|
||||
// a non-zero exit code just because the configs in the whitelist are
|
||||
// missing.
|
||||
output, _ := runCommand(bin)
|
||||
for _, line := range strings.Split(output, "\n") {
|
||||
if !strings.Contains(line, "missing") {
|
||||
continue
|
||||
}
|
||||
line = re.ReplaceAllString(line, "")
|
||||
fields := strings.Split(line, ":")
|
||||
if len(fields) != 2 {
|
||||
continue
|
||||
}
|
||||
key := strings.TrimFunc(fields[0], func(c rune) bool {
|
||||
return c == ' ' || c == '-'
|
||||
})
|
||||
if _, found := whitelist[key]; !found {
|
||||
missing[key] = true
|
||||
}
|
||||
}
|
||||
if len(missing) != 0 {
|
||||
return fmt.Errorf("missing docker config: %v", missing)
|
||||
}
|
||||
break
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkDockerNetworkClient checks client networking by pinging an external IP
|
||||
// address from a container.
|
||||
func checkDockerNetworkClient() error {
|
||||
const imageName = "gcr.io/google-containers/busybox"
|
||||
output, err := runCommand("docker", "run", "--rm", imageName, "sh", "-c", "ping -w 5 -q google.com")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !strings.Contains(output, `0% packet loss`) {
|
||||
return fmt.Errorf("failed to ping from container: %s", output)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkDockerNetworkServer checks server networking by running an echo server
|
||||
// within a container and accessing it from outside.
|
||||
func checkDockerNetworkServer() error {
|
||||
const (
|
||||
imageName = "gcr.io/google-containers/nginx:1.7.9"
|
||||
hostAddr = "127.0.0.1"
|
||||
hostPort = "8088"
|
||||
containerPort = "80"
|
||||
containerID = "nginx"
|
||||
message = "Welcome to nginx!"
|
||||
)
|
||||
var (
|
||||
portMapping = fmt.Sprintf("%s:%s", hostPort, containerPort)
|
||||
host = fmt.Sprintf("http://%s:%s", hostAddr, hostPort)
|
||||
)
|
||||
runCommand("docker", "rm", "-f", containerID)
|
||||
if _, err := runCommand("docker", "run", "-d", "--name", containerID, "-p", portMapping, imageName); err != nil {
|
||||
return err
|
||||
}
|
||||
output, err := runCommand("curl", host)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !strings.Contains(output, message) {
|
||||
return fmt.Errorf("failed to connect to container")
|
||||
}
|
||||
// Clean up
|
||||
if _, err = runCommand("docker", "rm", "-f", containerID); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err = runCommand("docker", "rmi", imageName); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkDockerAppArmor checks whether AppArmor is enabled and has the
|
||||
// "docker-default" profile.
|
||||
func checkDockerAppArmor() error {
|
||||
buf, err := ioutil.ReadFile("/sys/module/apparmor/parameters/enabled")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if string(buf) != "Y\n" {
|
||||
return fmt.Errorf("apparmor module is not loaded")
|
||||
}
|
||||
|
||||
// Checks that the "docker-default" profile is loaded and enforced.
|
||||
buf, err = ioutil.ReadFile("/sys/kernel/security/apparmor/profiles")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !strings.Contains(string(buf), "docker-default (enforce)") {
|
||||
return fmt.Errorf("'docker-default' profile is not loaded and enforced")
|
||||
}
|
||||
|
||||
// Checks that the `apparmor_parser` binary is present.
|
||||
_, err = exec.LookPath("apparmor_parser")
|
||||
if err != nil {
|
||||
return fmt.Errorf("'apparmor_parser' is not in directories named by the PATH env")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkDockerSeccomp checks whether Docker supports seccomp.
|
||||
func checkDockerSeccomp() error {
|
||||
const (
|
||||
seccompProfileFileName = "/tmp/no_mkdir.json"
|
||||
seccompProfile = `{
|
||||
"defaultAction": "SCMP_ACT_ALLOW",
|
||||
"syscalls": [
|
||||
{
|
||||
"name": "mkdir",
|
||||
"action": "SCMP_ACT_ERRNO"
|
||||
}
|
||||
]}`
|
||||
image = "gcr.io/google-appengine/debian8:2017-06-07-171918"
|
||||
)
|
||||
if err := ioutil.WriteFile(seccompProfileFileName, []byte(seccompProfile), 0644); err != nil {
|
||||
return err
|
||||
}
|
||||
// Starts a container with no seccomp profile and ensures that unshare
|
||||
// succeeds.
|
||||
_, err := runCommand("docker", "run", "--rm", "-i", "--security-opt", "seccomp=unconfined", image, "unshare", "-r", "whoami")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Starts a container with the default seccomp profile and ensures that
|
||||
// unshare (a blacklisted system call in the default profile) fails.
|
||||
cmd := []string{"docker", "run", "--rm", "-i", image, "unshare", "-r", "whoami"}
|
||||
_, err = runCommand(cmd...)
|
||||
if err == nil {
|
||||
return fmt.Errorf("%q did not fail as expected", strings.Join(cmd, " "))
|
||||
}
|
||||
// Starts a container with a custom seccomp profile that blacklists mkdir
|
||||
// and ensures that unshare succeeds.
|
||||
_, err = runCommand("docker", "run", "--rm", "-i", "--security-opt", fmt.Sprintf("seccomp=%s", seccompProfileFileName), image, "unshare", "-r", "whoami")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Starts a container with a custom seccomp profile that blacklists mkdir
|
||||
// and ensures that mkdir fails.
|
||||
cmd = []string{"docker", "run", "--rm", "-i", "--security-opt", fmt.Sprintf("seccomp=%s", seccompProfileFileName), image, "mkdir", "-p", "/tmp/foo"}
|
||||
_, err = runCommand(cmd...)
|
||||
if err == nil {
|
||||
return fmt.Errorf("%q did not fail as expected", strings.Join(cmd, " "))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkDockerStorageDriver checks whether the current storage driver used by
|
||||
// Docker is overlay.
|
||||
func checkDockerStorageDriver() error {
|
||||
output, err := runCommand("docker", "info")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, line := range strings.Split(string(output), "\n") {
|
||||
if !strings.Contains(line, "Storage Driver:") {
|
||||
continue
|
||||
}
|
||||
if !strings.Contains(line, "overlay") {
|
||||
return fmt.Errorf("storage driver is not 'overlay': %s", line)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("failed to find storage driver")
|
||||
}
|
||||
|
||||
var _ = framework.KubeDescribe("GKE system requirements [Conformance] [Feature:GKEEnv]", func() {
|
||||
BeforeEach(func() {
|
||||
framework.RunIfSystemSpecNameIs("gke")
|
||||
})
|
||||
|
||||
It("The required processes should be running", func() {
|
||||
cmdToProcessMap, err := getCmdToProcessMap()
|
||||
framework.ExpectNoError(err)
|
||||
for _, p := range []struct {
|
||||
cmd string
|
||||
ppid int
|
||||
}{
|
||||
{"google_accounts_daemon", 1},
|
||||
{"google_clock_skew_daemon", 1},
|
||||
{"google_ip_forwarding_daemon", 1},
|
||||
} {
|
||||
framework.ExpectNoError(checkProcess(p.cmd, p.ppid, cmdToProcessMap))
|
||||
}
|
||||
})
|
||||
It("The iptable rules should work (required by kube-proxy)", func() {
|
||||
framework.ExpectNoError(checkIPTables())
|
||||
})
|
||||
It("The GCR is accessible", func() {
|
||||
framework.ExpectNoError(checkPublicGCR())
|
||||
})
|
||||
It("The docker configuration validation should pass", func() {
|
||||
framework.RunIfContainerRuntimeIs("docker")
|
||||
framework.ExpectNoError(checkDockerConfig())
|
||||
})
|
||||
It("The docker container network should work", func() {
|
||||
framework.RunIfContainerRuntimeIs("docker")
|
||||
framework.ExpectNoError(checkDockerNetworkServer())
|
||||
framework.ExpectNoError(checkDockerNetworkClient())
|
||||
})
|
||||
It("The docker daemon should support AppArmor and seccomp", func() {
|
||||
framework.RunIfContainerRuntimeIs("docker")
|
||||
framework.ExpectNoError(checkDockerAppArmor())
|
||||
framework.ExpectNoError(checkDockerSeccomp())
|
||||
})
|
||||
It("The docker storage driver should work", func() {
|
||||
framework.Skipf("GKE does not currently require overlay")
|
||||
framework.ExpectNoError(checkDockerStorageDriver())
|
||||
})
|
||||
})
|
||||
|
||||
// getPPID returns the PPID for the pid.
|
||||
func getPPID(pid int) (int, error) {
|
||||
statusFile := "/proc/" + strconv.Itoa(pid) + "/status"
|
||||
content, err := ioutil.ReadFile(statusFile)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
for _, line := range strings.Split(string(content), "\n") {
|
||||
if !strings.HasPrefix(line, "PPid:") {
|
||||
continue
|
||||
}
|
||||
s := strings.TrimSpace(strings.TrimPrefix(line, "PPid:"))
|
||||
ppid, err := strconv.Atoi(s)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return ppid, nil
|
||||
}
|
||||
return 0, fmt.Errorf("no PPid in %s", statusFile)
|
||||
}
|
||||
|
||||
// process contains a process ID and its parent's process ID.
|
||||
type process struct {
|
||||
pid int
|
||||
ppid int
|
||||
}
|
||||
|
||||
// getCmdToProcessMap returns a mapping from the process command line to its
|
||||
// process ids.
|
||||
func getCmdToProcessMap() (map[string][]process, error) {
|
||||
root, err := os.Open("/proc")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer root.Close()
|
||||
dirs, err := root.Readdirnames(0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
result := make(map[string][]process)
|
||||
for _, dir := range dirs {
|
||||
pid, err := strconv.Atoi(dir)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
ppid, err := getPPID(pid)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
content, err := ioutil.ReadFile("/proc/" + dir + "/cmdline")
|
||||
if err != nil || len(content) == 0 {
|
||||
continue
|
||||
}
|
||||
cmd := string(bytes.Replace(content, []byte("\x00"), []byte(" "), -1))
|
||||
result[cmd] = append(result[cmd], process{pid, ppid})
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// getKernelVersion returns the kernel version in the semantic version format.
|
||||
func getKernelVersion() (*semver.Version, error) {
|
||||
output, err := runCommand("uname", "-r")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// An example 'output' could be "4.13.0-1001-gke".
|
||||
v := strings.TrimSpace(strings.Split(output, "-")[0])
|
||||
kernelVersion, err := semver.Make(v)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to convert %q to semantic version: %s", v, err)
|
||||
}
|
||||
return &kernelVersion, nil
|
||||
}
|
232
vendor/k8s.io/kubernetes/test/e2e_node/gpu_device_plugin.go
generated
vendored
Normal file
@ -0,0 +1,232 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
|
||||
kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/framework/metrics"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"github.com/prometheus/common/model"
|
||||
)
|
||||
|
||||
const (
|
||||
devicePluginFeatureGate = "DevicePlugins=true"
|
||||
testPodNamePrefix = "nvidia-gpu-"
|
||||
)
|
||||
|
||||
// Serial because the test restarts Kubelet
|
||||
var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugin] [Serial] [Disruptive]", func() {
|
||||
f := framework.NewDefaultFramework("device-plugin-gpus-errors")
|
||||
|
||||
Context("DevicePlugin", func() {
|
||||
By("Enabling support for Device Plugin")
|
||||
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
|
||||
initialConfig.FeatureGates[string(features.DevicePlugins)] = true
|
||||
})
|
||||
|
||||
var devicePluginPod *v1.Pod
|
||||
BeforeEach(func() {
|
||||
By("Ensuring that Nvidia GPUs exists on the node")
|
||||
if !checkIfNvidiaGPUsExistOnNode() {
|
||||
Skip("Nvidia GPUs do not exist on the node. Skipping test.")
|
||||
}
|
||||
|
||||
framework.WaitForAllNodesSchedulable(f.ClientSet, framework.TestContext.NodeSchedulableTimeout)
|
||||
|
||||
By("Creating the Google Device Plugin pod for NVIDIA GPU in GKE")
|
||||
devicePluginPod = f.PodClient().CreateSync(framework.NVIDIADevicePlugin(f.Namespace.Name))
|
||||
|
||||
By("Waiting for GPUs to become available on the local node")
|
||||
Eventually(func() bool {
|
||||
return framework.NumberOfNVIDIAGPUs(getLocalNode(f)) > 0
|
||||
}, 10*time.Second, framework.Poll).Should(BeTrue())
|
||||
|
||||
if framework.NumberOfNVIDIAGPUs(getLocalNode(f)) < 2 {
|
||||
Skip("Not enough GPUs to execute this test (at least two needed)")
|
||||
}
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
l, err := f.PodClient().List(metav1.ListOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
for _, p := range l.Items {
|
||||
if p.Namespace != f.Namespace.Name {
|
||||
continue
|
||||
}
|
||||
|
||||
f.PodClient().Delete(p.Name, &metav1.DeleteOptions{})
|
||||
}
|
||||
})
|
||||
|
||||
It("checks that when Kubelet restarts exclusive GPU assignation to pods is kept.", func() {
|
||||
By("Creating one GPU pod on a node with at least two GPUs")
|
||||
p1 := f.PodClient().CreateSync(makeCudaPauseImage())
|
||||
count1, devId1 := getDeviceId(f, p1.Name, p1.Name, 1)
|
||||
p1, err := f.PodClient().Get(p1.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("Restarting Kubelet and waiting for the current running pod to restart")
|
||||
restartKubelet(f)
|
||||
|
||||
By("Confirming that after a kubelet and pod restart, GPU assignement is kept")
|
||||
count1, devIdRestart1 := getDeviceId(f, p1.Name, p1.Name, count1+1)
|
||||
Expect(devIdRestart1).To(Equal(devId1))
|
||||
|
||||
By("Restarting Kubelet and creating another pod")
|
||||
restartKubelet(f)
|
||||
p2 := f.PodClient().CreateSync(makeCudaPauseImage())
|
||||
|
||||
By("Checking that pods got a different GPU")
|
||||
count2, devId2 := getDeviceId(f, p2.Name, p2.Name, 1)
|
||||
Expect(devId1).To(Not(Equal(devId2)))
|
||||
|
||||
By("Deleting device plugin.")
|
||||
f.PodClient().Delete(devicePluginPod.Name, &metav1.DeleteOptions{})
|
||||
By("Waiting for GPUs to become unavailable on the local node")
|
||||
Eventually(func() bool {
|
||||
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
return framework.NumberOfNVIDIAGPUs(node) <= 0
|
||||
}, 10*time.Minute, framework.Poll).Should(BeTrue())
|
||||
By("Checking that scheduled pods can continue to run even after we delete device plugin.")
|
||||
count1, devIdRestart1 = getDeviceId(f, p1.Name, p1.Name, count1+1)
|
||||
Expect(devIdRestart1).To(Equal(devId1))
|
||||
count2, devIdRestart2 := getDeviceId(f, p2.Name, p2.Name, count2+1)
|
||||
Expect(devIdRestart2).To(Equal(devId2))
|
||||
By("Restarting Kubelet.")
|
||||
restartKubelet(f)
|
||||
By("Checking that scheduled pods can continue to run even after we delete device plugin and restart Kubelet.")
|
||||
count1, devIdRestart1 = getDeviceId(f, p1.Name, p1.Name, count1+2)
|
||||
Expect(devIdRestart1).To(Equal(devId1))
|
||||
count2, devIdRestart2 = getDeviceId(f, p2.Name, p2.Name, count2+2)
|
||||
Expect(devIdRestart2).To(Equal(devId2))
|
||||
logDevicePluginMetrics()
|
||||
|
||||
// Cleanup
|
||||
f.PodClient().DeleteSync(p1.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
|
||||
f.PodClient().DeleteSync(p2.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
func logDevicePluginMetrics() {
|
||||
ms, err := metrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName + ":10255")
|
||||
framework.ExpectNoError(err)
|
||||
for msKey, samples := range ms {
|
||||
switch msKey {
|
||||
case kubeletmetrics.KubeletSubsystem + "_" + kubeletmetrics.DevicePluginAllocationLatencyKey:
|
||||
for _, sample := range samples {
|
||||
latency := sample.Value
|
||||
resource := string(sample.Metric["resource_name"])
|
||||
var quantile float64
|
||||
if val, ok := sample.Metric[model.QuantileLabel]; ok {
|
||||
var err error
|
||||
if quantile, err = strconv.ParseFloat(string(val), 64); err != nil {
|
||||
continue
|
||||
}
|
||||
framework.Logf("Metric: %v ResourceName: %v Quantile: %v Latency: %v", msKey, resource, quantile, latency)
|
||||
}
|
||||
}
|
||||
case kubeletmetrics.KubeletSubsystem + "_" + kubeletmetrics.DevicePluginRegistrationCountKey:
|
||||
for _, sample := range samples {
|
||||
resource := string(sample.Metric["resource_name"])
|
||||
count := sample.Value
|
||||
framework.Logf("Metric: %v ResourceName: %v Count: %v", msKey, resource, count)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func makeCudaPauseImage() *v1.Pod {
|
||||
podName := testPodNamePrefix + string(uuid.NewUUID())
|
||||
|
||||
return &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: podName},
|
||||
Spec: v1.PodSpec{
|
||||
RestartPolicy: v1.RestartPolicyAlways,
|
||||
Containers: []v1.Container{{
|
||||
Image: busyboxImage,
|
||||
Name: podName,
|
||||
// Retrieves the gpu devices created in the user pod.
|
||||
// Note the nvidia device plugin implementation doesn't do device id remapping currently.
|
||||
// Will probably need to use nvidia-smi if that changes.
|
||||
Command: []string{"sh", "-c", "devs=$(ls /dev/ | egrep '^nvidia[0-9]+$') && echo gpu devices: $devs"},
|
||||
|
||||
Resources: v1.ResourceRequirements{
|
||||
Limits: newDecimalResourceList(framework.NVIDIAGPUResourceName, 1),
|
||||
Requests: newDecimalResourceList(framework.NVIDIAGPUResourceName, 1),
|
||||
},
|
||||
}},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func newDecimalResourceList(name v1.ResourceName, quantity int64) v1.ResourceList {
|
||||
return v1.ResourceList{name: *resource.NewQuantity(quantity, resource.DecimalSI)}
|
||||
}
|
||||
|
||||
// TODO: Find a uniform way to deal with systemctl/initctl/service operations. #34494
|
||||
func restartKubelet(f *framework.Framework) {
|
||||
stdout, err := exec.Command("sudo", "systemctl", "list-units", "kubelet*", "--state=running").CombinedOutput()
|
||||
framework.ExpectNoError(err)
|
||||
regex := regexp.MustCompile("(kubelet-[0-9]+)")
|
||||
matches := regex.FindStringSubmatch(string(stdout))
|
||||
Expect(len(matches)).NotTo(BeZero())
|
||||
kube := matches[0]
|
||||
framework.Logf("Get running kubelet with systemctl: %v, %v", string(stdout), kube)
|
||||
stdout, err = exec.Command("sudo", "systemctl", "restart", kube).CombinedOutput()
|
||||
framework.ExpectNoError(err, "Failed to restart kubelet with systemctl: %v, %v", err, stdout)
|
||||
}
|
||||
|
||||
func getDeviceId(f *framework.Framework, podName string, contName string, restartCount int32) (int32, string) {
|
||||
var count int32
|
||||
// Wait till pod has been restarted at least restartCount times.
|
||||
Eventually(func() bool {
|
||||
p, err := f.PodClient().Get(podName, metav1.GetOptions{})
|
||||
if err != nil || len(p.Status.ContainerStatuses) < 1 {
|
||||
return false
|
||||
}
|
||||
count = p.Status.ContainerStatuses[0].RestartCount
|
||||
return count >= restartCount
|
||||
}, 5*time.Minute, framework.Poll).Should(BeTrue())
|
||||
logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, contName)
|
||||
if err != nil {
|
||||
framework.Failf("GetPodLogs for pod %q failed: %v", podName, err)
|
||||
}
|
||||
framework.Logf("got pod logs: %v", logs)
|
||||
regex := regexp.MustCompile("gpu devices: (nvidia[0-9]+)")
|
||||
matches := regex.FindStringSubmatch(logs)
|
||||
if len(matches) < 2 {
|
||||
return count, ""
|
||||
}
|
||||
return count, matches[1]
|
||||
}
|
174
vendor/k8s.io/kubernetes/test/e2e_node/gpus.go
generated
vendored
Normal file
@ -0,0 +1,174 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
func getGPUsAvailable(f *framework.Framework) int64 {
|
||||
nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
|
||||
framework.ExpectNoError(err, "getting node list")
|
||||
var gpusAvailable int64
|
||||
for _, node := range nodeList.Items {
|
||||
gpusAvailable += node.Status.Capacity.NvidiaGPU().Value()
|
||||
}
|
||||
return gpusAvailable
|
||||
}
|
||||
|
||||
func gpusExistOnAllNodes(f *framework.Framework) bool {
|
||||
nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
|
||||
framework.ExpectNoError(err, "getting node list")
|
||||
for _, node := range nodeList.Items {
|
||||
if node.Name == "kubernetes-master" {
|
||||
continue
|
||||
}
|
||||
if node.Status.Capacity.NvidiaGPU().Value() == 0 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func checkIfNvidiaGPUsExistOnNode() bool {
|
||||
// Cannot use `lspci` because it is not installed on all distros by default.
|
||||
err := exec.Command("/bin/sh", "-c", "find /sys/devices/pci* -type f | grep vendor | xargs cat | grep 0x10de").Run()
|
||||
if err != nil {
|
||||
framework.Logf("check for nvidia GPUs failed. Got Error: %v", err)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Serial because the test updates kubelet configuration.
|
||||
var _ = framework.KubeDescribe("GPU [Serial]", func() {
|
||||
f := framework.NewDefaultFramework("gpu-test")
|
||||
Context("attempt to use GPUs if available", func() {
|
||||
It("setup the node and create pods to test gpus", func() {
|
||||
By("ensuring that Nvidia GPUs exist on the node")
|
||||
if !checkIfNvidiaGPUsExistOnNode() {
|
||||
Skip("Nvidia GPUs do not exist on the node. Skipping test.")
|
||||
}
|
||||
By("ensuring that dynamic kubelet configuration is enabled")
|
||||
enabled, err := isKubeletConfigEnabled(f)
|
||||
framework.ExpectNoError(err)
|
||||
if !enabled {
|
||||
Skip("Dynamic Kubelet configuration is not enabled. Skipping test.")
|
||||
}
|
||||
|
||||
By("enabling support for GPUs")
|
||||
var oldCfg *kubeletconfig.KubeletConfiguration
|
||||
defer func() {
|
||||
if oldCfg != nil {
|
||||
framework.ExpectNoError(setKubeletConfiguration(f, oldCfg))
|
||||
}
|
||||
}()
|
||||
|
||||
// Enable Accelerators
|
||||
oldCfg, err = getCurrentKubeletConfig()
|
||||
framework.ExpectNoError(err)
|
||||
newCfg := oldCfg.DeepCopy()
|
||||
newCfg.FeatureGates[string(features.Accelerators)] = true
|
||||
framework.ExpectNoError(setKubeletConfiguration(f, newCfg))
|
||||
|
||||
By("Waiting for GPUs to become available on the local node")
|
||||
Eventually(gpusExistOnAllNodes(f), 10*time.Minute, time.Second).Should(BeTrue())
|
||||
|
||||
By("Creating a pod that will consume all GPUs")
|
||||
podSuccess := makePod(getGPUsAvailable(f), "gpus-success")
|
||||
podSuccess = f.PodClient().CreateSync(podSuccess)
|
||||
|
||||
By("Checking the containers in the pod had restarted at-least twice successfully thereby ensuring GPUs are reused")
|
||||
const minContainerRestartCount = 2
|
||||
Eventually(func() bool {
|
||||
p, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(podSuccess.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
framework.Logf("failed to get pod status: %v", err)
|
||||
return false
|
||||
}
|
||||
if p.Status.ContainerStatuses[0].RestartCount < minContainerRestartCount {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}, time.Minute, time.Second).Should(BeTrue())
|
||||
|
||||
By("Checking if the pod outputted Success to its logs")
|
||||
framework.ExpectNoError(f.PodClient().MatchContainerOutput(podSuccess.Name, podSuccess.Name, "Success"))
|
||||
|
||||
By("Creating a new pod requesting a GPU and noticing that it is rejected by the Kubelet")
|
||||
podFailure := makePod(1, "gpu-failure")
|
||||
framework.WaitForPodCondition(f.ClientSet, f.Namespace.Name, podFailure.Name, "pod rejected", framework.PodStartTimeout, func(pod *v1.Pod) (bool, error) {
|
||||
if pod.Status.Phase == v1.PodFailed {
|
||||
return true, nil
|
||||
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
|
||||
By("stopping the original Pod with GPUs")
|
||||
gp := int64(0)
|
||||
deleteOptions := metav1.DeleteOptions{
|
||||
GracePeriodSeconds: &gp,
|
||||
}
|
||||
f.PodClient().DeleteSync(podSuccess.Name, &deleteOptions, framework.DefaultPodDeletionTimeout)
|
||||
|
||||
By("attempting to start the failed pod again")
|
||||
f.PodClient().DeleteSync(podFailure.Name, &deleteOptions, framework.DefaultPodDeletionTimeout)
|
||||
podFailure = f.PodClient().CreateSync(podFailure)
|
||||
|
||||
By("Checking if the pod outputted Success to its logs")
|
||||
framework.ExpectNoError(f.PodClient().MatchContainerOutput(podFailure.Name, podFailure.Name, "Success"))
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
func makePod(gpus int64, name string) *v1.Pod {
|
||||
resources := v1.ResourceRequirements{
|
||||
Limits: v1.ResourceList{
|
||||
v1.ResourceNvidiaGPU: *resource.NewQuantity(gpus, resource.DecimalSI),
|
||||
},
|
||||
}
|
||||
gpuverificationCmd := fmt.Sprintf("if [[ %d -ne $(ls /dev/ | egrep '^nvidia[0-9]+$' | wc -l) ]]; then exit 1; else echo Success; fi", gpus)
|
||||
return &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
RestartPolicy: v1.RestartPolicyAlways,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: busyboxImage,
|
||||
Name: name,
|
||||
Command: []string{"sh", "-c", gpuverificationCmd},
|
||||
Resources: resources,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
204
vendor/k8s.io/kubernetes/test/e2e_node/gubernator.sh
generated
vendored
Executable file
@ -0,0 +1,204 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2016 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Make bucket and a folder for e2e-node test logs.
|
||||
# Populate the folder from the logs stored in /tmp/_artifacts/ in the same way as a
|
||||
# jenkins build would, and then print the URL to view the test results on Gubernator
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
source cluster/lib/logging.sh
|
||||
|
||||
|
||||
if [[ $# -eq 0 || ! $1 =~ ^[Yy]$ ]]; then
|
||||
read -p "Do you want to run gubernator.sh and upload logs publicly to GCS? [y/n]" yn
|
||||
echo
|
||||
if [[ ! $yn =~ ^[Yy]$ ]]; then
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Check that user has gsutil
|
||||
if [[ $(which gsutil) == "" ]]; then
|
||||
echo "Could not find gsutil when running \`which gsutil\`"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check that user has gcloud
|
||||
if [[ $(which gcloud) == "" ]]; then
|
||||
echo "Could not find gcloud when running: \`which gcloud\`"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check that user has Credentialed Active account
|
||||
if ! gcloud auth list | grep -q "ACTIVE"; then
|
||||
echo "Could not find active account when running: \`gcloud auth list\`"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
readonly gcs_acl="public-read"
|
||||
bucket_name="${USER}-g8r-logs"
|
||||
echo ""
|
||||
V=2 kube::log::status "Using bucket ${bucket_name}"
|
||||
|
||||
# Check if the bucket exists
|
||||
if ! gsutil ls gs:// | grep -q "gs://${bucket_name}/"; then
|
||||
V=2 kube::log::status "Creating public bucket ${bucket_name}"
|
||||
gsutil mb gs://${bucket_name}/
|
||||
# Make all files in the bucket publicly readable
|
||||
gsutil acl ch -u AllUsers:R gs://${bucket_name}
|
||||
else
|
||||
V=2 kube::log::status "Bucket already exists"
|
||||
fi
|
||||
|
||||
# Path for e2e-node test results
|
||||
GCS_JOBS_PATH="gs://${bucket_name}/logs/e2e-node"
|
||||
|
||||
ARTIFACTS=${ARTIFACTS:-"/tmp/_artifacts"}
|
||||
BUILD_LOG_PATH="${ARTIFACTS}/build-log.txt"
|
||||
|
||||
if [[ ! -e $BUILD_LOG_PATH ]]; then
|
||||
echo "Could not find build-log.txt at ${BUILD_LOG_PATH}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Get start and end timestamps based on build-log.txt file contents
|
||||
# Line where the actual tests start
|
||||
start_line=$(grep -n -m 1 "^=" ${BUILD_LOG_PATH} | sed 's/\([0-9]*\).*/\1/')
|
||||
# Create text file starting where the tests start
|
||||
after_start=$(tail -n +${start_line} ${BUILD_LOG_PATH})
|
||||
echo "${after_start}" >> build-log-cut.txt
|
||||
# Match the first timestamp
|
||||
start_time_raw=$(grep -m 1 -o '[0-9][0-9][0-9][0-9][[:blank:]][0-9][0-9]:[0-9][0-9]:[0-9][0-9].[0-9]*' build-log-cut.txt)
|
||||
rm build-log-cut.txt
|
||||
# Make the date readable by date command (ex: 0101 00:00:00.000 -> 01/01 00:00:00.000)
|
||||
start_time=$(echo ${start_time_raw} | sed 's/^.\{2\}/&\//')
|
||||
V=2 kube::log::status "Started at ${start_time}"
|
||||
# Match the last timestamp in the build-log file
|
||||
end_time=$(grep -o '[0-9][0-9][0-9][0-9][[:blank:]][0-9][0-9]:[0-9][0-9]:[0-9][0-9].[0-9]*' ${BUILD_LOG_PATH} | tail -1 | sed 's/^.\{2\}/&\//')
|
||||
# Convert to epoch time for Gubernator
|
||||
start_time_epoch=$(date -d "${start_time}" +%s)
|
||||
end_time_epoch=$(date -d "${end_time}" +%s)
|
||||
|
||||
# Make folder name for build from timestamp
|
||||
BUILD_STAMP=$(echo $start_time | sed 's/\///' | sed 's/ /_/')
|
||||
|
||||
GCS_LOGS_PATH="${GCS_JOBS_PATH}/${BUILD_STAMP}"
|
||||
|
||||
# Check if folder for same logs already exists
|
||||
if gsutil ls "${GCS_JOBS_PATH}" | grep -q "${BUILD_STAMP}"; then
|
||||
V=2 kube::log::status "Log files already uploaded"
|
||||
echo "Gubernator linked below:"
|
||||
echo "k8s-gubernator.appspot.com/build/${GCS_LOGS_PATH}?local=on"
|
||||
exit
|
||||
fi
|
||||
|
||||
for result in $(find ${ARTIFACTS} -type d -name "results"); do
|
||||
if [[ $result != "" && $result != "${ARTIFACTS}/results" && $result != $ARTIFACTS ]]; then
|
||||
mv $result/* $ARTIFACTS
|
||||
fi
|
||||
done
|
||||
|
||||
# Upload log files
|
||||
for upload_attempt in $(seq 3); do
|
||||
if [[ -d "${ARTIFACTS}" && -n $(ls -A "${ARTIFACTS}") ]]; then
|
||||
V=2 kube::log::status "Uploading artifacts"
|
||||
gsutil -m -q -o "GSUtil:use_magicfile=True" cp -a "${gcs_acl}" -r -c \
|
||||
-z log,xml,json "${ARTIFACTS}" "${GCS_LOGS_PATH}/artifacts" || continue
|
||||
fi
|
||||
break
|
||||
done
|
||||
for upload_attempt in $(seq 3); do
|
||||
if [[ -e "${BUILD_LOG_PATH}" ]]; then
|
||||
V=2 kube::log::status "Uploading build log"
|
||||
gsutil -q cp -Z -a "${gcs_acl}" "${BUILD_LOG_PATH}" "${GCS_LOGS_PATH}" || continue
|
||||
fi
|
||||
break
|
||||
done
|
||||
|
||||
|
||||
# Find the k8s version for started.json
|
||||
version=""
|
||||
if [[ -e "version" ]]; then
|
||||
version=$(cat "version")
|
||||
elif [[ -e "hack/lib/version.sh" ]]; then
|
||||
export KUBE_ROOT="."
|
||||
source "hack/lib/version.sh"
|
||||
kube::version::get_version_vars
|
||||
version="${KUBE_GIT_VERSION-}"
|
||||
fi
|
||||
if [[ -n "${version}" ]]; then
|
||||
V=2 kube::log::status "Found Kubernetes version: ${version}"
|
||||
else
|
||||
V=2 kube::log::status "Could not find Kubernetes version"
|
||||
fi
|
||||
|
||||
#Find build result from build-log.txt
|
||||
if grep -Fxq "Test Suite Passed" "${BUILD_LOG_PATH}"
|
||||
then
|
||||
build_result="SUCCESS"
|
||||
else
|
||||
build_result="FAILURE"
|
||||
fi
|
||||
|
||||
V=4 kube::log::status "Build result is ${build_result}"
|
||||
|
||||
if [[ -e "${ARTIFACTS}/started.json" ]]; then
|
||||
rm "${ARTIFACTS}/started.json"
|
||||
fi
|
||||
|
||||
if [[ -e "${ARTIFACTS}/finished.json" ]]; then
|
||||
rm "${ARTIFACTS}/finished.json"
|
||||
fi
|
||||
|
||||
V=2 kube::log::status "Constructing started.json and finished.json files"
|
||||
echo "{" >> "${ARTIFACTS}/started.json"
|
||||
echo " \"version\": \"${version}\"," >> "${ARTIFACTS}/started.json"
|
||||
echo " \"timestamp\": ${start_time_epoch}," >> "${ARTIFACTS}/started.json"
|
||||
echo " \"jenkins-node\": \"${NODE_NAME:-}\"" >> "${ARTIFACTS}/started.json"
|
||||
echo "}" >> "${ARTIFACTS}/started.json"
|
||||
|
||||
echo "{" >> "${ARTIFACTS}/finished.json"
|
||||
echo " \"result\": \"${build_result}\"," >> "${ARTIFACTS}/finished.json"
|
||||
echo " \"timestamp\": ${end_time_epoch}" >> "${ARTIFACTS}/finished.json"
|
||||
echo "}" >> "${ARTIFACTS}/finished.json"
|
||||
|
||||
|
||||
# Upload started.json
|
||||
V=2 kube::log::status "Uploading started.json and finished.json"
|
||||
V=2 kube::log::status "Run started at ${start_time}"
|
||||
json_file="${GCS_LOGS_PATH}/started.json"
|
||||
|
||||
for upload_attempt in $(seq 3); do
|
||||
V=2 kube::log::status "Uploading started.json to ${json_file} (attempt ${upload_attempt})"
|
||||
gsutil -q -h "Content-Type:application/json" cp -a "${gcs_acl}" "${ARTIFACTS}/started.json" \
|
||||
"${json_file}" || continue
|
||||
break
|
||||
done
|
||||
|
||||
# Upload finished.json
|
||||
for upload_attempt in $(seq 3); do
|
||||
V=2 kube::log::status "Uploading finished.json to ${GCS_LOGS_PATH} (attempt ${upload_attempt})"
|
||||
gsutil -q -h "Content-Type:application/json" cp -a "${gcs_acl}" "${ARTIFACTS}/finished.json" \
|
||||
"${GCS_LOGS_PATH}/finished.json" || continue
|
||||
break
|
||||
done
|
||||
|
||||
|
||||
echo "Gubernator linked below:"
|
||||
echo "k8s-gubernator.appspot.com/build/${bucket_name}/logs/e2e-node/${BUILD_STAMP}"
|
66
vendor/k8s.io/kubernetes/test/e2e_node/image_id_test.go
generated
vendored
Normal file
@ -0,0 +1,66 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = framework.KubeDescribe("ImageID", func() {
|
||||
|
||||
busyBoxImage := "gcr.io/google_containers/busybox@sha256:4bdd623e848417d96127e16037743f0cd8b528c026e9175e22a84f639eca58ff"
|
||||
|
||||
f := framework.NewDefaultFramework("image-id-test")
|
||||
|
||||
It("should be set to the manifest digest (from RepoDigests) when available", func() {
|
||||
podDesc := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod-with-repodigest",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{
|
||||
Name: "test",
|
||||
Image: busyBoxImage,
|
||||
Command: []string{"sh"},
|
||||
}},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
|
||||
pod := f.PodClient().Create(podDesc)
|
||||
|
||||
framework.ExpectNoError(framework.WaitTimeoutForPodNoLongerRunningInNamespace(
|
||||
f.ClientSet, pod.Name, f.Namespace.Name, framework.PodStartTimeout))
|
||||
runningPod, err := f.PodClient().Get(pod.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
status := runningPod.Status
|
||||
|
||||
if len(status.ContainerStatuses) == 0 {
|
||||
framework.Failf("Unexpected pod status; %s", spew.Sdump(status))
|
||||
return
|
||||
}
|
||||
|
||||
Expect(status.ContainerStatuses[0].ImageID).To(ContainSubstring(busyBoxImage))
|
||||
})
|
||||
})
|
164
vendor/k8s.io/kubernetes/test/e2e_node/image_list.go
generated
vendored
Normal file
@ -0,0 +1,164 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"os/user"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
"k8s.io/kubernetes/pkg/kubelet/remote"
|
||||
commontest "k8s.io/kubernetes/test/e2e/common"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
|
||||
const (
|
||||
// Number of attempts to pull an image.
|
||||
maxImagePullRetries = 5
|
||||
// Sleep duration between image pull retry attempts.
|
||||
imagePullRetryDelay = time.Second
|
||||
// connection timeout for gRPC image service connection
|
||||
imageServiceConnectionTimeout = 15 * time.Minute
|
||||
)
|
||||
|
||||
// NodeImageWhiteList is a list of images used in the node e2e tests. These images will be prepulled
// before the tests run so that image pulling won't fail during the actual tests.
|
||||
var NodeImageWhiteList = sets.NewString(
|
||||
"google/cadvisor:latest",
|
||||
"gcr.io/google-containers/stress:v1",
|
||||
busyboxImage,
|
||||
"gcr.io/google_containers/busybox@sha256:4bdd623e848417d96127e16037743f0cd8b528c026e9175e22a84f639eca58ff",
|
||||
"gcr.io/google_containers/node-problem-detector:v0.4.1",
|
||||
imageutils.GetE2EImage(imageutils.NginxSlim),
|
||||
imageutils.GetE2EImage(imageutils.ServeHostname),
|
||||
imageutils.GetE2EImage(imageutils.Netexec),
|
||||
imageutils.GetE2EImage(imageutils.Nonewprivs),
|
||||
framework.GetPauseImageNameForHostArch(),
|
||||
framework.GetGPUDevicePluginImage(),
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Union NodeImageWhiteList and CommonImageWhiteList into the framework image white list.
|
||||
framework.ImageWhiteList = NodeImageWhiteList.Union(commontest.CommonImageWhiteList)
|
||||
}
|
||||
|
||||
// puller represents a generic image puller
|
||||
type puller interface {
|
||||
// Pull pulls an image by name
|
||||
Pull(image string) ([]byte, error)
|
||||
// Name returns the name of the specific puller implementation
|
||||
Name() string
|
||||
}
|
||||
|
||||
type dockerPuller struct {
|
||||
}
|
||||
|
||||
func (dp *dockerPuller) Name() string {
|
||||
return "docker"
|
||||
}
|
||||
|
||||
func (dp *dockerPuller) Pull(image string) ([]byte, error) {
|
||||
// TODO(random-liu): Use docker client to get rid of docker binary dependency.
|
||||
return exec.Command("docker", "pull", image).CombinedOutput()
|
||||
}
|
||||
|
||||
type remotePuller struct {
|
||||
imageService internalapi.ImageManagerService
|
||||
}
|
||||
|
||||
func (rp *remotePuller) Name() string {
|
||||
return "CRI"
|
||||
}
|
||||
|
||||
func (rp *remotePuller) Pull(image string) ([]byte, error) {
|
||||
imageStatus, err := rp.imageService.ImageStatus(&runtimeapi.ImageSpec{Image: image})
|
||||
if err == nil && imageStatus != nil {
|
||||
return nil, nil
|
||||
}
|
||||
_, err = rp.imageService.PullImage(&runtimeapi.ImageSpec{Image: image}, nil)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
func getPuller() (puller, error) {
|
||||
runtime := framework.TestContext.ContainerRuntime
|
||||
switch runtime {
|
||||
case "docker":
|
||||
return &dockerPuller{}, nil
|
||||
case "remote":
|
||||
endpoint := framework.TestContext.ContainerRuntimeEndpoint
|
||||
if framework.TestContext.ImageServiceEndpoint != "" {
|
||||
//ImageServiceEndpoint is the same as ContainerRuntimeEndpoint if not
|
||||
//explicitly specified
|
||||
//https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/kubelet.go#L517
|
||||
endpoint = framework.TestContext.ImageServiceEndpoint
|
||||
}
|
||||
if endpoint == "" {
|
||||
return nil, errors.New("can't prepull images, no remote endpoint provided")
|
||||
}
|
||||
is, err := remote.NewRemoteImageService(endpoint, imageServiceConnectionTimeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &remotePuller{
|
||||
imageService: is,
|
||||
}, nil
|
||||
}
|
||||
return nil, fmt.Errorf("can't prepull images, unknown container runtime %q", runtime)
|
||||
}
|
||||
|
||||
// Pre-fetch all images tests depend on so that we don't fail in an actual test.
|
||||
func PrePullAllImages() error {
|
||||
puller, err := getPuller()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
usr, err := user.Current()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
images := framework.ImageWhiteList.List()
|
||||
glog.V(4).Infof("Pre-pulling images with %s %+v", puller.Name(), images)
|
||||
for _, image := range images {
|
||||
var (
|
||||
err error
|
||||
output []byte
|
||||
)
|
||||
for i := 0; i < maxImagePullRetries; i++ {
|
||||
if i > 0 {
|
||||
time.Sleep(imagePullRetryDelay)
|
||||
}
|
||||
if output, err = puller.Pull(image); err == nil {
|
||||
break
|
||||
}
|
||||
glog.Warningf("Failed to pull %s as user %q, retrying in %s (%d of %d): %v",
|
||||
image, usr.Username, imagePullRetryDelay.String(), i+1, maxImagePullRetries, err)
|
||||
}
|
||||
if err != nil {
|
||||
glog.Warningf("Could not pre-pull image %s %v output: %s", image, err, output)
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
6
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/OWNERS
generated
vendored
Normal file
@ -0,0 +1,6 @@
approvers:
- dashpole
- krzyzacy
- Random-Liu
- yguo0905
- yujuhong
20
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/README.md
generated
vendored
Normal file
@ -0,0 +1,20 @@
# Node e2e job migration notice:

Sig-testing is actively migrating node e2e jobs from Jenkins to [Prow],
and we are moving *.properties and image-config.yaml files to [test-infra].

If you want to update those files, please also update them in [test-infra].

If you have any questions, please contact @krzyzacy or #sig-testing.

## Test-infra Links:
Here's where the existing node e2e job configs live:

[Image config files](https://github.com/kubernetes/test-infra/tree/master/jobs/e2e_node)

[Node test job args (.properties equivalent)](https://github.com/kubernetes/test-infra/blob/master/jobs/config.json)

[test-infra]: https://github.com/kubernetes/test-infra
[Prow]: https://github.com/kubernetes/test-infra/tree/master/prow
221
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/benchmark/benchmark-config.yaml
generated
vendored
Normal file
@ -0,0 +1,221 @@
|
||||
---
|
||||
images:
|
||||
containervm-density1:
|
||||
image: e2e-node-containervm-v20161208-image
|
||||
project: kubernetes-node-e2e-images
|
||||
machine: n1-standard-1
|
||||
tests:
|
||||
- 'create 35 pods with 0s? interval \[Benchmark\]'
|
||||
containervm-density2:
|
||||
image: e2e-node-containervm-v20161208-image
|
||||
project: kubernetes-node-e2e-images
|
||||
machine: n1-standard-1
|
||||
tests:
|
||||
- 'create 105 pods with 0s? interval \[Benchmark\]'
|
||||
containervm-density2-qps60:
|
||||
image: e2e-node-containervm-v20161208-image
|
||||
project: kubernetes-node-e2e-images
|
||||
machine: n1-standard-1
|
||||
tests:
|
||||
- 'create 105 pods with 0s? interval \(QPS 60\) \[Benchmark\]'
|
||||
containervm-density3:
|
||||
image: e2e-node-containervm-v20161208-image
|
||||
project: kubernetes-node-e2e-images
|
||||
machine: n1-standard-2
|
||||
tests:
|
||||
- 'create 105 pods with 0s? interval \[Benchmark\]'
|
||||
containervm-density4:
|
||||
image: e2e-node-containervm-v20161208-image
|
||||
project: kubernetes-node-e2e-images
|
||||
machine: n1-standard-1
|
||||
tests:
|
||||
- 'create 105 pods with 100ms interval \[Benchmark\]'
|
||||
containervm-resource1:
|
||||
image: e2e-node-containervm-v20161208-image
|
||||
project: kubernetes-node-e2e-images
|
||||
machine: n1-standard-1
|
||||
tests:
|
||||
- 'resource tracking for 0 pods per node \[Benchmark\]'
|
||||
containervm-resource2:
|
||||
image: e2e-node-containervm-v20161208-image
|
||||
project: kubernetes-node-e2e-images
|
||||
machine: n1-standard-1
|
||||
tests:
|
||||
- 'resource tracking for 35 pods per node \[Benchmark\]'
|
||||
containervm-resource3:
|
||||
image: e2e-node-containervm-v20161208-image
|
||||
project: kubernetes-node-e2e-images
|
||||
machine: n1-standard-1
|
||||
tests:
|
||||
- 'resource tracking for 105 pods per node \[Benchmark\]'
|
||||
cosstable2-resource1:
|
||||
image: cos-stable-59-9460-64-0
|
||||
project: cos-cloud
|
||||
machine: n1-standard-1
|
||||
metadata: "user-data<test/e2e_node/jenkins/gci-init.yaml,gci-update-strategy=update_disabled"
|
||||
tests:
|
||||
- 'resource tracking for 0 pods per node \[Benchmark\]'
|
||||
cosstable2-resource2:
|
||||
image: cos-stable-59-9460-64-0
|
||||
project: cos-cloud
|
||||
machine: n1-standard-1
|
||||
metadata: "user-data<test/e2e_node/jenkins/gci-init.yaml,gci-update-strategy=update_disabled"
|
||||
tests:
|
||||
- 'resource tracking for 35 pods per node \[Benchmark\]'
|
||||
cosstable2-resource3:
|
||||
image: cos-stable-59-9460-64-0
|
||||
project: cos-cloud
|
||||
machine: n1-standard-1
|
||||
metadata: "user-data<test/e2e_node/jenkins/gci-init.yaml,gci-update-strategy=update_disabled"
|
||||
tests:
|
||||
- 'resource tracking for 105 pods per node \[Benchmark\]'
|
||||
cosstable1-resource1:
|
||||
image: cos-stable-60-9592-76-0
|
||||
project: cos-cloud
|
||||
machine: n1-standard-1
|
||||
metadata: "user-data<test/e2e_node/jenkins/gci-init.yaml,gci-update-strategy=update_disabled"
|
||||
tests:
|
||||
- 'resource tracking for 0 pods per node \[Benchmark\]'
|
||||
cosstable1-resource2:
|
||||
image: cos-stable-60-9592-76-0
|
||||
project: cos-cloud
|
||||
machine: n1-standard-1
|
||||
metadata: "user-data<test/e2e_node/jenkins/gci-init.yaml,gci-update-strategy=update_disabled"
|
||||
tests:
|
||||
- 'resource tracking for 35 pods per node \[Benchmark\]'
|
||||
cosstable1-resource3:
|
||||
image: cos-stable-60-9592-76-0
|
||||
project: cos-cloud
|
||||
machine: n1-standard-1
|
||||
metadata: "user-data<test/e2e_node/jenkins/gci-init.yaml,gci-update-strategy=update_disabled"
|
||||
tests:
|
||||
- 'resource tracking for 105 pods per node \[Benchmark\]'
|
||||
cosdev-resource1:
|
||||
image: cos-beta-61-9765-31-0
|
||||
project: cos-cloud
|
||||
machine: n1-standard-1
|
||||
metadata: "user-data<test/e2e_node/jenkins/gci-init.yaml,gci-update-strategy=update_disabled"
|
||||
tests:
|
||||
- 'resource tracking for 0 pods per node \[Benchmark\]'
|
||||
cosdev-resource2:
|
||||
image: cos-beta-61-9765-31-0
|
||||
project: cos-cloud
|
||||
machine: n1-standard-1
|
||||
metadata: "user-data<test/e2e_node/jenkins/gci-init.yaml,gci-update-strategy=update_disabled"
|
||||
tests:
|
||||
- 'resource tracking for 35 pods per node \[Benchmark\]'
|
||||
cosdev-resource3:
|
||||
image: cos-beta-61-9765-31-0
|
||||
project: cos-cloud
|
||||
machine: n1-standard-1
|
||||
metadata: "user-data<test/e2e_node/jenkins/gci-init.yaml,gci-update-strategy=update_disabled"
|
||||
tests:
|
||||
- 'resource tracking for 105 pods per node \[Benchmark\]'
|
||||
cos-docker112-resource1:
|
||||
image: cos-stable-60-9592-76-0
|
||||
image_description: cos-stable-60-9592-76-0 with docker 1.12.6
|
||||
project: cos-cloud
|
||||
machine: n1-standard-1
|
||||
metadata: "user-data<test/e2e_node/jenkins/cos-init-docker.yaml,gci-update-strategy=update_disabled,gci-docker-version=1.12.6"
|
||||
tests:
|
||||
- 'resource tracking for 0 pods per node \[Benchmark\]'
|
||||
cos-docker112-resource2:
|
||||
image: cos-stable-60-9592-76-0
|
||||
image_description: cos-stable-60-9592-76-0 with docker 1.12.6
|
||||
project: cos-cloud
|
||||
machine: n1-standard-1
|
||||
metadata: "user-data<test/e2e_node/jenkins/cos-init-docker.yaml,gci-update-strategy=update_disabled,gci-docker-version=1.12.6"
|
||||
tests:
|
||||
- 'resource tracking for 35 pods per node \[Benchmark\]'
|
||||
cos-docker112-resource3:
|
||||
image: cos-stable-60-9592-76-0
|
||||
image_description: cos-stable-60-9592-76-0 with docker 1.12.6
|
||||
project: cos-cloud
|
||||
machine: n1-standard-1
|
||||
metadata: "user-data<test/e2e_node/jenkins/cos-init-docker.yaml,gci-update-strategy=update_disabled,gci-docker-version=1.12.6"
|
||||
tests:
|
||||
- 'resource tracking for 105 pods per node \[Benchmark\]'
|
||||
coreosalpha-resource1:
|
||||
image: coreos-alpha-1478-0-0-v20170719
|
||||
project: coreos-cloud
|
||||
metadata: "user-data<test/e2e_node/jenkins/coreos-init.json"
|
||||
machine: n1-standard-1
|
||||
tests:
|
||||
- 'resource tracking for 0 pods per node \[Benchmark\]'
|
||||
coreosalpha-resource2:
|
||||
image: coreos-alpha-1478-0-0-v20170719
|
||||
project: coreos-cloud
|
||||
metadata: "user-data<test/e2e_node/jenkins/coreos-init.json"
|
||||
machine: n1-standard-1
|
||||
tests:
|
||||
- 'resource tracking for 35 pods per node \[Benchmark\]'
|
||||
coreosalpha-resource3:
|
||||
image: coreos-alpha-1478-0-0-v20170719
|
||||
project: coreos-cloud
|
||||
metadata: "user-data<test/e2e_node/jenkins/coreos-init.json"
|
||||
machine: n1-standard-1
|
||||
tests:
|
||||
- 'resource tracking for 105 pods per node \[Benchmark\]'
|
||||
coreosstable-resource1:
|
||||
image: coreos-stable-1409-7-0-v20170719
|
||||
project: coreos-cloud
|
||||
metadata: "user-data<test/e2e_node/jenkins/coreos-init.json"
|
||||
machine: n1-standard-1
|
||||
tests:
|
||||
- 'resource tracking for 0 pods per node \[Benchmark\]'
|
||||
coreosstable-resource2:
|
||||
image: coreos-stable-1409-7-0-v20170719
|
||||
project: coreos-cloud
|
||||
metadata: "user-data<test/e2e_node/jenkins/coreos-init.json"
|
||||
machine: n1-standard-1
|
||||
tests:
|
||||
- 'resource tracking for 35 pods per node \[Benchmark\]'
|
||||
coreosstable-resource3:
|
||||
image: coreos-stable-1409-7-0-v20170719
|
||||
project: coreos-cloud
|
||||
metadata: "user-data<test/e2e_node/jenkins/coreos-init.json"
|
||||
machine: n1-standard-1
|
||||
tests:
|
||||
- 'resource tracking for 105 pods per node \[Benchmark\]'
|
||||
ubuntustable-resource1:
|
||||
image: ubuntu-gke-1604-xenial-v20170816-1
|
||||
project: ubuntu-os-gke-cloud
|
||||
machine: n1-standard-1
|
||||
tests:
|
||||
- 'resource tracking for 0 pods per node \[Benchmark\]'
|
||||
ubuntustable-resource2:
|
||||
image: ubuntu-gke-1604-xenial-v20170816-1
|
||||
project: ubuntu-os-gke-cloud
|
||||
machine: n1-standard-1
|
||||
tests:
|
||||
- 'resource tracking for 35 pods per node \[Benchmark\]'
|
||||
ubuntustable-resource3:
|
||||
image: ubuntu-gke-1604-xenial-v20170816-1
|
||||
project: ubuntu-os-gke-cloud
|
||||
machine: n1-standard-1
|
||||
tests:
|
||||
- 'resource tracking for 105 pods per node \[Benchmark\]'
|
||||
ubuntustable-docker112-resource1:
|
||||
image: ubuntu-gke-1604-xenial-v20170816-1
|
||||
image_description: ubuntu-gke-1604-xenial-v20170816-1 with docker 1.12.6
|
||||
project: ubuntu-os-gke-cloud
|
||||
machine: n1-standard-1
|
||||
metadata: "user-data<test/e2e_node/jenkins/ubuntu-init-docker.yaml,ubuntu-docker-version=1.12.6"
|
||||
tests:
|
||||
- 'resource tracking for 0 pods per node \[Benchmark\]'
|
||||
ubuntustable-docker112-resource2:
|
||||
image: ubuntu-gke-1604-xenial-v20170816-1
|
||||
image_description: ubuntu-gke-1604-xenial-v20170816-1 with docker 1.12.6
|
||||
project: ubuntu-os-gke-cloud
|
||||
machine: n1-standard-1
|
||||
metadata: "user-data<test/e2e_node/jenkins/ubuntu-init-docker.yaml,ubuntu-docker-version=1.12.6"
|
||||
tests:
|
||||
- 'resource tracking for 35 pods per node \[Benchmark\]'
|
||||
ubuntustable-docker112-resource3:
|
||||
image: ubuntu-gke-1604-xenial-v20170816-1
|
||||
image_description: ubuntu-gke-1604-xenial-v20170816-1 with docker 1.12.6
|
||||
project: ubuntu-os-gke-cloud
|
||||
machine: n1-standard-1
|
||||
metadata: "user-data<test/e2e_node/jenkins/ubuntu-init-docker.yaml,ubuntu-docker-version=1.12.6"
|
||||
tests:
|
||||
- 'resource tracking for 105 pods per node \[Benchmark\]'
|
9
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/benchmark/jenkins-benchmark.properties
generated
vendored
Normal file
@ -0,0 +1,9 @@
GCE_HOSTS=
GCE_IMAGE_CONFIG_PATH=test/e2e_node/jenkins/benchmark/benchmark-config.yaml
GCE_ZONE=us-central1-f
GCE_PROJECT=k8s-jkns-ci-node-e2e
CLEANUP=true
GINKGO_FLAGS='--skip="\[Flaky\]"'
TEST_ARGS='--feature-gates=DynamicKubeletConfig=true'
KUBELET_ARGS='--cgroups-per-qos=true --cgroup-root=/'
PARALLELISM=1
43
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/conformance/conformance-jenkins.sh
generated
vendored
Executable file
@ -0,0 +1,43 @@
#!/bin/bash

# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Script executed by jenkins to run node conformance test against gce
# Usage: test/e2e_node/jenkins/conformance-node-jenkins.sh <path to properties>

set -e
set -x

: "${1:?Usage test/e2e_node/jenkins/conformance-node-jenkins.sh <path to properties>}"

. $1

make generated_files

WORKSPACE=${WORKSPACE:-"/tmp/"}
ARTIFACTS=${WORKSPACE}/_artifacts
TIMEOUT=${TIMEOUT:-"45m"}

mkdir -p ${ARTIFACTS}

go run test/e2e_node/runner/remote/run_remote.go --test-suite=conformance \
  --logtostderr --vmodule=*=4 --ssh-env="gce" --ssh-user="$GCE_USER" \
  --zone="$GCE_ZONE" --project="$GCE_PROJECT" --hosts="$GCE_HOSTS" \
  --images="$GCE_IMAGES" --image-project="$GCE_IMAGE_PROJECT" \
  --image-config-file="$GCE_IMAGE_CONFIG_PATH" --cleanup="$CLEANUP" \
  --results-dir="$ARTIFACTS" --test-timeout="$TIMEOUT" \
  --test_args="--kubelet-flags=\"$KUBELET_ARGS\"" \
  --instance-metadata="$GCE_INSTANCE_METADATA" \
  --system-spec-name="$SYSTEM_SPEC_NAME"
6
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/conformance/jenkins-conformance.properties
generated
vendored
Normal file
@ -0,0 +1,6 @@
GCE_HOSTS=
GCE_IMAGE_CONFIG_PATH=test/e2e_node/jenkins/image-config.yaml
GCE_ZONE=us-central1-f
GCE_PROJECT=k8s-jkns-ci-node-e2e
CLEANUP=true
KUBELET_ARGS='--cgroups-per-qos=true --cgroup-root=/'
44
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/copy-e2e-image.sh
generated
vendored
Executable file
@ -0,0 +1,44 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2016 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Usage: copy-e2e-image.sh <from-image-name> <from-project-name> <to-project-name> <to-image-name>
|
||||
|
||||
# See *.properties for list of images to copy,
|
||||
# typically from kubernetes-node-e2e-images
|
||||
|
||||
set -e
|
||||
|
||||
print_usage() {
|
||||
echo "This script helps copy a GCE image from a source to a target project"
|
||||
echo -e "\nUsage:\n$0 <from-image-name> <from-project-name> <to-project-name> <to-image-name>\n"
|
||||
}
|
||||
|
||||
if [ $# -ne 4 ]; then
|
||||
print_usage
|
||||
exit 1
|
||||
fi
|
||||
|
||||
FROM_IMAGE=$1
|
||||
FROM_PROJECT=$2
|
||||
TO_PROJECT=$3
|
||||
TO_IMAGE=$4
|
||||
|
||||
echo "Copying image $FROM_IMAGE from project $FROM_PROJECT to project $TO_PROJECT as image $TO_IMAGE..."
|
||||
gcloud compute --project $TO_PROJECT disks create $TO_IMAGE --image=https://www.googleapis.com/compute/v1/projects/$FROM_PROJECT/global/images/$FROM_IMAGE
|
||||
gcloud compute --project $TO_PROJECT images create $TO_IMAGE \
|
||||
--source-disk=$TO_IMAGE \
|
||||
--description="Cloned from projects/$2/global/images/$1 by $USER on $(date)"
|
||||
gcloud -q compute --project $TO_PROJECT disks delete $TO_IMAGE
|
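A sketch of how the copy script might be invoked, using the containervm image referenced by image-config.yaml as the source; the target project name is a placeholder.

# <from-image> <from-project> <to-project> <to-image> (all four arguments are required)
test/e2e_node/jenkins/copy-e2e-image.sh \
  e2e-node-containervm-v20161208-image kubernetes-node-e2e-images \
  my-test-project e2e-node-containervm-v20161208-image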
44
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/coreos-init.json
generated
vendored
Normal file
@ -0,0 +1,44 @@
|
||||
{
|
||||
"ignition":{"version": "2.0.0"},
|
||||
"systemd": {
|
||||
"units": [{
|
||||
"name": "update-engine.service",
|
||||
"mask": true
|
||||
},
|
||||
{
|
||||
"name": "locksmithd.service",
|
||||
"mask": true
|
||||
},
|
||||
{
|
||||
"name": "docker.service",
|
||||
"dropins": [{
|
||||
"name": "10-disable-systemd-cgroup-driver.conf",
|
||||
"contents": "[Service]\nCPUAccounting=yes\nMemoryAccounting=yes\nEnvironment=\"DOCKER_CGROUPS=\""
|
||||
}]
|
||||
}]
|
||||
},
|
||||
"passwd": {
|
||||
"users": [{
|
||||
"name": "jenkins",
|
||||
"create": {
|
||||
"groups": ["docker", "sudo"]
|
||||
}
|
||||
}]
|
||||
},
|
||||
"storage": {
|
||||
"files": [
|
||||
{
|
||||
"filesystem": "root",
|
||||
"path": "/etc/ssh/sshd_config",
|
||||
"contents": {
|
||||
"source": "data:,%23%20Use%20most%20defaults%20for%20sshd%20configuration.%0AUsePrivilegeSeparation%20sandbox%0ASubsystem%20sftp%20internal-sftp%0AClientAliveInterval%20180%0AUseDNS%20no%0AUsePAM%20yes%0APrintLastLog%20no%20%23%20handled%20by%20PAM%0APrintMotd%20no%20%23%20handled%20by%20PAM%0AAuthenticationMethods%20publickey",
|
||||
"verification": {}
|
||||
},
|
||||
"mode": 384,
|
||||
"user": {},
|
||||
"group": {}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
23
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/cos-init-disable-live-restore.yaml
generated
vendored
Normal file
@ -0,0 +1,23 @@
|
||||
#cloud-config
|
||||
#
|
||||
# This cloud-init configuration file disables Docker live-restore.
|
||||
|
||||
runcmd:
|
||||
- cp /usr/lib/systemd/system/docker.service /etc/systemd/system/docker.service
|
||||
- sed -i '/^ExecStart=\/usr\/bin\/dockerd/ s/$/ --live-restore=false/' /etc/systemd/system/docker.service
|
||||
- systemctl daemon-reload
|
||||
- systemctl restart docker
|
||||
- mount /tmp /tmp -o remount,exec,suid
|
||||
- usermod -a -G docker jenkins
|
||||
- mkdir -p /var/lib/kubelet
|
||||
- mkdir -p /home/kubernetes/containerized_mounter/rootfs
|
||||
- mount --bind /home/kubernetes/containerized_mounter/ /home/kubernetes/containerized_mounter/
|
||||
- mount -o remount,exec /home/kubernetes/containerized_mounter/
|
||||
- wget https://dl.k8s.io/gci-mounter/mounter.tar -O /tmp/mounter.tar
|
||||
- tar xvf /tmp/mounter.tar -C /home/kubernetes/containerized_mounter/rootfs
|
||||
- mkdir -p /home/kubernetes/containerized_mounter/rootfs/var/lib/kubelet
|
||||
- mount --rbind /var/lib/kubelet /home/kubernetes/containerized_mounter/rootfs/var/lib/kubelet
|
||||
- mount --make-rshared /home/kubernetes/containerized_mounter/rootfs/var/lib/kubelet
|
||||
- mount --bind /proc /home/kubernetes/containerized_mounter/rootfs/proc
|
||||
- mount --bind /dev /home/kubernetes/containerized_mounter/rootfs/dev
|
||||
- rm /tmp/mounter.tar
|
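After boot, the effect of this cloud-config can be spot-checked on the instance; a small sketch, assuming SSH access to the node.

# The copied unit should carry the extra flag appended by the sed step above.
grep -- '--live-restore=false' /etc/systemd/system/docker.service
# dockerd should be running with that flag.
pgrep -af dockerd | grep -- '--live-restore=false'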
127
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/cos-init-docker.yaml
generated
vendored
Normal file
@ -0,0 +1,127 @@
|
||||
#cloud-config
|
||||
|
||||
write_files:
|
||||
- path: /etc/systemd/system/upgrade-docker.service
|
||||
permissions: 0644
|
||||
owner: root
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Upgrade Docker Binaries
|
||||
Requires=network-online.target
|
||||
After=network-online.target docker.service
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
# RemainAfterExit so the service runs exactly once.
|
||||
RemainAfterExit=yes
|
||||
ExecStartPre=/bin/mkdir -p /home/upgrade-docker/bin
|
||||
ExecStartPre=/bin/mount --bind /home/upgrade-docker/bin /home/upgrade-docker/bin
|
||||
ExecStartPre=/bin/mount -o remount,exec /home/upgrade-docker/bin
|
||||
ExecStart=/bin/bash /tmp/upgrade-docker/upgrade.sh
|
||||
ExecStartPost=-/bin/rm -rf /home/upgrade-docker/download
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
|
||||
- path: /tmp/upgrade-docker/upgrade.sh
|
||||
permissions: 0644
|
||||
owner: root
|
||||
content: |
|
||||
# This script reads a GCE metadata key for the user-specified Docker
|
||||
# version, downloads, and replaces the builtin Docker with it.
|
||||
|
||||
set -x
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
# Checks if a Docker binary is the version we want.
|
||||
# $1: Docker binary
|
||||
# $2: Requested version
|
||||
check_installed() {
|
||||
local docker_bin="$1"
|
||||
local requested_version="$2"
|
||||
[[ "$(${docker_bin} --version)" =~ "Docker version ${requested_version}," ]]
|
||||
}
|
||||
|
||||
# $1: Docker version
|
||||
download_and_install_docker() {
|
||||
local requested_version="$1"
|
||||
local download_dir=/home/upgrade-docker/download/docker-"${requested_version}"
|
||||
local install_location=/home/upgrade-docker/bin
|
||||
local docker_tgz="docker-${requested_version}.tgz"
|
||||
|
||||
if [[ "${requested_version}" =~ "rc" ]]; then
|
||||
# RC releases all have the word "rc" in their version
|
||||
# number, e.g., "1.11.1-rc1".
|
||||
download_url="https://test.docker.com/builds/Linux/x86_64/${docker_tgz}"
|
||||
else
|
||||
download_url="https://get.docker.com/builds/Linux/x86_64/${docker_tgz}"
|
||||
fi
|
||||
|
||||
echo "Downloading Docker version ${requested_version} from "\
|
||||
"${download_url} to ${download_dir} ..."
|
||||
|
||||
# Download and install the binaries.
|
||||
mkdir -p "${download_dir}"/binaries
|
||||
/usr/bin/curl -o "${download_dir}/${docker_tgz}" --fail "${download_url}"
|
||||
tar xzf "${download_dir}/${docker_tgz}" -C "${download_dir}"/binaries
|
||||
cp "${download_dir}"/binaries/docker/docker* "${install_location}"
|
||||
mount --bind "${install_location}"/docker /usr/bin/docker
|
||||
mount --bind "${install_location}"/docker-containerd /usr/bin/docker-containerd
|
||||
mount --bind "${install_location}"/docker-containerd-shim /usr/bin/docker-containerd-shim
|
||||
mount --bind "${install_location}"/dockerd /usr/bin/dockerd
|
||||
mount --bind "${install_location}"/docker-proxy /usr/bin/docker-proxy
|
||||
mount --bind "${install_location}"/docker-runc /usr/bin/docker-runc
|
||||
echo "PATH=/home/upgrade-docker/bin:/sbin:/bin:/usr/sbin:/usr/bin" >> /etc/default/docker
|
||||
}
|
||||
|
||||
# $1: Metadata key
|
||||
get_metadata() {
|
||||
/usr/bin/curl --fail --retry 5 --retry-delay 3 --silent --show-error \
|
||||
-H "X-Google-Metadata-Request: True" \
|
||||
http://metadata.google.internal/computeMetadata/v1/instance/attributes/"$1"
|
||||
}
|
||||
|
||||
main() {
|
||||
# Get the desired Docker version through the following metadata key.
|
||||
local requested_version="$(get_metadata "gci-docker-version")"
|
||||
if [[ -z "${requested_version}" ]]; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Check if we have the requested version installed.
|
||||
if check_installed /usr/bin/docker "${requested_version}"; then
|
||||
echo "Requested version already installed. Exiting."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Stop the docker daemon during upgrade.
|
||||
/usr/bin/systemctl stop docker
|
||||
download_and_install_docker "${requested_version}"
|
||||
|
||||
# Assert that the upgrade was successful.
|
||||
local rc=0
|
||||
check_installed /usr/bin/docker "${requested_version}" || rc=1
|
||||
/usr/bin/systemctl start docker && exit $rc
|
||||
}
|
||||
|
||||
main "$@"
|
||||
|
||||
runcmd:
|
||||
- systemctl daemon-reload
|
||||
- systemctl start upgrade-docker.service
|
||||
- mount /tmp /tmp -o remount,exec,suid
|
||||
- usermod -a -G docker jenkins
|
||||
- mkdir -p /var/lib/kubelet
|
||||
- mkdir -p /home/kubernetes/containerized_mounter/rootfs
|
||||
- mount --bind /home/kubernetes/containerized_mounter/ /home/kubernetes/containerized_mounter/
|
||||
- mount -o remount,exec /home/kubernetes/containerized_mounter/
|
||||
- wget https://dl.k8s.io/gci-mounter/mounter.tar -O /tmp/mounter.tar
|
||||
- tar xvf /tmp/mounter.tar -C /home/kubernetes/containerized_mounter/rootfs
|
||||
- mkdir -p /home/kubernetes/containerized_mounter/rootfs/var/lib/kubelet
|
||||
- mount --rbind /var/lib/kubelet /home/kubernetes/containerized_mounter/rootfs/var/lib/kubelet
|
||||
- mount --make-rshared /home/kubernetes/containerized_mounter/rootfs/var/lib/kubelet
|
||||
- mount --bind /proc /home/kubernetes/containerized_mounter/rootfs/proc
|
||||
- mount --bind /dev /home/kubernetes/containerized_mounter/rootfs/dev
|
||||
- rm /tmp/mounter.tar
|
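The upgrade unit above is driven entirely by the gci-docker-version metadata key; when the key is absent the script exits without changing anything. A sketch of creating a one-off test VM with both the user-data and the version key set; the project, instance name, and Docker version are placeholders.

gcloud compute instances create node-e2e-docker-upgrade \
  --project my-test-project --zone us-central1-f \
  --image-family cos-stable --image-project cos-cloud \
  --metadata gci-docker-version=1.12.6 \
  --metadata-from-file user-data=test/e2e_node/jenkins/cos-init-docker.yaml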
22
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/cos-init-live-restore.yaml
generated
vendored
Normal file
@ -0,0 +1,22 @@
|
||||
#cloud-config
|
||||
|
||||
runcmd:
|
||||
- cp /usr/lib/systemd/system/docker.service /etc/systemd/system/
|
||||
- sed -i -e 's/-s overlay/-s overlay2/g' /etc/systemd/system/docker.service
|
||||
- systemctl daemon-reload
|
||||
- echo '{"live-restore":true}' > /etc/docker/daemon.json
|
||||
- systemctl restart docker
|
||||
- mount /tmp /tmp -o remount,exec,suid
|
||||
- usermod -a -G docker jenkins
|
||||
- mkdir -p /var/lib/kubelet
|
||||
- mkdir -p /home/kubernetes/containerized_mounter/rootfs
|
||||
- mount --bind /home/kubernetes/containerized_mounter/ /home/kubernetes/containerized_mounter/
|
||||
- mount -o remount,exec /home/kubernetes/containerized_mounter/
|
||||
- wget https://dl.k8s.io/gci-mounter/mounter.tar -O /tmp/mounter.tar
|
||||
- tar xvf /tmp/mounter.tar -C /home/kubernetes/containerized_mounter/rootfs
|
||||
- mkdir -p /home/kubernetes/containerized_mounter/rootfs/var/lib/kubelet
|
||||
- mount --rbind /var/lib/kubelet /home/kubernetes/containerized_mounter/rootfs/var/lib/kubelet
|
||||
- mount --make-rshared /home/kubernetes/containerized_mounter/rootfs/var/lib/kubelet
|
||||
- mount --bind /proc /home/kubernetes/containerized_mounter/rootfs/proc
|
||||
- mount --bind /dev /home/kubernetes/containerized_mounter/rootfs/dev
|
||||
- rm /tmp/mounter.tar
|
15
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/docker_validation/cos-docker-validation.properties
generated
vendored
Normal file
@ -0,0 +1,15 @@
|
||||
GCI_IMAGE_PROJECT=container-vm-image-staging
|
||||
GCI_IMAGE_FAMILY=gci-next-canary
|
||||
GCI_IMAGE=$(gcloud compute images describe-from-family ${GCI_IMAGE_FAMILY} --project=${GCI_IMAGE_PROJECT} --format="value(name)")
|
||||
GCI_CLOUD_INIT=test/e2e_node/jenkins/gci-init.yaml
|
||||
|
||||
GCE_HOSTS=
|
||||
GCE_IMAGES=${GCI_IMAGE}
|
||||
GCE_IMAGE_PROJECT=${GCI_IMAGE_PROJECT}
|
||||
GCE_ZONE=us-central1-f
|
||||
GCE_PROJECT=node-cos-docker-validation
|
||||
# user-data is the GCI cloud init config file.
|
||||
GCE_INSTANCE_METADATA="user-data<${GCI_CLOUD_INIT},gci-update-strategy=update_disabled"
|
||||
CLEANUP=true
|
||||
GINKGO_FLAGS='--skip="\[Flaky\]|\[Serial\]"'
|
||||
TIMEOUT=1h
|
21
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/docker_validation/jenkins-perf.properties
generated
vendored
Normal file
@ -0,0 +1,21 @@
|
||||
#!/bin/bash
|
||||
GCI_IMAGE_PROJECT=container-vm-image-staging
|
||||
GCI_IMAGE_FAMILY=gci-canary-test
|
||||
GCI_IMAGE=$(gcloud compute images describe-from-family ${GCI_IMAGE_FAMILY} --project=${GCI_IMAGE_PROJECT} --format="value(name)")
|
||||
DOCKER_VERSION=$(curl -fsSL --retry 3 https://api.github.com/repos/docker/docker/releases | tac | tac | grep -m 1 "\"tag_name\"\:" | grep -Eo "[0-9\.rc-]+")
|
||||
GCI_CLOUD_INIT=test/e2e_node/jenkins/gci-init.yaml
|
||||
|
||||
# Render the test config file
|
||||
GCE_IMAGE_CONFIG_PATH=`mktemp`
|
||||
CONFIG_FILE=test/e2e_node/jenkins/docker_validation/perf-config.yaml
|
||||
cp $CONFIG_FILE $GCE_IMAGE_CONFIG_PATH
|
||||
sed -i -e "s@{{IMAGE}}@${GCI_IMAGE}@g" $GCE_IMAGE_CONFIG_PATH
|
||||
sed -i -e "s@{{IMAGE_PROJECT}}@${GCI_IMAGE_PROJECT}@g" $GCE_IMAGE_CONFIG_PATH
|
||||
sed -i -e "s@{{METADATA}}@user-data<${GCI_CLOUD_INIT},gci-docker-version=${DOCKER_VERSION},gci-update-strategy=update_disabled@g" $GCE_IMAGE_CONFIG_PATH
|
||||
|
||||
GCE_HOSTS=
|
||||
GCE_ZONE=us-central1-f
|
||||
GCE_PROJECT=node-cos-docker-validation-ci
|
||||
CLEANUP=true
|
||||
GINKGO_FLAGS='--skip="\[Flaky\]"'
|
||||
PARALLELISM=1
|
17
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/docker_validation/jenkins-validation.properties
generated
vendored
Normal file
@ -0,0 +1,17 @@
|
||||
GCI_IMAGE_PROJECT=container-vm-image-staging
|
||||
GCI_IMAGE_FAMILY=gci-canary-test
|
||||
GCI_IMAGE=$(gcloud compute images describe-from-family ${GCI_IMAGE_FAMILY} --project=${GCI_IMAGE_PROJECT} --format="value(name)")
|
||||
DOCKER_VERSION=$(curl -fsSL --retry 3 https://api.github.com/repos/docker/docker/releases | tac | tac | grep -m 1 "\"tag_name\"\:" | grep -Eo "[0-9\.rc-]+")
|
||||
GCI_CLOUD_INIT=test/e2e_node/jenkins/gci-init.yaml
|
||||
|
||||
GCE_HOSTS=
|
||||
GCE_IMAGES=${GCI_IMAGE}
|
||||
GCE_IMAGE_PROJECT=${GCI_IMAGE_PROJECT}
|
||||
GCE_ZONE=us-central1-f
|
||||
GCE_PROJECT=node-cos-docker-validation-ci
|
||||
# user-data is the GCI cloud init config file.
|
||||
# gci-docker-version specifies the Docker version in the GCI image.
|
||||
GCE_INSTANCE_METADATA="user-data<${GCI_CLOUD_INIT},gci-docker-version=${DOCKER_VERSION},gci-update-strategy=update_disabled"
|
||||
CLEANUP=true
|
||||
GINKGO_FLAGS='--skip="\[Flaky\]|\[Serial\]"'
|
||||
TIMEOUT=1h
|
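The DOCKER_VERSION line above queries the GitHub releases API for the most recent tag; the double tac appears to be there so the whole response is buffered before grep -m 1 exits, avoiding a broken pipe back to curl. The extraction can be tried on its own:

# Standalone sketch of the version lookup used by the validation jobs.
curl -fsSL --retry 3 https://api.github.com/repos/docker/docker/releases \
  | tac | tac \
  | grep -m 1 '"tag_name":' \
  | grep -Eo '[0-9.rc-]+'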
58
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/docker_validation/perf-config.yaml
generated
vendored
Normal file
@ -0,0 +1,58 @@
|
||||
---
|
||||
images:
|
||||
density1:
|
||||
image: "{{IMAGE}}"
|
||||
project: "{{IMAGE_PROJECT}}"
|
||||
metadata: "{{METADATA}}"
|
||||
machine: n1-standard-1
|
||||
tests:
|
||||
- '.*create 35 pods with 0s? interval \[Benchmark\]'
|
||||
density2:
|
||||
image: "{{IMAGE}}"
|
||||
project: "{{IMAGE_PROJECT}}"
|
||||
metadata: "{{METADATA}}"
|
||||
machine: n1-standard-1
|
||||
tests:
|
||||
- '.*create 105 pods with 0s? interval \[Benchmark\]'
|
||||
density3:
|
||||
image: "{{IMAGE}}"
|
||||
project: "{{IMAGE_PROJECT}}"
|
||||
metadata: "{{METADATA}}"
|
||||
machine: n1-standard-2
|
||||
tests:
|
||||
- '.*create 105 pods with 0s? interval \[Benchmark\]'
|
||||
density4:
|
||||
image: "{{IMAGE}}"
|
||||
project: "{{IMAGE_PROJECT}}"
|
||||
metadata: "{{METADATA}}"
|
||||
machine: n1-standard-1
|
||||
tests:
|
||||
- '.*create 35 pods with 100ms interval \[Benchmark\]'
|
||||
density5:
|
||||
image: "{{IMAGE}}"
|
||||
project: "{{IMAGE_PROJECT}}"
|
||||
metadata: "{{METADATA}}"
|
||||
machine: n1-standard-1
|
||||
tests:
|
||||
- '.*create 105 pods with 100ms interval \[Benchmark\]'
|
||||
density6:
|
||||
image: "{{IMAGE}}"
|
||||
project: "{{IMAGE_PROJECT}}"
|
||||
metadata: "{{METADATA}}"
|
||||
machine: n1-standard-2
|
||||
tests:
|
||||
- '.*create 105 pods with 100ms interval \[Benchmark\]'
|
||||
density7:
|
||||
image: "{{IMAGE}}"
|
||||
project: "{{IMAGE_PROJECT}}"
|
||||
metadata: "{{METADATA}}"
|
||||
machine: n1-standard-1
|
||||
tests:
|
||||
- '.*create 105 pods with 300ms interval \[Benchmark\]'
|
||||
density8:
|
||||
image: "{{IMAGE}}"
|
||||
project: "{{IMAGE_PROJECT}}"
|
||||
metadata: "{{METADATA}}"
|
||||
machine: n1-standard-2
|
||||
tests:
|
||||
- '.*create 105 pods with 300ms interval \[Benchmark\]'
|
50
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/e2e-node-jenkins.sh
generated
vendored
Executable file
@ -0,0 +1,50 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2016 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Script executed by jenkins to run node e2e tests against gce
|
||||
# Usage: test/e2e_node/jenkins/e2e-node-jenkins.sh <path to properties>
|
||||
# Properties files:
|
||||
# - test/e2e_node/jenkins/jenkins-ci.properties : for running jenkins ci
|
||||
# - test/e2e_node/jenkins/jenkins-pull.properties : for running jenkins pull request builder
|
||||
# - test/e2e_node/jenkins/template.properties : template for creating a properties file to run locally
|
||||
|
||||
set -e
|
||||
set -x
|
||||
|
||||
: "${1:?Usage test/e2e_node/jenkins/e2e-node-jenkins.sh <path to properties>}"
|
||||
|
||||
. $1
|
||||
|
||||
# indirectly generates test/e2e/generated/bindata.go too
|
||||
make generated_files
|
||||
|
||||
# TODO converge build steps with hack/build-go some day if possible.
|
||||
go build test/e2e_node/environment/conformance.go
|
||||
|
||||
PARALLELISM=${PARALLELISM:-8}
|
||||
WORKSPACE=${WORKSPACE:-"/tmp/"}
|
||||
ARTIFACTS=${WORKSPACE}/_artifacts
|
||||
TIMEOUT=${TIMEOUT:-"45m"}
|
||||
|
||||
mkdir -p ${ARTIFACTS}
|
||||
|
||||
go run test/e2e_node/runner/remote/run_remote.go --logtostderr --vmodule=*=4 \
|
||||
--ssh-env="gce" --ssh-user="$GCE_USER" --zone="$GCE_ZONE" --project="$GCE_PROJECT" \
|
||||
--hosts="$GCE_HOSTS" --images="$GCE_IMAGES" --image-project="$GCE_IMAGE_PROJECT" \
|
||||
--image-config-file="$GCE_IMAGE_CONFIG_PATH" --cleanup="$CLEANUP" \
|
||||
--results-dir="$ARTIFACTS" --ginkgo-flags="--nodes=$PARALLELISM $GINKGO_FLAGS" \
|
||||
--test-timeout="$TIMEOUT" --test_args="$TEST_ARGS --kubelet-flags=\"$KUBELET_ARGS\"" \
|
||||
--instance-metadata="$GCE_INSTANCE_METADATA" --system-spec-name="$SYSTEM_SPEC_NAME"
|
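Outside Jenkins, the same script can be pointed at one of the checked-in properties files; a sketch, assuming gcloud is already authenticated and GCE_USER is exported (the CI properties do not set it).

export GCE_USER="$(whoami)"
# PARALLELISM defaults to 8 inside the script; it can be overridden from the environment.
PARALLELISM=4 test/e2e_node/jenkins/e2e-node-jenkins.sh \
  test/e2e_node/jenkins/jenkins-ci.properties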
19
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/gci-init-gpu.yaml
generated
vendored
Normal file
@ -0,0 +1,19 @@
|
||||
#cloud-config
|
||||
|
||||
runcmd:
|
||||
- modprobe configs
|
||||
- docker run -v /dev:/dev -v /home/kubernetes/bin/nvidia:/rootfs/nvidia -v /etc/os-release:/rootfs/etc/os-release -v /proc/sysrq-trigger:/sysrq -e BASE_DIR=/rootfs/nvidia --privileged gcr.io/google_containers/cos-nvidia-driver-install@sha256:cb55c7971c337fece62f2bfe858662522a01e43ac9984a2dd1dd5c71487d225c
|
||||
- mount /tmp /tmp -o remount,exec,suid
|
||||
- usermod -a -G docker jenkins
|
||||
- mkdir -p /var/lib/kubelet
|
||||
- mkdir -p /home/kubernetes/containerized_mounter/rootfs
|
||||
- mount --bind /home/kubernetes/containerized_mounter/ /home/kubernetes/containerized_mounter/
|
||||
- mount -o remount,exec /home/kubernetes/containerized_mounter/
|
||||
- wget https://dl.k8s.io/gci-mounter/mounter.tar -O /tmp/mounter.tar
|
||||
- tar xvf /tmp/mounter.tar -C /home/kubernetes/containerized_mounter/rootfs
|
||||
- mkdir -p /home/kubernetes/containerized_mounter/rootfs/var/lib/kubelet
|
||||
- mount --rbind /var/lib/kubelet /home/kubernetes/containerized_mounter/rootfs/var/lib/kubelet
|
||||
- mount --make-rshared /home/kubernetes/containerized_mounter/rootfs/var/lib/kubelet
|
||||
- mount --bind /proc /home/kubernetes/containerized_mounter/rootfs/proc
|
||||
- mount --bind /dev /home/kubernetes/containerized_mounter/rootfs/dev
|
||||
- rm /tmp/mounter.tar
|
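The extra docker run step installs the NVIDIA driver payload under /home/kubernetes/bin/nvidia (mounted as /rootfs/nvidia inside the installer container). A quick, hedged way to confirm the install on the booted node:

# Driver payload dropped by the installer container.
ls /home/kubernetes/bin/nvidia
# Device nodes that should exist once the kernel modules are loaded.
ls /dev/nvidia*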
17
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/gci-init.yaml
generated
vendored
Normal file
@ -0,0 +1,17 @@
|
||||
#cloud-config
|
||||
|
||||
runcmd:
|
||||
- mount /tmp /tmp -o remount,exec,suid
|
||||
- usermod -a -G docker jenkins
|
||||
- mkdir -p /var/lib/kubelet
|
||||
- mkdir -p /home/kubernetes/containerized_mounter/rootfs
|
||||
- mount --bind /home/kubernetes/containerized_mounter/ /home/kubernetes/containerized_mounter/
|
||||
- mount -o remount,exec /home/kubernetes/containerized_mounter/
|
||||
- wget https://dl.k8s.io/gci-mounter/mounter.tar -O /tmp/mounter.tar
|
||||
- tar xvf /tmp/mounter.tar -C /home/kubernetes/containerized_mounter/rootfs
|
||||
- mkdir -p /home/kubernetes/containerized_mounter/rootfs/var/lib/kubelet
|
||||
- mount --rbind /var/lib/kubelet /home/kubernetes/containerized_mounter/rootfs/var/lib/kubelet
|
||||
- mount --make-rshared /home/kubernetes/containerized_mounter/rootfs/var/lib/kubelet
|
||||
- mount --bind /proc /home/kubernetes/containerized_mounter/rootfs/proc
|
||||
- mount --bind /dev /home/kubernetes/containerized_mounter/rootfs/dev
|
||||
- rm /tmp/mounter.tar
|
26
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/image-config-serial.yaml
generated
vendored
Normal file
@ -0,0 +1,26 @@
|
||||
# To copy an image between projects:
|
||||
# `gcloud compute --project <to-project> disks create <image name> --image=https://www.googleapis.com/compute/v1/projects/<from-project>/global/images/<image-name>`
|
||||
# `gcloud compute --project <to-project> images create <image-name> --source-disk=<image-name>`
|
||||
images:
|
||||
ubuntu:
|
||||
image: ubuntu-gke-1604-xenial-v20170420-1 # docker 1.12.6
|
||||
project: ubuntu-os-gke-cloud
|
||||
coreos-alpha:
|
||||
image: coreos-alpha-1122-0-0-v20160727 # docker 1.11.2
|
||||
project: coreos-cloud
|
||||
metadata: "user-data<test/e2e_node/jenkins/coreos-init.json"
|
||||
containervm:
|
||||
image: e2e-node-containervm-v20161208-image # docker 1.11.2
|
||||
project: kubernetes-node-e2e-images
|
||||
cos-stable2:
|
||||
image_regex: cos-stable-59-9460-64-0 # docker 1.11.2
|
||||
project: cos-cloud
|
||||
metadata: "user-data<test/e2e_node/jenkins/gci-init-gpu.yaml,gci-update-strategy=update_disabled"
|
||||
resources:
|
||||
accelerators:
|
||||
- type: nvidia-tesla-k80
|
||||
count: 2
|
||||
cos-stable1:
|
||||
image_regex: cos-stable-60-9592-84-0 # docker 1.13.1
|
||||
project: cos-cloud
|
||||
metadata: "user-data<test/e2e_node/jenkins/cos-init-live-restore.yaml,gci-update-strategy=update_disabled"
|
22
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/image-config.yaml
generated
vendored
Normal file
@ -0,0 +1,22 @@
|
||||
# To copy an image between projects:
|
||||
# `gcloud compute --project <to-project> disks create <image name> --image=https://www.googleapis.com/compute/v1/projects/<from-project>/global/images/<image-name>`
|
||||
# `gcloud compute --project <to-project> images create <image-name> --source-disk=<image-name>`
|
||||
images:
|
||||
ubuntu:
|
||||
image: ubuntu-gke-1604-xenial-v20170420-1 # docker 1.12.6
|
||||
project: ubuntu-os-gke-cloud
|
||||
coreos-alpha:
|
||||
image: coreos-alpha-1122-0-0-v20160727 # docker 1.11.2
|
||||
project: coreos-cloud
|
||||
metadata: "user-data<test/e2e_node/jenkins/coreos-init.json"
|
||||
containervm:
|
||||
image: e2e-node-containervm-v20161208-image # docker 1.11.2
|
||||
project: kubernetes-node-e2e-images
|
||||
cos-stable2:
|
||||
image_regex: cos-stable-59-9460-64-0 # docker 1.11.2
|
||||
project: cos-cloud
|
||||
metadata: "user-data<test/e2e_node/jenkins/gci-init.yaml,gci-update-strategy=update_disabled"
|
||||
cos-stable1:
|
||||
image_regex: cos-stable-60-9592-84-0 # docker 1.13.1
|
||||
project: cos-cloud
|
||||
metadata: "user-data<test/e2e_node/jenkins/cos-init-live-restore.yaml,gci-update-strategy=update_disabled"
|
12
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/jenkins-ci-ubuntu.properties
generated
vendored
Normal file
@ -0,0 +1,12 @@
|
||||
GCE_HOSTS=
|
||||
GCE_IMAGE_CONFIG_PATH=
|
||||
GCE_IMAGES=ubuntu-gke-1604-xenial-v20170420-1
|
||||
GCE_IMAGE_PROJECT=ubuntu-os-gke-cloud
|
||||
GCE_ZONE=us-central1-f
|
||||
GCE_PROJECT=k8s-jkns-ubuntu-node
|
||||
CLEANUP=true
|
||||
GINKGO_FLAGS='--skip="\[Flaky\]|\[Serial\]"'
|
||||
KUBELET_ARGS='--cgroups-per-qos=true --cgroup-root=/'
|
||||
TIMEOUT=1h
|
||||
# Use the system spec defined in test/e2e_node/system/specs/gke.yaml.
|
||||
SYSTEM_SPEC_NAME=gke
|
8
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/jenkins-ci.properties
generated
vendored
Normal file
@ -0,0 +1,8 @@
|
||||
GCE_HOSTS=
|
||||
GCE_IMAGE_CONFIG_PATH=test/e2e_node/jenkins/image-config.yaml
|
||||
GCE_ZONE=us-central1-f
|
||||
GCE_PROJECT=k8s-jkns-ci-node-e2e
|
||||
CLEANUP=true
|
||||
GINKGO_FLAGS='--skip="\[Flaky\]|\[Serial\]"'
|
||||
KUBELET_ARGS='--cgroups-per-qos=true --cgroup-root=/'
|
||||
TIMEOUT=1h
|
11
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/jenkins-flaky.properties
generated
vendored
Normal file
@ -0,0 +1,11 @@
|
||||
GCE_HOSTS=
|
||||
GCE_IMAGE_CONFIG_PATH=test/e2e_node/jenkins/image-config.yaml
|
||||
GCE_ZONE=us-central1-f
|
||||
GCE_PROJECT=k8s-jkns-ci-node-e2e
|
||||
CLEANUP=true
|
||||
GINKGO_FLAGS='--focus="\[Flaky\]"'
|
||||
TEST_ARGS='--feature-gates=DynamicKubeletConfig=true,LocalStorageCapacityIsolation=true,PodPriority=true'
|
||||
KUBELET_ARGS='--cgroups-per-qos=true --cgroup-root=/'
|
||||
PARALLELISM=1
|
||||
TIMEOUT=3h
|
||||
|
8
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/jenkins-pull.properties
generated
vendored
Normal file
@ -0,0 +1,8 @@
|
||||
GCE_HOSTS=
|
||||
GCE_IMAGE_CONFIG_PATH=test/e2e_node/jenkins/image-config.yaml
|
||||
GCE_ZONE=us-central1-f
|
||||
GCE_PROJECT=k8s-jkns-pr-node-e2e
|
||||
CLEANUP=true
|
||||
GINKGO_FLAGS='--skip="\[Flaky\]|\[Slow\]|\[Serial\]" --flakeAttempts=2'
|
||||
KUBELET_ARGS='--cgroups-per-qos=true --cgroup-root=/'
|
||||
|
14
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/jenkins-serial-ubuntu.properties
generated
vendored
Normal file
@ -0,0 +1,14 @@
|
||||
GCE_HOSTS=
|
||||
GCE_IMAGE_CONFIG_PATH=
|
||||
GCE_IMAGES=ubuntu-gke-1604-xenial-v20170420-1
|
||||
GCE_IMAGE_PROJECT=ubuntu-os-gke-cloud
|
||||
GCE_ZONE=us-central1-f
|
||||
GCE_PROJECT=k8s-jkns-ubuntu-node-serial
|
||||
CLEANUP=true
|
||||
GINKGO_FLAGS='--focus="\[Serial\]" --skip="\[Flaky\]|\[Benchmark\]"'
|
||||
TEST_ARGS='--feature-gates=DynamicKubeletConfig=true'
|
||||
KUBELET_ARGS='--cgroups-per-qos=true --cgroup-root=/'
|
||||
PARALLELISM=1
|
||||
TIMEOUT=3h
|
||||
# Use the system spec defined at test/e2e_node/system/specs/gke.yaml.
|
||||
SYSTEM_SPEC_NAME=gke
|
10
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/jenkins-serial.properties
generated
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
GCE_HOSTS=
|
||||
GCE_IMAGE_CONFIG_PATH=test/e2e_node/jenkins/image-config-serial.yaml
|
||||
GCE_ZONE=us-west1-b
|
||||
GCE_PROJECT=k8s-jkns-ci-node-e2e
|
||||
CLEANUP=true
|
||||
GINKGO_FLAGS='--focus="\[Serial\]" --skip="\[Flaky\]|\[Benchmark\]"'
|
||||
TEST_ARGS='--feature-gates=DynamicKubeletConfig=true'
|
||||
KUBELET_ARGS='--cgroups-per-qos=true --cgroup-root=/'
|
||||
PARALLELISM=1
|
||||
TIMEOUT=3h
|
25
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/template.properties
generated
vendored
Normal file
@ -0,0 +1,25 @@
|
||||
# Copy this file to your home directory and modify it as needed.
|
||||
# User used on the gce instances to run the test.
|
||||
GCE_USER=
|
||||
# Path to a yaml or json file describing images to run or empty
|
||||
GCE_IMAGE_CONFIG_PATH=
|
||||
# Names of gce hosts to test against (must be resolvable) or empty
|
||||
GCE_HOSTS=
|
||||
# Comma-separated names of gce images to test or empty (one or more of GCE_IMAGE_CONFIG_PATH, GCE_IMAGES, GCE_HOSTS is required)
|
||||
GCE_IMAGES=
|
||||
# Gce zone to use - required when using GCE_IMAGES
|
||||
GCE_ZONE=
|
||||
# Gce project to use for creating instances
|
||||
# required when using GCE_IMAGES or GCE_IMAGE_CONFIG_PATH
|
||||
GCE_PROJECT=
|
||||
# Gce project to use for GCE_IMAGES
|
||||
# required when using GCE_IMAGES
|
||||
GCE_IMAGE_PROJECT=
|
||||
# If true, delete instances created from GCE_IMAGES/GCE_IMAGE_CONFIG_PATH and files copied to GCE_HOSTS
|
||||
CLEANUP=true
|
||||
# KUBELET_ARGS are the arguments passed to kubelet. The args will override corresponding default kubelet
|
||||
# settings in the test framework and --kubelet-flags in TEST_ARGS.
|
||||
# If true, a QoS cgroup hierarchy is created and tests specific to the cgroup hierarchy run.
|
||||
KUBELET_ARGS='--cgroups-per-qos=true --cgroup-root=/'
|
||||
# TEST_ARGS are args passed to node e2e test.
|
||||
TEST_ARGS=''
|
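A sketch of the local workflow this template is meant for: copy it, fill in the GCE fields, and hand the result to the Jenkins entry script.

cp test/e2e_node/jenkins/template.properties ~/node-e2e.properties
# Fill in at least GCE_USER, GCE_PROJECT, GCE_ZONE and one of
# GCE_IMAGE_CONFIG_PATH / GCE_IMAGES (plus GCE_IMAGE_PROJECT) / GCE_HOSTS, then:
test/e2e_node/jenkins/e2e-node-jenkins.sh ~/node-e2e.properties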
29
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/ubuntu-14.04-nvidia-install.sh
generated
vendored
Normal file
@ -0,0 +1,29 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# This script is meant to install Nvidia drivers on Ubuntu 14.04 GCE VMs.
|
||||
# This script is meant to facilitate testing of Nvidia GPU support in Kubernetes.
|
||||
echo "Checking for CUDA and installing."
|
||||
# Check for CUDA and try to install.
|
||||
if ! dpkg-query -W cuda; then
|
||||
curl -O http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1404/x86_64/cuda-repo-ubuntu1404_8.0.61-1_amd64.deb
|
||||
dpkg -i ./cuda-repo-ubuntu1404_8.0.61-1_amd64.deb
|
||||
apt-get update
|
||||
apt-get install cuda -y
|
||||
apt-get install linux-headers-$(uname -r) -y
|
||||
fi
|
||||
## Pre-loads kernel modules
|
||||
nvidia-smi
|
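A small, hedged follow-up check that the CUDA package landed and the driver answers:

dpkg-query -W cuda                         # package is registered
lsmod | grep -q '^nvidia' && nvidia-smi    # modules loaded and the GPU is visible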
121
vendor/k8s.io/kubernetes/test/e2e_node/jenkins/ubuntu-init-docker.yaml
generated
vendored
Normal file
@ -0,0 +1,121 @@
|
||||
#cloud-config
|
||||
|
||||
write_files:
|
||||
- path: /etc/systemd/system/upgrade-docker.service
|
||||
permissions: 0644
|
||||
owner: root
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Upgrade Docker Binaries
|
||||
Requires=network-online.target
|
||||
After=network-online.target docker.service
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
# RemainAfterExit so the service runs exactly once.
|
||||
RemainAfterExit=yes
|
||||
ExecStartPre=/bin/mkdir -p /home/upgrade-docker/bin
|
||||
ExecStartPre=/bin/mount --bind /home/upgrade-docker/bin /home/upgrade-docker/bin
|
||||
ExecStartPre=/bin/mount -o remount,exec /home/upgrade-docker/bin
|
||||
ExecStart=/bin/bash /tmp/upgrade-docker/upgrade.sh
|
||||
ExecStartPost=-/bin/rm -rf /home/upgrade-docker/download
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
|
||||
- path: /tmp/upgrade-docker/upgrade.sh
|
||||
permissions: 0644
|
||||
owner: root
|
||||
content: |
|
||||
# This script reads a GCE metadata key for the user-specified Docker
|
||||
# version, downloads, and replaces the builtin Docker with it.
|
||||
|
||||
set -x
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
# Checks if a Docker binary is the version we want.
|
||||
# $1: Docker binary
|
||||
# $2: Requested version
|
||||
check_installed() {
|
||||
local docker_bin="$1"
|
||||
local requested_version="$2"
|
||||
[[ "$(${docker_bin} --version)" =~ "Docker version ${requested_version}," ]]
|
||||
}
|
||||
|
||||
# $1: Docker version
|
||||
download_and_install_docker() {
|
||||
local requested_version="$1"
|
||||
local download_dir=/home/upgrade-docker/download/docker-"${requested_version}"
|
||||
local install_location=/home/upgrade-docker/bin
|
||||
local docker_tgz="docker-${requested_version}.tgz"
|
||||
|
||||
if [[ "${requested_version}" =~ "rc" ]]; then
|
||||
# RC releases all have the word "rc" in their version
|
||||
# number, e.g., "1.11.1-rc1".
|
||||
download_url="https://test.docker.com/builds/Linux/x86_64/${docker_tgz}"
|
||||
else
|
||||
download_url="https://get.docker.com/builds/Linux/x86_64/${docker_tgz}"
|
||||
fi
|
||||
|
||||
echo "Downloading Docker version ${requested_version} from "\
|
||||
"${download_url} to ${download_dir} ..."
|
||||
|
||||
# Download and install the binaries.
|
||||
mkdir -p "${download_dir}"/binaries
|
||||
/usr/bin/curl -o "${download_dir}/${docker_tgz}" --fail "${download_url}"
|
||||
tar xzf "${download_dir}/${docker_tgz}" -C "${download_dir}"/binaries
|
||||
cp "${download_dir}"/binaries/docker/docker* "${install_location}"
|
||||
mount --bind "${install_location}"/docker /usr/bin/docker
|
||||
mount --bind "${install_location}"/docker-containerd /usr/bin/containerd
|
||||
mount --bind "${install_location}"/docker-containerd-shim /usr/bin/containerd-shim
|
||||
mount --bind "${install_location}"/dockerd /usr/bin/dockerd
|
||||
mount --bind "${install_location}"/docker-proxy /usr/bin/docker-proxy
|
||||
mount --bind "${install_location}"/docker-runc /usr/sbin/runc
|
||||
echo "PATH=/home/upgrade-docker/bin:/sbin:/bin:/usr/sbin:/usr/bin" >> /etc/default/docker
|
||||
}
|
||||
|
||||
# $1: Metadata key
|
||||
get_metadata() {
|
||||
/usr/bin/curl --fail --retry 5 --retry-delay 3 --silent --show-error \
|
||||
-H "X-Google-Metadata-Request: True" \
|
||||
http://metadata.google.internal/computeMetadata/v1/instance/attributes/"$1"
|
||||
}
|
||||
|
||||
main() {
|
||||
# Get the desired Docker version through the following metadata key.
|
||||
local requested_version="$(get_metadata "ubuntu-docker-version")"
|
||||
if [[ -z "${requested_version}" ]]; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Stop the docker daemon during upgrade.
|
||||
systemctl stop docker
|
||||
download_and_install_docker "${requested_version}"
|
||||
|
||||
# Assert that the upgrade was successful.
|
||||
local rc=0
|
||||
check_installed /usr/bin/docker "${requested_version}" || rc=1
|
||||
systemctl start docker && exit $rc
|
||||
}
|
||||
|
||||
main "$@"
|
||||
|
||||
runcmd:
|
||||
- systemctl daemon-reload
|
||||
- systemctl start upgrade-docker.service
|
||||
- mount /tmp /tmp -o remount,exec,suid
|
||||
- usermod -a -G docker jenkins
|
||||
- mkdir -p /var/lib/kubelet
|
||||
- mkdir -p /home/kubernetes/containerized_mounter/rootfs
|
||||
- mount --bind /home/kubernetes/containerized_mounter/ /home/kubernetes/containerized_mounter/
|
||||
- mount -o remount,exec /home/kubernetes/containerized_mounter/
|
||||
- wget https://dl.k8s.io/gci-mounter/mounter.tar -O /tmp/mounter.tar
|
||||
- tar xvf /tmp/mounter.tar -C /home/kubernetes/containerized_mounter/rootfs
|
||||
- mkdir -p /home/kubernetes/containerized_mounter/rootfs/var/lib/kubelet
|
||||
- mount --rbind /var/lib/kubelet /home/kubernetes/containerized_mounter/rootfs/var/lib/kubelet
|
||||
- mount --make-rshared /home/kubernetes/containerized_mounter/rootfs/var/lib/kubelet
|
||||
- mount --bind /proc /home/kubernetes/containerized_mounter/rootfs/proc
|
||||
- mount --bind /dev /home/kubernetes/containerized_mounter/rootfs/dev
|
||||
- rm /tmp/mounter.tar
|
200
vendor/k8s.io/kubernetes/test/e2e_node/kubelet_test.go
generated
vendored
Normal file
@ -0,0 +1,200 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = framework.KubeDescribe("Kubelet", func() {
|
||||
f := framework.NewDefaultFramework("kubelet-test")
|
||||
var podClient *framework.PodClient
|
||||
BeforeEach(func() {
|
||||
podClient = f.PodClient()
|
||||
})
|
||||
Context("when scheduling a busybox command in a pod", func() {
|
||||
podName := "busybox-scheduling-" + string(uuid.NewUUID())
|
||||
framework.ConformanceIt("it should print the output to logs", func() {
|
||||
podClient.CreateSync(&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
// Don't restart the Pod since it is expected to exit
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: busyboxImage,
|
||||
Name: podName,
|
||||
Command: []string{"sh", "-c", "echo 'Hello World' ; sleep 240"},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
Eventually(func() string {
|
||||
sinceTime := metav1.NewTime(time.Now().Add(time.Duration(-1 * time.Hour)))
|
||||
rc, err := podClient.GetLogs(podName, &v1.PodLogOptions{SinceTime: &sinceTime}).Stream()
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
defer rc.Close()
|
||||
buf := new(bytes.Buffer)
|
||||
buf.ReadFrom(rc)
|
||||
return buf.String()
|
||||
}, time.Minute, time.Second*4).Should(Equal("Hello World\n"))
|
||||
})
|
||||
})
|
||||
Context("when scheduling a busybox command that always fails in a pod", func() {
|
||||
var podName string
|
||||
|
||||
BeforeEach(func() {
|
||||
podName = "bin-false" + string(uuid.NewUUID())
|
||||
podClient.Create(&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
// Don't restart the Pod since it is expected to exit
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: busyboxImage,
|
||||
Name: podName,
|
||||
Command: []string{"/bin/false"},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
It("should have an error terminated reason", func() {
|
||||
Eventually(func() error {
|
||||
podData, err := podClient.Get(podName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(podData.Status.ContainerStatuses) != 1 {
|
||||
return fmt.Errorf("expected only one container in the pod %q", podName)
|
||||
}
|
||||
contTerminatedState := podData.Status.ContainerStatuses[0].State.Terminated
|
||||
if contTerminatedState == nil {
|
||||
return fmt.Errorf("expected state to be terminated. Got pod status: %+v", podData.Status)
|
||||
}
|
||||
if contTerminatedState.Reason != "Error" {
|
||||
return fmt.Errorf("expected terminated state reason to be error. Got %+v", contTerminatedState)
|
||||
}
|
||||
return nil
|
||||
}, time.Minute, time.Second*4).Should(BeNil())
|
||||
})
|
||||
|
||||
It("should be possible to delete", func() {
|
||||
err := podClient.Delete(podName, &metav1.DeleteOptions{})
|
||||
Expect(err).To(BeNil(), fmt.Sprintf("Error deleting Pod %v", err))
|
||||
})
|
||||
})
|
||||
Context("when scheduling a busybox Pod with hostAliases", func() {
|
||||
podName := "busybox-host-aliases" + string(uuid.NewUUID())
|
||||
|
||||
It("it should write entries to /etc/hosts", func() {
|
||||
podClient.CreateSync(&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
// Don't restart the Pod since it is expected to exit
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: busyboxImage,
|
||||
Name: podName,
|
||||
Command: []string{"/bin/sh", "-c", "cat /etc/hosts; sleep 6000"},
|
||||
},
|
||||
},
|
||||
HostAliases: []v1.HostAlias{
|
||||
{
|
||||
IP: "123.45.67.89",
|
||||
Hostnames: []string{"foo", "bar"},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
Eventually(func() error {
|
||||
rc, err := podClient.GetLogs(podName, &v1.PodLogOptions{}).Stream()
|
||||
defer rc.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
buf := new(bytes.Buffer)
|
||||
buf.ReadFrom(rc)
|
||||
hostsFileContent := buf.String()
|
||||
|
||||
if !strings.Contains(hostsFileContent, "123.45.67.89\tfoo") || !strings.Contains(hostsFileContent, "123.45.67.89\tbar") {
|
||||
return fmt.Errorf("expected hosts file to contain entries from HostAliases. Got:\n%+v", hostsFileContent)
|
||||
}
|
||||
|
||||
return nil
|
||||
}, time.Minute, time.Second*4).Should(BeNil())
|
||||
})
|
||||
})
|
||||
Context("when scheduling a read only busybox container", func() {
|
||||
podName := "busybox-readonly-fs" + string(uuid.NewUUID())
|
||||
framework.ConformanceIt("it should not write to root filesystem", func() {
|
||||
isReadOnly := true
|
||||
podClient.CreateSync(&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
// Don't restart the Pod since it is expected to exit
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: busyboxImage,
|
||||
Name: podName,
|
||||
Command: []string{"/bin/sh", "-c", "echo test > /file; sleep 240"},
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
ReadOnlyRootFilesystem: &isReadOnly,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
Eventually(func() string {
|
||||
rc, err := podClient.GetLogs(podName, &v1.PodLogOptions{}).Stream()
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
defer rc.Close()
|
||||
buf := new(bytes.Buffer)
|
||||
buf.ReadFrom(rc)
|
||||
return buf.String()
|
||||
}, time.Minute, time.Second*4).Should(Equal("/bin/sh: can't create /file: Read-only file system\n"))
|
||||
})
|
||||
})
|
||||
})
|
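These Kubelet cases are ordinary Ginkgo specs, so they can be run in isolation against a local node. A sketch using the node e2e make target; the FOCUS/SKIP variables are assumptions about the usual node-e2e workflow rather than anything defined in this file.

# Run only the Kubelet specs from this file, skipping flaky and serial ones.
make test-e2e-node FOCUS="Kubelet" SKIP="\[Flaky\]|\[Serial\]"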
153
vendor/k8s.io/kubernetes/test/e2e_node/lifecycle_hook_test.go
generated
vendored
Normal file
@ -0,0 +1,153 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
|
||||
f := framework.NewDefaultFramework("container-lifecycle-hook")
|
||||
var podClient *framework.PodClient
|
||||
const (
|
||||
podCheckInterval = 1 * time.Second
|
||||
postStartWaitTimeout = 2 * time.Minute
|
||||
preStopWaitTimeout = 30 * time.Second
|
||||
)
|
||||
Context("when create a pod with lifecycle hook", func() {
|
||||
var targetIP string
|
||||
podHandleHookRequest := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod-handle-http-request",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "pod-handle-http-request",
|
||||
Image: imageutils.GetE2EImage(imageutils.Netexec),
|
||||
Ports: []v1.ContainerPort{
|
||||
{
|
||||
ContainerPort: 8080,
|
||||
Protocol: v1.ProtocolTCP,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
BeforeEach(func() {
|
||||
podClient = f.PodClient()
|
||||
By("create the container to handle the HTTPGet hook request.")
|
||||
newPod := podClient.CreateSync(podHandleHookRequest)
|
||||
targetIP = newPod.Status.PodIP
|
||||
})
|
||||
testPodWithHook := func(podWithHook *v1.Pod) {
|
||||
By("create the pod with lifecycle hook")
|
||||
podClient.CreateSync(podWithHook)
|
||||
if podWithHook.Spec.Containers[0].Lifecycle.PostStart != nil {
|
||||
By("check poststart hook")
|
||||
Eventually(func() error {
|
||||
return podClient.MatchContainerOutput(podHandleHookRequest.Name, podHandleHookRequest.Spec.Containers[0].Name,
|
||||
`GET /echo\?msg=poststart`)
|
||||
}, postStartWaitTimeout, podCheckInterval).Should(BeNil())
|
||||
}
|
||||
By("delete the pod with lifecycle hook")
|
||||
podClient.DeleteSync(podWithHook.Name, metav1.NewDeleteOptions(15), framework.DefaultPodDeletionTimeout)
|
||||
if podWithHook.Spec.Containers[0].Lifecycle.PreStop != nil {
|
||||
By("check prestop hook")
|
||||
Eventually(func() error {
|
||||
return podClient.MatchContainerOutput(podHandleHookRequest.Name, podHandleHookRequest.Spec.Containers[0].Name,
|
||||
`GET /echo\?msg=prestop`)
|
||||
}, preStopWaitTimeout, podCheckInterval).Should(BeNil())
|
||||
}
|
||||
}
|
||||
framework.ConformanceIt("should execute poststart exec hook properly", func() {
|
||||
lifecycle := &v1.Lifecycle{
|
||||
PostStart: &v1.Handler{
|
||||
Exec: &v1.ExecAction{
|
||||
Command: []string{"sh", "-c", "curl http://" + targetIP + ":8080/echo?msg=poststart"},
|
||||
},
|
||||
},
|
||||
}
|
||||
podWithHook := getPodWithHook("pod-with-poststart-exec-hook", imageutils.GetE2EImage(imageutils.Hostexec), lifecycle)
|
||||
testPodWithHook(podWithHook)
|
||||
})
|
||||
framework.ConformanceIt("should execute prestop exec hook properly", func() {
|
||||
lifecycle := &v1.Lifecycle{
|
||||
PreStop: &v1.Handler{
|
||||
Exec: &v1.ExecAction{
|
||||
Command: []string{"sh", "-c", "curl http://" + targetIP + ":8080/echo?msg=prestop"},
|
||||
},
|
||||
},
|
||||
}
|
||||
podWithHook := getPodWithHook("pod-with-prestop-exec-hook", imageutils.GetE2EImage(imageutils.Hostexec), lifecycle)
|
||||
testPodWithHook(podWithHook)
|
||||
})
|
||||
framework.ConformanceIt("should execute poststart http hook properly", func() {
|
||||
lifecycle := &v1.Lifecycle{
|
||||
PostStart: &v1.Handler{
|
||||
HTTPGet: &v1.HTTPGetAction{
|
||||
Path: "/echo?msg=poststart",
|
||||
Host: targetIP,
|
||||
Port: intstr.FromInt(8080),
|
||||
},
|
||||
},
|
||||
}
|
||||
podWithHook := getPodWithHook("pod-with-poststart-http-hook", framework.GetPauseImageNameForHostArch(), lifecycle)
|
||||
testPodWithHook(podWithHook)
|
||||
})
|
||||
framework.ConformanceIt("should execute prestop http hook properly", func() {
|
||||
lifecycle := &v1.Lifecycle{
|
||||
PreStop: &v1.Handler{
|
||||
HTTPGet: &v1.HTTPGetAction{
|
||||
Path: "/echo?msg=prestop",
|
||||
Host: targetIP,
|
||||
Port: intstr.FromInt(8080),
|
||||
},
|
||||
},
|
||||
}
|
||||
podWithHook := getPodWithHook("pod-with-prestop-http-hook", framework.GetPauseImageNameForHostArch(), lifecycle)
|
||||
testPodWithHook(podWithHook)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
func getPodWithHook(name string, image string, lifecycle *v1.Lifecycle) *v1.Pod {
|
||||
return &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: name,
|
||||
Image: image,
|
||||
Lifecycle: lifecycle,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
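Both the exec and HTTP hook handlers above simply hit the netexec helper pod's /echo endpoint, and the test then looks for the matching "GET /echo?msg=..." line in that pod's log via MatchContainerOutput. The request a hook issues boils down to:

# TARGET_IP is the pod IP of pod-handle-http-request; the hook just performs this GET.
curl "http://${TARGET_IP}:8080/echo?msg=poststart"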
124
vendor/k8s.io/kubernetes/test/e2e_node/log_path_test.go
generated
vendored
Normal file
@ -0,0 +1,124 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/pkg/kubelet"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
)
|
||||
|
||||
const (
|
||||
logString = "This is the expected log content of this node e2e test"
|
||||
|
||||
logPodName = "logger-pod"
|
||||
logContName = "logger-container"
|
||||
checkPodName = "checker-pod"
|
||||
checkContName = "checker-container"
|
||||
)
|
||||
|
||||
var _ = framework.KubeDescribe("ContainerLogPath", func() {
|
||||
f := framework.NewDefaultFramework("kubelet-container-log-path")
|
||||
Describe("Pod with a container", func() {
|
||||
Context("printed log to stdout", func() {
|
||||
It("should print log to correct log path", func() {
|
||||
podClient := f.PodClient()
|
||||
ns := f.Namespace.Name
|
||||
|
||||
logDirVolumeName := "log-dir-vol"
|
||||
logDir := kubelet.ContainerLogsDir
|
||||
|
||||
logPod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: logPodName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
// this pod is expected to exit successfully
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: busyboxImage,
|
||||
Name: logContName,
|
||||
Command: []string{"sh", "-c", "echo " + logString},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
podClient.Create(logPod)
|
||||
err := framework.WaitForPodSuccessInNamespace(f.ClientSet, logPodName, ns)
|
||||
framework.ExpectNoError(err, "Failed waiting for pod: %s to enter success state", logPodName)
|
||||
|
||||
// get containerID from created Pod
|
||||
createdLogPod, err := podClient.Get(logPodName, metav1.GetOptions{})
|
||||
logConID := kubecontainer.ParseContainerID(createdLogPod.Status.ContainerStatuses[0].ContainerID)
|
||||
framework.ExpectNoError(err, "Failed to get pod: %s", logPodName)
|
||||
|
||||
expectedlogFile := logDir + "/" + logPodName + "_" + ns + "_" + logContName + "-" + logConID.ID + ".log"
|
||||
|
||||
hostPathType := new(v1.HostPathType)
|
||||
*hostPathType = v1.HostPathType(string(v1.HostPathFileOrCreate))
|
||||
|
||||
checkPod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: checkPodName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
// this pod is expected to exit successfully
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: busyboxImage,
|
||||
Name: checkContName,
|
||||
// If we find the expected log file and it contains the right content, exit 0
|
||||
// else, keep checking until test timeout
|
||||
Command: []string{"sh", "-c", "while true; do if [ -e " + expectedlogFile + " ] && grep -q " + logString + " " + expectedlogFile + "; then exit 0; fi; sleep 1; done"},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: logDirVolumeName,
|
||||
// mount ContainerLogsDir to the same path in container
|
||||
MountPath: expectedlogFile,
|
||||
ReadOnly: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: logDirVolumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{
|
||||
Path: expectedlogFile,
|
||||
Type: hostPathType,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
podClient.Create(checkPod)
|
||||
err = framework.WaitForPodSuccessInNamespace(f.ClientSet, checkPodName, ns)
|
||||
framework.ExpectNoError(err, "Failed waiting for pod: %s to enter success state", checkPodName)
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
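The checker pod relies on the kubelet's per-container log files; the path it reconstructs has the shape below, so the same check can be done by hand on a node, assuming the default /var/log/containers location that kubelet.ContainerLogsDir points to.

# <ContainerLogsDir>/<podName>_<namespace>_<containerName>-<containerID>.log
ls /var/log/containers/logger-pod_*_logger-container-*.log
grep "This is the expected log content of this node e2e test" \
  /var/log/containers/logger-pod_*_logger-container-*.log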
287
vendor/k8s.io/kubernetes/test/e2e_node/memory_eviction_test.go
generated
vendored
Normal file
@ -0,0 +1,287 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
nodeutil "k8s.io/kubernetes/pkg/api/v1/node"
|
||||
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
// Eviction Policy is described here:
|
||||
// https://github.com/kubernetes/kubernetes/blob/master/docs/proposals/kubelet-eviction.md
|
||||
|
||||
var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", func() {
|
||||
var (
|
||||
evictionHard = map[string]string{"memory.available": "40%"}
|
||||
)
|
||||
|
||||
f := framework.NewDefaultFramework("eviction-test")
|
||||
|
||||
// This is a dummy context to wrap the outer AfterEach, which will run after the inner AfterEach.
|
||||
// We want to list all of the node and pod events, including any that occur while waiting for
|
||||
// memory pressure reduction, even if we time out while waiting.
|
||||
Context("", func() {
|
||||
|
||||
AfterEach(func() {
|
||||
// Print events
|
||||
logNodeEvents(f)
|
||||
logPodEvents(f)
|
||||
})
|
||||
Context("", func() {
|
||||
tempSetCurrentKubeletConfig(f, func(c *kubeletconfig.KubeletConfiguration) {
|
||||
c.EvictionHard = evictionHard
|
||||
})
|
||||
|
||||
Context("when there is memory pressure", func() {
|
||||
AfterEach(func() {
|
||||
// Wait for the memory pressure condition to disappear from the node status before continuing.
|
||||
By("waiting for the memory pressure condition on the node to disappear before ending the test.")
|
||||
Eventually(func() error {
|
||||
nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("tried to get node list but got error: %v", err)
|
||||
}
|
||||
// Assuming that there is only one node, because this is a node e2e test.
|
||||
if len(nodeList.Items) != 1 {
|
||||
return fmt.Errorf("expected 1 node, but see %d. List: %v", len(nodeList.Items), nodeList.Items)
|
||||
}
|
||||
node := nodeList.Items[0]
|
||||
_, pressure := nodeutil.GetNodeCondition(&node.Status, v1.NodeMemoryPressure)
|
||||
if pressure != nil && pressure.Status == v1.ConditionTrue {
|
||||
return fmt.Errorf("node is still reporting memory pressure condition: %s", pressure)
|
||||
}
|
||||
return nil
|
||||
}, 5*time.Minute, 15*time.Second).Should(BeNil())
|
||||
|
||||
// Check available memory after condition disappears, just in case:
|
||||
// Wait for available memory to recover to a reasonable level before ending the test.
|
||||
// This helps prevent interference with tests that start immediately after this one.
|
||||
By("waiting for available memory to decrease to a reasonable level before ending the test.")
|
||||
Eventually(func() error {
|
||||
summary, err := getNodeSummary()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if summary.Node.Memory.AvailableBytes == nil {
|
||||
return fmt.Errorf("summary.Node.Memory.AvailableBytes was nil, cannot get memory stats.")
|
||||
}
|
||||
if summary.Node.Memory.WorkingSetBytes == nil {
|
||||
return fmt.Errorf("summary.Node.Memory.WorkingSetBytes was nil, cannot get memory stats.")
|
||||
}
|
||||
avail := *summary.Node.Memory.AvailableBytes
|
||||
wset := *summary.Node.Memory.WorkingSetBytes
|
||||
|
||||
// memory limit = avail + wset
|
||||
limit := avail + wset
|
||||
halflimit := limit / 2
|
||||
|
||||
// Wait for at least half of memory limit to be available
|
||||
if avail >= halflimit {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("current available memory is: %d bytes. Expected at least %d bytes available.", avail, halflimit)
|
||||
}, 5*time.Minute, 15*time.Second).Should(BeNil())
|
||||
|
||||
// TODO(mtaufen): 5 minute wait to stop flaky test bleeding while we figure out what is actually going on.
|
||||
// If related to pressure transition period in eviction manager, probably only need to wait
|
||||
// just over 30s because that is the transition period set for node e2e tests. But since we
|
||||
// know 5 min works and we don't know if transition period is the problem, wait 5 min for now.
|
||||
time.Sleep(5 * time.Minute)
|
||||
|
||||
// Finally, try starting a new pod and wait for it to be scheduled and running.
|
||||
// This is the final check to try to prevent interference with subsequent tests.
|
||||
podName := "admit-best-effort-pod"
|
||||
f.PodClient().CreateSync(&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: framework.GetPauseImageNameForHostArch(),
|
||||
Name: podName,
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
It("should evict pods in the correct order (besteffort first, then burstable, then guaranteed)", func() {
|
||||
By("creating a guaranteed pod, a burstable pod, and a besteffort pod.")
|
||||
|
||||
// A pod is guaranteed only when requests and limits are specified for all the containers and they are equal.
|
||||
guaranteed := getMemhogPod("guaranteed-pod", "guaranteed", v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("100m"),
|
||||
v1.ResourceMemory: resource.MustParse("100Mi"),
|
||||
},
|
||||
Limits: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("100m"),
|
||||
v1.ResourceMemory: resource.MustParse("100Mi"),
|
||||
}})
|
||||
guaranteed = f.PodClient().CreateSync(guaranteed)
|
||||
glog.Infof("pod created with name: %s", guaranteed.Name)
|
||||
|
||||
// A pod is burstable if limits and requests do not match across all containers.
|
||||
burstable := getMemhogPod("burstable-pod", "burstable", v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("100m"),
|
||||
v1.ResourceMemory: resource.MustParse("100Mi"),
|
||||
}})
|
||||
burstable = f.PodClient().CreateSync(burstable)
|
||||
glog.Infof("pod created with name: %s", burstable.Name)
|
||||
|
||||
// A pod is besteffort if none of its containers has specified any requests or limits.
|
||||
besteffort := getMemhogPod("besteffort-pod", "besteffort", v1.ResourceRequirements{})
|
||||
besteffort = f.PodClient().CreateSync(besteffort)
|
||||
glog.Infof("pod created with name: %s", besteffort.Name)
|
||||
|
||||
// We poll until timeout or all pods are killed.
|
||||
// Inside the func, we check that all pods are in a valid phase with
|
||||
// respect to the eviction order of best effort, then burstable, then guaranteed.
|
||||
By("polling the Status.Phase of each pod and checking for violations of the eviction order.")
|
||||
Eventually(func() error {
|
||||
|
||||
gteed, gtErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(guaranteed.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(gtErr, fmt.Sprintf("getting pod %s", guaranteed.Name))
|
||||
gteedPh := gteed.Status.Phase
|
||||
|
||||
burst, buErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(burstable.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(buErr, fmt.Sprintf("getting pod %s", burstable.Name))
|
||||
burstPh := burst.Status.Phase
|
||||
|
||||
best, beErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(besteffort.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(beErr, fmt.Sprintf("getting pod %s", besteffort.Name))
|
||||
bestPh := best.Status.Phase
|
||||
|
||||
glog.Infof("pod phase: guaranteed: %v, burstable: %v, besteffort: %v", gteedPh, burstPh, bestPh)
|
||||
|
||||
// NOTE/TODO(mtaufen): This should help us debug why burstable appears to fail before besteffort in some
|
||||
// scenarios. We have seen some evidence that the eviction manager has in fact done the
|
||||
// right thing and evicted the besteffort first, and attempted to change the besteffort phase
|
||||
// to "Failed" when it evicts it, but that for some reason the test isn't seeing the updated
|
||||
// phase. I'm trying to confirm or deny this.
|
||||
// The eviction manager starts trying to evict things when the node comes under memory
|
||||
// pressure, and the eviction manager reports this information in the pressure condition. If we
|
||||
// see the eviction manager reporting a pressure condition for a while without the besteffort failing,
|
||||
// and we see that the manager did in fact evict the besteffort (this should be in the Kubelet log), we
|
||||
// will have more reason to believe the phase is out of date.
|
||||
nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
glog.Errorf("tried to get node list but got error: %v", err)
|
||||
}
|
||||
if len(nodeList.Items) != 1 {
|
||||
glog.Errorf("expected 1 node, but see %d. List: %v", len(nodeList.Items), nodeList.Items)
|
||||
}
|
||||
node := nodeList.Items[0]
|
||||
_, pressure := nodeutil.GetNodeCondition(&node.Status, v1.NodeMemoryPressure)
|
||||
glog.Infof("node pressure condition: %s", pressure)
|
||||
|
||||
// NOTE/TODO(mtaufen): Also log (at least temporarily) the actual memory consumption on the node.
|
||||
// I used this to plot memory usage from a successful test run and it looks the
|
||||
// way I would expect. I want to see what the plot from a flake looks like.
|
||||
summary, err := getNodeSummary()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if summary.Node.Memory.WorkingSetBytes != nil {
|
||||
wset := *summary.Node.Memory.WorkingSetBytes
|
||||
glog.Infof("Node's working set is (bytes): %v", wset)
|
||||
|
||||
}
|
||||
|
||||
if bestPh == v1.PodRunning {
|
||||
Expect(burstPh).NotTo(Equal(v1.PodFailed), "burstable pod failed before best effort pod")
|
||||
Expect(gteedPh).NotTo(Equal(v1.PodFailed), "guaranteed pod failed before best effort pod")
|
||||
} else if burstPh == v1.PodRunning {
|
||||
Expect(gteedPh).NotTo(Equal(v1.PodFailed), "guaranteed pod failed before burstable pod")
|
||||
}
|
||||
|
||||
// When both besteffort and burstable have been evicted, the test has completed.
|
||||
if bestPh == v1.PodFailed && burstPh == v1.PodFailed {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("besteffort and burstable have not yet both been evicted.")
|
||||
|
||||
}, 60*time.Minute, 5*time.Second).Should(BeNil())
|
||||
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
})
|
||||
|
||||
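// getMemhogPod returns a pod spec named podName with a single stress container (ctnName)
// that gradually allocates memory, using the given resource requirements.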
func getMemhogPod(podName string, ctnName string, res v1.ResourceRequirements) *v1.Pod {
|
||||
env := []v1.EnvVar{
|
||||
{
|
||||
Name: "MEMORY_LIMIT",
|
||||
ValueFrom: &v1.EnvVarSource{
|
||||
ResourceFieldRef: &v1.ResourceFieldSelector{
|
||||
Resource: "limits.memory",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// If there is a limit specified, pass 80% of it for -mem-total, otherwise use the downward API
|
||||
// to pass limits.memory, which will be the total memory available.
|
||||
// This helps prevent a guaranteed pod from triggering an OOM kill due to its low memory limit,
|
||||
// which will cause the test to fail inappropriately.
|
||||
var memLimit string
|
||||
if limit, ok := res.Limits[v1.ResourceMemory]; ok {
|
||||
memLimit = strconv.Itoa(int(
|
||||
float64(limit.Value()) * 0.8))
|
||||
} else {
|
||||
memLimit = "$(MEMORY_LIMIT)"
|
||||
}
|
||||
|
||||
return &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: ctnName,
|
||||
Image: "gcr.io/google-containers/stress:v1",
|
||||
ImagePullPolicy: "Always",
|
||||
Env: env,
|
||||
// 60 min timeout * 60s / tick per 10s = 360 ticks before timeout => ~11.11Mi/tick
|
||||
// to fill ~4Gi of memory, so initial ballpark 12Mi/tick.
|
||||
// We might see flakes due to timeout if the total memory on the nodes increases.
|
||||
Args: []string{"-mem-alloc-size", "12Mi", "-mem-alloc-sleep", "10s", "-mem-total", memLimit},
|
||||
Resources: res,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
191
vendor/k8s.io/kubernetes/test/e2e_node/mirror_pod_test.go
generated
vendored
Normal file
@ -0,0 +1,191 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
goerrors "errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
|
||||
var _ = framework.KubeDescribe("MirrorPod", func() {
|
||||
f := framework.NewDefaultFramework("mirror-pod")
|
||||
Context("when create a mirror pod ", func() {
|
||||
var ns, manifestPath, staticPodName, mirrorPodName string
|
||||
BeforeEach(func() {
|
||||
ns = f.Namespace.Name
|
||||
staticPodName = "static-pod-" + string(uuid.NewUUID())
|
||||
mirrorPodName = staticPodName + "-" + framework.TestContext.NodeName
|
||||
|
||||
manifestPath = framework.TestContext.KubeletConfig.PodManifestPath
|
||||
|
||||
By("create the static pod")
|
||||
err := createStaticPod(manifestPath, staticPodName, ns,
|
||||
imageutils.GetE2EImage(imageutils.NginxSlim), v1.RestartPolicyAlways)
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
|
||||
By("wait for the mirror pod to be running")
|
||||
Eventually(func() error {
|
||||
return checkMirrorPodRunning(f.ClientSet, mirrorPodName, ns)
|
||||
}, 2*time.Minute, time.Second*4).Should(BeNil())
|
||||
})
|
||||
framework.ConformanceIt("should be updated when static pod updated", func() {
|
||||
By("get mirror pod uid")
|
||||
pod, err := f.ClientSet.CoreV1().Pods(ns).Get(mirrorPodName, metav1.GetOptions{})
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
uid := pod.UID
|
||||
|
||||
By("update the static pod container image")
|
||||
image := framework.GetPauseImageNameForHostArch()
|
||||
err = createStaticPod(manifestPath, staticPodName, ns, image, v1.RestartPolicyAlways)
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
|
||||
By("wait for the mirror pod to be updated")
|
||||
Eventually(func() error {
|
||||
return checkMirrorPodRecreatedAndRunning(f.ClientSet, mirrorPodName, ns, uid)
|
||||
}, 2*time.Minute, time.Second*4).Should(BeNil())
|
||||
|
||||
By("check the mirror pod container image is updated")
|
||||
pod, err = f.ClientSet.CoreV1().Pods(ns).Get(mirrorPodName, metav1.GetOptions{})
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
Expect(len(pod.Spec.Containers)).Should(Equal(1))
|
||||
Expect(pod.Spec.Containers[0].Image).Should(Equal(image))
|
||||
})
|
||||
framework.ConformanceIt("should be recreated when mirror pod gracefully deleted", func() {
|
||||
By("get mirror pod uid")
|
||||
pod, err := f.ClientSet.CoreV1().Pods(ns).Get(mirrorPodName, metav1.GetOptions{})
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
uid := pod.UID
|
||||
|
||||
By("delete the mirror pod with grace period 30s")
|
||||
err = f.ClientSet.CoreV1().Pods(ns).Delete(mirrorPodName, metav1.NewDeleteOptions(30))
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
|
||||
By("wait for the mirror pod to be recreated")
|
||||
Eventually(func() error {
|
||||
return checkMirrorPodRecreatedAndRunning(f.ClientSet, mirrorPodName, ns, uid)
|
||||
}, 2*time.Minute, time.Second*4).Should(BeNil())
|
||||
})
|
||||
framework.ConformanceIt("should be recreated when mirror pod forcibly deleted", func() {
|
||||
By("get mirror pod uid")
|
||||
pod, err := f.ClientSet.CoreV1().Pods(ns).Get(mirrorPodName, metav1.GetOptions{})
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
uid := pod.UID
|
||||
|
||||
By("delete the mirror pod with grace period 0s")
|
||||
err = f.ClientSet.CoreV1().Pods(ns).Delete(mirrorPodName, metav1.NewDeleteOptions(0))
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
|
||||
By("wait for the mirror pod to be recreated")
|
||||
Eventually(func() error {
|
||||
return checkMirrorPodRecreatedAndRunning(f.ClientSet, mirrorPodName, ns, uid)
|
||||
}, 2*time.Minute, time.Second*4).Should(BeNil())
|
||||
})
|
||||
AfterEach(func() {
|
||||
By("delete the static pod")
|
||||
err := deleteStaticPod(manifestPath, staticPodName, ns)
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
|
||||
By("wait for the mirror pod to disappear")
|
||||
Eventually(func() error {
|
||||
return checkMirrorPodDisappear(f.ClientSet, mirrorPodName, ns)
|
||||
}, 2*time.Minute, time.Second*4).Should(BeNil())
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
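// staticPodPath returns the manifest file path for the static pod with the given name and namespace under dir.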
func staticPodPath(dir, name, namespace string) string {
|
||||
return filepath.Join(dir, namespace+"-"+name+".yaml")
|
||||
}
|
||||
|
||||
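// createStaticPod writes a static pod manifest with the given name, namespace, image and restart policy into dir.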
func createStaticPod(dir, name, namespace, image string, restart v1.RestartPolicy) error {
|
||||
template := `
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: %s
|
||||
namespace: %s
|
||||
spec:
|
||||
containers:
|
||||
- name: test
|
||||
image: %s
|
||||
restartPolicy: %s
|
||||
`
|
||||
file := staticPodPath(dir, name, namespace)
|
||||
podYaml := fmt.Sprintf(template, name, namespace, image, string(restart))
|
||||
|
||||
f, err := os.OpenFile(file, os.O_RDWR|os.O_TRUNC|os.O_CREATE, 0666)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
_, err = f.WriteString(podYaml)
|
||||
return err
|
||||
}
|
||||
|
||||
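// deleteStaticPod removes the static pod manifest for the given name and namespace from dir.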
func deleteStaticPod(dir, name, namespace string) error {
|
||||
file := staticPodPath(dir, name, namespace)
|
||||
return os.Remove(file)
|
||||
}
|
||||
|
||||
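// checkMirrorPodDisappear returns nil once the mirror pod is no longer found in the API server.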
func checkMirrorPodDisappear(cl clientset.Interface, name, namespace string) error {
|
||||
_, err := cl.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{})
|
||||
if errors.IsNotFound(err) {
|
||||
return nil
|
||||
}
|
||||
return goerrors.New("pod not disappear")
|
||||
}
|
||||
|
||||
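// checkMirrorPodRunning returns nil if the mirror pod exists and is in the Running phase.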
func checkMirrorPodRunning(cl clientset.Interface, name, namespace string) error {
|
||||
pod, err := cl.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("expected the mirror pod %q to appear: %v", name, err)
|
||||
}
|
||||
if pod.Status.Phase != v1.PodRunning {
|
||||
return fmt.Errorf("expected the mirror pod %q to be running, got %q", name, pod.Status.Phase)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
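// checkMirrorPodRecreatedAndRunning returns nil if the mirror pod has been recreated, i.e. its UID
// differs from oUID, and it is in the Running phase.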
func checkMirrorPodRecreatedAndRunning(cl clientset.Interface, name, namespace string, oUID types.UID) error {
|
||||
pod, err := cl.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("expected the mirror pod %q to appear: %v", name, err)
|
||||
}
|
||||
if pod.UID == oUID {
|
||||
return fmt.Errorf("expected the uid of mirror pod %q to be changed, got %q", name, pod.UID)
|
||||
}
|
||||
if pod.Status.Phase != v1.PodRunning {
|
||||
return fmt.Errorf("expected the mirror pod %q to be running, got %q", name, pod.Status.Phase)
|
||||
}
|
||||
return nil
|
||||
}
|
248
vendor/k8s.io/kubernetes/test/e2e_node/node_container_manager_test.go
generated
vendored
Normal file
@ -0,0 +1,248 @@
|
||||
// +build linux
|
||||
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
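// setDesiredConfiguration enables node allocatable enforcement on the given kubelet configuration,
// reserving 100m CPU and 100Mi memory each for kube and system daemons.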
func setDesiredConfiguration(initialConfig *kubeletconfig.KubeletConfiguration) {
|
||||
initialConfig.EnforceNodeAllocatable = []string{"pods", "kube-reserved", "system-reserved"}
|
||||
initialConfig.SystemReserved = map[string]string{
|
||||
string(v1.ResourceCPU): "100m",
|
||||
string(v1.ResourceMemory): "100Mi",
|
||||
}
|
||||
initialConfig.KubeReserved = map[string]string{
|
||||
string(v1.ResourceCPU): "100m",
|
||||
string(v1.ResourceMemory): "100Mi",
|
||||
}
|
||||
initialConfig.EvictionHard = map[string]string{"memory.available": "100Mi"}
|
||||
// Necessary for allocatable cgroup creation.
|
||||
initialConfig.CgroupsPerQOS = true
|
||||
initialConfig.KubeReservedCgroup = kubeReservedCgroup
|
||||
initialConfig.SystemReservedCgroup = systemReservedCgroup
|
||||
}
|
||||
|
||||
var _ = framework.KubeDescribe("Node Container Manager [Serial]", func() {
|
||||
f := framework.NewDefaultFramework("node-container-manager")
|
||||
Describe("Validate Node Allocatable", func() {
|
||||
It("set's up the node and runs the test", func() {
|
||||
framework.ExpectNoError(runTest(f))
|
||||
})
|
||||
})
|
||||
|
||||
})
|
||||
|
||||
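// expectFileValToEqual reads an integer from filePath and returns an error unless it is within delta of expectedValue.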
func expectFileValToEqual(filePath string, expectedValue, delta int64) error {
|
||||
out, err := ioutil.ReadFile(filePath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read file %q", filePath)
|
||||
}
|
||||
actual, err := strconv.ParseInt(strings.TrimSpace(string(out)), 10, 64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse output %v", err)
|
||||
}
|
||||
|
||||
// Ensure that values are within a delta range to work around rounding errors.
|
||||
if (actual < (expectedValue - delta)) || (actual > (expectedValue + delta)) {
|
||||
return fmt.Errorf("Expected value at %q to be between %d and %d. Got %d", filePath, (expectedValue - delta), (expectedValue + delta), actual)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
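// getAllocatableLimits returns the capacity minus the given cpu and memory reservations.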
func getAllocatableLimits(cpu, memory string, capacity v1.ResourceList) (*resource.Quantity, *resource.Quantity) {
|
||||
var allocatableCPU, allocatableMemory *resource.Quantity
|
||||
// Total cpu reservation is 200m.
|
||||
for k, v := range capacity {
|
||||
if k == v1.ResourceCPU {
|
||||
allocatableCPU = v.Copy()
|
||||
allocatableCPU.Sub(resource.MustParse(cpu))
|
||||
}
|
||||
if k == v1.ResourceMemory {
|
||||
allocatableMemory = v.Copy()
|
||||
allocatableMemory.Sub(resource.MustParse(memory))
|
||||
}
|
||||
}
|
||||
return allocatableCPU, allocatableMemory
|
||||
}
|
||||
|
||||
const (
|
||||
kubeReservedCgroup = "/kube_reserved"
|
||||
systemReservedCgroup = "/system_reserved"
|
||||
)
|
||||
|
||||
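// createIfNotExists creates the cgroup described by cgroupConfig if it does not already exist.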
func createIfNotExists(cm cm.CgroupManager, cgroupConfig *cm.CgroupConfig) error {
|
||||
if !cm.Exists(cgroupConfig.Name) {
|
||||
if err := cm.Create(cgroupConfig); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
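// createTemporaryCgroupsForReservation creates the kube reserved and system reserved cgroups used by this test.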
func createTemporaryCgroupsForReservation(cgroupManager cm.CgroupManager) error {
|
||||
// Create kube reserved cgroup
|
||||
cgroupConfig := &cm.CgroupConfig{
|
||||
Name: cm.CgroupName(kubeReservedCgroup),
|
||||
}
|
||||
if err := createIfNotExists(cgroupManager, cgroupConfig); err != nil {
|
||||
return err
|
||||
}
|
||||
// Create system reserved cgroup
|
||||
cgroupConfig.Name = cm.CgroupName(systemReservedCgroup)
|
||||
|
||||
return createIfNotExists(cgroupManager, cgroupConfig)
|
||||
}
|
||||
|
||||
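// destroyTemporaryCgroupsForReservation removes the kube reserved and system reserved cgroups created for this test.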
func destroyTemporaryCgroupsForReservation(cgroupManager cm.CgroupManager) error {
|
||||
// Destroy kube reserved cgroup
|
||||
cgroupConfig := &cm.CgroupConfig{
|
||||
Name: cm.CgroupName(kubeReservedCgroup),
|
||||
}
|
||||
if err := cgroupManager.Destroy(cgroupConfig); err != nil {
|
||||
return err
|
||||
}
|
||||
cgroupConfig.Name = cm.CgroupName(systemReservedCgroup)
|
||||
return cgroupManager.Destroy(cgroupConfig)
|
||||
}
|
||||
|
||||
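// runTest reconfigures the kubelet to enforce node allocatable, then verifies the resulting node allocatable,
// kube reserved and system reserved cgroup settings as well as the allocatable values reported to the scheduler.
// The original kubelet configuration is restored afterwards.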
func runTest(f *framework.Framework) error {
|
||||
var oldCfg *kubeletconfig.KubeletConfiguration
|
||||
subsystems, err := cm.GetCgroupSubsystems()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Get current kubelet configuration
|
||||
oldCfg, err = getCurrentKubeletConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Create a cgroup manager object for manipulating cgroups.
|
||||
cgroupManager := cm.NewCgroupManager(subsystems, oldCfg.CgroupDriver)
|
||||
|
||||
defer destroyTemporaryCgroupsForReservation(cgroupManager)
|
||||
defer func() {
|
||||
if oldCfg != nil {
|
||||
framework.ExpectNoError(setKubeletConfiguration(f, oldCfg))
|
||||
}
|
||||
}()
|
||||
if err := createTemporaryCgroupsForReservation(cgroupManager); err != nil {
|
||||
return err
|
||||
}
|
||||
newCfg := oldCfg.DeepCopy()
|
||||
// Change existing kubelet configuration
|
||||
setDesiredConfiguration(newCfg)
|
||||
// Set the new kubelet configuration.
|
||||
err = setKubeletConfiguration(f, newCfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Set new config and current config.
|
||||
currentConfig := newCfg
|
||||
|
||||
expectedNAPodCgroup := path.Join(currentConfig.CgroupRoot, "kubepods")
|
||||
if !cgroupManager.Exists(cm.CgroupName(expectedNAPodCgroup)) {
|
||||
return fmt.Errorf("Expected Node Allocatable Cgroup Does not exist")
|
||||
}
|
||||
// TODO: Update cgroupManager to expose a Status interface to get current Cgroup Settings.
|
||||
// The node may not have updated capacity and allocatable yet, so check that it happens eventually.
|
||||
Eventually(func() error {
|
||||
nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(nodeList.Items) != 1 {
|
||||
return fmt.Errorf("Unexpected number of node objects for node e2e. Expects only one node: %+v", nodeList)
|
||||
}
|
||||
node := nodeList.Items[0]
|
||||
capacity := node.Status.Capacity
|
||||
allocatableCPU, allocatableMemory := getAllocatableLimits("200m", "200Mi", capacity)
|
||||
// Total Memory reservation is 200Mi excluding eviction thresholds.
|
||||
// Expect CPU shares on node allocatable cgroup to equal allocatable.
|
||||
if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["cpu"], "kubepods", "cpu.shares"), int64(cm.MilliCPUToShares(allocatableCPU.MilliValue())), 10); err != nil {
|
||||
return err
|
||||
}
|
||||
// Expect Memory limit on node allocatable cgroup to equal allocatable.
|
||||
if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["memory"], "kubepods", "memory.limit_in_bytes"), allocatableMemory.Value(), 0); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Check that Allocatable reported to scheduler includes eviction thresholds.
|
||||
schedulerAllocatable := node.Status.Allocatable
|
||||
// Memory allocatable should take into account eviction thresholds.
|
||||
allocatableCPU, allocatableMemory = getAllocatableLimits("200m", "300Mi", capacity)
|
||||
// Expect allocatable to include all resources in capacity.
|
||||
if len(schedulerAllocatable) != len(capacity) {
|
||||
return fmt.Errorf("Expected all resources in capacity to be found in allocatable")
|
||||
}
|
||||
// CPU based evictions are not supported.
|
||||
if allocatableCPU.Cmp(schedulerAllocatable[v1.ResourceCPU]) != 0 {
|
||||
return fmt.Errorf("Unexpected cpu allocatable value exposed by the node. Expected: %v, got: %v, capacity: %v", allocatableCPU, schedulerAllocatable[v1.ResourceCPU], capacity[v1.ResourceCPU])
|
||||
}
|
||||
if allocatableMemory.Cmp(schedulerAllocatable[v1.ResourceMemory]) != 0 {
|
||||
return fmt.Errorf("Unexpected memory allocatable value exposed by the node. Expected: %v, got: %v, capacity: %v", allocatableMemory, schedulerAllocatable[v1.ResourceMemory], capacity[v1.ResourceMemory])
|
||||
}
|
||||
return nil
|
||||
}, time.Minute, 5*time.Second).Should(BeNil())
|
||||
|
||||
if !cgroupManager.Exists(cm.CgroupName(kubeReservedCgroup)) {
|
||||
return fmt.Errorf("Expected kube reserved cgroup Does not exist")
|
||||
}
|
||||
// Expect CPU shares on kube reserved cgroup to equal its reservation, which is `100m`.
|
||||
kubeReservedCPU := resource.MustParse(currentConfig.KubeReserved[string(v1.ResourceCPU)])
|
||||
if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["cpu"], kubeReservedCgroup, "cpu.shares"), int64(cm.MilliCPUToShares(kubeReservedCPU.MilliValue())), 10); err != nil {
|
||||
return err
|
||||
}
|
||||
// Expect memory limit on kube reserved cgroup to equal the configured value `100Mi`.
|
||||
kubeReservedMemory := resource.MustParse(currentConfig.KubeReserved[string(v1.ResourceMemory)])
|
||||
if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["memory"], kubeReservedCgroup, "memory.limit_in_bytes"), kubeReservedMemory.Value(), 0); err != nil {
|
||||
return err
|
||||
}
|
||||
if !cgroupManager.Exists(cm.CgroupName(systemReservedCgroup)) {
|
||||
return fmt.Errorf("Expected system reserved cgroup Does not exist")
|
||||
}
|
||||
// Expect CPU shares on system reserved cgroup to equal its reservation, which is `100m`.
|
||||
systemReservedCPU := resource.MustParse(currentConfig.SystemReserved[string(v1.ResourceCPU)])
|
||||
if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["cpu"], systemReservedCgroup, "cpu.shares"), int64(cm.MilliCPUToShares(systemReservedCPU.MilliValue())), 10); err != nil {
|
||||
return err
|
||||
}
|
||||
// Expect memory limit on system reserved cgroup to equal the configured value `100Mi`.
|
||||
systemReservedMemory := resource.MustParse(currentConfig.SystemReserved[string(v1.ResourceMemory)])
|
||||
if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["memory"], systemReservedCgroup, "memory.limit_in_bytes"), systemReservedMemory.Value(), 0); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
453
vendor/k8s.io/kubernetes/test/e2e_node/node_problem_detector_linux.go
generated
vendored
Normal file
@ -0,0 +1,453 @@
|
||||
// +build cgo,linux
|
||||
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
coreclientset "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
nodeutil "k8s.io/kubernetes/pkg/api/v1/node"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = framework.KubeDescribe("NodeProblemDetector", func() {
|
||||
const (
|
||||
pollInterval = 1 * time.Second
|
||||
pollConsistent = 5 * time.Second
|
||||
pollTimeout = 1 * time.Minute
|
||||
image = "gcr.io/google_containers/node-problem-detector:v0.4.1"
|
||||
)
|
||||
f := framework.NewDefaultFramework("node-problem-detector")
|
||||
var c clientset.Interface
|
||||
var uid string
|
||||
var ns, name, configName, eventNamespace string
|
||||
var bootTime, nodeTime time.Time
|
||||
BeforeEach(func() {
|
||||
c = f.ClientSet
|
||||
ns = f.Namespace.Name
|
||||
uid = string(uuid.NewUUID())
|
||||
name = "node-problem-detector-" + uid
|
||||
configName = "node-problem-detector-config-" + uid
|
||||
// There is no namespace for Node; the event recorder will set the default namespace for node events.
|
||||
eventNamespace = metav1.NamespaceDefault
|
||||
})
|
||||
|
||||
// Test system log monitor. We may add other tests if we have more problem daemons in the future.
|
||||
framework.KubeDescribe("SystemLogMonitor", func() {
|
||||
const (
|
||||
// Use test condition to avoid changing the real node condition in use.
|
||||
// TODO(random-liu): Node conditions can now be arbitrary strings; consider whether we need to
|
||||
// add TestCondition when switching to predefined condition list.
|
||||
condition = v1.NodeConditionType("TestCondition")
|
||||
|
||||
// File paths used in the test.
|
||||
logFile = "/log/test.log"
|
||||
configFile = "/config/testconfig.json"
|
||||
etcLocaltime = "/etc/localtime"
|
||||
|
||||
// Volumes used in the test.
|
||||
configVolume = "config"
|
||||
logVolume = "log"
|
||||
localtimeVolume = "localtime"
|
||||
|
||||
// Reasons and messages used in the test.
|
||||
defaultReason = "Default"
|
||||
defaultMessage = "default message"
|
||||
tempReason = "Temporary"
|
||||
tempMessage = "temporary error"
|
||||
permReason1 = "Permanent1"
|
||||
permMessage1 = "permanent error 1"
|
||||
permReason2 = "Permanent2"
|
||||
permMessage2 = "permanent error 2"
|
||||
)
|
||||
var source, config, hostLogFile string
|
||||
var lookback time.Duration
|
||||
var eventListOptions metav1.ListOptions
|
||||
|
||||
BeforeEach(func() {
|
||||
By("Calculate Lookback duration")
|
||||
var err error
|
||||
nodeTime, bootTime, err = getNodeTime()
|
||||
Expect(err).To(BeNil())
|
||||
// Set lookback duration longer than node up time.
|
||||
// Assume the test won't take more than 1 hour, in fact it usually only takes 90 seconds.
|
||||
lookback = nodeTime.Sub(bootTime) + time.Hour
|
||||
|
||||
// Randomize the source name
|
||||
source = "kernel-monitor-" + uid
|
||||
config = `
|
||||
{
|
||||
"plugin": "filelog",
|
||||
"pluginConfig": {
|
||||
"timestamp": "^.{15}",
|
||||
"message": "kernel: \\[.*\\] (.*)",
|
||||
"timestampFormat": "` + time.Stamp + `"
|
||||
},
|
||||
"logPath": "` + logFile + `",
|
||||
"lookback": "` + lookback.String() + `",
|
||||
"bufferSize": 10,
|
||||
"source": "` + source + `",
|
||||
"conditions": [
|
||||
{
|
||||
"type": "` + string(condition) + `",
|
||||
"reason": "` + defaultReason + `",
|
||||
"message": "` + defaultMessage + `"
|
||||
}
|
||||
],
|
||||
"rules": [
|
||||
{
|
||||
"type": "temporary",
|
||||
"reason": "` + tempReason + `",
|
||||
"pattern": "` + tempMessage + `"
|
||||
},
|
||||
{
|
||||
"type": "permanent",
|
||||
"condition": "` + string(condition) + `",
|
||||
"reason": "` + permReason1 + `",
|
||||
"pattern": "` + permMessage1 + ".*" + `"
|
||||
},
|
||||
{
|
||||
"type": "permanent",
|
||||
"condition": "` + string(condition) + `",
|
||||
"reason": "` + permReason2 + `",
|
||||
"pattern": "` + permMessage2 + ".*" + `"
|
||||
}
|
||||
]
|
||||
}`
|
||||
By("Generate event list options")
|
||||
selector := fields.Set{
|
||||
"involvedObject.kind": "Node",
|
||||
"involvedObject.name": framework.TestContext.NodeName,
|
||||
"involvedObject.namespace": metav1.NamespaceAll,
|
||||
"source": source,
|
||||
}.AsSelector().String()
|
||||
eventListOptions = metav1.ListOptions{FieldSelector: selector}
|
||||
By("Create the test log file")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
By("Create config map for the node problem detector")
|
||||
_, err = c.CoreV1().ConfigMaps(ns).Create(&v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: configName},
|
||||
Data: map[string]string{path.Base(configFile): config},
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
By("Create the node problem detector")
|
||||
hostPathType := new(v1.HostPathType)
|
||||
*hostPathType = v1.HostPathType(string(v1.HostPathFileOrCreate))
|
||||
f.PodClient().CreateSync(&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
HostNetwork: true,
|
||||
SecurityContext: &v1.PodSecurityContext{},
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: configVolume,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
ConfigMap: &v1.ConfigMapVolumeSource{
|
||||
LocalObjectReference: v1.LocalObjectReference{Name: configName},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: logVolume,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: localtimeVolume,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{
|
||||
Path: etcLocaltime,
|
||||
Type: hostPathType,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: name,
|
||||
Image: image,
|
||||
Command: []string{"sh", "-c", "touch " + logFile + " && /node-problem-detector --logtostderr --system-log-monitors=" + configFile + fmt.Sprintf(" --apiserver-override=%s?inClusterConfig=false", framework.TestContext.Host)},
|
||||
Env: []v1.EnvVar{
|
||||
{
|
||||
Name: "NODE_NAME",
|
||||
ValueFrom: &v1.EnvVarSource{
|
||||
FieldRef: &v1.ObjectFieldSelector{
|
||||
APIVersion: "v1",
|
||||
FieldPath: "spec.nodeName",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: logVolume,
|
||||
MountPath: path.Dir(logFile),
|
||||
},
|
||||
{
|
||||
Name: localtimeVolume,
|
||||
MountPath: etcLocaltime,
|
||||
},
|
||||
{
|
||||
Name: configVolume,
|
||||
MountPath: path.Dir(configFile),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
pod, err := f.PodClient().Get(name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
// TODO: remove hardcoded kubelet volume directory path
|
||||
// framework.TestContext.KubeVolumeDir is currently not populated for node e2e
|
||||
hostLogFile = "/var/lib/kubelet/pods/" + string(pod.UID) + "/volumes/kubernetes.io~empty-dir" + logFile
|
||||
})
|
||||
|
||||
It("should generate node condition and events for corresponding errors", func() {
|
||||
for _, test := range []struct {
|
||||
description string
|
||||
timestamp time.Time
|
||||
message string
|
||||
messageNum int
|
||||
events int
|
||||
conditionReason string
|
||||
conditionMessage string
|
||||
conditionType v1.ConditionStatus
|
||||
}{
|
||||
{
|
||||
description: "should generate default node condition",
|
||||
conditionReason: defaultReason,
|
||||
conditionMessage: defaultMessage,
|
||||
conditionType: v1.ConditionFalse,
|
||||
},
|
||||
{
|
||||
description: "should not generate events for too old log",
|
||||
timestamp: bootTime.Add(-1 * time.Minute),
|
||||
message: tempMessage,
|
||||
messageNum: 3,
|
||||
conditionReason: defaultReason,
|
||||
conditionMessage: defaultMessage,
|
||||
conditionType: v1.ConditionFalse,
|
||||
},
|
||||
{
|
||||
description: "should not change node condition for too old log",
|
||||
timestamp: bootTime.Add(-1 * time.Minute),
|
||||
message: permMessage1,
|
||||
messageNum: 1,
|
||||
conditionReason: defaultReason,
|
||||
conditionMessage: defaultMessage,
|
||||
conditionType: v1.ConditionFalse,
|
||||
},
|
||||
{
|
||||
description: "should generate event for old log within lookback duration",
|
||||
timestamp: nodeTime,
|
||||
message: tempMessage,
|
||||
messageNum: 3,
|
||||
events: 3,
|
||||
conditionReason: defaultReason,
|
||||
conditionMessage: defaultMessage,
|
||||
conditionType: v1.ConditionFalse,
|
||||
},
|
||||
{
|
||||
description: "should change node condition for old log within lookback duration",
|
||||
timestamp: nodeTime,
|
||||
message: permMessage1,
|
||||
messageNum: 1,
|
||||
events: 3, // event number should not change
|
||||
conditionReason: permReason1,
|
||||
conditionMessage: permMessage1,
|
||||
conditionType: v1.ConditionTrue,
|
||||
},
|
||||
{
|
||||
description: "should generate event for new log",
|
||||
timestamp: nodeTime.Add(5 * time.Minute),
|
||||
message: tempMessage,
|
||||
messageNum: 3,
|
||||
events: 6,
|
||||
conditionReason: permReason1,
|
||||
conditionMessage: permMessage1,
|
||||
conditionType: v1.ConditionTrue,
|
||||
},
|
||||
{
|
||||
description: "should not update node condition with the same reason",
|
||||
timestamp: nodeTime.Add(5 * time.Minute),
|
||||
message: permMessage1 + "different message",
|
||||
messageNum: 1,
|
||||
events: 6, // event number should not change
|
||||
conditionReason: permReason1,
|
||||
conditionMessage: permMessage1,
|
||||
conditionType: v1.ConditionTrue,
|
||||
},
|
||||
{
|
||||
description: "should change node condition for new log",
|
||||
timestamp: nodeTime.Add(5 * time.Minute),
|
||||
message: permMessage2,
|
||||
messageNum: 1,
|
||||
events: 6, // event number should not change
|
||||
conditionReason: permReason2,
|
||||
conditionMessage: permMessage2,
|
||||
conditionType: v1.ConditionTrue,
|
||||
},
|
||||
} {
|
||||
By(test.description)
|
||||
if test.messageNum > 0 {
|
||||
By(fmt.Sprintf("Inject %d logs: %q", test.messageNum, test.message))
|
||||
err := injectLog(hostLogFile, test.timestamp, test.message, test.messageNum)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
|
||||
By(fmt.Sprintf("Wait for %d events generated", test.events))
|
||||
Eventually(func() error {
|
||||
return verifyEvents(c.CoreV1().Events(eventNamespace), eventListOptions, test.events, tempReason, tempMessage)
|
||||
}, pollTimeout, pollInterval).Should(Succeed())
|
||||
By(fmt.Sprintf("Make sure only %d events generated", test.events))
|
||||
Consistently(func() error {
|
||||
return verifyEvents(c.CoreV1().Events(eventNamespace), eventListOptions, test.events, tempReason, tempMessage)
|
||||
}, pollConsistent, pollInterval).Should(Succeed())
|
||||
|
||||
By(fmt.Sprintf("Make sure node condition %q is set", condition))
|
||||
Eventually(func() error {
|
||||
return verifyNodeCondition(c.CoreV1().Nodes(), condition, test.conditionType, test.conditionReason, test.conditionMessage)
|
||||
}, pollTimeout, pollInterval).Should(Succeed())
|
||||
By(fmt.Sprintf("Make sure node condition %q is stable", condition))
|
||||
Consistently(func() error {
|
||||
return verifyNodeCondition(c.CoreV1().Nodes(), condition, test.conditionType, test.conditionReason, test.conditionMessage)
|
||||
}, pollConsistent, pollInterval).Should(Succeed())
|
||||
}
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
if CurrentGinkgoTestDescription().Failed && framework.TestContext.DumpLogsOnFailure {
|
||||
By("Get node problem detector log")
|
||||
log, err := framework.GetPodLogs(c, ns, name, name)
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
framework.Logf("Node Problem Detector logs:\n %s", log)
|
||||
}
|
||||
By("Delete the node problem detector")
|
||||
f.PodClient().Delete(name, metav1.NewDeleteOptions(0))
|
||||
By("Wait for the node problem detector to disappear")
|
||||
Expect(framework.WaitForPodToDisappear(c, ns, name, labels.Everything(), pollInterval, pollTimeout)).To(Succeed())
|
||||
By("Delete the config map")
|
||||
c.CoreV1().ConfigMaps(ns).Delete(configName, nil)
|
||||
By("Clean up the events")
|
||||
Expect(c.CoreV1().Events(eventNamespace).DeleteCollection(metav1.NewDeleteOptions(0), eventListOptions)).To(Succeed())
|
||||
By("Clean up the node condition")
|
||||
patch := []byte(fmt.Sprintf(`{"status":{"conditions":[{"$patch":"delete","type":"%s"}]}}`, condition))
|
||||
c.CoreV1().RESTClient().Patch(types.StrategicMergePatchType).Resource("nodes").Name(framework.TestContext.NodeName).SubResource("status").Body(patch).Do()
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
// injectLog injects kernel log into specified file.
|
||||
func injectLog(file string, timestamp time.Time, log string, num int) error {
|
||||
f, err := os.OpenFile(file, os.O_RDWR|os.O_APPEND, 0666)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
for i := 0; i < num; i++ {
|
||||
_, err := f.WriteString(fmt.Sprintf("%s kernel: [0.000000] %s\n", timestamp.Format(time.Stamp), log))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// getNodeTime gets node boot time and current time.
|
||||
func getNodeTime() (time.Time, time.Time, error) {
|
||||
// Get node current time.
|
||||
nodeTime := time.Now()
|
||||
|
||||
// Get system uptime.
|
||||
var info syscall.Sysinfo_t
|
||||
if err := syscall.Sysinfo(&info); err != nil {
|
||||
return time.Time{}, time.Time{}, err
|
||||
}
|
||||
// Get node boot time. NOTE that because we get node current time before uptime, the boot time
|
||||
// calculated will be a little earlier than the real boot time. This won't affect the correctness
|
||||
// of the test result.
|
||||
bootTime := nodeTime.Add(-time.Duration(info.Uptime) * time.Second)
|
||||
|
||||
return nodeTime, bootTime, nil
|
||||
}
|
||||
|
||||
// verifyEvents verifies there are num specific events generated
|
||||
func verifyEvents(e coreclientset.EventInterface, options metav1.ListOptions, num int, reason, message string) error {
|
||||
events, err := e.List(options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
count := 0
|
||||
for _, event := range events.Items {
|
||||
if event.Reason != reason || event.Message != message {
|
||||
return fmt.Errorf("unexpected event: %v", event)
|
||||
}
|
||||
count += int(event.Count)
|
||||
}
|
||||
if count != num {
|
||||
return fmt.Errorf("expect event number %d, got %d: %v", num, count, events.Items)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// verifyNoEvents verifies there is no event generated
|
||||
func verifyNoEvents(e coreclientset.EventInterface, options metav1.ListOptions) error {
|
||||
events, err := e.List(options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(events.Items) != 0 {
|
||||
return fmt.Errorf("unexpected events: %v", events.Items)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// verifyNodeCondition verifies that the specified node condition is generated; if reason and message are empty, they are not checked
|
||||
func verifyNodeCondition(n coreclientset.NodeInterface, condition v1.NodeConditionType, status v1.ConditionStatus, reason, message string) error {
|
||||
node, err := n.Get(framework.TestContext.NodeName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, c := nodeutil.GetNodeCondition(&node.Status, condition)
|
||||
if c == nil {
|
||||
return fmt.Errorf("node condition %q not found", condition)
|
||||
}
|
||||
if c.Status != status || c.Reason != reason || c.Message != message {
|
||||
return fmt.Errorf("unexpected node condition %q: %+v", condition, c)
|
||||
}
|
||||
return nil
|
||||
}
|
25
vendor/k8s.io/kubernetes/test/e2e_node/perftype/BUILD
generated
vendored
Normal file
@ -0,0 +1,25 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["perftype.go"],
|
||||
importpath = "k8s.io/kubernetes/test/e2e_node/perftype",
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
34
vendor/k8s.io/kubernetes/test/e2e_node/perftype/perftype.go
generated
vendored
Normal file
@ -0,0 +1,34 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package perftype
|
||||
|
||||
// ResourceSeries defines the time series of the resource usage.
|
||||
type ResourceSeries struct {
|
||||
Timestamp []int64 `json:"ts"`
|
||||
CPUUsageInMilliCores []int64 `json:"cpu"`
|
||||
MemoryRSSInMegaBytes []int64 `json:"memory"`
|
||||
Units map[string]string `json:"unit"`
|
||||
}
|
||||
|
||||
// NodeTimeSeries defines the time series of the operations and the resource
|
||||
// usage.
|
||||
type NodeTimeSeries struct {
|
||||
OperationData map[string][]int64 `json:"op_series,omitempty"`
|
||||
ResourceData map[string]*ResourceSeries `json:"resource_series,omitempty"`
|
||||
Labels map[string]string `json:"labels"`
|
||||
Version string `json:"version"`
|
||||
}
|
306
vendor/k8s.io/kubernetes/test/e2e_node/pods_container_manager_test.go
generated
vendored
Normal file
@ -0,0 +1,306 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
"path"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
"github.com/golang/glog"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
// getResourceList returns a ResourceList with the
|
||||
// specified cpu and memory resource values
|
||||
func getResourceList(cpu, memory string) v1.ResourceList {
|
||||
res := v1.ResourceList{}
|
||||
if cpu != "" {
|
||||
res[v1.ResourceCPU] = resource.MustParse(cpu)
|
||||
}
|
||||
if memory != "" {
|
||||
res[v1.ResourceMemory] = resource.MustParse(memory)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// getResourceRequirements returns a ResourceRequirements object
|
||||
func getResourceRequirements(requests, limits v1.ResourceList) v1.ResourceRequirements {
|
||||
res := v1.ResourceRequirements{}
|
||||
res.Requests = requests
|
||||
res.Limits = limits
|
||||
return res
|
||||
}
|
||||
|
||||
const (
|
||||
// Kubelet internal cgroup name for node allocatable cgroup.
|
||||
defaultNodeAllocatableCgroup = "kubepods"
|
||||
// Kubelet internal cgroup name for burstable tier
|
||||
burstableCgroup = "burstable"
|
||||
// Kubelet internal cgroup name for besteffort tier
|
||||
bestEffortCgroup = "besteffort"
|
||||
)
|
||||
|
||||
// makePodToVerifyCgroups returns a pod that verifies the existence of the specified cgroups.
|
||||
func makePodToVerifyCgroups(cgroupNames []cm.CgroupName) *v1.Pod {
|
||||
// convert the names to their literal cgroupfs forms...
|
||||
cgroupFsNames := []string{}
|
||||
for _, cgroupName := range cgroupNames {
|
||||
// Add top level cgroup used to enforce node allocatable.
|
||||
cgroupName = cm.CgroupName(path.Join(defaultNodeAllocatableCgroup, string(cgroupName)))
|
||||
if framework.TestContext.KubeletConfig.CgroupDriver == "systemd" {
|
||||
cgroupFsNames = append(cgroupFsNames, cm.ConvertCgroupNameToSystemd(cgroupName, true))
|
||||
} else {
|
||||
cgroupFsNames = append(cgroupFsNames, string(cgroupName))
|
||||
}
|
||||
}
|
||||
glog.Infof("expecting %v cgroups to be found", cgroupFsNames)
|
||||
// build the pod command to verify that the cgroups exist
|
||||
command := ""
|
||||
for _, cgroupFsName := range cgroupFsNames {
|
||||
localCommand := "if [ ! -d /tmp/memory/" + cgroupFsName + " ] || [ ! -d /tmp/cpu/" + cgroupFsName + " ]; then exit 1; fi; "
|
||||
command += localCommand
|
||||
}
|
||||
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod" + string(uuid.NewUUID()),
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: busyboxImage,
|
||||
Name: "container" + string(uuid.NewUUID()),
|
||||
Command: []string{"sh", "-c", command},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: "sysfscgroup",
|
||||
MountPath: "/tmp",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: "sysfscgroup",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "/sys/fs/cgroup"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
return pod
|
||||
}
|
||||
|
||||
// makePodToVerifyCgroupRemoved verifies that the specified cgroup does not exist.
|
||||
func makePodToVerifyCgroupRemoved(cgroupName cm.CgroupName) *v1.Pod {
|
||||
cgroupFsName := string(cgroupName)
|
||||
if framework.TestContext.KubeletConfig.CgroupDriver == "systemd" {
|
||||
cgroupFsName = cm.ConvertCgroupNameToSystemd(cm.CgroupName(cgroupName), true)
|
||||
}
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod" + string(uuid.NewUUID()),
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
RestartPolicy: v1.RestartPolicyOnFailure,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: busyboxImage,
|
||||
Name: "container" + string(uuid.NewUUID()),
|
||||
Command: []string{"sh", "-c", "for i in `seq 1 10`; do if [ ! -d /tmp/memory/" + cgroupFsName + " ] && [ ! -d /tmp/cpu/" + cgroupFsName + " ]; then exit 0; else sleep 10; fi; done; exit 1"},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: "sysfscgroup",
|
||||
MountPath: "/tmp",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: "sysfscgroup",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "/sys/fs/cgroup"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
return pod
|
||||
}
|
||||
|
||||
var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
|
||||
f := framework.NewDefaultFramework("kubelet-cgroup-manager")
|
||||
Describe("QOS containers", func() {
|
||||
Context("On enabling QOS cgroup hierarchy", func() {
|
||||
It("Top level QoS containers should have been created", func() {
|
||||
if !framework.TestContext.KubeletConfig.CgroupsPerQOS {
|
||||
return
|
||||
}
|
||||
cgroupsToVerify := []cm.CgroupName{cm.CgroupName(burstableCgroup), cm.CgroupName(bestEffortCgroup)}
|
||||
pod := makePodToVerifyCgroups(cgroupsToVerify)
|
||||
f.PodClient().Create(pod)
|
||||
err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
Describe("Pod containers", func() {
|
||||
Context("On scheduling a Guaranteed Pod", func() {
|
||||
It("Pod containers should have been created under the cgroup-root", func() {
|
||||
if !framework.TestContext.KubeletConfig.CgroupsPerQOS {
|
||||
return
|
||||
}
|
||||
var (
|
||||
guaranteedPod *v1.Pod
|
||||
podUID string
|
||||
)
|
||||
By("Creating a Guaranteed pod in Namespace", func() {
|
||||
guaranteedPod = f.PodClient().Create(&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod" + string(uuid.NewUUID()),
|
||||
Namespace: f.Namespace.Name,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: framework.GetPauseImageName(f.ClientSet),
|
||||
Name: "container" + string(uuid.NewUUID()),
|
||||
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
podUID = string(guaranteedPod.UID)
|
||||
})
|
||||
By("Checking if the pod cgroup was created", func() {
|
||||
cgroupsToVerify := []cm.CgroupName{cm.CgroupName("pod" + podUID)}
|
||||
pod := makePodToVerifyCgroups(cgroupsToVerify)
|
||||
f.PodClient().Create(pod)
|
||||
err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
By("Checking if the pod cgroup was deleted", func() {
|
||||
gp := int64(1)
|
||||
Expect(f.PodClient().Delete(guaranteedPod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})).NotTo(HaveOccurred())
|
||||
pod := makePodToVerifyCgroupRemoved(cm.CgroupName("pod" + podUID))
|
||||
f.PodClient().Create(pod)
|
||||
err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
})
|
||||
})
|
||||
Context("On scheduling a BestEffort Pod", func() {
|
||||
It("Pod containers should have been created under the BestEffort cgroup", func() {
|
||||
if !framework.TestContext.KubeletConfig.CgroupsPerQOS {
|
||||
return
|
||||
}
|
||||
var (
|
||||
podUID string
|
||||
bestEffortPod *v1.Pod
|
||||
)
|
||||
By("Creating a BestEffort pod in Namespace", func() {
|
||||
bestEffortPod = f.PodClient().Create(&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod" + string(uuid.NewUUID()),
|
||||
Namespace: f.Namespace.Name,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: framework.GetPauseImageName(f.ClientSet),
|
||||
Name: "container" + string(uuid.NewUUID()),
|
||||
Resources: getResourceRequirements(getResourceList("", ""), getResourceList("", "")),
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
podUID = string(bestEffortPod.UID)
|
||||
})
|
||||
By("Checking if the pod cgroup was created", func() {
|
||||
cgroupsToVerify := []cm.CgroupName{cm.CgroupName("besteffort/pod" + podUID)}
|
||||
pod := makePodToVerifyCgroups(cgroupsToVerify)
|
||||
f.PodClient().Create(pod)
|
||||
err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
By("Checking if the pod cgroup was deleted", func() {
|
||||
gp := int64(1)
|
||||
Expect(f.PodClient().Delete(bestEffortPod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})).NotTo(HaveOccurred())
|
||||
pod := makePodToVerifyCgroupRemoved(cm.CgroupName("besteffort/pod" + podUID))
|
||||
f.PodClient().Create(pod)
|
||||
err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
})
|
||||
})
|
||||
Context("On scheduling a Burstable Pod", func() {
|
||||
It("Pod containers should have been created under the Burstable cgroup", func() {
|
||||
if !framework.TestContext.KubeletConfig.CgroupsPerQOS {
|
||||
return
|
||||
}
|
||||
var (
|
||||
podUID string
|
||||
burstablePod *v1.Pod
|
||||
)
|
||||
By("Creating a Burstable pod in Namespace", func() {
|
||||
burstablePod = f.PodClient().Create(&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod" + string(uuid.NewUUID()),
|
||||
Namespace: f.Namespace.Name,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: framework.GetPauseImageName(f.ClientSet),
|
||||
Name: "container" + string(uuid.NewUUID()),
|
||||
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
podUID = string(burstablePod.UID)
|
||||
})
|
||||
By("Checking if the pod cgroup was created", func() {
|
||||
cgroupsToVerify := []cm.CgroupName{cm.CgroupName("burstable/pod" + podUID)}
|
||||
pod := makePodToVerifyCgroups(cgroupsToVerify)
|
||||
f.PodClient().Create(pod)
|
||||
err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
By("Checking if the pod cgroup was deleted", func() {
|
||||
gp := int64(1)
|
||||
Expect(f.PodClient().Delete(burstablePod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})).NotTo(HaveOccurred())
|
||||
pod := makePodToVerifyCgroupRemoved(cm.CgroupName("burstable/pod" + podUID))
|
||||
f.PodClient().Create(pod)
|
||||
err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
37
vendor/k8s.io/kubernetes/test/e2e_node/remote/BUILD
generated
vendored
Normal file
@ -0,0 +1,37 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"node_conformance.go",
|
||||
"node_e2e.go",
|
||||
"remote.go",
|
||||
"ssh.go",
|
||||
"types.go",
|
||||
"utils.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/test/e2e_node/remote",
|
||||
deps = [
|
||||
"//test/e2e_node/builder:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
303
vendor/k8s.io/kubernetes/test/e2e_node/remote/node_conformance.go
generated
vendored
Normal file
@ -0,0 +1,303 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package remote
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/kubernetes/test/e2e_node/builder"
|
||||
)
|
||||
|
||||
// ConformanceRemote contains the specific functions in the node conformance test suite.
|
||||
type ConformanceRemote struct{}
|
||||
|
||||
func InitConformanceRemote() TestSuite {
|
||||
return &ConformanceRemote{}
|
||||
}
|
||||
|
||||
// getConformanceDirectory gets node conformance test build directory.
|
||||
func getConformanceDirectory() (string, error) {
|
||||
k8sRoot, err := builder.GetK8sRootDir()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return filepath.Join(k8sRoot, "test", "e2e_node", "conformance", "build"), nil
|
||||
}
|
||||
|
||||
// commandToString is a helper function which formats command to string.
|
||||
func commandToString(c *exec.Cmd) string {
|
||||
return strings.Join(append([]string{c.Path}, c.Args[1:]...), " ")
|
||||
}
|
||||
|
||||
// Image path constants.
|
||||
const (
|
||||
conformanceRegistry = "gcr.io/google_containers"
|
||||
conformanceArch = runtime.GOARCH
|
||||
conformanceTarfile = "node_conformance.tar"
|
||||
conformanceTestBinary = "e2e_node.test"
|
||||
conformanceImageLoadTimeout = time.Duration(30) * time.Second
|
||||
)
|
||||
|
||||
// timestamp is used as a unique id of the current test.
|
||||
var timestamp = getTimestamp()
|
||||
|
||||
// getConformanceTestImageName returns name of the conformance test image given the system spec name.
|
||||
func getConformanceTestImageName(systemSpecName string) string {
|
||||
if systemSpecName == "" {
|
||||
return fmt.Sprintf("%s/node-test-%s:%s", conformanceRegistry, conformanceArch, timestamp)
|
||||
} else {
|
||||
return fmt.Sprintf("%s/node-test-%s-%s:%s", conformanceRegistry, systemSpecName, conformanceArch, timestamp)
|
||||
}
|
||||
}
|
||||
|
||||
// buildConformanceTest builds node conformance test image tarball into binDir.
|
||||
func buildConformanceTest(binDir, systemSpecName string) error {
|
||||
// Get node conformance directory.
|
||||
conformancePath, err := getConformanceDirectory()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get node conformance directory: %v", err)
|
||||
}
|
||||
// Build docker image.
|
||||
cmd := exec.Command("make", "-C", conformancePath, "BIN_DIR="+binDir,
|
||||
"REGISTRY="+conformanceRegistry,
|
||||
"ARCH="+conformanceArch,
|
||||
"VERSION="+timestamp,
|
||||
"SYSTEM_SPEC_NAME="+systemSpecName)
|
||||
if output, err := cmd.CombinedOutput(); err != nil {
|
||||
return fmt.Errorf("failed to build node conformance docker image: command - %q, error - %v, output - %q",
|
||||
commandToString(cmd), err, output)
|
||||
}
|
||||
// Save docker image into tar file.
|
||||
cmd = exec.Command("docker", "save", "-o", filepath.Join(binDir, conformanceTarfile), getConformanceTestImageName(systemSpecName))
|
||||
if output, err := cmd.CombinedOutput(); err != nil {
|
||||
return fmt.Errorf("failed to save node conformance docker image into tar file: command - %q, error - %v, output - %q",
|
||||
commandToString(cmd), err, output)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetupTestPackage sets up the test package with the k8s binaries required for the node conformance test
|
||||
func (c *ConformanceRemote) SetupTestPackage(tardir, systemSpecName string) error {
|
||||
// Build the executables
|
||||
if err := builder.BuildGo(); err != nil {
|
||||
return fmt.Errorf("failed to build the depedencies: %v", err)
|
||||
}
|
||||
|
||||
// Make sure we can find the newly built binaries
|
||||
buildOutputDir, err := builder.GetK8sBuildOutputDir()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to locate kubernetes build output directory %v", err)
|
||||
}
|
||||
|
||||
// Build node conformance tarball.
|
||||
if err := buildConformanceTest(buildOutputDir, systemSpecName); err != nil {
|
||||
return fmt.Errorf("failed to build node conformance test: %v", err)
|
||||
}
|
||||
|
||||
// Copy files
|
||||
requiredFiles := []string{"kubelet", conformanceTestBinary, conformanceTarfile}
|
||||
for _, file := range requiredFiles {
|
||||
source := filepath.Join(buildOutputDir, file)
|
||||
if _, err := os.Stat(source); err != nil {
|
||||
return fmt.Errorf("failed to locate test file %s: %v", file, err)
|
||||
}
|
||||
output, err := exec.Command("cp", source, filepath.Join(tardir, file)).CombinedOutput()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to copy %q: error - %v output - %q", file, err, output)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// loadConformanceImage loads node conformance image from tar file.
|
||||
func loadConformanceImage(host, workspace string) error {
|
||||
tarfile := filepath.Join(workspace, conformanceTarfile)
|
||||
if output, err := SSH(host, "timeout", conformanceImageLoadTimeout.String(),
|
||||
"docker", "load", "-i", tarfile); err != nil {
|
||||
return fmt.Errorf("failed to load node conformance image from tar file %q: error - %v output - %q",
|
||||
tarfile, err, output)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// kubeletLauncherLog is the log of kubelet launcher.
|
||||
const kubeletLauncherLog = "kubelet-launcher.log"
|
||||
|
||||
// kubeletPodManifestPath is a fixed known pod manifest path. We can not use the random pod
|
||||
// manifest directory generated in e2e_node.test because we need to mount the directory into
|
||||
// the conformance test container, and that is easier with a known directory.
|
||||
// TODO(random-liu): Get rid of this once we switch to cluster e2e node bootstrap script.
|
||||
var kubeletPodManifestPath = "conformance-pod-manifest-" + timestamp
|
||||
|
||||
// getPodManifestPath returns pod manifest full path.
|
||||
func getPodManifestPath(workspace string) string {
|
||||
return filepath.Join(workspace, kubeletPodManifestPath)
|
||||
}
|
||||
|
||||
// isSystemd returns whether the node is a systemd node.
|
||||
func isSystemd(host string) (bool, error) {
|
||||
// Returns "systemd" if /run/systemd/system is found, empty string otherwise.
|
||||
output, err := SSH(host, "test", "-e", "/run/systemd/system", "&&", "echo", "systemd", "||", "true")
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to check systemd: error - %v output - %q", err, output)
|
||||
}
|
||||
return strings.TrimSpace(output) != "", nil
|
||||
}
|
||||
|
||||
// launchKubelet launches kubelet by running e2e_node.test binary in run-kubelet-mode.
|
||||
// This is a temporary solution, we should change node e2e to use the same node bootstrap
|
||||
// with cluster e2e and launch kubelet outside of the test for both regular node e2e and
|
||||
// node conformance test.
|
||||
// TODO(random-liu): Switch to use standard node bootstrap script.
|
||||
func launchKubelet(host, workspace, results, testArgs string) error {
|
||||
podManifestPath := getPodManifestPath(workspace)
|
||||
if output, err := SSH(host, "mkdir", podManifestPath); err != nil {
|
||||
return fmt.Errorf("failed to create kubelet pod manifest path %q: error - %v output - %q",
|
||||
podManifestPath, err, output)
|
||||
}
|
||||
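// Run e2e_node.test in --run-kubelet-mode so that it launches the kubelet against the fixed pod manifest path, with all launcher output redirected to kubelet-launcher.log under the results directory.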
startKubeletCmd := fmt.Sprintf("./%s --run-kubelet-mode --logtostderr --node-name=%s"+
|
||||
" --report-dir=%s %s --kubelet-flags=--pod-manifest-path=%s > %s 2>&1",
|
||||
conformanceTestBinary, host, results, testArgs, podManifestPath, filepath.Join(results, kubeletLauncherLog))
|
||||
var cmd []string
|
||||
systemd, err := isSystemd(host)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check systemd: %v", err)
|
||||
}
|
||||
if systemd {
|
||||
cmd = []string{
|
||||
"systemd-run", "sh", "-c", getSSHCommand(" && ",
|
||||
// Switch to workspace.
|
||||
fmt.Sprintf("cd %s", workspace),
|
||||
// Launch kubelet by running e2e_node.test in run-kubelet-mode.
|
||||
startKubeletCmd,
|
||||
),
|
||||
}
|
||||
} else {
|
||||
cmd = []string{
|
||||
"sh", "-c", getSSHCommand(" && ",
|
||||
// Switch to workspace.
|
||||
fmt.Sprintf("cd %s", workspace),
|
||||
// Launch kubelet by running e2e_node.test in run-kubelet-mode with nohup.
|
||||
fmt.Sprintf("(nohup %s &)", startKubeletCmd),
|
||||
),
|
||||
}
|
||||
}
|
||||
glog.V(2).Infof("Launch kubelet with command: %v", cmd)
|
||||
output, err := SSH(host, cmd...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to launch kubelet with command %v: error - %v output - %q",
|
||||
cmd, err, output)
|
||||
}
|
||||
glog.Info("Successfully launch kubelet")
|
||||
return nil
|
||||
}
|
||||
|
||||
// kubeletStopGracePeriod is the grace period to wait before forcibly killing kubelet.
|
||||
const kubeletStopGracePeriod = 10 * time.Second
|
||||
|
||||
// stopKubelet stops kubelet launcher and kubelet gracefully.
|
||||
func stopKubelet(host, workspace string) error {
|
||||
glog.Info("Gracefully stop kubelet launcher")
|
||||
if output, err := SSH(host, "pkill", conformanceTestBinary); err != nil {
|
||||
return fmt.Errorf("failed to gracefully stop kubelet launcher: error - %v output - %q",
|
||||
err, output)
|
||||
}
|
||||
glog.Info("Wait for kubelet launcher to stop")
|
||||
stopped := false
|
||||
for start := time.Now(); time.Since(start) < kubeletStopGracePeriod; time.Sleep(time.Second) {
|
||||
// Check whether the process is still running.
|
||||
output, err := SSH(host, "pidof", conformanceTestBinary, "||", "true")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check kubelet stopping: error - %v output -%q",
|
||||
err, output)
|
||||
}
|
||||
// Kubelet is stopped
|
||||
if strings.TrimSpace(output) == "" {
|
||||
stopped = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !stopped {
|
||||
glog.Info("Forcibly stop kubelet")
|
||||
if output, err := SSH(host, "pkill", "-SIGKILL", conformanceTestBinary); err != nil {
|
||||
return fmt.Errorf("failed to forcibly stop kubelet: error - %v output - %q",
|
||||
err, output)
|
||||
}
|
||||
}
|
||||
glog.Info("Successfully stop kubelet")
|
||||
// Clean up the pod manifest path
|
||||
podManifestPath := getPodManifestPath(workspace)
|
||||
if output, err := SSH(host, "rm", "-f", filepath.Join(workspace, podManifestPath)); err != nil {
|
||||
return fmt.Errorf("failed to cleanup pod manifest directory %q: error - %v, output - %q",
|
||||
podManifestPath, err, output)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// RunTest runs test on the node.
|
||||
func (c *ConformanceRemote) RunTest(host, workspace, results, imageDesc, junitFilePrefix, testArgs, _, systemSpecName string, timeout time.Duration) (string, error) {
|
||||
// Install the cni plugins and add a basic CNI configuration.
|
||||
if err := setupCNI(host, workspace); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Configure iptables firewall rules.
|
||||
if err := configureFirewall(host); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Kill any running node processes.
|
||||
cleanupNodeProcesses(host)
|
||||
|
||||
// Load node conformance image.
|
||||
if err := loadConformanceImage(host, workspace); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Launch kubelet.
|
||||
if err := launchKubelet(host, workspace, results, testArgs); err != nil {
|
||||
return "", err
|
||||
}
|
||||
// Stop kubelet.
|
||||
defer func() {
|
||||
if err := stopKubelet(host, workspace); err != nil {
|
||||
// Only log the error if stopping kubelet fails, because it is not critical.
|
||||
glog.Errorf("failed to stop kubelet: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Run the tests
|
||||
glog.V(2).Infof("Starting tests on %q", host)
|
||||
podManifestPath := getPodManifestPath(workspace)
|
||||
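// The conformance image runs privileged with host networking and the host filesystem mounted at /rootfs; `timeout -k 30s` force-kills the container 30s after the test timeout expires.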
cmd := fmt.Sprintf("'timeout -k 30s %fs docker run --rm --privileged=true --net=host -v /:/rootfs -v %s:%s -v %s:/var/result -e TEST_ARGS=--report-prefix=%s %s'",
|
||||
timeout.Seconds(), podManifestPath, podManifestPath, results, junitFilePrefix, getConformanceTestImageName(systemSpecName))
|
||||
testOutput, err := SSH(host, "sh", "-c", cmd)
|
||||
if err != nil {
|
||||
return testOutput, err
|
||||
}
|
||||
|
||||
return testOutput, nil
|
||||
}
|
173
vendor/k8s.io/kubernetes/test/e2e_node/remote/node_e2e.go
generated
vendored
Normal file
@ -0,0 +1,173 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package remote
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/kubernetes/test/e2e_node/builder"
|
||||
)
|
||||
|
||||
const (
|
||||
systemSpecPath = "test/e2e_node/system/specs"
|
||||
)
|
||||
|
||||
// NodeE2ERemote contains the specific functions in the node e2e test suite.
|
||||
type NodeE2ERemote struct{}
|
||||
|
||||
func InitNodeE2ERemote() TestSuite {
|
||||
// TODO: Register flags.
|
||||
return &NodeE2ERemote{}
|
||||
}
|
||||
|
||||
// SetupTestPackage sets up the test package with the k8s binaries required for the node e2e tests
|
||||
func (n *NodeE2ERemote) SetupTestPackage(tardir, systemSpecName string) error {
|
||||
// Build the executables
|
||||
if err := builder.BuildGo(); err != nil {
|
||||
return fmt.Errorf("failed to build the depedencies: %v", err)
|
||||
}
|
||||
|
||||
// Make sure we can find the newly built binaries
|
||||
buildOutputDir, err := builder.GetK8sBuildOutputDir()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to locate kubernetes build output directory: %v", err)
|
||||
}
|
||||
|
||||
rootDir, err := builder.GetK8sRootDir()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to locate kubernetes root directory: %v", err)
|
||||
}
|
||||
|
||||
// Copy binaries
|
||||
requiredBins := []string{"kubelet", "e2e_node.test", "ginkgo", "mounter"}
|
||||
for _, bin := range requiredBins {
|
||||
source := filepath.Join(buildOutputDir, bin)
|
||||
if _, err := os.Stat(source); err != nil {
|
||||
return fmt.Errorf("failed to locate test binary %s: %v", bin, err)
|
||||
}
|
||||
out, err := exec.Command("cp", source, filepath.Join(tardir, bin)).CombinedOutput()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to copy %q: %v Output: %q", bin, err, out)
|
||||
}
|
||||
}
|
||||
|
||||
if systemSpecName != "" {
|
||||
// Copy system spec file
|
||||
source := filepath.Join(rootDir, systemSpecPath, systemSpecName+".yaml")
|
||||
if _, err := os.Stat(source); err != nil {
|
||||
return fmt.Errorf("failed to locate system spec %q: %v", source, err)
|
||||
}
|
||||
out, err := exec.Command("cp", source, tardir).CombinedOutput()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to copy system spec %q: %v, output: %q", source, err, out)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// dest is relative to the root of the tar
|
||||
func tarAddFile(tar, source, dest string) error {
|
||||
dir := filepath.Dir(dest)
|
||||
tardir := filepath.Join(tar, dir)
|
||||
tardest := filepath.Join(tar, dest)
|
||||
|
||||
out, err := exec.Command("mkdir", "-p", tardir).CombinedOutput()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create archive bin subdir %q, was dest for file %q. Err: %v. Output:\n%s", tardir, source, err, out)
|
||||
}
|
||||
out, err = exec.Command("cp", source, tardest).CombinedOutput()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to copy file %q to the archive bin subdir %q. Err: %v. Output:\n%s", source, tardir, err, out)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// prependCOSMounterFlag prepends the flag for setting the GCI mounter path to
|
||||
// args and returns the result.
|
||||
func prependCOSMounterFlag(args, host, workspace string) (string, error) {
|
||||
glog.V(2).Infof("GCI/COS node and GCI/COS mounter both detected, modifying --experimental-mounter-path accordingly")
|
||||
mounterPath := filepath.Join(workspace, "mounter")
|
||||
args = fmt.Sprintf("--kubelet-flags=--experimental-mounter-path=%s ", mounterPath) + args
|
||||
return args, nil
|
||||
}
|
||||
|
||||
// prependMemcgNotificationFlag prepends the flag for enabling memcg
|
||||
// notification to args and returns the result.
|
||||
func prependMemcgNotificationFlag(args string) string {
|
||||
return "--kubelet-flags=--experimental-kernel-memcg-notification=true " + args
|
||||
}
|
||||
|
||||
// updateOSSpecificKubeletFlags updates the Kubelet args with OS specific
|
||||
// settings.
|
||||
func updateOSSpecificKubeletFlags(args, host, workspace string) (string, error) {
|
||||
output, err := SSH(host, "cat", "/etc/os-release")
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("issue detecting node's OS via node's /etc/os-release. Err: %v, Output:\n%s", err, output)
|
||||
}
|
||||
switch {
|
||||
case strings.Contains(output, "ID=gci"), strings.Contains(output, "ID=cos"):
|
||||
args = prependMemcgNotificationFlag(args)
|
||||
return prependCOSMounterFlag(args, host, workspace)
|
||||
case strings.Contains(output, "ID=ubuntu"):
|
||||
return prependMemcgNotificationFlag(args), nil
|
||||
}
|
||||
return args, nil
|
||||
}
|
||||
|
||||
// RunTest runs test on the node.
|
||||
func (n *NodeE2ERemote) RunTest(host, workspace, results, imageDesc, junitFilePrefix, testArgs, ginkgoArgs, systemSpecName string, timeout time.Duration) (string, error) {
|
||||
// Install the cni plugins and add a basic CNI configuration.
|
||||
// TODO(random-liu): Do this in cloud init after we remove containervm test.
|
||||
if err := setupCNI(host, workspace); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Configure iptables firewall rules
|
||||
if err := configureFirewall(host); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Kill any running node processes
|
||||
cleanupNodeProcesses(host)
|
||||
|
||||
testArgs, err := updateOSSpecificKubeletFlags(testArgs, host, workspace)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
systemSpecFile := ""
|
||||
if systemSpecName != "" {
|
||||
systemSpecFile = systemSpecName + ".yaml"
|
||||
}
|
||||
|
||||
// Run the tests
|
||||
glog.V(2).Infof("Starting tests on %q", host)
|
||||
cmd := getSSHCommand(" && ",
|
||||
fmt.Sprintf("cd %s", workspace),
|
||||
fmt.Sprintf("timeout -k 30s %fs ./ginkgo %s ./e2e_node.test -- --system-spec-name=%s --system-spec-file=%s --logtostderr --v 4 --node-name=%s --report-dir=%s --report-prefix=%s --image-description=\"%s\" %s",
|
||||
timeout.Seconds(), ginkgoArgs, systemSpecName, systemSpecFile, host, results, junitFilePrefix, imageDesc, testArgs),
|
||||
)
|
||||
return SSH(host, "sh", "-c", cmd)
|
||||
}
|
203
vendor/k8s.io/kubernetes/test/e2e_node/remote/remote.go
generated
vendored
Normal file
@ -0,0 +1,203 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package remote
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
)
|
||||
|
||||
var testTimeoutSeconds = flag.Duration("test-timeout", 45*time.Minute, "How long (in golang duration format) to wait for ginkgo tests to complete.")
|
||||
var resultsDir = flag.String("results-dir", "/tmp/", "Directory to scp test results to.")
|
||||
|
||||
const archiveName = "e2e_node_test.tar.gz"
|
||||
|
||||
func CreateTestArchive(suite TestSuite, systemSpecName string) (string, error) {
|
||||
glog.V(2).Infof("Building archive...")
|
||||
tardir, err := ioutil.TempDir("", "node-e2e-archive")
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to create temporary directory %v.", err)
|
||||
}
|
||||
defer os.RemoveAll(tardir)
|
||||
|
||||
// Call the suite function to setup the test package.
|
||||
err = suite.SetupTestPackage(tardir, systemSpecName)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to setup test package %q: %v", tardir, err)
|
||||
}
|
||||
|
||||
// Build the tar
|
||||
out, err := exec.Command("tar", "-zcvf", archiveName, "-C", tardir, ".").CombinedOutput()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to build tar %v. Output:\n%s", err, out)
|
||||
}
|
||||
|
||||
dir, err := os.Getwd()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get working directory %v.", err)
|
||||
}
|
||||
return filepath.Join(dir, archiveName), nil
|
||||
}
|
||||
|
||||
// Returns the command output, whether the exit was ok, and any errors
|
||||
// TODO(random-liu): junitFilePrefix is not prefix actually, the file name is junit-junitFilePrefix.xml. Change the variable name.
|
||||
func RunRemote(suite TestSuite, archive string, host string, cleanup bool, imageDesc, junitFilePrefix string, testArgs string, ginkgoArgs string, systemSpecName string) (string, bool, error) {
|
||||
// Create the temp staging directory
|
||||
glog.V(2).Infof("Staging test binaries on %q", host)
|
||||
workspace := fmt.Sprintf("/tmp/node-e2e-%s", getTimestamp())
|
||||
// Do not sudo here, so that we can use scp to copy the test archive to the directory.
|
||||
if output, err := SSHNoSudo(host, "mkdir", workspace); err != nil {
|
||||
// Exit failure with the error
|
||||
return "", false, fmt.Errorf("failed to create workspace directory %q on host %q: %v output: %q", workspace, host, err, output)
|
||||
}
|
||||
if cleanup {
|
||||
defer func() {
|
||||
output, err := SSH(host, "rm", "-rf", workspace)
|
||||
if err != nil {
|
||||
glog.Errorf("failed to cleanup workspace %q on host %q: %v. Output:\n%s", workspace, host, err, output)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Copy the archive to the staging directory
|
||||
if output, err := runSSHCommand("scp", archive, fmt.Sprintf("%s:%s/", GetHostnameOrIp(host), workspace)); err != nil {
|
||||
// Exit failure with the error
|
||||
return "", false, fmt.Errorf("failed to copy test archive: %v, output: %q", err, output)
|
||||
}
|
||||
|
||||
// Extract the archive
|
||||
cmd := getSSHCommand(" && ",
|
||||
fmt.Sprintf("cd %s", workspace),
|
||||
fmt.Sprintf("tar -xzvf ./%s", archiveName),
|
||||
)
|
||||
glog.V(2).Infof("Extracting tar on %q", host)
|
||||
// Do not use sudo here, because `sudo tar -x` will recover the file ownership inside the tar ball, but
|
||||
// we want the extracted files to be owned by the current user.
|
||||
if output, err := SSHNoSudo(host, "sh", "-c", cmd); err != nil {
|
||||
// Exit failure with the error
|
||||
return "", false, fmt.Errorf("failed to extract test archive: %v, output: %q", err, output)
|
||||
}
|
||||
|
||||
// Create the test result directory.
|
||||
resultDir := filepath.Join(workspace, "results")
|
||||
if output, err := SSHNoSudo(host, "mkdir", resultDir); err != nil {
|
||||
// Exit failure with the error
|
||||
return "", false, fmt.Errorf("failed to create test result directory %q on host %q: %v output: %q", resultDir, host, err, output)
|
||||
}
|
||||
|
||||
glog.V(2).Infof("Running test on %q", host)
|
||||
output, err := suite.RunTest(host, workspace, resultDir, imageDesc, junitFilePrefix, testArgs, ginkgoArgs, systemSpecName, *testTimeoutSeconds)
|
||||
|
||||
aggErrs := []error{}
|
||||
// Do not log the output here, let the caller deal with the test output.
|
||||
if err != nil {
|
||||
aggErrs = append(aggErrs, err)
|
||||
collectSystemLog(host)
|
||||
}
|
||||
|
||||
glog.V(2).Infof("Copying test artifacts from %q", host)
|
||||
scpErr := getTestArtifacts(host, workspace)
|
||||
if scpErr != nil {
|
||||
aggErrs = append(aggErrs, scpErr)
|
||||
}
|
||||
|
||||
return output, len(aggErrs) == 0, utilerrors.NewAggregate(aggErrs)
|
||||
}
|
||||
|
||||
// timestampFormat is the timestamp format used in the node e2e directory name.
|
||||
const timestampFormat = "20060102T150405"
|
||||
|
||||
func getTimestamp() string {
|
||||
return time.Now().Format(timestampFormat)
|
||||
}
|
||||
|
||||
func getTestArtifacts(host, testDir string) error {
|
||||
logPath := filepath.Join(*resultsDir, host)
|
||||
if err := os.MkdirAll(logPath, 0755); err != nil {
|
||||
return fmt.Errorf("failed to create log directory %q: %v", logPath, err)
|
||||
}
|
||||
// Copy logs to artifacts/hostname
|
||||
_, err := runSSHCommand("scp", "-r", fmt.Sprintf("%s:%s/results/*.log", GetHostnameOrIp(host), testDir), logPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Copy json files (if any) to artifacts.
|
||||
if _, err = SSH(host, "ls", fmt.Sprintf("%s/results/*.json", testDir)); err == nil {
|
||||
_, err = runSSHCommand("scp", "-r", fmt.Sprintf("%s:%s/results/*.json", GetHostnameOrIp(host), testDir), *resultsDir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// Copy junit to the top of artifacts
|
||||
_, err = runSSHCommand("scp", fmt.Sprintf("%s:%s/results/junit*", GetHostnameOrIp(host), testDir), *resultsDir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// collectSystemLog is a temporary hack to collect system log when encountered on
|
||||
// unexpected error.
|
||||
func collectSystemLog(host string) {
|
||||
// Encountered an unexpected error. The remote test harness may not
|
||||
// have finished retrieving and storing all the logs in this case. Try
|
||||
// to get some logs for debugging purposes.
|
||||
// TODO: This is a best-effort, temporary hack that only works for
|
||||
// journald nodes. We should have a more robust way to collect logs.
|
||||
var (
|
||||
logName = "system.log"
|
||||
logPath = fmt.Sprintf("/tmp/%s-%s", getTimestamp(), logName)
|
||||
destPath = fmt.Sprintf("%s/%s-%s", *resultsDir, host, logName)
|
||||
)
|
||||
glog.V(2).Infof("Test failed unexpectedly. Attempting to retrieving system logs (only works for nodes with journald)")
|
||||
// Try getting the system logs from journald and store it to a file.
|
||||
// Don't reuse the original test directory on the remote host because
|
||||
// it could have been removed if the node was rebooted.
|
||||
if output, err := SSH(host, "sh", "-c", fmt.Sprintf("'journalctl --system --all > %s'", logPath)); err == nil {
|
||||
glog.V(2).Infof("Got the system logs from journald; copying it back...")
|
||||
if output, err := runSSHCommand("scp", fmt.Sprintf("%s:%s", GetHostnameOrIp(host), logPath), destPath); err != nil {
|
||||
glog.V(2).Infof("Failed to copy the log: err: %v, output: %q", err, output)
|
||||
}
|
||||
} else {
|
||||
glog.V(2).Infof("Failed to run journactl (normal if it doesn't exist on the node): %v, output: %q", err, output)
|
||||
}
|
||||
}
|
||||
|
||||
// WriteLog is a temporary function to make it possible to write log
|
||||
// in the runner. This is used to collect serial console log.
|
||||
// TODO(random-liu): Use the log-dump script in cluster e2e.
|
||||
func WriteLog(host, filename, content string) error {
|
||||
logPath := filepath.Join(*resultsDir, host)
|
||||
if err := os.MkdirAll(logPath, 0755); err != nil {
|
||||
return fmt.Errorf("failed to create log directory %q: %v", logPath, err)
|
||||
}
|
||||
f, err := os.Create(filepath.Join(logPath, filename))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
_, err = f.WriteString(content)
|
||||
return err
|
||||
}
|
111
vendor/k8s.io/kubernetes/test/e2e_node/remote/ssh.go
generated
vendored
Normal file
@ -0,0 +1,111 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package remote
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"os/user"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
var sshOptions = flag.String("ssh-options", "", "Commandline options passed to ssh.")
|
||||
var sshEnv = flag.String("ssh-env", "", "Use predefined ssh options for environment. Options: gce")
|
||||
var sshKey = flag.String("ssh-key", "", "Path to ssh private key.")
|
||||
var sshUser = flag.String("ssh-user", "", "Use predefined user for ssh.")
|
||||
|
||||
var sshOptionsMap map[string]string
|
||||
var sshDefaultKeyMap map[string]string
|
||||
|
||||
func init() {
|
||||
usr, err := user.Current()
|
||||
if err != nil {
|
||||
glog.Fatal(err)
|
||||
}
|
||||
sshOptionsMap = map[string]string{
|
||||
"gce": "-o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o CheckHostIP=no -o StrictHostKeyChecking=no -o ServerAliveInterval=30 -o LogLevel=ERROR",
|
||||
}
|
||||
sshDefaultKeyMap = map[string]string{
|
||||
"gce": fmt.Sprintf("%s/.ssh/google_compute_engine", usr.HomeDir),
|
||||
}
|
||||
}
|
||||
|
||||
var hostnameIpOverrides = struct {
|
||||
sync.RWMutex
|
||||
m map[string]string
|
||||
}{m: make(map[string]string)}
|
||||
|
||||
func AddHostnameIp(hostname, ip string) {
|
||||
hostnameIpOverrides.Lock()
|
||||
defer hostnameIpOverrides.Unlock()
|
||||
hostnameIpOverrides.m[hostname] = ip
|
||||
}
|
||||
|
||||
// GetHostnameOrIp converts the hostname into an ip and applies the ssh user prefix if necessary.
|
||||
func GetHostnameOrIp(hostname string) string {
|
||||
hostnameIpOverrides.RLock()
|
||||
defer hostnameIpOverrides.RUnlock()
|
||||
host := hostname
|
||||
if ip, found := hostnameIpOverrides.m[hostname]; found {
|
||||
host = ip
|
||||
}
|
||||
if *sshUser != "" {
|
||||
host = fmt.Sprintf("%s@%s", *sshUser, host)
|
||||
}
|
||||
return host
|
||||
}
|
||||
|
||||
// getSSHCommand handles proper quoting so that multiple commands are executed in the same shell over ssh
|
||||
func getSSHCommand(sep string, args ...string) string {
|
||||
return fmt.Sprintf("'%s'", strings.Join(args, sep))
|
||||
}
|
||||
|
||||
// SSH executes ssh command with runSSHCommand as root. The `sudo` makes sure that all commands
|
||||
// are executed by root, so that there won't be a permission mismatch between different commands.
|
||||
func SSH(host string, cmd ...string) (string, error) {
|
||||
return runSSHCommand("ssh", append([]string{GetHostnameOrIp(host), "--", "sudo"}, cmd...)...)
|
||||
}
|
||||
|
||||
// SSHNoSudo executes ssh command with runSSHCommand as normal user. Sometimes we need this,
|
||||
// for example when creating a directory that we will later copy files into with scp.
|
||||
func SSHNoSudo(host string, cmd ...string) (string, error) {
|
||||
return runSSHCommand("ssh", append([]string{GetHostnameOrIp(host), "--"}, cmd...)...)
|
||||
}
|
||||
|
||||
// runSSHCommand executes the ssh or scp command, adding the flags provided via --ssh-options
|
||||
func runSSHCommand(cmd string, args ...string) (string, error) {
|
||||
if *sshKey != "" {
|
||||
args = append([]string{"-i", *sshKey}, args...)
|
||||
} else if key, found := sshDefaultKeyMap[*sshEnv]; found {
|
||||
args = append([]string{"-i", key}, args...)
|
||||
}
|
||||
if env, found := sshOptionsMap[*sshEnv]; found {
|
||||
args = append(strings.Split(env, " "), args...)
|
||||
}
|
||||
if *sshOptions != "" {
|
||||
args = append(strings.Split(*sshOptions, " "), args...)
|
||||
}
|
||||
output, err := exec.Command(cmd, args...).CombinedOutput()
|
||||
if err != nil {
|
||||
return string(output), fmt.Errorf("command [%s %s] failed with error: %v", cmd, strings.Join(args, " "), err)
|
||||
}
|
||||
return string(output), nil
|
||||
}
|
49
vendor/k8s.io/kubernetes/test/e2e_node/remote/types.go
generated
vendored
Normal file
@ -0,0 +1,49 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package remote
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
// TestSuite is the interface of a test suite, such as node e2e, node conformance,
|
||||
// node soaking, cri validation etc.
|
||||
type TestSuite interface {
|
||||
// SetupTestPackage sets up the test package in the given directory. TestSuite
|
||||
// should put all necessary binaries and dependencies into the path. The caller
|
||||
// will:
|
||||
// * create a tarball with the directory.
|
||||
// * deploy the tarball to the testing host.
|
||||
// * untar the tarball to the testing workspace on the testing host.
|
||||
SetupTestPackage(path, systemSpecName string) error
|
||||
// RunTest runs test on the node in the given workspace and returns test output
|
||||
// and test error if there is any.
|
||||
// * host is the target node to run the test.
|
||||
// * workspace is the directory on the testing host the test is running in. Note
|
||||
// that the test package is unpacked in the workspace before running the test.
|
||||
// * results is the directory the test should write result into. All logs should be
|
||||
// saved as *.log, and all junit files should start with junit*.
|
||||
// * imageDesc is the description of the image the test is running on.
|
||||
// It will be used for logging purposes only.
|
||||
// * junitFilePrefix is the prefix of output junit file.
|
||||
// * testArgs is the arguments passed to test.
|
||||
// * ginkgoArgs is the arguments passed to ginkgo.
|
||||
// * systemSpecName is the name of the system spec used for validating the
|
||||
// image on which the test runs.
|
||||
// * timeout is the test timeout.
|
||||
RunTest(host, workspace, results, imageDesc, junitFilePrefix, testArgs, ginkgoArgs, systemSpecName string, timeout time.Duration) (string, error)
|
||||
}
|
133
vendor/k8s.io/kubernetes/test/e2e_node/remote/utils.go
generated
vendored
Normal file
@ -0,0 +1,133 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package remote
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
// utils.go contains functions used across test suites.
|
||||
|
||||
const (
|
||||
cniVersion = "v0.6.0"
|
||||
cniArch = "amd64"
|
||||
cniDirectory = "cni/bin" // The CNI tarball places binaries under the "cni/bin" directory.
|
||||
cniConfDirectory = "cni/net.d"
|
||||
cniURL = "https://dl.k8s.io/network-plugins/cni-plugins-" + cniArch + "-" + cniVersion + ".tgz"
|
||||
)
|
||||
|
||||
const cniConfig = `{
|
||||
"name": "mynet",
|
||||
"type": "bridge",
|
||||
"bridge": "mynet0",
|
||||
"isDefaultGateway": true,
|
||||
"forceAddress": false,
|
||||
"ipMasq": true,
|
||||
"hairpinMode": true,
|
||||
"ipam": {
|
||||
"type": "host-local",
|
||||
"subnet": "10.10.0.0/16"
|
||||
}
|
||||
}
|
||||
`
|
||||
|
||||
// setupCNI installs the cni plugin and adds a basic bridge configuration to the
|
||||
// configuration directory.
|
||||
func setupCNI(host, workspace string) error {
|
||||
glog.V(2).Infof("Install CNI on %q", host)
|
||||
cniPath := filepath.Join(workspace, cniDirectory)
|
||||
cmd := getSSHCommand(" ; ",
|
||||
fmt.Sprintf("mkdir -p %s", cniPath),
|
||||
fmt.Sprintf("wget -O - %s | tar -xz -C %s", cniURL, cniPath),
|
||||
)
|
||||
if output, err := SSH(host, "sh", "-c", cmd); err != nil {
|
||||
return fmt.Errorf("failed to install cni plugin on %q: %v output: %q", host, err, output)
|
||||
}
|
||||
|
||||
// The added CNI network config is not needed for kubenet. It is only
|
||||
// used when testing the CNI network plugin, but is added in both cases
|
||||
// for consistency and simplicity.
|
||||
glog.V(2).Infof("Adding CNI configuration on %q", host)
|
||||
cniConfigPath := filepath.Join(workspace, cniConfDirectory)
|
||||
cmd = getSSHCommand(" ; ",
|
||||
fmt.Sprintf("mkdir -p %s", cniConfigPath),
|
||||
fmt.Sprintf("echo %s > %s", quote(cniConfig), filepath.Join(cniConfigPath, "mynet.conf")),
|
||||
)
|
||||
if output, err := SSH(host, "sh", "-c", cmd); err != nil {
|
||||
return fmt.Errorf("failed to write cni configuration on %q: %v output: %q", host, err, output)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// configureFirewall configures iptables firewall rules.
|
||||
func configureFirewall(host string) error {
|
||||
glog.V(2).Infof("Configure iptables firewall rules on %q", host)
|
||||
// TODO: consider calling bootstrap script to configure host based on OS
|
||||
output, err := SSH(host, "iptables", "-L", "INPUT")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get iptables INPUT on %q: %v output: %q", host, err, output)
|
||||
}
|
||||
if strings.Contains(output, "Chain INPUT (policy DROP)") {
|
||||
cmd := getSSHCommand("&&",
|
||||
"(iptables -C INPUT -w -p TCP -j ACCEPT || iptables -A INPUT -w -p TCP -j ACCEPT)",
|
||||
"(iptables -C INPUT -w -p UDP -j ACCEPT || iptables -A INPUT -w -p UDP -j ACCEPT)",
|
||||
"(iptables -C INPUT -w -p ICMP -j ACCEPT || iptables -A INPUT -w -p ICMP -j ACCEPT)")
|
||||
output, err := SSH(host, "sh", "-c", cmd)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to configured firewall on %q: %v output: %v", host, err, output)
|
||||
}
|
||||
}
|
||||
output, err = SSH(host, "iptables", "-L", "FORWARD")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get iptables FORWARD on %q: %v output: %q", host, err, output)
|
||||
}
|
||||
if strings.Contains(output, "Chain FORWARD (policy DROP)") {
|
||||
cmd := getSSHCommand("&&",
|
||||
"(iptables -C FORWARD -w -p TCP -j ACCEPT || iptables -A FORWARD -w -p TCP -j ACCEPT)",
|
||||
"(iptables -C FORWARD -w -p UDP -j ACCEPT || iptables -A FORWARD -w -p UDP -j ACCEPT)",
|
||||
"(iptables -C FORWARD -w -p ICMP -j ACCEPT || iptables -A FORWARD -w -p ICMP -j ACCEPT)")
|
||||
output, err = SSH(host, "sh", "-c", cmd)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to configured firewall on %q: %v output: %v", host, err, output)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// cleanupNodeProcesses kills all running node processes that may conflict with the test.
|
||||
func cleanupNodeProcesses(host string) {
|
||||
glog.V(2).Infof("Killing any existing node processes on %q", host)
|
||||
cmd := getSSHCommand(" ; ",
|
||||
"pkill kubelet",
|
||||
"pkill kube-apiserver",
|
||||
"pkill etcd",
|
||||
"pkill e2e_node.test",
|
||||
)
|
||||
// No need to log an error if pkill fails since pkill will fail if the commands are not running.
|
||||
// If we are unable to stop existing running k8s processes, we should see messages in the kubelet/apiserver/etcd
|
||||
// logs about failing to bind the required ports.
|
||||
SSH(host, "sh", "-c", cmd)
|
||||
}
|
||||
|
||||
// quote wraps a shell literal so it can be nested within another single-quoted shell scope.
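// Each '"'"' sequence closes the surrounding single-quoted string, emits a literal single quote via double quotes, and reopens single quotes, so the literal stays intact through one extra round of shell parsing.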
|
||||
func quote(s string) string {
|
||||
return fmt.Sprintf("'\"'\"'%s'\"'\"'", s)
|
||||
}
|
562
vendor/k8s.io/kubernetes/test/e2e_node/resource_collector.go
generated
vendored
Normal file
@ -0,0 +1,562 @@
|
||||
// +build linux
|
||||
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"text/tabwriter"
|
||||
"time"
|
||||
|
||||
cadvisorclient "github.com/google/cadvisor/client/v2"
|
||||
cadvisorapiv2 "github.com/google/cadvisor/info/v2"
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups"
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
|
||||
"k8s.io/kubernetes/pkg/util/procfs"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e_node/perftype"
|
||||
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
const (
|
||||
// resource monitoring
|
||||
cadvisorImageName = "google/cadvisor:latest"
|
||||
cadvisorPodName = "cadvisor"
|
||||
cadvisorPort = 8090
|
||||
// housekeeping interval of Cadvisor (second)
|
||||
houseKeepingInterval = 1
|
||||
)
|
||||
|
||||
var (
|
||||
systemContainers map[string]string
|
||||
)
|
||||
|
||||
type ResourceCollector struct {
|
||||
client *cadvisorclient.Client
|
||||
request *cadvisorapiv2.RequestOptions
|
||||
|
||||
pollingInterval time.Duration
|
||||
buffers map[string][]*framework.ContainerResourceUsage
|
||||
lock sync.RWMutex
|
||||
stopCh chan struct{}
|
||||
}
|
||||
|
||||
// NewResourceCollector creates a resource collector object which collects
|
||||
// resource usage periodically from Cadvisor
|
||||
func NewResourceCollector(interval time.Duration) *ResourceCollector {
|
||||
buffers := make(map[string][]*framework.ContainerResourceUsage)
|
||||
return &ResourceCollector{
|
||||
pollingInterval: interval,
|
||||
buffers: buffers,
|
||||
}
|
||||
}
|
||||
|
||||
// Start starts resource collector and connects to the standalone Cadvisor pod
|
||||
// then repeatedly runs collectStats.
|
||||
func (r *ResourceCollector) Start() {
|
||||
// Get the cgroup container names for kubelet and docker
|
||||
kubeletContainer, err := getContainerNameForProcess(kubeletProcessName, "")
|
||||
dockerContainer, err := getContainerNameForProcess(dockerProcessName, dockerPidFile)
|
||||
if err == nil {
|
||||
systemContainers = map[string]string{
|
||||
stats.SystemContainerKubelet: kubeletContainer,
|
||||
stats.SystemContainerRuntime: dockerContainer,
|
||||
}
|
||||
} else {
|
||||
framework.Failf("Failed to get docker container name in test-e2e-node resource collector.")
|
||||
}
|
||||
|
||||
wait.Poll(1*time.Second, 1*time.Minute, func() (bool, error) {
|
||||
var err error
|
||||
r.client, err = cadvisorclient.NewClient(fmt.Sprintf("http://localhost:%d/", cadvisorPort))
|
||||
if err == nil {
|
||||
return true, nil
|
||||
}
|
||||
return false, err
|
||||
})
|
||||
|
||||
Expect(r.client).NotTo(BeNil(), "cadvisor client not ready")
|
||||
|
||||
r.request = &cadvisorapiv2.RequestOptions{IdType: "name", Count: 1, Recursive: false}
|
||||
r.stopCh = make(chan struct{})
|
||||
|
||||
oldStatsMap := make(map[string]*cadvisorapiv2.ContainerStats)
|
||||
go wait.Until(func() { r.collectStats(oldStatsMap) }, r.pollingInterval, r.stopCh)
|
||||
}
|
||||
|
||||
// Stop stops the resource collector from collecting stats. It does not clear the buffer.
|
||||
func (r *ResourceCollector) Stop() {
|
||||
close(r.stopCh)
|
||||
}
|
||||
|
||||
// Reset clears the stats buffer of resource collector.
|
||||
func (r *ResourceCollector) Reset() {
|
||||
r.lock.Lock()
|
||||
defer r.lock.Unlock()
|
||||
for _, name := range systemContainers {
|
||||
r.buffers[name] = []*framework.ContainerResourceUsage{}
|
||||
}
|
||||
}
|
||||
|
||||
// GetCPUSummary gets CPU usage in percentiles.
|
||||
func (r *ResourceCollector) GetCPUSummary() framework.ContainersCPUSummary {
|
||||
result := make(framework.ContainersCPUSummary)
|
||||
for key, name := range systemContainers {
|
||||
data := r.GetBasicCPUStats(name)
|
||||
result[key] = data
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// LogLatest logs the latest resource usage.
|
||||
func (r *ResourceCollector) LogLatest() {
|
||||
summary, err := r.GetLatest()
|
||||
if err != nil {
|
||||
framework.Logf("%v", err)
|
||||
}
|
||||
framework.Logf("%s", formatResourceUsageStats(summary))
|
||||
}
|
||||
|
||||
// collectStats collects resource usage from Cadvisor.
|
||||
func (r *ResourceCollector) collectStats(oldStatsMap map[string]*cadvisorapiv2.ContainerStats) {
|
||||
for _, name := range systemContainers {
|
||||
ret, err := r.client.Stats(name, r.request)
|
||||
if err != nil {
|
||||
framework.Logf("Error getting container stats, err: %v", err)
|
||||
return
|
||||
}
|
||||
cStats, ok := ret[name]
|
||||
if !ok {
|
||||
framework.Logf("Missing info/stats for container %q", name)
|
||||
return
|
||||
}
|
||||
|
||||
newStats := cStats.Stats[0]
|
||||
|
||||
if oldStats, ok := oldStatsMap[name]; ok && oldStats.Timestamp.Before(newStats.Timestamp) {
|
||||
if oldStats.Timestamp.Equal(newStats.Timestamp) {
|
||||
continue
|
||||
}
|
||||
r.buffers[name] = append(r.buffers[name], computeContainerResourceUsage(name, oldStats, newStats))
|
||||
}
|
||||
oldStatsMap[name] = newStats
|
||||
}
|
||||
}
|
||||
|
||||
// computeContainerResourceUsage computes resource usage based on new data sample.
|
||||
func computeContainerResourceUsage(name string, oldStats, newStats *cadvisorapiv2.ContainerStats) *framework.ContainerResourceUsage {
|
||||
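// CPU usage in cores is the delta of cumulative CPU time (nanoseconds) divided by the elapsed wall time between the two samples (also nanoseconds), i.e. the average number of cores used over the interval.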
return &framework.ContainerResourceUsage{
|
||||
Name: name,
|
||||
Timestamp: newStats.Timestamp,
|
||||
CPUUsageInCores: float64(newStats.Cpu.Usage.Total-oldStats.Cpu.Usage.Total) / float64(newStats.Timestamp.Sub(oldStats.Timestamp).Nanoseconds()),
|
||||
MemoryUsageInBytes: newStats.Memory.Usage,
|
||||
MemoryWorkingSetInBytes: newStats.Memory.WorkingSet,
|
||||
MemoryRSSInBytes: newStats.Memory.RSS,
|
||||
CPUInterval: newStats.Timestamp.Sub(oldStats.Timestamp),
|
||||
}
|
||||
}
|
||||
|
||||
// GetLatest gets the latest resource usage from stats buffer.
|
||||
func (r *ResourceCollector) GetLatest() (framework.ResourceUsagePerContainer, error) {
|
||||
r.lock.RLock()
|
||||
defer r.lock.RUnlock()
|
||||
stats := make(framework.ResourceUsagePerContainer)
|
||||
for key, name := range systemContainers {
|
||||
contStats, ok := r.buffers[name]
|
||||
if !ok || len(contStats) == 0 {
|
||||
return nil, fmt.Errorf("No resource usage data for %s container (%s)", key, name)
|
||||
}
|
||||
stats[key] = contStats[len(contStats)-1]
|
||||
}
|
||||
return stats, nil
|
||||
}
|
||||
|
||||
type resourceUsageByCPU []*framework.ContainerResourceUsage
|
||||
|
||||
func (r resourceUsageByCPU) Len() int { return len(r) }
|
||||
func (r resourceUsageByCPU) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
|
||||
func (r resourceUsageByCPU) Less(i, j int) bool { return r[i].CPUUsageInCores < r[j].CPUUsageInCores }
|
||||
|
||||
// The percentiles to report.
|
||||
var percentiles = [...]float64{0.50, 0.90, 0.95, 0.99, 1.00}
|
||||
|
||||
// GetBasicCPUStats returns the percentiles of the cpu usage in cores for
|
||||
// containerName. This method examines all data currently in the buffer.
|
||||
func (r *ResourceCollector) GetBasicCPUStats(containerName string) map[float64]float64 {
|
||||
r.lock.RLock()
|
||||
defer r.lock.RUnlock()
|
||||
result := make(map[float64]float64, len(percentiles))
|
||||
|
||||
// We must make a copy of the array, otherwise the timeseries order is changed.
|
||||
usages := make([]*framework.ContainerResourceUsage, 0)
|
||||
for _, usage := range r.buffers[containerName] {
|
||||
usages = append(usages, usage)
|
||||
}
|
||||
|
||||
sort.Sort(resourceUsageByCPU(usages))
|
||||
for _, q := range percentiles {
|
||||
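// Nearest-rank percentile over the CPU-sorted samples: the index is zero-based, so q=1.00 selects the largest sample, and buffers too small for a given quantile report 0.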
index := int(float64(len(usages))*q) - 1
|
||||
if index < 0 {
|
||||
// We don't have enough data.
|
||||
result[q] = 0
|
||||
continue
|
||||
}
|
||||
result[q] = usages[index].CPUUsageInCores
|
||||
}
|
||||
return result
|
||||
}
|
||||
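// Illustrative sketch, not part of the vendored file: GetBasicCPUStats above selects each
// reported percentile by nearest rank over the sorted samples, index = int(n*q) - 1. The
// same selection over a plain, already-sorted float64 slice (hypothetical helper):
func percentileOfSorted(sorted []float64, q float64) float64 {
	index := int(float64(len(sorted))*q) - 1
	if index < 0 {
		// Not enough samples to report this percentile.
		return 0
	}
	return sorted[index]
}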
|
||||
func formatResourceUsageStats(containerStats framework.ResourceUsagePerContainer) string {
|
||||
// Example output:
|
||||
//
|
||||
// Resource usage for node "e2e-test-foo-node-abcde":
|
||||
// container cpu(cores) memory(MB)
|
||||
// "/" 0.363 2942.09
|
||||
// "/docker-daemon" 0.088 521.80
|
||||
// "/kubelet" 0.086 424.37
|
||||
// "/system" 0.007 119.88
|
||||
buf := &bytes.Buffer{}
|
||||
w := tabwriter.NewWriter(buf, 1, 0, 1, ' ', 0)
|
||||
fmt.Fprintf(w, "container\tcpu(cores)\tmemory_working_set(MB)\tmemory_rss(MB)\n")
|
||||
for name, s := range containerStats {
|
||||
fmt.Fprintf(w, "%q\t%.3f\t%.2f\t%.2f\n", name, s.CPUUsageInCores, float64(s.MemoryWorkingSetInBytes)/(1024*1024), float64(s.MemoryRSSInBytes)/(1024*1024))
|
||||
}
|
||||
w.Flush()
|
||||
return fmt.Sprintf("Resource usage:\n%s", buf.String())
|
||||
}
|
||||
|
||||
func formatCPUSummary(summary framework.ContainersCPUSummary) string {
|
||||
// Example output for a node (the percentiles may differ):
|
||||
// CPU usage of containers on node "e2e-test-foo-node-0vj7":
|
||||
// container 5th% 50th% 90th% 95th%
|
||||
// "/" 0.051 0.159 0.387 0.455
|
||||
// "/runtime 0.000 0.000 0.146 0.166
|
||||
// "/kubelet" 0.036 0.053 0.091 0.154
|
||||
// "/misc" 0.001 0.001 0.001 0.002
|
||||
var summaryStrings []string
|
||||
var header []string
|
||||
header = append(header, "container")
|
||||
for _, p := range percentiles {
|
||||
header = append(header, fmt.Sprintf("%.0fth%%", p*100))
|
||||
}
|
||||
|
||||
buf := &bytes.Buffer{}
|
||||
w := tabwriter.NewWriter(buf, 1, 0, 1, ' ', 0)
|
||||
fmt.Fprintf(w, "%s\n", strings.Join(header, "\t"))
|
||||
|
||||
for _, containerName := range framework.TargetContainers() {
|
||||
var s []string
|
||||
s = append(s, fmt.Sprintf("%q", containerName))
|
||||
data, ok := summary[containerName]
|
||||
for _, p := range percentiles {
|
||||
value := "N/A"
|
||||
if ok {
|
||||
value = fmt.Sprintf("%.3f", data[p])
|
||||
}
|
||||
s = append(s, value)
|
||||
}
|
||||
fmt.Fprintf(w, "%s\n", strings.Join(s, "\t"))
|
||||
}
|
||||
w.Flush()
|
||||
summaryStrings = append(summaryStrings, fmt.Sprintf("CPU usage of containers:\n%s", buf.String()))
|
||||
|
||||
return strings.Join(summaryStrings, "\n")
|
||||
}
|
||||
|
||||
// getCadvisorPod returns a standalone cadvisor pod spec for fine-grained resource monitoring.
|
||||
func getCadvisorPod() *v1.Pod {
|
||||
return &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: cadvisorPodName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
// It uses a host port for the tests to collect data.
|
||||
// Currently we cannot use port mapping in test-e2e-node.
|
||||
HostNetwork: true,
|
||||
SecurityContext: &v1.PodSecurityContext{},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: cadvisorImageName,
|
||||
Name: cadvisorPodName,
|
||||
Ports: []v1.ContainerPort{
|
||||
{
|
||||
Name: "http",
|
||||
HostPort: cadvisorPort,
|
||||
ContainerPort: cadvisorPort,
|
||||
Protocol: v1.ProtocolTCP,
|
||||
},
|
||||
},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: "sys",
|
||||
ReadOnly: true,
|
||||
MountPath: "/sys",
|
||||
},
|
||||
{
|
||||
Name: "var-run",
|
||||
ReadOnly: false,
|
||||
MountPath: "/var/run",
|
||||
},
|
||||
{
|
||||
Name: "docker",
|
||||
ReadOnly: true,
|
||||
MountPath: "/var/lib/docker/",
|
||||
},
|
||||
{
|
||||
Name: "rootfs",
|
||||
ReadOnly: true,
|
||||
MountPath: "/rootfs",
|
||||
},
|
||||
},
|
||||
Args: []string{
|
||||
"--profiling",
|
||||
fmt.Sprintf("--housekeeping_interval=%ds", houseKeepingInterval),
|
||||
fmt.Sprintf("--port=%d", cadvisorPort),
|
||||
},
|
||||
},
|
||||
},
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: "rootfs",
|
||||
VolumeSource: v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: "/"}},
|
||||
},
|
||||
{
|
||||
Name: "var-run",
|
||||
VolumeSource: v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: "/var/run"}},
|
||||
},
|
||||
{
|
||||
Name: "sys",
|
||||
VolumeSource: v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: "/sys"}},
|
||||
},
|
||||
{
|
||||
Name: "docker",
|
||||
VolumeSource: v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: "/var/lib/docker"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// deletePodsSync deletes a list of pods and blocks until they disappear.
|
||||
func deletePodsSync(f *framework.Framework, pods []*v1.Pod) {
|
||||
var wg sync.WaitGroup
|
||||
for _, pod := range pods {
|
||||
wg.Add(1)
|
||||
go func(pod *v1.Pod) {
|
||||
defer wg.Done()
|
||||
|
||||
err := f.PodClient().Delete(pod.ObjectMeta.Name, metav1.NewDeleteOptions(30))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
Expect(framework.WaitForPodToDisappear(f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(),
|
||||
30*time.Second, 10*time.Minute)).NotTo(HaveOccurred())
|
||||
}(pod)
|
||||
}
|
||||
wg.Wait()
|
||||
return
|
||||
}
|
||||
|
||||
// newTestPods creates a list of pod specifications for the test.
|
||||
func newTestPods(numPods int, volume bool, imageName, podType string) []*v1.Pod {
|
||||
var pods []*v1.Pod
|
||||
for i := 0; i < numPods; i++ {
|
||||
podName := "test-" + string(uuid.NewUUID())
|
||||
labels := map[string]string{
|
||||
"type": podType,
|
||||
"name": podName,
|
||||
}
|
||||
if volume {
|
||||
pods = append(pods,
|
||||
&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
Labels: labels,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
// Restart policy is Always (the default).
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: imageName,
|
||||
Name: podName,
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{MountPath: "/test-volume-mnt", Name: podName + "-volume"},
|
||||
},
|
||||
},
|
||||
},
|
||||
Volumes: []v1.Volume{
|
||||
{Name: podName + "-volume", VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}},
|
||||
},
|
||||
},
|
||||
})
|
||||
} else {
|
||||
pods = append(pods,
|
||||
&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
Labels: labels,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
// Restart policy is Always (the default).
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: imageName,
|
||||
Name: podName,
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
return pods
|
||||
}
|
||||
|
||||
// GetResourceTimeSeries returns the time series of resource usage for each system container.
|
||||
func (r *ResourceCollector) GetResourceTimeSeries() map[string]*perftype.ResourceSeries {
|
||||
resourceSeries := make(map[string]*perftype.ResourceSeries)
|
||||
for key, name := range systemContainers {
|
||||
newSeries := &perftype.ResourceSeries{Units: map[string]string{
|
||||
"cpu": "mCPU",
|
||||
"memory": "MB",
|
||||
}}
|
||||
resourceSeries[key] = newSeries
|
||||
for _, usage := range r.buffers[name] {
|
||||
newSeries.Timestamp = append(newSeries.Timestamp, usage.Timestamp.UnixNano())
|
||||
newSeries.CPUUsageInMilliCores = append(newSeries.CPUUsageInMilliCores, int64(usage.CPUUsageInCores*1000))
|
||||
newSeries.MemoryRSSInMegaBytes = append(newSeries.MemoryRSSInMegaBytes, int64(float64(usage.MemoryUsageInBytes)/(1024*1024)))
|
||||
}
|
||||
}
|
||||
return resourceSeries
|
||||
}
|
||||
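// Illustrative sketch, not part of the vendored file: the series above reports CPU in
// milli-cores and memory in megabytes. The unit conversions applied to each sample are
// simply (hypothetical helpers):
func toMilliCores(cores float64) int64 { return int64(cores * 1000) }
func toMegaBytes(bytes uint64) int64   { return int64(float64(bytes) / (1024 * 1024)) }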
|
||||
// Code for getting container name of docker, copied from pkg/kubelet/cm/container_manager_linux.go
|
||||
// since these helpers are not exported there
|
||||
const (
|
||||
kubeletProcessName = "kubelet"
|
||||
dockerProcessName = "docker"
|
||||
dockerPidFile = "/var/run/docker.pid"
|
||||
containerdProcessName = "docker-containerd"
|
||||
containerdPidFile = "/run/docker/libcontainerd/docker-containerd.pid"
|
||||
)
|
||||
|
||||
func getPidsForProcess(name, pidFile string) ([]int, error) {
|
||||
if len(pidFile) > 0 {
|
||||
if pid, err := getPidFromPidFile(pidFile); err == nil {
|
||||
return []int{pid}, nil
|
||||
} else {
|
||||
// log the error and fall back to pidof
|
||||
runtime.HandleError(err)
|
||||
}
|
||||
}
|
||||
return procfs.PidOf(name)
|
||||
}
|
||||
|
||||
func getPidFromPidFile(pidFile string) (int, error) {
|
||||
file, err := os.Open(pidFile)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("error opening pid file %s: %v", pidFile, err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
data, err := ioutil.ReadAll(file)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("error reading pid file %s: %v", pidFile, err)
|
||||
}
|
||||
|
||||
pid, err := strconv.Atoi(string(data))
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("error parsing %s as a number: %v", string(data), err)
|
||||
}
|
||||
|
||||
return pid, nil
|
||||
}
|
||||
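// Illustrative sketch, not part of the vendored file: a whitespace-tolerant variant of the
// parse in getPidFromPidFile above. Pid files written with a trailing newline would make
// strconv.Atoi fail on the raw bytes, so this hypothetical helper trims whitespace first.
func parsePidBytes(data []byte) (int, error) {
	pid, err := strconv.Atoi(strings.TrimSpace(string(data)))
	if err != nil {
		return 0, fmt.Errorf("error parsing %q as a number: %v", string(data), err)
	}
	return pid, nil
}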
|
||||
func getContainerNameForProcess(name, pidFile string) (string, error) {
|
||||
pids, err := getPidsForProcess(name, pidFile)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to detect process id for %q - %v", name, err)
|
||||
}
|
||||
if len(pids) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
cont, err := getContainer(pids[0])
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return cont, nil
|
||||
}
|
||||
|
||||
// getContainer returns the cgroup associated with the specified pid.
|
||||
// It enforces a unified hierarchy for memory and cpu cgroups.
|
||||
// On systemd environments, it uses the name=systemd cgroup for the specified pid.
|
||||
func getContainer(pid int) (string, error) {
|
||||
cgs, err := cgroups.ParseCgroupFile(fmt.Sprintf("/proc/%d/cgroup", pid))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
cpu, found := cgs["cpu"]
|
||||
if !found {
|
||||
return "", cgroups.NewNotFoundError("cpu")
|
||||
}
|
||||
memory, found := cgs["memory"]
|
||||
if !found {
|
||||
return "", cgroups.NewNotFoundError("memory")
|
||||
}
|
||||
|
||||
// since we use this container for accounting, we need to ensure it is a unified hierarchy.
|
||||
if cpu != memory {
|
||||
return "", fmt.Errorf("cpu and memory cgroup hierarchy not unified. cpu: %s, memory: %s", cpu, memory)
|
||||
}
|
||||
|
||||
// on systemd, every pid is in a unified cgroup hierarchy (name=systemd as seen in systemd-cgls)
|
||||
// cpu and memory accounting is off by default, users may choose to enable it per unit or globally.
|
||||
// users could enable CPU and memory accounting globally via /etc/systemd/system.conf (DefaultCPUAccounting=true DefaultMemoryAccounting=true).
|
||||
// users could also enable CPU and memory accounting per unit via CPUAccounting=true and MemoryAccounting=true
|
||||
// we only warn if accounting is not enabled for CPU or memory so as to not break local development flows where kubelet is launched in a terminal.
|
||||
// for example, the cgroup for the user session will be something like /user.slice/user-X.slice/session-X.scope, but the cpu and memory
|
||||
// cgroup will be the closest ancestor where accounting is performed (most likely /) on systems that launch docker containers.
|
||||
// as a result, on those systems, you will not get cpu or memory accounting statistics for kubelet.
|
||||
// in addition, you would not get memory or cpu accounting for the runtime unless accounting was enabled on its unit (or globally).
|
||||
if systemd, found := cgs["name=systemd"]; found {
|
||||
if systemd != cpu {
|
||||
log.Printf("CPUAccounting not enabled for pid: %d", pid)
|
||||
}
|
||||
if systemd != memory {
|
||||
log.Printf("MemoryAccounting not enabled for pid: %d", pid)
|
||||
}
|
||||
return systemd, nil
|
||||
}
|
||||
|
||||
return cpu, nil
|
||||
}
|
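// Illustrative sketch, not part of the vendored file: each line of /proc/<pid>/cgroup has the
// form "hierarchy-ID:subsystem-list:cgroup-path", e.g. "4:cpu,cpuacct:/system.slice/docker.service".
// cgroups.ParseCgroupFile returns a map from subsystem name to cgroup path, which getContainer
// above uses to require that "cpu" and "memory" point at the same path. A hand-rolled parse of
// that line format (hypothetical helper, assuming well-formed input) would be:
func parseCgroupLines(lines []string) map[string]string {
	cgs := make(map[string]string)
	for _, line := range lines {
		parts := strings.SplitN(line, ":", 3)
		if len(parts) != 3 {
			continue
		}
		for _, subsystem := range strings.Split(parts[1], ",") {
			cgs[subsystem] = parts[2]
		}
	}
	return cgs
}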
292
vendor/k8s.io/kubernetes/test/e2e_node/resource_usage_test.go
generated
vendored
Normal file
@ -0,0 +1,292 @@
|
||||
// +build linux
|
||||
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = SIGDescribe("Resource-usage [Serial] [Slow]", func() {
|
||||
const (
|
||||
// Interval to poll /stats/container on a node
|
||||
containerStatsPollingPeriod = 10 * time.Second
|
||||
)
|
||||
|
||||
var (
|
||||
rc *ResourceCollector
|
||||
om *framework.RuntimeOperationMonitor
|
||||
)
|
||||
|
||||
f := framework.NewDefaultFramework("resource-usage")
|
||||
|
||||
BeforeEach(func() {
|
||||
om = framework.NewRuntimeOperationMonitor(f.ClientSet)
|
||||
// The test collects resource usage from a standalone cAdvisor pod.
// The kubelet's built-in cAdvisor has a housekeeping interval of 10s, which is too long to
// capture resource usage spikes, and shortening that interval would increase the kubelet's
// own overhead. Hence we use a separate cAdvisor pod.
|
||||
f.PodClient().CreateSync(getCadvisorPod())
|
||||
rc = NewResourceCollector(containerStatsPollingPeriod)
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
result := om.GetLatestRuntimeOperationErrorRate()
|
||||
framework.Logf("runtime operation error metrics:\n%s", framework.FormatRuntimeOperationErrorRate(result))
|
||||
})
|
||||
|
||||
// This test measures and verifies that the steady-state resource usage of the node is within limits.
// It collects data from a standalone cAdvisor with a housekeeping interval of 1s.
// It verifies the CPU percentiles and the latest memory usage.
|
||||
Context("regular resource usage tracking", func() {
|
||||
rTests := []resourceTest{
|
||||
{
|
||||
podsNr: 10,
|
||||
cpuLimits: framework.ContainersCPUSummary{
|
||||
stats.SystemContainerKubelet: {0.50: 0.30, 0.95: 0.35},
|
||||
stats.SystemContainerRuntime: {0.50: 0.30, 0.95: 0.40},
|
||||
},
|
||||
memLimits: framework.ResourceUsagePerContainer{
|
||||
stats.SystemContainerKubelet: &framework.ContainerResourceUsage{MemoryRSSInBytes: 200 * 1024 * 1024},
|
||||
stats.SystemContainerRuntime: &framework.ContainerResourceUsage{MemoryRSSInBytes: 400 * 1024 * 1024},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, testArg := range rTests {
|
||||
itArg := testArg
|
||||
desc := fmt.Sprintf("resource tracking for %d pods per node", itArg.podsNr)
|
||||
It(desc, func() {
|
||||
testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)
|
||||
|
||||
runResourceUsageTest(f, rc, itArg)
|
||||
|
||||
// Log and verify resource usage
|
||||
logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, true)
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
Context("regular resource usage tracking", func() {
|
||||
rTests := []resourceTest{
|
||||
{
|
||||
podsNr: 0,
|
||||
},
|
||||
{
|
||||
podsNr: 10,
|
||||
},
|
||||
{
|
||||
podsNr: 35,
|
||||
},
|
||||
{
|
||||
podsNr: 105,
|
||||
},
|
||||
}
|
||||
|
||||
for _, testArg := range rTests {
|
||||
itArg := testArg
|
||||
desc := fmt.Sprintf("resource tracking for %d pods per node [Benchmark]", itArg.podsNr)
|
||||
It(desc, func() {
|
||||
testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)
|
||||
|
||||
runResourceUsageTest(f, rc, itArg)
|
||||
|
||||
// Log and verify resource usage
|
||||
logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, false)
|
||||
})
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
type resourceTest struct {
|
||||
podsNr int
|
||||
cpuLimits framework.ContainersCPUSummary
|
||||
memLimits framework.ResourceUsagePerContainer
|
||||
}
|
||||
|
||||
func (rt *resourceTest) getTestName() string {
|
||||
return fmt.Sprintf("resource_%d", rt.podsNr)
|
||||
}
|
||||
|
||||
// runResourceUsageTest runs the resource usage test
|
||||
func runResourceUsageTest(f *framework.Framework, rc *ResourceCollector, testArg resourceTest) {
|
||||
const (
|
||||
// The monitoring time for one test
|
||||
monitoringTime = 10 * time.Minute
|
||||
// The periodic reporting period
|
||||
reportingPeriod = 5 * time.Minute
|
||||
// sleep for an interval here to measure steady data
|
||||
sleepAfterCreatePods = 10 * time.Second
|
||||
)
|
||||
pods := newTestPods(testArg.podsNr, true, framework.GetPauseImageNameForHostArch(), "test_pod")
|
||||
|
||||
rc.Start()
|
||||
// Explicitly delete pods so that the namespace controller does not time out cleaning them up
|
||||
defer deletePodsSync(f, append(pods, getCadvisorPod()))
|
||||
defer rc.Stop()
|
||||
|
||||
By("Creating a batch of Pods")
|
||||
f.PodClient().CreateBatch(pods)
|
||||
|
||||
// wait for a while to let the node be steady
|
||||
time.Sleep(sleepAfterCreatePods)
|
||||
|
||||
// Log once and flush the stats.
|
||||
rc.LogLatest()
|
||||
rc.Reset()
|
||||
|
||||
By("Start monitoring resource usage")
|
||||
// Periodically dump the cpu summary until the deadline is met.
|
||||
// Note that without calling framework.ResourceMonitor.Reset(), the stats
|
||||
// would occupy increasingly more memory. This should be fine
|
||||
// for the current test duration, but we should reclaim the
|
||||
// entries if we plan to monitor longer (e.g., 8 hours).
|
||||
deadline := time.Now().Add(monitoringTime)
|
||||
for time.Now().Before(deadline) {
|
||||
timeLeft := deadline.Sub(time.Now())
|
||||
framework.Logf("Still running...%v left", timeLeft)
|
||||
if timeLeft < reportingPeriod {
|
||||
time.Sleep(timeLeft)
|
||||
} else {
|
||||
time.Sleep(reportingPeriod)
|
||||
}
|
||||
logPods(f.ClientSet)
|
||||
}
|
||||
|
||||
By("Reporting overall resource usage")
|
||||
logPods(f.ClientSet)
|
||||
}
|
||||
|
||||
// logAndVerifyResource prints the resource usage as perf data and verifies whether resource usage satisfies the limit.
|
||||
func logAndVerifyResource(f *framework.Framework, rc *ResourceCollector, cpuLimits framework.ContainersCPUSummary,
|
||||
memLimits framework.ResourceUsagePerContainer, testInfo map[string]string, isVerify bool) {
|
||||
nodeName := framework.TestContext.NodeName
|
||||
|
||||
// Obtain memory PerfData
|
||||
usagePerContainer, err := rc.GetLatest()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.Logf("%s", formatResourceUsageStats(usagePerContainer))
|
||||
|
||||
usagePerNode := make(framework.ResourceUsagePerNode)
|
||||
usagePerNode[nodeName] = usagePerContainer
|
||||
|
||||
// Obtain CPU PerfData
|
||||
cpuSummary := rc.GetCPUSummary()
|
||||
framework.Logf("%s", formatCPUSummary(cpuSummary))
|
||||
|
||||
cpuSummaryPerNode := make(framework.NodesCPUSummary)
|
||||
cpuSummaryPerNode[nodeName] = cpuSummary
|
||||
|
||||
// Print resource usage
|
||||
logPerfData(framework.ResourceUsageToPerfDataWithLabels(usagePerNode, testInfo), "memory")
|
||||
logPerfData(framework.CPUUsageToPerfDataWithLabels(cpuSummaryPerNode, testInfo), "cpu")
|
||||
|
||||
// Verify resource usage
|
||||
if isVerify {
|
||||
verifyMemoryLimits(f.ClientSet, memLimits, usagePerNode)
|
||||
verifyCPULimits(cpuLimits, cpuSummaryPerNode)
|
||||
}
|
||||
}
|
||||
|
||||
func verifyMemoryLimits(c clientset.Interface, expected framework.ResourceUsagePerContainer, actual framework.ResourceUsagePerNode) {
|
||||
if expected == nil {
|
||||
return
|
||||
}
|
||||
var errList []string
|
||||
for nodeName, nodeSummary := range actual {
|
||||
var nodeErrs []string
|
||||
for cName, expectedResult := range expected {
|
||||
container, ok := nodeSummary[cName]
|
||||
if !ok {
|
||||
nodeErrs = append(nodeErrs, fmt.Sprintf("container %q: missing", cName))
|
||||
continue
|
||||
}
|
||||
|
||||
expectedValue := expectedResult.MemoryRSSInBytes
|
||||
actualValue := container.MemoryRSSInBytes
|
||||
if expectedValue != 0 && actualValue > expectedValue {
|
||||
nodeErrs = append(nodeErrs, fmt.Sprintf("container %q: expected RSS memory (MB) < %d; got %d",
|
||||
cName, expectedValue, actualValue))
|
||||
}
|
||||
}
|
||||
if len(nodeErrs) > 0 {
|
||||
errList = append(errList, fmt.Sprintf("node %v:\n %s", nodeName, strings.Join(nodeErrs, ", ")))
|
||||
heapStats, err := framework.GetKubeletHeapStats(c, nodeName)
|
||||
if err != nil {
|
||||
framework.Logf("Unable to get heap stats from %q", nodeName)
|
||||
} else {
|
||||
framework.Logf("Heap stats on %q\n:%v", nodeName, heapStats)
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(errList) > 0 {
|
||||
framework.Failf("Memory usage exceeding limits:\n %s", strings.Join(errList, "\n"))
|
||||
}
|
||||
}
|
||||
|
||||
func verifyCPULimits(expected framework.ContainersCPUSummary, actual framework.NodesCPUSummary) {
|
||||
if expected == nil {
|
||||
return
|
||||
}
|
||||
var errList []string
|
||||
for nodeName, perNodeSummary := range actual {
|
||||
var nodeErrs []string
|
||||
for cName, expectedResult := range expected {
|
||||
perContainerSummary, ok := perNodeSummary[cName]
|
||||
if !ok {
|
||||
nodeErrs = append(nodeErrs, fmt.Sprintf("container %q: missing", cName))
|
||||
continue
|
||||
}
|
||||
for p, expectedValue := range expectedResult {
|
||||
actualValue, ok := perContainerSummary[p]
|
||||
if !ok {
|
||||
nodeErrs = append(nodeErrs, fmt.Sprintf("container %q: missing percentile %v", cName, p))
|
||||
continue
|
||||
}
|
||||
if actualValue > expectedValue {
|
||||
nodeErrs = append(nodeErrs, fmt.Sprintf("container %q: expected %.0fth%% usage < %.3f; got %.3f",
|
||||
cName, p*100, expectedValue, actualValue))
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(nodeErrs) > 0 {
|
||||
errList = append(errList, fmt.Sprintf("node %v:\n %s", nodeName, strings.Join(nodeErrs, ", ")))
|
||||
}
|
||||
}
|
||||
if len(errList) > 0 {
|
||||
framework.Failf("CPU usage exceeding limits:\n %s", strings.Join(errList, "\n"))
|
||||
}
|
||||
}
|
||||
|
||||
func logPods(c clientset.Interface) {
|
||||
nodeName := framework.TestContext.NodeName
|
||||
podList, err := framework.GetKubeletRunningPods(c, nodeName)
|
||||
if err != nil {
|
||||
framework.Logf("Unable to retrieve kubelet pods for node %v", nodeName)
|
||||
}
|
||||
framework.Logf("%d pods are running on node %v", len(podList.Items), nodeName)
|
||||
}
|
122
vendor/k8s.io/kubernetes/test/e2e_node/restart_test.go
generated
vendored
Normal file
@ -0,0 +1,122 @@
|
||||
// +build linux
|
||||
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
"fmt"
|
||||
"os/exec"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
"k8s.io/api/core/v1"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
)
|
||||
|
||||
// waitForPods waits up to timeout for at least pod_count pods to become Running and Ready.
// If the timeout is hit, it returns the list of currently running pods.
|
||||
func waitForPods(f *framework.Framework, pod_count int, timeout time.Duration) (runningPods []*v1.Pod) {
|
||||
for start := time.Now(); time.Since(start) < timeout; time.Sleep(10 * time.Second) {
|
||||
podList, err := f.PodClient().List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
framework.Logf("Failed to list pods on node: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
runningPods = []*v1.Pod{}
|
||||
for _, pod := range podList.Items {
|
||||
if r, err := testutils.PodRunningReady(&pod); err != nil || !r {
|
||||
continue
|
||||
}
|
||||
runningPods = append(runningPods, &pod)
|
||||
}
|
||||
framework.Logf("Running pod count %d", len(runningPods))
|
||||
if len(runningPods) >= pod_count {
|
||||
break
|
||||
}
|
||||
}
|
||||
return runningPods
|
||||
}
|
||||
|
||||
var _ = framework.KubeDescribe("Restart [Serial] [Slow] [Disruptive]", func() {
|
||||
const (
|
||||
// Saturate the node. It's not necessary that all these pods enter
|
||||
// Running/Ready, because we don't know the number of cores in the
|
||||
// test node or the default limits applied (if any). It is essential
// that no containers end up terminated. 100 was chosen because
|
||||
// it's the max pods per node.
|
||||
podCount = 100
|
||||
podCreationInterval = 100 * time.Millisecond
|
||||
recoverTimeout = 5 * time.Minute
|
||||
startTimeout = 3 * time.Minute
|
||||
// restartCount is chosen so that even with minPods we exhaust the default
|
||||
// allocation of a /24.
|
||||
minPods = 50
|
||||
restartCount = 6
|
||||
)
|
||||
|
||||
f := framework.NewDefaultFramework("restart-test")
|
||||
Context("Docker Daemon", func() {
|
||||
Context("Network", func() {
|
||||
It("should recover from ip leak", func() {
|
||||
|
||||
pods := newTestPods(podCount, false, framework.GetPauseImageNameForHostArch(), "restart-docker-test")
|
||||
By(fmt.Sprintf("Trying to create %d pods on node", len(pods)))
|
||||
createBatchPodWithRateControl(f, pods, podCreationInterval)
|
||||
defer deletePodsSync(f, pods)
|
||||
|
||||
// Give the node some time to stabilize; assume pods that become Running/Ready within
|
||||
// startTimeout fit on the node and the node is now saturated.
|
||||
runningPods := waitForPods(f, podCount, startTimeout)
|
||||
if len(runningPods) < minPods {
|
||||
framework.Failf("Failed to start %d pods, cannot test that restarting docker doesn't leak IPs", minPods)
|
||||
}
|
||||
|
||||
for i := 0; i < restartCount; i += 1 {
|
||||
By(fmt.Sprintf("Restarting Docker Daemon iteration %d", i))
|
||||
|
||||
// TODO: Find a uniform way to deal with systemctl/initctl/service operations. #34494
|
||||
if stdout, err := exec.Command("sudo", "systemctl", "restart", "docker").CombinedOutput(); err != nil {
|
||||
framework.Logf("Failed to trigger docker restart with systemd/systemctl: %v, stdout: %q", err, string(stdout))
|
||||
if stdout, err = exec.Command("sudo", "service", "docker", "restart").CombinedOutput(); err != nil {
|
||||
framework.Failf("Failed to trigger docker restart with upstart/service: %v, stdout: %q", err, string(stdout))
|
||||
}
|
||||
}
|
||||
time.Sleep(20 * time.Second)
|
||||
}
|
||||
|
||||
By("Checking currently Running/Ready pods")
|
||||
postRestartRunningPods := waitForPods(f, len(runningPods), recoverTimeout)
|
||||
if len(postRestartRunningPods) == 0 {
|
||||
framework.Failf("Failed to start *any* pods after docker restart, this might indicate an IP leak")
|
||||
}
|
||||
By("Confirm no containers have terminated")
|
||||
for _, pod := range postRestartRunningPods {
|
||||
if c := testutils.TerminatedContainers(pod); len(c) != 0 {
|
||||
framework.Failf("Pod %q has failed containers %+v after docker restart, this might indicate an IP leak", pod.Name, c)
|
||||
}
|
||||
}
|
||||
By(fmt.Sprintf("Docker restart test passed with %d pods", len(postRestartRunningPods)))
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
36
vendor/k8s.io/kubernetes/test/e2e_node/runner/local/BUILD
generated
vendored
Normal file
@ -0,0 +1,36 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_binary",
|
||||
"go_library",
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "local",
|
||||
importpath = "k8s.io/kubernetes/test/e2e_node/runner/local",
|
||||
library = ":go_default_library",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["run_local.go"],
|
||||
importpath = "k8s.io/kubernetes/test/e2e_node/runner/local",
|
||||
deps = [
|
||||
"//test/e2e_node/builder:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
81
vendor/k8s.io/kubernetes/test/e2e_node/runner/local/run_local.go
generated
vendored
Normal file
@ -0,0 +1,81 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"k8s.io/kubernetes/test/e2e_node/builder"
|
||||
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
var buildDependencies = flag.Bool("build-dependencies", true, "If true, build all dependencies.")
|
||||
var ginkgoFlags = flag.String("ginkgo-flags", "", "Space-separated list of arguments to pass to Ginkgo test runner.")
|
||||
var testFlags = flag.String("test-flags", "", "Space-separated list of arguments to pass to node e2e test.")
|
||||
var systemSpecName = flag.String("system-spec-name", "", "The name of the system spec used for validating the image in the node conformance test. The specs are at test/e2e_node/system/specs/. If unspecified, the default built-in spec (system.DefaultSpec) will be used.")
|
||||
|
||||
const (
|
||||
systemSpecPath = "test/e2e_node/system/specs"
|
||||
)
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
// Build dependencies - ginkgo, kubelet and apiserver.
|
||||
if *buildDependencies {
|
||||
if err := builder.BuildGo(); err != nil {
|
||||
glog.Fatalf("Failed to build the dependencies: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Run node e2e test
|
||||
outputDir, err := builder.GetK8sBuildOutputDir()
|
||||
if err != nil {
|
||||
glog.Fatalf("Failed to get build output directory: %v", err)
|
||||
}
|
||||
glog.Infof("Got build output dir: %v", outputDir)
|
||||
ginkgo := filepath.Join(outputDir, "ginkgo")
|
||||
test := filepath.Join(outputDir, "e2e_node.test")
|
||||
|
||||
args := []string{*ginkgoFlags, test, "--", *testFlags}
|
||||
if *systemSpecName != "" {
|
||||
rootDir, err := builder.GetK8sRootDir()
|
||||
if err != nil {
|
||||
glog.Fatalf("Failed to get k8s root directory: %v", err)
|
||||
}
|
||||
systemSpecFile := filepath.Join(rootDir, systemSpecPath, *systemSpecName+".yaml")
|
||||
args = append(args, fmt.Sprintf("--system-spec-name=%s --system-spec-file=%s", *systemSpecName, systemSpecFile))
|
||||
}
|
||||
if err := runCommand(ginkgo, args...); err != nil {
|
||||
glog.Exitf("Test failed: %v", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func runCommand(name string, args ...string) error {
|
||||
glog.Infof("Running command: %v %v", name, strings.Join(args, " "))
|
||||
cmd := exec.Command("sudo", "sh", "-c", strings.Join(append([]string{name}, args...), " "))
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
return cmd.Run()
|
||||
}
|
41
vendor/k8s.io/kubernetes/test/e2e_node/runner/remote/BUILD
generated
vendored
Normal file
@ -0,0 +1,41 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_binary",
|
||||
"go_library",
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "remote",
|
||||
importpath = "k8s.io/kubernetes/test/e2e_node/runner/remote",
|
||||
library = ":go_default_library",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["run_remote.go"],
|
||||
importpath = "k8s.io/kubernetes/test/e2e_node/runner/remote",
|
||||
deps = [
|
||||
"//test/e2e_node/remote:go_default_library",
|
||||
"//vendor/github.com/ghodss/yaml:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/github.com/pborman/uuid:go_default_library",
|
||||
"//vendor/golang.org/x/oauth2:go_default_library",
|
||||
"//vendor/golang.org/x/oauth2/google:go_default_library",
|
||||
"//vendor/google.golang.org/api/compute/v0.beta:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
781
vendor/k8s.io/kubernetes/test/e2e_node/runner/remote/run_remote.go
generated
vendored
Normal file
@ -0,0 +1,781 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// To run the node e2e tests remotely against one or more hosts on gce:
|
||||
// $ go run run_remote.go --logtostderr --v 2 --ssh-env gce --hosts <comma separated hosts>
|
||||
// To run the node e2e tests remotely against one or more images on gce and provision them:
|
||||
// $ go run run_remote.go --logtostderr --v 2 --project <project> --zone <zone> --ssh-env gce --images <comma separated images>
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/kubernetes/test/e2e_node/remote"
|
||||
|
||||
"github.com/ghodss/yaml"
|
||||
"github.com/golang/glog"
|
||||
"github.com/pborman/uuid"
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/google"
|
||||
compute "google.golang.org/api/compute/v0.beta"
|
||||
)
|
||||
|
||||
var testArgs = flag.String("test_args", "", "Space-separated list of arguments to pass to Ginkgo test runner.")
|
||||
var testSuite = flag.String("test-suite", "default", "Test suite the runner initializes with. Currently support default|conformance")
|
||||
var instanceNamePrefix = flag.String("instance-name-prefix", "", "prefix for instance names")
|
||||
var zone = flag.String("zone", "", "gce zone the hosts live in")
|
||||
var project = flag.String("project", "", "gce project the hosts live in")
|
||||
var imageConfigFile = flag.String("image-config-file", "", "yaml file describing images to run")
|
||||
var imageConfigDir = flag.String("image-config-dir", "", "(optional)path to image config files")
|
||||
var imageProject = flag.String("image-project", "", "gce project the hosts live in")
|
||||
var images = flag.String("images", "", "images to test")
|
||||
var hosts = flag.String("hosts", "", "hosts to test")
|
||||
var cleanup = flag.Bool("cleanup", true, "If true remove files from remote hosts and delete temporary instances")
|
||||
var deleteInstances = flag.Bool("delete-instances", true, "If true, delete any instances created")
|
||||
var buildOnly = flag.Bool("build-only", false, "If true, build e2e_node_test.tar.gz and exit.")
|
||||
var instanceMetadata = flag.String("instance-metadata", "", "key/value metadata for instances separated by '=' or '<', 'k=v' means the key is 'k' and the value is 'v'; 'k<p' means the key is 'k' and the value is extracted from the local path 'p', e.g. k1=v1,k2<p2")
|
||||
var gubernator = flag.Bool("gubernator", false, "If true, output Gubernator link to view logs")
|
||||
var ginkgoFlags = flag.String("ginkgo-flags", "", "Passed to ginkgo to specify additional flags such as --skip=.")
|
||||
var systemSpecName = flag.String("system-spec-name", "", "The name of the system spec used for validating the image in the node conformance test. The specs are at test/e2e_node/system/specs/. If unspecified, the default built-in spec (system.DefaultSpec) will be used.")
|
||||
|
||||
// envs is the type used to collect all node envs. The key is the env name,
|
||||
// and the value is the env value.
|
||||
type envs map[string]string
|
||||
|
||||
// String function of flag.Value
|
||||
func (e *envs) String() string {
|
||||
return fmt.Sprint(*e)
|
||||
}
|
||||
|
||||
// Set function of flag.Value
|
||||
func (e *envs) Set(value string) error {
|
||||
kv := strings.SplitN(value, "=", 2)
|
||||
if len(kv) != 2 {
|
||||
return fmt.Errorf("invalid env string")
|
||||
}
|
||||
emap := *e
|
||||
emap[kv[0]] = kv[1]
|
||||
return nil
|
||||
}
|
||||
|
||||
// nodeEnvs is the node envs from the flag `node-env`.
|
||||
var nodeEnvs = make(envs)
|
||||
|
||||
func init() {
|
||||
flag.Var(&nodeEnvs, "node-env", "An environment variable passed to instance as metadata, e.g. when '--node-env=PATH=/usr/bin' is specified, there will be an extra instance metadata 'PATH=/usr/bin'.")
|
||||
}
|
||||
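// Illustrative sketch, not part of the vendored file: with the flag.Var registration above, a
// command line such as "--node-env=PATH=/usr/bin --node-env=A=b=c" calls envs.Set once per
// occurrence. Set splits on the first '=' only, so values may themselves contain '='.
func exampleNodeEnvFlag() {
	e := make(envs)
	_ = e.Set("PATH=/usr/bin") // e["PATH"] == "/usr/bin"
	_ = e.Set("A=b=c")         // SplitN with n=2 keeps "b=c" intact
}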
|
||||
const (
|
||||
defaultMachine = "n1-standard-1"
|
||||
acceleratorTypeResourceFormat = "https://www.googleapis.com/compute/beta/projects/%s/zones/%s/acceleratorTypes/%s"
|
||||
)
|
||||
|
||||
var (
|
||||
computeService *compute.Service
|
||||
arc Archive
|
||||
suite remote.TestSuite
|
||||
)
|
||||
|
||||
type Archive struct {
|
||||
sync.Once
|
||||
path string
|
||||
err error
|
||||
}
|
||||
|
||||
type TestResult struct {
|
||||
output string
|
||||
err error
|
||||
host string
|
||||
exitOk bool
|
||||
}
|
||||
|
||||
// ImageConfig specifies what images should be run and how for these tests.
|
||||
// It can be created via the `--images` and `--image-project` flags, or by
|
||||
// specifying the `--image-config-file` flag, pointing to a json or yaml file
|
||||
// of the form:
|
||||
//
|
||||
// images:
|
||||
// short-name:
|
||||
// image: gce-image-name
|
||||
// project: gce-image-project
|
||||
// machine: for benchmark only, the machine type (GCE instance) to run test
|
||||
// tests: for benchmark only, a list of ginkgo focus strings to match tests
|
||||
|
||||
// TODO(coufon): replace 'image' with 'node' in the configuration;
// we plan to support testing custom machines other than GCE by specifying the host.
|
||||
type ImageConfig struct {
|
||||
Images map[string]GCEImage `json:"images"`
|
||||
}
|
||||
|
||||
type Accelerator struct {
|
||||
Type string `json:"type,omitempty"`
|
||||
Count int64 `json:"count, omitempty"`
|
||||
}
|
||||
|
||||
type Resources struct {
|
||||
Accelerators []Accelerator `json:"accelerators,omitempty"`
|
||||
}
|
||||
|
||||
type GCEImage struct {
|
||||
Image string `json:"image, omitempty"`
|
||||
ImageDesc string `json:"image_description, omitempty"`
|
||||
Project string `json:"project"`
|
||||
Metadata string `json:"metadata"`
|
||||
ImageRegex string `json:"image_regex, omitempty"`
|
||||
// Defaults to using only the latest image. Acceptable values are [0, # of images that match the regex).
// If fewer previous images exist than desired, the test will use what is available.
|
||||
PreviousImages int `json:"previous_images, omitempty"`
|
||||
|
||||
Machine string `json:"machine, omitempty"`
|
||||
Resources Resources `json:"resources, omitempty"`
|
||||
// If 'Tests' is non-empty, this image is used for benchmark tests (no limit verification, more result logging, and node names of the form 'machine-image-uuid').
|
||||
Tests []string `json:"tests, omitempty"`
|
||||
}
|
||||
|
||||
type internalImageConfig struct {
|
||||
images map[string]internalGCEImage
|
||||
}
|
||||
|
||||
type internalGCEImage struct {
|
||||
image string
|
||||
// imageDesc is the description of the image. If empty, the value of
// 'image' will be used.
|
||||
imageDesc string
|
||||
project string
|
||||
resources Resources
|
||||
metadata *compute.Metadata
|
||||
machine string
|
||||
tests []string
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
switch *testSuite {
|
||||
case "conformance":
|
||||
suite = remote.InitConformanceRemote()
|
||||
// TODO: Add subcommand for node soaking, node conformance, cri validation.
|
||||
case "default":
|
||||
// Use node e2e suite by default if no subcommand is specified.
|
||||
suite = remote.InitNodeE2ERemote()
|
||||
default:
|
||||
glog.Fatalf("--test-suite must be one of default or conformance")
|
||||
}
|
||||
|
||||
rand.Seed(time.Now().UTC().UnixNano())
|
||||
if *buildOnly {
|
||||
// Build the archive and exit
|
||||
remote.CreateTestArchive(suite, *systemSpecName)
|
||||
return
|
||||
}
|
||||
|
||||
if *hosts == "" && *imageConfigFile == "" && *images == "" {
|
||||
glog.Fatalf("Must specify one of --image-config-file, --hosts, --images.")
|
||||
}
|
||||
var err error
|
||||
computeService, err = getComputeClient()
|
||||
if err != nil {
|
||||
glog.Fatalf("Unable to create gcloud compute service using defaults. Make sure you are authenticated. %v", err)
|
||||
}
|
||||
|
||||
gceImages := &internalImageConfig{
|
||||
images: make(map[string]internalGCEImage),
|
||||
}
|
||||
if *imageConfigFile != "" {
|
||||
configPath := *imageConfigFile
|
||||
if *imageConfigDir != "" {
|
||||
configPath = filepath.Join(*imageConfigDir, *imageConfigFile)
|
||||
}
|
||||
|
||||
// parse images
|
||||
imageConfigData, err := ioutil.ReadFile(configPath)
|
||||
if err != nil {
|
||||
glog.Fatalf("Could not read image config file provided: %v", err)
|
||||
}
|
||||
externalImageConfig := ImageConfig{Images: make(map[string]GCEImage)}
|
||||
err = yaml.Unmarshal(imageConfigData, &externalImageConfig)
|
||||
if err != nil {
|
||||
glog.Fatalf("Could not parse image config file: %v", err)
|
||||
}
|
||||
for shortName, imageConfig := range externalImageConfig.Images {
|
||||
var images []string
|
||||
isRegex, name := false, shortName
|
||||
if imageConfig.ImageRegex != "" && imageConfig.Image == "" {
|
||||
isRegex = true
|
||||
images, err = getGCEImages(imageConfig.ImageRegex, imageConfig.Project, imageConfig.PreviousImages)
|
||||
if err != nil {
|
||||
glog.Fatalf("Could not retrieve list of images based on image prefix %q: %v", imageConfig.ImageRegex, err)
|
||||
}
|
||||
} else {
|
||||
images = []string{imageConfig.Image}
|
||||
}
|
||||
for _, image := range images {
|
||||
metadata := imageConfig.Metadata
|
||||
if len(strings.TrimSpace(*instanceMetadata)) > 0 {
|
||||
metadata += "," + *instanceMetadata
|
||||
}
|
||||
gceImage := internalGCEImage{
|
||||
image: image,
|
||||
imageDesc: imageConfig.ImageDesc,
|
||||
project: imageConfig.Project,
|
||||
metadata: getImageMetadata(metadata),
|
||||
machine: imageConfig.Machine,
|
||||
tests: imageConfig.Tests,
|
||||
resources: imageConfig.Resources,
|
||||
}
|
||||
if gceImage.imageDesc == "" {
|
||||
gceImage.imageDesc = gceImage.image
|
||||
}
|
||||
if isRegex && len(images) > 1 {
|
||||
// Use image name when shortName is not unique.
|
||||
name = image
|
||||
}
|
||||
gceImages.images[name] = gceImage
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Allow users to specify additional images via cli flags for local testing
|
||||
// convenience; merge in with config file
|
||||
if *images != "" {
|
||||
if *imageProject == "" {
|
||||
glog.Fatal("Must specify --image-project if you specify --images")
|
||||
}
|
||||
cliImages := strings.Split(*images, ",")
|
||||
for _, img := range cliImages {
|
||||
gceImage := internalGCEImage{
|
||||
image: img,
|
||||
project: *imageProject,
|
||||
metadata: getImageMetadata(*instanceMetadata),
|
||||
}
|
||||
gceImages.images[img] = gceImage
|
||||
}
|
||||
}
|
||||
|
||||
if len(gceImages.images) != 0 && *zone == "" {
|
||||
glog.Fatal("Must specify --zone flag")
|
||||
}
|
||||
for shortName, image := range gceImages.images {
|
||||
if image.project == "" {
|
||||
glog.Fatalf("Invalid config for %v; must specify a project", shortName)
|
||||
}
|
||||
}
|
||||
if len(gceImages.images) != 0 {
|
||||
if *project == "" {
|
||||
glog.Fatal("Must specify --project flag to launch images into")
|
||||
}
|
||||
}
|
||||
if *instanceNamePrefix == "" {
|
||||
*instanceNamePrefix = "tmp-node-e2e-" + uuid.NewUUID().String()[:8]
|
||||
}
|
||||
|
||||
// Setup coloring
|
||||
stat, _ := os.Stdout.Stat()
|
||||
useColor := (stat.Mode() & os.ModeCharDevice) != 0
|
||||
blue := ""
|
||||
noColour := ""
|
||||
if useColor {
|
||||
blue = "\033[0;34m"
|
||||
noColour = "\033[0m"
|
||||
}
|
||||
|
||||
go arc.getArchive()
|
||||
defer arc.deleteArchive()
|
||||
|
||||
results := make(chan *TestResult)
|
||||
running := 0
|
||||
for shortName := range gceImages.images {
|
||||
imageConfig := gceImages.images[shortName]
|
||||
fmt.Printf("Initializing e2e tests using image %s.\n", shortName)
|
||||
running++
|
||||
go func(image *internalGCEImage, junitFilePrefix string) {
|
||||
results <- testImage(image, junitFilePrefix)
|
||||
}(&imageConfig, shortName)
|
||||
}
|
||||
if *hosts != "" {
|
||||
for _, host := range strings.Split(*hosts, ",") {
|
||||
fmt.Printf("Initializing e2e tests using host %s.\n", host)
|
||||
running++
|
||||
go func(host string, junitFilePrefix string) {
|
||||
results <- testHost(host, *cleanup, "", junitFilePrefix, *ginkgoFlags)
|
||||
}(host, host)
|
||||
}
|
||||
}
|
||||
|
||||
// Wait for all tests to complete and emit the results
|
||||
errCount := 0
|
||||
exitOk := true
|
||||
for i := 0; i < running; i++ {
|
||||
tr := <-results
|
||||
host := tr.host
|
||||
fmt.Println() // Print an empty line
|
||||
fmt.Printf("%s>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>%s\n", blue, noColour)
|
||||
fmt.Printf("%s> START TEST >%s\n", blue, noColour)
|
||||
fmt.Printf("%s>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>%s\n", blue, noColour)
|
||||
fmt.Printf("Start Test Suite on Host %s\n", host)
|
||||
fmt.Printf("%s\n", tr.output)
|
||||
if tr.err != nil {
|
||||
errCount++
|
||||
fmt.Printf("Failure Finished Test Suite on Host %s\n%v\n", host, tr.err)
|
||||
} else {
|
||||
fmt.Printf("Success Finished Test Suite on Host %s\n", host)
|
||||
}
|
||||
exitOk = exitOk && tr.exitOk
|
||||
fmt.Printf("%s<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<%s\n", blue, noColour)
|
||||
fmt.Printf("%s< FINISH TEST <%s\n", blue, noColour)
|
||||
fmt.Printf("%s<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<%s\n", blue, noColour)
|
||||
fmt.Println() // Print an empty line
|
||||
}
|
||||
// Set the exit code if there were failures
|
||||
if !exitOk {
|
||||
fmt.Printf("Failure: %d errors encountered.\n", errCount)
|
||||
callGubernator(*gubernator)
|
||||
os.Exit(1)
|
||||
}
|
||||
callGubernator(*gubernator)
|
||||
}
|
||||
|
||||
func callGubernator(gubernator bool) {
|
||||
if gubernator {
|
||||
fmt.Println("Running gubernator.sh")
|
||||
output, err := exec.Command("./test/e2e_node/gubernator.sh", "y").Output()
|
||||
|
||||
if err != nil {
|
||||
fmt.Println("gubernator.sh Failed")
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
fmt.Printf("%s", output)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (a *Archive) getArchive() (string, error) {
|
||||
a.Do(func() { a.path, a.err = remote.CreateTestArchive(suite, *systemSpecName) })
|
||||
return a.path, a.err
|
||||
}
|
||||
|
||||
func (a *Archive) deleteArchive() {
|
||||
path, err := a.getArchive()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
os.Remove(path)
|
||||
}
|
||||
|
||||
func getImageMetadata(input string) *compute.Metadata {
|
||||
if input == "" {
|
||||
return nil
|
||||
}
|
||||
glog.V(3).Infof("parsing instance metadata: %q", input)
|
||||
raw := parseInstanceMetadata(input)
|
||||
glog.V(4).Infof("parsed instance metadata: %v", raw)
|
||||
metadataItems := []*compute.MetadataItems{}
|
||||
for k, v := range raw {
|
||||
val := v
|
||||
metadataItems = append(metadataItems, &compute.MetadataItems{
|
||||
Key: k,
|
||||
Value: &val,
|
||||
})
|
||||
}
|
||||
ret := compute.Metadata{Items: metadataItems}
|
||||
return &ret
|
||||
}
|
||||
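// Illustrative sketch, not part of the vendored file: parseInstanceMetadata (defined elsewhere
// in this file, not shown here) turns the --instance-metadata string into a key/value map.
// Going only by the flag help above ("k=v" is a literal value, "k<p" reads the value from the
// local path p, entries are comma-separated), a hypothetical minimal version could look like:
func parseInstanceMetadataSketch(input string) map[string]string {
	raw := make(map[string]string)
	for _, item := range strings.Split(input, ",") {
		if kv := strings.SplitN(item, "=", 2); len(kv) == 2 {
			raw[kv[0]] = kv[1]
			continue
		}
		if kp := strings.SplitN(item, "<", 2); len(kp) == 2 {
			if data, err := ioutil.ReadFile(kp[1]); err == nil {
				raw[kp[0]] = string(data)
			}
		}
	}
	return raw
}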
|
||||
// Run tests in archive against host
|
||||
func testHost(host string, deleteFiles bool, imageDesc, junitFilePrefix, ginkgoFlagsStr string) *TestResult {
|
||||
instance, err := computeService.Instances.Get(*project, *zone, host).Do()
|
||||
if err != nil {
|
||||
return &TestResult{
|
||||
err: err,
|
||||
host: host,
|
||||
exitOk: false,
|
||||
}
|
||||
}
|
||||
if strings.ToUpper(instance.Status) != "RUNNING" {
|
||||
err = fmt.Errorf("instance %s not in state RUNNING, was %s.", host, instance.Status)
|
||||
return &TestResult{
|
||||
err: err,
|
||||
host: host,
|
||||
exitOk: false,
|
||||
}
|
||||
}
|
||||
externalIp := getExternalIp(instance)
|
||||
if len(externalIp) > 0 {
|
||||
remote.AddHostnameIp(host, externalIp)
|
||||
}
|
||||
|
||||
path, err := arc.getArchive()
|
||||
if err != nil {
|
||||
// Don't log fatally, because we still need to run the cleanup contained in "defer" statements
|
||||
return &TestResult{
|
||||
err: fmt.Errorf("unable to create test archive: %v.", err),
|
||||
}
|
||||
}
|
||||
|
||||
output, exitOk, err := remote.RunRemote(suite, path, host, deleteFiles, imageDesc, junitFilePrefix, *testArgs, ginkgoFlagsStr, *systemSpecName)
|
||||
return &TestResult{
|
||||
output: output,
|
||||
err: err,
|
||||
host: host,
|
||||
exitOk: exitOk,
|
||||
}
|
||||
}
|
||||
|
||||
type imageObj struct {
|
||||
creationTime time.Time
|
||||
name string
|
||||
}
|
||||
|
||||
func (io imageObj) string() string {
|
||||
return fmt.Sprintf("%q created %q", io.name, io.creationTime.String())
|
||||
}
|
||||
|
||||
type byCreationTime []imageObj
|
||||
|
||||
func (a byCreationTime) Len() int { return len(a) }
|
||||
func (a byCreationTime) Less(i, j int) bool { return a[i].creationTime.After(a[j].creationTime) }
|
||||
func (a byCreationTime) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
|
||||
// Returns a list of image names based on regex and number of previous images requested.
|
||||
func getGCEImages(imageRegex, project string, previousImages int) ([]string, error) {
|
||||
ilc, err := computeService.Images.List(project).Do()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to list images in project %q: %v", project, err)
|
||||
}
|
||||
imageObjs := []imageObj{}
|
||||
imageRe := regexp.MustCompile(imageRegex)
|
||||
for _, instance := range ilc.Items {
|
||||
if imageRe.MatchString(instance.Name) {
|
||||
creationTime, err := time.Parse(time.RFC3339, instance.CreationTimestamp)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to parse instance creation timestamp %q: %v", instance.CreationTimestamp, err)
|
||||
}
|
||||
io := imageObj{
|
||||
creationTime: creationTime,
|
||||
name: instance.Name,
|
||||
}
|
||||
glog.V(4).Infof("Found image %q based on regex %q in project %q", io.string(), imageRegex, project)
|
||||
imageObjs = append(imageObjs, io)
|
||||
}
|
||||
}
|
||||
sort.Sort(byCreationTime(imageObjs))
|
||||
images := []string{}
|
||||
for _, imageObj := range imageObjs {
|
||||
images = append(images, imageObj.name)
|
||||
previousImages--
|
||||
if previousImages < 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return images, nil
|
||||
}
|
||||
|
||||
// Provision a gce instance using image and run the tests in archive against the instance.
|
||||
// Delete the instance afterward.
|
||||
func testImage(imageConfig *internalGCEImage, junitFilePrefix string) *TestResult {
|
||||
ginkgoFlagsStr := *ginkgoFlags
|
||||
// Check whether the test is for benchmark.
|
||||
if len(imageConfig.tests) > 0 {
|
||||
// Benchmark needs machine type non-empty.
|
||||
if imageConfig.machine == "" {
|
||||
imageConfig.machine = defaultMachine
|
||||
}
|
||||
// Use the Ginkgo focus in benchmark config.
|
||||
ginkgoFlagsStr += (" " + testsToGinkgoFocus(imageConfig.tests))
|
||||
}
|
||||
|
||||
host, err := createInstance(imageConfig)
|
||||
if *deleteInstances {
|
||||
defer deleteInstance(host)
|
||||
}
|
||||
if err != nil {
|
||||
return &TestResult{
|
||||
err: fmt.Errorf("unable to create gce instance with running docker daemon for image %s. %v", imageConfig.image, err),
|
||||
}
|
||||
}
|
||||
|
||||
// Only delete the files if we are keeping the instance and want it cleaned up.
|
||||
// If we are going to delete the instance, don't bother with cleaning up the files
|
||||
deleteFiles := !*deleteInstances && *cleanup
|
||||
|
||||
result := testHost(host, deleteFiles, imageConfig.imageDesc, junitFilePrefix, ginkgoFlagsStr)
|
||||
// This is a temporary solution to collect the node's serial log. Only port 1 contains useful information.
// TODO(random-liu): Extract out and unify the log collection logic with cluster e2e.
|
||||
serialPortOutput, err := computeService.Instances.GetSerialPortOutput(*project, *zone, host).Port(1).Do()
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to collect serial output from node %q: %v", host, err)
|
||||
} else {
|
||||
logFilename := "serial-1.log"
|
||||
err := remote.WriteLog(host, logFilename, serialPortOutput.Contents)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to write serial output from node %q to %q: %v", host, logFilename, err)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Provision a gce instance using image
|
||||
func createInstance(imageConfig *internalGCEImage) (string, error) {
|
||||
glog.V(1).Infof("Creating instance %+v", *imageConfig)
|
||||
name := imageToInstanceName(imageConfig)
|
||||
i := &compute.Instance{
|
||||
Name: name,
|
||||
MachineType: machineType(imageConfig.machine),
|
||||
NetworkInterfaces: []*compute.NetworkInterface{
|
||||
{
|
||||
AccessConfigs: []*compute.AccessConfig{
|
||||
{
|
||||
Type: "ONE_TO_ONE_NAT",
|
||||
Name: "External NAT",
|
||||
},
|
||||
}},
|
||||
},
|
||||
Disks: []*compute.AttachedDisk{
|
||||
{
|
||||
AutoDelete: true,
|
||||
Boot: true,
|
||||
Type: "PERSISTENT",
|
||||
InitializeParams: &compute.AttachedDiskInitializeParams{
|
||||
SourceImage: sourceImage(imageConfig.image, imageConfig.project),
|
||||
DiskSizeGb: 20,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, accelerator := range imageConfig.resources.Accelerators {
|
||||
if i.GuestAccelerators == nil {
|
||||
autoRestart := true
|
||||
i.GuestAccelerators = []*compute.AcceleratorConfig{}
|
||||
i.Scheduling = &compute.Scheduling{
|
||||
OnHostMaintenance: "TERMINATE",
|
||||
AutomaticRestart: &autoRestart,
|
||||
}
|
||||
}
|
||||
aType := fmt.Sprintf(acceleratorTypeResourceFormat, *project, *zone, accelerator.Type)
|
||||
ac := &compute.AcceleratorConfig{
|
||||
AcceleratorCount: accelerator.Count,
|
||||
AcceleratorType: aType,
|
||||
}
|
||||
i.GuestAccelerators = append(i.GuestAccelerators, ac)
|
||||
}
|
||||
|
||||
var err error
|
||||
i.Metadata = imageConfig.metadata
|
||||
if _, err := computeService.Instances.Get(*project, *zone, i.Name).Do(); err != nil {
|
||||
op, err := computeService.Instances.Insert(*project, *zone, i).Do()
|
||||
if err != nil {
|
||||
ret := fmt.Sprintf("could not create instance %s: API error: %v", name, err)
|
||||
if op != nil {
|
||||
ret = fmt.Sprintf("%s: %v", ret, op.Error)
|
||||
}
|
||||
return "", fmt.Errorf(ret)
|
||||
} else if op.Error != nil {
|
||||
return "", fmt.Errorf("could not create instance %s: %+v", name, op.Error)
|
||||
}
|
||||
}
|
||||
|
||||
instanceRunning := false
|
||||
for i := 0; i < 30 && !instanceRunning; i++ {
|
||||
if i > 0 {
|
||||
time.Sleep(time.Second * 20)
|
||||
}
|
||||
var instance *compute.Instance
|
||||
instance, err = computeService.Instances.Get(*project, *zone, name).Do()
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if strings.ToUpper(instance.Status) != "RUNNING" {
|
||||
err = fmt.Errorf("instance %s not in state RUNNING, was %s.", name, instance.Status)
|
||||
continue
|
||||
}
|
||||
externalIp := getExternalIp(instance)
|
||||
if len(externalIp) > 0 {
|
||||
remote.AddHostnameIp(name, externalIp)
|
||||
}
|
||||
// TODO(random-liu): Remove the docker version check. Use some other command to check
|
||||
// instance readiness.
|
||||
var output string
|
||||
output, err = remote.SSH(name, "docker", "version")
|
||||
if err != nil {
|
||||
err = fmt.Errorf("instance %s not running docker daemon - Command failed: %s", name, output)
|
||||
continue
|
||||
}
|
||||
if !strings.Contains(output, "Server") {
|
||||
err = fmt.Errorf("instance %s not running docker daemon - Server not found: %s", name, output)
|
||||
continue
|
||||
}
|
||||
instanceRunning = true
|
||||
}
|
||||
// If instance didn't reach running state in time, return with error now.
|
||||
if err != nil {
|
||||
return name, err
|
||||
}
|
||||
// Instance reached running state in time, make sure that cloud-init is complete
|
||||
if isCloudInitUsed(imageConfig.metadata) {
|
||||
cloudInitFinished := false
|
||||
for i := 0; i < 60 && !cloudInitFinished; i++ {
|
||||
if i > 0 {
|
||||
time.Sleep(time.Second * 20)
|
||||
}
|
||||
var finished string
|
||||
finished, err = remote.SSH(name, "ls", "/var/lib/cloud/instance/boot-finished")
|
||||
if err != nil {
|
||||
err = fmt.Errorf("instance %s has not finished cloud-init script: %s", name, finished)
|
||||
continue
|
||||
}
|
||||
cloudInitFinished = true
|
||||
}
|
||||
}
|
||||
return name, err
|
||||
}
|
||||
|
||||
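// isCloudInitUsed reports whether the instance metadata contains a "user-data"
// item whose value starts with "#cloud-config", i.e. whether cloud-init will run on boot.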
func isCloudInitUsed(metadata *compute.Metadata) bool {
|
||||
if metadata == nil {
|
||||
return false
|
||||
}
|
||||
for _, item := range metadata.Items {
|
||||
if item.Key == "user-data" && item.Value != nil && strings.HasPrefix(*item.Value, "#cloud-config") {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
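// getExternalIp returns the first NAT IP found on the instance's network
// interfaces, or an empty string if none is configured.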
func getExternalIp(instance *compute.Instance) string {
|
||||
for i := range instance.NetworkInterfaces {
|
||||
ni := instance.NetworkInterfaces[i]
|
||||
for j := range ni.AccessConfigs {
|
||||
ac := ni.AccessConfigs[j]
|
||||
if len(ac.NatIP) > 0 {
|
||||
return ac.NatIP
|
||||
}
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
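// getComputeClient builds a GCE compute API client from application default
// credentials, retrying a few times because getting credentials on the CI (Jenkins)
// hosts is flaky.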
func getComputeClient() (*compute.Service, error) {
|
||||
const retries = 10
|
||||
const backoff = time.Second * 6
|
||||
|
||||
// Set up the GCE client for provisioning instances.
// Getting credentials on GCE Jenkins is flaky, so try a couple of times.
|
||||
var err error
|
||||
var cs *compute.Service
|
||||
for i := 0; i < retries; i++ {
|
||||
if i > 0 {
|
||||
time.Sleep(backoff)
|
||||
}
|
||||
|
||||
var client *http.Client
|
||||
client, err = google.DefaultClient(oauth2.NoContext, compute.ComputeScope)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
cs, err = compute.New(client)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
return cs, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
func deleteInstance(host string) {
|
||||
glog.Infof("Deleting instance %q", host)
|
||||
_, err := computeService.Instances.Delete(*project, *zone, host).Do()
|
||||
if err != nil {
|
||||
glog.Errorf("Error deleting instance %q: %v", host, err)
|
||||
}
|
||||
}
|
||||
|
||||
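// parseInstanceMetadata converts a comma-separated list of "key=value" pairs and
// "key<path" file references into a GCE metadata map; for the "<" form the value is
// read from the referenced file (joined with *imageConfigDir when that flag is set).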
func parseInstanceMetadata(str string) map[string]string {
|
||||
metadata := make(map[string]string)
|
||||
ss := strings.Split(str, ",")
|
||||
for _, s := range ss {
|
||||
kv := strings.Split(s, "=")
|
||||
if len(kv) == 2 {
|
||||
metadata[kv[0]] = kv[1]
|
||||
continue
|
||||
}
|
||||
kp := strings.Split(s, "<")
|
||||
if len(kp) != 2 {
|
||||
glog.Fatalf("Invalid instance metadata: %q", s)
|
||||
continue
|
||||
}
|
||||
metaPath := kp[1]
|
||||
if *imageConfigDir != "" {
|
||||
metaPath = filepath.Join(*imageConfigDir, metaPath)
|
||||
}
|
||||
v, err := ioutil.ReadFile(metaPath)
|
||||
if err != nil {
|
||||
glog.Fatalf("Failed to read metadata file %q: %v", metaPath, err)
|
||||
continue
|
||||
}
|
||||
metadata[kp[0]] = string(v)
|
||||
}
|
||||
for k, v := range nodeEnvs {
|
||||
metadata[k] = v
|
||||
}
|
||||
return metadata
|
||||
}
|
||||
|
||||
func imageToInstanceName(imageConfig *internalGCEImage) string {
|
||||
if imageConfig.machine == "" {
|
||||
return *instanceNamePrefix + "-" + imageConfig.image
|
||||
}
|
||||
// For benchmark tests, the node name has the format 'machine-image-uuid' so that
// different machine types can run with the same image in parallel.
|
||||
return imageConfig.machine + "-" + imageConfig.image + "-" + uuid.NewUUID().String()[:8]
|
||||
}
|
||||
|
||||
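// sourceImage returns the fully qualified source image path for the given image and project.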
func sourceImage(image, imageProject string) string {
|
||||
return fmt.Sprintf("projects/%s/global/images/%s", imageProject, image)
|
||||
}
|
||||
|
||||
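// machineType returns the machine type URL for the configured zone, falling back
// to defaultMachine when machine is empty.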
func machineType(machine string) string {
|
||||
if machine == "" {
|
||||
machine = defaultMachine
|
||||
}
|
||||
return fmt.Sprintf("zones/%s/machineTypes/%s", *zone, machine)
|
||||
}
|
||||
|
||||
// testsToGinkgoFocus converts the test string list to Ginkgo focus
|
||||
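// For example, testsToGinkgoFocus([]string{"density", "resource tracking"}) yields
// `--focus="density|resource tracking"` (illustrative input, not taken from a real config).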
func testsToGinkgoFocus(tests []string) string {
|
||||
focus := "--focus=\""
|
||||
for i, test := range tests {
|
||||
if i == 0 {
|
||||
focus += test
|
||||
} else {
|
||||
focus += ("|" + test)
|
||||
}
|
||||
}
|
||||
return focus + "\""
|
||||
}
|
409
vendor/k8s.io/kubernetes/test/e2e_node/runtime_conformance_test.go
generated
vendored
Normal file
@ -0,0 +1,409 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/kubernetes/pkg/kubelet/images"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e_node/services"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
gomegatypes "github.com/onsi/gomega/types"
|
||||
)
|
||||
|
||||
const (
|
||||
consistentCheckTimeout = time.Second * 5
|
||||
retryTimeout = time.Minute * 5
|
||||
pollInterval = time.Second * 1
|
||||
)
|
||||
|
||||
var _ = framework.KubeDescribe("Container Runtime Conformance Test", func() {
|
||||
f := framework.NewDefaultFramework("runtime-conformance")
|
||||
|
||||
Describe("container runtime conformance blackbox test", func() {
|
||||
Context("when starting a container that exits", func() {
|
||||
framework.ConformanceIt("it should run with the expected status", func() {
|
||||
restartCountVolumeName := "restart-count"
|
||||
restartCountVolumePath := "/restart-count"
|
||||
testContainer := v1.Container{
|
||||
Image: busyboxImage,
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
MountPath: restartCountVolumePath,
|
||||
Name: restartCountVolumeName,
|
||||
},
|
||||
},
|
||||
}
|
||||
testVolumes := []v1.Volume{
|
||||
{
|
||||
Name: restartCountVolumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumMemory},
|
||||
},
|
||||
},
|
||||
}
|
||||
testCases := []struct {
|
||||
Name string
|
||||
RestartPolicy v1.RestartPolicy
|
||||
Phase v1.PodPhase
|
||||
State ContainerState
|
||||
RestartCount int32
|
||||
Ready bool
|
||||
}{
|
||||
{"terminate-cmd-rpa", v1.RestartPolicyAlways, v1.PodRunning, ContainerStateRunning, 2, true},
|
||||
{"terminate-cmd-rpof", v1.RestartPolicyOnFailure, v1.PodSucceeded, ContainerStateTerminated, 1, false},
|
||||
{"terminate-cmd-rpn", v1.RestartPolicyNever, v1.PodFailed, ContainerStateTerminated, 0, false},
|
||||
}
|
||||
for _, testCase := range testCases {
|
||||
|
||||
// The container fails on the 1st run, succeeds on the 2nd run, then runs forever.
|
||||
cmdScripts := `
|
||||
f=%s
|
||||
count=$(echo 'hello' >> $f ; wc -l $f | awk {'print $1'})
|
||||
if [ $count -eq 1 ]; then
|
||||
exit 1
|
||||
fi
|
||||
if [ $count -eq 2 ]; then
|
||||
exit 0
|
||||
fi
|
||||
while true; do sleep 1; done
|
||||
`
|
||||
tmpCmd := fmt.Sprintf(cmdScripts, path.Join(restartCountVolumePath, "restartCount"))
|
||||
testContainer.Name = testCase.Name
|
||||
testContainer.Command = []string{"sh", "-c", tmpCmd}
|
||||
terminateContainer := ConformanceContainer{
|
||||
PodClient: f.PodClient(),
|
||||
Container: testContainer,
|
||||
RestartPolicy: testCase.RestartPolicy,
|
||||
Volumes: testVolumes,
|
||||
PodSecurityContext: &v1.PodSecurityContext{
|
||||
SELinuxOptions: &v1.SELinuxOptions{
|
||||
Level: "s0",
|
||||
},
|
||||
},
|
||||
}
|
||||
terminateContainer.Create()
|
||||
defer terminateContainer.Delete()
|
||||
|
||||
By("it should get the expected 'RestartCount'")
|
||||
Eventually(func() (int32, error) {
|
||||
status, err := terminateContainer.GetStatus()
|
||||
return status.RestartCount, err
|
||||
}, retryTimeout, pollInterval).Should(Equal(testCase.RestartCount))
|
||||
|
||||
By("it should get the expected 'Phase'")
|
||||
Eventually(terminateContainer.GetPhase, retryTimeout, pollInterval).Should(Equal(testCase.Phase))
|
||||
|
||||
By("it should get the expected 'Ready' condition")
|
||||
Expect(terminateContainer.IsReady()).Should(Equal(testCase.Ready))
|
||||
|
||||
status, err := terminateContainer.GetStatus()
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
|
||||
By("it should get the expected 'State'")
|
||||
Expect(GetContainerState(status.State)).To(Equal(testCase.State))
|
||||
|
||||
By("it should be possible to delete [Conformance]")
|
||||
Expect(terminateContainer.Delete()).To(Succeed())
|
||||
Eventually(terminateContainer.Present, retryTimeout, pollInterval).Should(BeFalse())
|
||||
}
|
||||
})
|
||||
|
||||
rootUser := int64(0)
|
||||
nonRootUser := int64(10000)
|
||||
for _, testCase := range []struct {
|
||||
name string
|
||||
container v1.Container
|
||||
phase v1.PodPhase
|
||||
message gomegatypes.GomegaMatcher
|
||||
}{
|
||||
{
|
||||
name: "if TerminationMessagePath is set [Conformance]",
|
||||
container: v1.Container{
|
||||
Image: busyboxImage,
|
||||
Command: []string{"/bin/sh", "-c"},
|
||||
Args: []string{"/bin/echo -n DONE > /dev/termination-log"},
|
||||
TerminationMessagePath: "/dev/termination-log",
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
RunAsUser: &rootUser,
|
||||
},
|
||||
},
|
||||
phase: v1.PodSucceeded,
|
||||
message: Equal("DONE"),
|
||||
},
|
||||
|
||||
{
|
||||
name: "if TerminationMessagePath is set as non-root user and at a non-default path [Conformance]",
|
||||
container: v1.Container{
|
||||
Image: busyboxImage,
|
||||
Command: []string{"/bin/sh", "-c"},
|
||||
Args: []string{"/bin/echo -n DONE > /dev/termination-custom-log"},
|
||||
TerminationMessagePath: "/dev/termination-custom-log",
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
RunAsUser: &nonRootUser,
|
||||
},
|
||||
},
|
||||
phase: v1.PodSucceeded,
|
||||
message: Equal("DONE"),
|
||||
},
|
||||
|
||||
{
|
||||
name: "from log output if TerminationMessagePolicy FallbackToLogOnError is set [Conformance]",
|
||||
container: v1.Container{
|
||||
Image: busyboxImage,
|
||||
Command: []string{"/bin/sh", "-c"},
|
||||
Args: []string{"/bin/echo -n DONE; /bin/false"},
|
||||
TerminationMessagePath: "/dev/termination-log",
|
||||
TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError,
|
||||
},
|
||||
phase: v1.PodFailed,
|
||||
message: Equal("DONE\n"),
|
||||
},
|
||||
|
||||
{
|
||||
name: "as empty when pod succeeds and TerminationMessagePolicy FallbackToLogOnError is set",
|
||||
container: v1.Container{
|
||||
Image: busyboxImage,
|
||||
Command: []string{"/bin/sh", "-c"},
|
||||
Args: []string{"/bin/echo DONE; /bin/true"},
|
||||
TerminationMessagePath: "/dev/termination-log",
|
||||
TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError,
|
||||
},
|
||||
phase: v1.PodSucceeded,
|
||||
message: Equal(""),
|
||||
},
|
||||
|
||||
{
|
||||
name: "from file when pod succeeds and TerminationMessagePolicy FallbackToLogOnError is set [Conformance]",
|
||||
container: v1.Container{
|
||||
Image: busyboxImage,
|
||||
Command: []string{"/bin/sh", "-c"},
|
||||
Args: []string{"/bin/echo -n OK > /dev/termination-log; /bin/echo DONE; /bin/true"},
|
||||
TerminationMessagePath: "/dev/termination-log",
|
||||
TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError,
|
||||
},
|
||||
phase: v1.PodSucceeded,
|
||||
message: Equal("OK"),
|
||||
},
|
||||
} {
|
||||
It(fmt.Sprintf("should report termination message %s", testCase.name), func() {
|
||||
testCase.container.Name = "termination-message-container"
|
||||
c := ConformanceContainer{
|
||||
PodClient: f.PodClient(),
|
||||
Container: testCase.container,
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
}
|
||||
|
||||
By("create the container")
|
||||
c.Create()
|
||||
defer c.Delete()
|
||||
|
||||
By(fmt.Sprintf("wait for the container to reach %s", testCase.phase))
|
||||
Eventually(c.GetPhase, retryTimeout, pollInterval).Should(Equal(testCase.phase))
|
||||
|
||||
By("get the container status")
|
||||
status, err := c.GetStatus()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("the container should be terminated")
|
||||
Expect(GetContainerState(status.State)).To(Equal(ContainerStateTerminated))
|
||||
|
||||
By("the termination message should be set")
|
||||
Expect(status.State.Terminated.Message).Should(testCase.message)
|
||||
|
||||
By("delete the container")
|
||||
Expect(c.Delete()).To(Succeed())
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
Context("when running a container with a new image", func() {
|
||||
// The service account only has pull permission
|
||||
auth := `
|
||||
{
|
||||
"auths": {
|
||||
"https://gcr.io": {
|
||||
"auth": "X2pzb25fa2V5OnsKICAidHlwZSI6ICJzZXJ2aWNlX2FjY291bnQiLAogICJwcm9qZWN0X2lkIjogImF1dGhlbnRpY2F0ZWQtaW1hZ2UtcHVsbGluZyIsCiAgInByaXZhdGVfa2V5X2lkIjogImI5ZjJhNjY0YWE5YjIwNDg0Y2MxNTg2MDYzZmVmZGExOTIyNGFjM2IiLAogICJwcml2YXRlX2tleSI6ICItLS0tLUJFR0lOIFBSSVZBVEUgS0VZLS0tLS1cbk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRQzdTSG5LVEVFaVlMamZcbkpmQVBHbUozd3JCY2VJNTBKS0xxS21GWE5RL3REWGJRK2g5YVl4aldJTDhEeDBKZTc0bVovS01uV2dYRjVLWlNcbm9BNktuSU85Yi9SY1NlV2VpSXRSekkzL1lYVitPNkNjcmpKSXl4anFWam5mVzJpM3NhMzd0OUE5VEZkbGZycm5cbjR6UkpiOWl4eU1YNGJMdHFGR3ZCMDNOSWl0QTNzVlo1ODhrb1FBZmgzSmhhQmVnTWorWjRSYko0aGVpQlFUMDNcbnZVbzViRWFQZVQ5RE16bHdzZWFQV2dydDZOME9VRGNBRTl4bGNJek11MjUzUG4vSzgySFpydEx4akd2UkhNVXhcbng0ZjhwSnhmQ3h4QlN3Z1NORit3OWpkbXR2b0wwRmE3ZGducFJlODZWRDY2ejNZenJqNHlLRXRqc2hLZHl5VWRcbkl5cVhoN1JSQWdNQkFBRUNnZ0VBT3pzZHdaeENVVlFUeEFka2wvSTVTRFVidi9NazRwaWZxYjJEa2FnbmhFcG9cbjFJajJsNGlWMTByOS9uenJnY2p5VlBBd3pZWk1JeDFBZVF0RDdoUzRHWmFweXZKWUc3NkZpWFpQUm9DVlB6b3VcbmZyOGRDaWFwbDV0enJDOWx2QXNHd29DTTdJWVRjZmNWdDdjRTEyRDNRS3NGNlo3QjJ6ZmdLS251WVBmK0NFNlRcbmNNMHkwaCtYRS9kMERvSERoVy96YU1yWEhqOFRvd2V1eXRrYmJzNGYvOUZqOVBuU2dET1lQd2xhbFZUcitGUWFcbkpSd1ZqVmxYcEZBUW14M0Jyd25rWnQzQ2lXV2lGM2QrSGk5RXRVYnRWclcxYjZnK1JRT0licWFtcis4YlJuZFhcbjZWZ3FCQWtKWjhSVnlkeFVQMGQxMUdqdU9QRHhCbkhCbmM0UW9rSXJFUUtCZ1FEMUNlaWN1ZGhXdGc0K2dTeGJcbnplanh0VjFONDFtZHVjQnpvMmp5b1dHbzNQVDh3ckJPL3lRRTM0cU9WSi9pZCs4SThoWjRvSWh1K0pBMDBzNmdcblRuSXErdi9kL1RFalk4MW5rWmlDa21SUFdiWHhhWXR4UjIxS1BYckxOTlFKS2ttOHRkeVh5UHFsOE1veUdmQ1dcbjJ2aVBKS05iNkhabnY5Q3lqZEo5ZzJMRG5RS0JnUUREcVN2eURtaGViOTIzSW96NGxlZ01SK205Z2xYVWdTS2dcbkVzZlllbVJmbU5XQitDN3ZhSXlVUm1ZNU55TXhmQlZXc3dXRldLYXhjK0krYnFzZmx6elZZdFpwMThNR2pzTURcbmZlZWZBWDZCWk1zVXQ3Qmw3WjlWSjg1bnRFZHFBQ0xwWitaLzN0SVJWdWdDV1pRMWhrbmxHa0dUMDI0SkVFKytcbk55SDFnM2QzUlFLQmdRQ1J2MXdKWkkwbVBsRklva0tGTkh1YTBUcDNLb1JTU1hzTURTVk9NK2xIckcxWHJtRjZcbkMwNGNTKzQ0N0dMUkxHOFVUaEpKbTRxckh0Ti9aK2dZOTYvMm1xYjRIakpORDM3TVhKQnZFYTN5ZUxTOHEvK1JcbjJGOU1LamRRaU5LWnhQcG84VzhOSlREWTVOa1BaZGh4a2pzSHdVNGRTNjZwMVRESUU0MGd0TFpaRFFLQmdGaldcbktyblFpTnEzOS9iNm5QOFJNVGJDUUFKbmR3anhTUU5kQTVmcW1rQTlhRk9HbCtqamsxQ1BWa0tNSWxLSmdEYkpcbk9heDl2OUc2Ui9NSTFIR1hmV3QxWU56VnRocjRIdHNyQTB0U3BsbWhwZ05XRTZWejZuQURqdGZQSnMyZUdqdlhcbmpQUnArdjhjY21MK3dTZzhQTGprM3ZsN2VlNXJsWWxNQndNdUdjUHhBb0dBZWRueGJXMVJMbVZubEFpSEx1L0xcbmxtZkF3RFdtRWlJMFVnK1BMbm9Pdk81dFE1ZDRXMS94RU44bFA0cWtzcGtmZk1Rbk5oNFNZR0VlQlQzMlpxQ1RcbkpSZ2YwWGpveXZ2dXA5eFhqTWtYcnBZL3ljMXpmcVRaQzBNTzkvMVVjMWJSR2RaMmR5M2xSNU5XYXA3T1h5Zk9cblBQcE5Gb1BUWGd2M3FDcW5sTEhyR3pNPVxuLS0tLS1FTkQgUFJJVkFURSBLRVktLS0tLVxuIiwKICAiY2xpZW50X2VtYWlsIjogImltYWdlLXB1bGxpbmdAYXV0aGVudGljYXRlZC1pbWFnZS1wdWxsaW5nLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwKICAiY2xpZW50X2lkIjogIjExMzc5NzkxNDUzMDA3MzI3ODcxMiIsCiAgImF1dGhfdXJpIjogImh0dHBzOi8vYWNjb3VudHMuZ29vZ2xlLmNvbS9vL29hdXRoMi9hdXRoIiwKICAidG9rZW5fdXJpIjogImh0dHBzOi8vYWNjb3VudHMuZ29vZ2xlLmNvbS9vL29hdXRoMi90b2tlbiIsCiAgImF1dGhfcHJvdmlkZXJfeDUwOV9jZXJ0X3VybCI6ICJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9vYXV0aDIvdjEvY2VydHMiLAogICJjbGllbnRfeDUwOV9jZXJ0X3VybCI6ICJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9yb2JvdC92MS9tZXRhZGF0YS94NTA5L2ltYWdlLXB1bGxpbmclNDBhdXRoZW50aWNhdGVkLWltYWdlLXB1bGxpbmcuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iCn0=",
|
||||
"email": "image-pulling@authenticated-image-pulling.iam.gserviceaccount.com"
|
||||
}
|
||||
}
|
||||
}`
|
||||
secret := &v1.Secret{
|
||||
Data: map[string][]byte{v1.DockerConfigJsonKey: []byte(auth)},
|
||||
Type: v1.SecretTypeDockerConfigJson,
|
||||
}
|
||||
// The following images are not added to NodeImageWhiteList because this test exercises
// image pulling, so the images don't need to be prepulled. The ImagePullPolicy
// is v1.PullAlways, so the framework's image white list check won't block them.
|
||||
for _, testCase := range []struct {
|
||||
description string
|
||||
image string
|
||||
secret bool
|
||||
credentialProvider bool
|
||||
phase v1.PodPhase
|
||||
waiting bool
|
||||
}{
|
||||
{
|
||||
description: "should not be able to pull image from invalid registry",
|
||||
image: "invalid.com/invalid/alpine:3.1",
|
||||
phase: v1.PodPending,
|
||||
waiting: true,
|
||||
},
|
||||
{
|
||||
description: "should not be able to pull non-existing image from gcr.io",
|
||||
image: "gcr.io/google_containers/invalid-image:invalid-tag",
|
||||
phase: v1.PodPending,
|
||||
waiting: true,
|
||||
},
|
||||
{
|
||||
description: "should be able to pull image from gcr.io",
|
||||
image: "gcr.io/google_containers/alpine-with-bash:1.0",
|
||||
phase: v1.PodRunning,
|
||||
waiting: false,
|
||||
},
|
||||
{
|
||||
description: "should be able to pull image from docker hub",
|
||||
image: "alpine:3.1",
|
||||
phase: v1.PodRunning,
|
||||
waiting: false,
|
||||
},
|
||||
{
|
||||
description: "should not be able to pull from private registry without secret",
|
||||
image: "gcr.io/authenticated-image-pulling/alpine:3.1",
|
||||
phase: v1.PodPending,
|
||||
waiting: true,
|
||||
},
|
||||
{
|
||||
description: "should be able to pull from private registry with secret",
|
||||
image: "gcr.io/authenticated-image-pulling/alpine:3.1",
|
||||
secret: true,
|
||||
phase: v1.PodRunning,
|
||||
waiting: false,
|
||||
},
|
||||
{
|
||||
description: "should be able to pull from private registry with credential provider",
|
||||
image: "gcr.io/authenticated-image-pulling/alpine:3.1",
|
||||
credentialProvider: true,
|
||||
phase: v1.PodRunning,
|
||||
waiting: false,
|
||||
},
|
||||
} {
|
||||
testCase := testCase
|
||||
It(testCase.description+" [Conformance]", func() {
|
||||
name := "image-pull-test"
|
||||
command := []string{"/bin/sh", "-c", "while true; do sleep 1; done"}
|
||||
container := ConformanceContainer{
|
||||
PodClient: f.PodClient(),
|
||||
Container: v1.Container{
|
||||
Name: name,
|
||||
Image: testCase.image,
|
||||
Command: command,
|
||||
// PullAlways makes sure that the image will always be pulled even if it is present before the test.
|
||||
ImagePullPolicy: v1.PullAlways,
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
}
|
||||
if testCase.secret {
|
||||
secret.Name = "image-pull-secret-" + string(uuid.NewUUID())
|
||||
By("create image pull secret")
|
||||
_, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
defer f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(secret.Name, nil)
|
||||
container.ImagePullSecrets = []string{secret.Name}
|
||||
}
|
||||
if testCase.credentialProvider {
|
||||
configFile := filepath.Join(services.KubeletRootDirectory, "config.json")
|
||||
err := ioutil.WriteFile(configFile, []byte(auth), 0644)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
defer os.Remove(configFile)
|
||||
}
|
||||
// checkContainerStatus checks whether the container status matches expectation.
|
||||
checkContainerStatus := func() error {
|
||||
status, err := container.GetStatus()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get container status: %v", err)
|
||||
}
|
||||
// We need to check the container state first. The default pod status is Pending; if we check
// the pod phase first and the expected pod phase is Pending, the container status may not
// even show up when we check it.
|
||||
// Check container state
|
||||
if !testCase.waiting {
|
||||
if status.State.Running == nil {
|
||||
return fmt.Errorf("expected container state: Running, got: %q",
|
||||
GetContainerState(status.State))
|
||||
}
|
||||
}
|
||||
if testCase.waiting {
|
||||
if status.State.Waiting == nil {
|
||||
return fmt.Errorf("expected container state: Waiting, got: %q",
|
||||
GetContainerState(status.State))
|
||||
}
|
||||
reason := status.State.Waiting.Reason
|
||||
if reason != images.ErrImagePull.Error() &&
|
||||
reason != images.ErrImagePullBackOff.Error() {
|
||||
return fmt.Errorf("unexpected waiting reason: %q", reason)
|
||||
}
|
||||
}
|
||||
// Check pod phase
|
||||
phase, err := container.GetPhase()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get pod phase: %v", err)
|
||||
}
|
||||
if phase != testCase.phase {
|
||||
return fmt.Errorf("expected pod phase: %q, got: %q", testCase.phase, phase)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// The image registry is not stable, which sometimes causes the test to fail. Add a retry
// mechanism to make the test less flaky.
|
||||
const flakeRetry = 3
|
||||
for i := 1; i <= flakeRetry; i++ {
|
||||
var err error
|
||||
By("create the container")
|
||||
container.Create()
|
||||
By("check the container status")
|
||||
for start := time.Now(); time.Since(start) < retryTimeout; time.Sleep(pollInterval) {
|
||||
if err = checkContainerStatus(); err == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
By("delete the container")
|
||||
container.Delete()
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
if i < flakeRetry {
|
||||
framework.Logf("No.%d attempt failed: %v, retrying...", i, err)
|
||||
} else {
|
||||
framework.Failf("All %d attempts failed: %v", flakeRetry, err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
})
|
||||
})
|
499
vendor/k8s.io/kubernetes/test/e2e_node/security_context_test.go
generated
vendored
Normal file
@ -0,0 +1,499 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"os/exec"
|
||||
"strings"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
|
||||
var _ = framework.KubeDescribe("Security Context", func() {
|
||||
f := framework.NewDefaultFramework("security-context-test")
|
||||
var podClient *framework.PodClient
|
||||
BeforeEach(func() {
|
||||
podClient = f.PodClient()
|
||||
})
|
||||
|
||||
Context("when creating a pod in the host PID namespace", func() {
|
||||
makeHostPidPod := func(podName, image string, command []string, hostPID bool) *v1.Pod {
|
||||
return &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
HostPID: hostPID,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: image,
|
||||
Name: podName,
|
||||
Command: command,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
createAndWaitHostPidPod := func(podName string, hostPID bool) {
|
||||
podClient.Create(makeHostPidPod(podName,
|
||||
busyboxImage,
|
||||
[]string{"sh", "-c", "pidof nginx || true"},
|
||||
hostPID,
|
||||
))
|
||||
|
||||
podClient.WaitForSuccess(podName, framework.PodStartTimeout)
|
||||
}
|
||||
|
||||
nginxPid := ""
|
||||
BeforeEach(func() {
|
||||
nginxPodName := "nginx-hostpid-" + string(uuid.NewUUID())
|
||||
podClient.CreateSync(makeHostPidPod(nginxPodName,
|
||||
imageutils.GetE2EImage(imageutils.NginxSlim),
|
||||
nil,
|
||||
true,
|
||||
))
|
||||
|
||||
output := f.ExecShellInContainer(nginxPodName, nginxPodName,
|
||||
"cat /var/run/nginx.pid")
|
||||
nginxPid = strings.TrimSpace(output)
|
||||
})
|
||||
|
||||
It("should show its pid in the host PID namespace", func() {
|
||||
busyboxPodName := "busybox-hostpid-" + string(uuid.NewUUID())
|
||||
createAndWaitHostPidPod(busyboxPodName, true)
|
||||
logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
|
||||
if err != nil {
|
||||
framework.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err)
|
||||
}
|
||||
|
||||
pids := strings.TrimSpace(logs)
|
||||
framework.Logf("Got nginx's pid %q from pod %q", pids, busyboxPodName)
|
||||
if pids == "" {
|
||||
framework.Failf("nginx's pid should be seen by hostpid containers")
|
||||
}
|
||||
|
||||
pidSets := sets.NewString(strings.Split(pids, " ")...)
|
||||
if !pidSets.Has(nginxPid) {
|
||||
framework.Failf("nginx's pid should be seen by hostpid containers")
|
||||
}
|
||||
})
|
||||
|
||||
It("should not show its pid in the non-hostpid containers", func() {
|
||||
busyboxPodName := "busybox-non-hostpid-" + string(uuid.NewUUID())
|
||||
createAndWaitHostPidPod(busyboxPodName, false)
|
||||
logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
|
||||
if err != nil {
|
||||
framework.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err)
|
||||
}
|
||||
|
||||
pids := strings.TrimSpace(logs)
|
||||
framework.Logf("Got nginx's pid %q from pod %q", pids, busyboxPodName)
|
||||
pidSets := sets.NewString(strings.Split(pids, " ")...)
|
||||
if pidSets.Has(nginxPid) {
|
||||
framework.Failf("nginx's pid should not be seen by non-hostpid containers")
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
Context("when creating a pod in the host IPC namespace", func() {
|
||||
makeHostIPCPod := func(podName, image string, command []string, hostIPC bool) *v1.Pod {
|
||||
return &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
HostIPC: hostIPC,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: image,
|
||||
Name: podName,
|
||||
Command: command,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
createAndWaitHostIPCPod := func(podName string, hostNetwork bool) {
|
||||
podClient.Create(makeHostIPCPod(podName,
|
||||
busyboxImage,
|
||||
[]string{"sh", "-c", "ipcs -m | awk '{print $2}'"},
|
||||
hostNetwork,
|
||||
))
|
||||
|
||||
podClient.WaitForSuccess(podName, framework.PodStartTimeout)
|
||||
}
|
||||
|
||||
hostSharedMemoryID := ""
|
||||
BeforeEach(func() {
|
||||
output, err := exec.Command("sh", "-c", "ipcmk -M 1M | awk '{print $NF}'").Output()
|
||||
if err != nil {
|
||||
framework.Failf("Failed to create the shared memory on the host: %v", err)
|
||||
}
|
||||
hostSharedMemoryID = strings.TrimSpace(string(output))
|
||||
framework.Logf("Got host shared memory ID %q", hostSharedMemoryID)
|
||||
})
|
||||
|
||||
It("should show the shared memory ID in the host IPC containers", func() {
|
||||
busyboxPodName := "busybox-hostipc-" + string(uuid.NewUUID())
|
||||
createAndWaitHostIPCPod(busyboxPodName, true)
|
||||
logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
|
||||
if err != nil {
|
||||
framework.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err)
|
||||
}
|
||||
|
||||
podSharedMemoryIDs := strings.TrimSpace(logs)
|
||||
framework.Logf("Got shared memory IDs %q from pod %q", podSharedMemoryIDs, busyboxPodName)
|
||||
if !strings.Contains(podSharedMemoryIDs, hostSharedMemoryID) {
|
||||
framework.Failf("hostIPC container should show shared memory IDs on host")
|
||||
}
|
||||
})
|
||||
|
||||
It("should not show the shared memory ID in the non-hostIPC containers", func() {
|
||||
busyboxPodName := "busybox-non-hostipc-" + string(uuid.NewUUID())
|
||||
createAndWaitHostIPCPod(busyboxPodName, false)
|
||||
logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
|
||||
if err != nil {
|
||||
framework.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err)
|
||||
}
|
||||
|
||||
podSharedMemoryIDs := strings.TrimSpace(logs)
|
||||
framework.Logf("Got shared memory IDs %q from pod %q", podSharedMemoryIDs, busyboxPodName)
|
||||
if strings.Contains(podSharedMemoryIDs, hostSharedMemoryID) {
|
||||
framework.Failf("non-hostIPC container should not show shared memory IDs on host")
|
||||
}
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
if hostSharedMemoryID != "" {
|
||||
_, err := exec.Command("sh", "-c", fmt.Sprintf("ipcrm -m %q", hostSharedMemoryID)).Output()
|
||||
if err != nil {
|
||||
framework.Failf("Failed to remove shared memory %q on the host: %v", hostSharedMemoryID, err)
|
||||
}
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
Context("when creating a pod in the host network namespace", func() {
|
||||
makeHostNetworkPod := func(podName, image string, command []string, hostNetwork bool) *v1.Pod {
|
||||
return &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
HostNetwork: hostNetwork,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: image,
|
||||
Name: podName,
|
||||
Command: command,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
listListeningPortsCommand := []string{"sh", "-c", "netstat -ln"}
|
||||
createAndWaitHostNetworkPod := func(podName string, hostNetwork bool) {
|
||||
podClient.Create(makeHostNetworkPod(podName,
|
||||
busyboxImage,
|
||||
listListeningPortsCommand,
|
||||
hostNetwork,
|
||||
))
|
||||
|
||||
podClient.WaitForSuccess(podName, framework.PodStartTimeout)
|
||||
}
|
||||
|
||||
listeningPort := ""
|
||||
var l net.Listener
|
||||
var err error
|
||||
BeforeEach(func() {
|
||||
l, err = net.Listen("tcp", ":0")
|
||||
if err != nil {
|
||||
framework.Failf("Failed to open a new tcp port: %v", err)
|
||||
}
|
||||
addr := strings.Split(l.Addr().String(), ":")
|
||||
listeningPort = addr[len(addr)-1]
|
||||
framework.Logf("Opened a new tcp port %q", listeningPort)
|
||||
})
|
||||
|
||||
It("should listen on same port in the host network containers", func() {
|
||||
busyboxPodName := "busybox-hostnetwork-" + string(uuid.NewUUID())
|
||||
createAndWaitHostNetworkPod(busyboxPodName, true)
|
||||
logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
|
||||
if err != nil {
|
||||
framework.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err)
|
||||
}
|
||||
|
||||
framework.Logf("Got logs for pod %q: %q", busyboxPodName, logs)
|
||||
if !strings.Contains(logs, listeningPort) {
|
||||
framework.Failf("host-networked container should listening on same port as host")
|
||||
}
|
||||
})
|
||||
|
||||
It("shouldn't show the same port in the non-hostnetwork containers", func() {
|
||||
busyboxPodName := "busybox-non-hostnetwork-" + string(uuid.NewUUID())
|
||||
createAndWaitHostNetworkPod(busyboxPodName, false)
|
||||
logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
|
||||
if err != nil {
|
||||
framework.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err)
|
||||
}
|
||||
|
||||
framework.Logf("Got logs for pod %q: %q", busyboxPodName, logs)
|
||||
if strings.Contains(logs, listeningPort) {
|
||||
framework.Failf("non-hostnetworked container shouldn't show the same port as host")
|
||||
}
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
if l != nil {
|
||||
l.Close()
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
Context("When creating a container with runAsUser", func() {
|
||||
makeUserPod := func(podName, image string, command []string, userid int64) *v1.Pod {
|
||||
return &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: image,
|
||||
Name: podName,
|
||||
Command: command,
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
RunAsUser: &userid,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
createAndWaitUserPod := func(userid int64) {
|
||||
podName := fmt.Sprintf("busybox-user-%d-%s", userid, uuid.NewUUID())
|
||||
podClient.Create(makeUserPod(podName,
|
||||
busyboxImage,
|
||||
[]string{"sh", "-c", fmt.Sprintf("test $(id -u) -eq %d", userid)},
|
||||
userid,
|
||||
))
|
||||
|
||||
podClient.WaitForSuccess(podName, framework.PodStartTimeout)
|
||||
}
|
||||
|
||||
It("should run the container with uid 65534", func() {
|
||||
createAndWaitUserPod(65534)
|
||||
})
|
||||
|
||||
It("should run the container with uid 0", func() {
|
||||
createAndWaitUserPod(0)
|
||||
})
|
||||
})
|
||||
|
||||
Context("When creating a pod with readOnlyRootFilesystem", func() {
|
||||
makeUserPod := func(podName, image string, command []string, readOnlyRootFilesystem bool) *v1.Pod {
|
||||
return &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: image,
|
||||
Name: podName,
|
||||
Command: command,
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
ReadOnlyRootFilesystem: &readOnlyRootFilesystem,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
createAndWaitUserPod := func(readOnlyRootFilesystem bool) string {
|
||||
podName := fmt.Sprintf("busybox-readonly-%v-%s", readOnlyRootFilesystem, uuid.NewUUID())
|
||||
podClient.Create(makeUserPod(podName,
|
||||
"busybox",
|
||||
[]string{"sh", "-c", "touch checkfile"},
|
||||
readOnlyRootFilesystem,
|
||||
))
|
||||
|
||||
if readOnlyRootFilesystem {
|
||||
podClient.WaitForFailure(podName, framework.PodStartTimeout)
|
||||
} else {
|
||||
podClient.WaitForSuccess(podName, framework.PodStartTimeout)
|
||||
}
|
||||
|
||||
return podName
|
||||
}
|
||||
|
||||
It("should run the container with readonly rootfs when readOnlyRootFilesystem=true", func() {
|
||||
createAndWaitUserPod(true)
|
||||
})
|
||||
|
||||
It("should run the container with writable rootfs when readOnlyRootFilesystem=false", func() {
|
||||
createAndWaitUserPod(false)
|
||||
})
|
||||
})
|
||||
|
||||
Context("when creating containers with AllowPrivilegeEscalation", func() {
|
||||
|
||||
BeforeEach(func() {
|
||||
if framework.TestContext.ContainerRuntime == "docker" {
|
||||
isSupported, err := isDockerNoNewPrivilegesSupported()
|
||||
framework.ExpectNoError(err)
|
||||
if !isSupported {
|
||||
framework.Skipf("Skipping because no_new_privs is not supported in this docker")
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
makeAllowPrivilegeEscalationPod := func(podName string, allowPrivilegeEscalation *bool, uid int64) *v1.Pod {
|
||||
return &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: imageutils.GetE2EImage(imageutils.Nonewprivs),
|
||||
Name: podName,
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
AllowPrivilegeEscalation: allowPrivilegeEscalation,
|
||||
RunAsUser: &uid,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
createAndMatchOutput := func(podName, output string, allowPrivilegeEscalation *bool, uid int64) error {
|
||||
podClient.Create(makeAllowPrivilegeEscalationPod(podName,
|
||||
allowPrivilegeEscalation,
|
||||
uid,
|
||||
))
|
||||
|
||||
podClient.WaitForSuccess(podName, framework.PodStartTimeout)
|
||||
|
||||
if err := podClient.MatchContainerOutput(podName, podName, output); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
It("should allow privilege escalation when not explicitly set and uid != 0", func() {
|
||||
podName := "alpine-nnp-nil-" + string(uuid.NewUUID())
|
||||
if err := createAndMatchOutput(podName, "Effective uid: 0", nil, 1000); err != nil {
|
||||
framework.Failf("Match output for pod %q failed: %v", podName, err)
|
||||
}
|
||||
})
|
||||
|
||||
It("should not allow privilege escalation when false", func() {
|
||||
podName := "alpine-nnp-false-" + string(uuid.NewUUID())
|
||||
apeFalse := false
|
||||
if err := createAndMatchOutput(podName, "Effective uid: 1000", &apeFalse, 1000); err != nil {
|
||||
framework.Failf("Match output for pod %q failed: %v", podName, err)
|
||||
}
|
||||
})
|
||||
|
||||
It("should allow privilege escalation when true", func() {
|
||||
podName := "alpine-nnp-true-" + string(uuid.NewUUID())
|
||||
apeTrue := true
|
||||
if err := createAndMatchOutput(podName, "Effective uid: 0", &apeTrue, 1000); err != nil {
|
||||
framework.Failf("Match output for pod %q failed: %v", podName, err)
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
Context("When creating a pod with privileged", func() {
|
||||
makeUserPod := func(podName, image string, command []string, privileged bool) *v1.Pod {
|
||||
return &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: image,
|
||||
Name: podName,
|
||||
Command: command,
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
Privileged: &privileged,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
createAndWaitUserPod := func(privileged bool) string {
|
||||
podName := fmt.Sprintf("busybox-privileged-%v-%s", privileged, uuid.NewUUID())
|
||||
podClient.Create(makeUserPod(podName,
|
||||
busyboxImage,
|
||||
[]string{"sh", "-c", "ip link add dummy0 type dummy || true"},
|
||||
privileged,
|
||||
))
|
||||
|
||||
podClient.WaitForSuccess(podName, framework.PodStartTimeout)
|
||||
|
||||
return podName
|
||||
}
|
||||
|
||||
It("should run the container as privileged when true", func() {
|
||||
podName := createAndWaitUserPod(true)
|
||||
logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName)
|
||||
if err != nil {
|
||||
framework.Failf("GetPodLogs for pod %q failed: %v", podName, err)
|
||||
}
|
||||
|
||||
framework.Logf("Got logs for pod %q: %q", podName, logs)
|
||||
if strings.Contains(logs, "Operation not permitted") {
|
||||
framework.Failf("privileged container should be able to create dummy device")
|
||||
}
|
||||
})
|
||||
|
||||
It("should run the container as unprivileged when false", func() {
|
||||
podName := createAndWaitUserPod(false)
|
||||
logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName)
|
||||
if err != nil {
|
||||
framework.Failf("GetPodLogs for pod %q failed: %v", podName, err)
|
||||
}
|
||||
|
||||
framework.Logf("Got logs for pod %q: %q", podName, logs)
|
||||
if !strings.Contains(logs, "Operation not permitted") {
|
||||
framework.Failf("unprivileged container shouldn't be able to create dummy device")
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
})
|
56
vendor/k8s.io/kubernetes/test/e2e_node/services/BUILD
generated
vendored
Normal file
@ -0,0 +1,56 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"apiserver.go",
|
||||
"etcd.go",
|
||||
"internal_services.go",
|
||||
"kubelet.go",
|
||||
"logs.go",
|
||||
"namespace_controller.go",
|
||||
"server.go",
|
||||
"services.go",
|
||||
"util.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/test/e2e_node/services",
|
||||
deps = [
|
||||
"//cmd/kube-apiserver/app:go_default_library",
|
||||
"//cmd/kube-apiserver/app/options:go_default_library",
|
||||
"//pkg/api/legacyscheme:go_default_library",
|
||||
"//pkg/controller/namespace:go_default_library",
|
||||
"//pkg/features:go_default_library",
|
||||
"//test/e2e/framework:go_default_library",
|
||||
"//test/e2e_node/builder:go_default_library",
|
||||
"//vendor/github.com/coreos/etcd/etcdserver:go_default_library",
|
||||
"//vendor/github.com/coreos/etcd/etcdserver/api/v2http:go_default_library",
|
||||
"//vendor/github.com/coreos/etcd/pkg/transport:go_default_library",
|
||||
"//vendor/github.com/coreos/etcd/pkg/types:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/github.com/kardianos/osext:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
"//vendor/k8s.io/client-go/dynamic:go_default_library",
|
||||
"//vendor/k8s.io/client-go/informers:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//vendor/k8s.io/client-go/rest:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
91
vendor/k8s.io/kubernetes/test/e2e_node/services/apiserver.go
generated
vendored
Normal file
@ -0,0 +1,91 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package services
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
|
||||
apiserver "k8s.io/kubernetes/cmd/kube-apiserver/app"
|
||||
"k8s.io/kubernetes/cmd/kube-apiserver/app/options"
|
||||
)
|
||||
|
||||
const (
|
||||
clusterIPRange = "10.0.0.1/24"
|
||||
apiserverClientURL = "http://localhost:8080"
|
||||
apiserverHealthCheckURL = apiserverClientURL + "/healthz"
|
||||
)
|
||||
|
||||
// APIServer is a server which manages apiserver.
|
||||
type APIServer struct{}
|
||||
|
||||
// NewAPIServer creates an apiserver.
|
||||
func NewAPIServer() *APIServer {
|
||||
return &APIServer{}
|
||||
}
|
||||
|
||||
// Start starts the apiserver and returns when the apiserver is ready.
|
||||
func (a *APIServer) Start() error {
|
||||
config := options.NewServerRunOptions()
|
||||
config.Etcd.StorageConfig.ServerList = []string{getEtcdClientURL()}
|
||||
// TODO: Current setup of etcd in e2e-node tests doesn't support etcd v3
|
||||
// protocol. We should migrate it to use the same infrastructure as all
|
||||
// other tests (pkg/storage/etcd/testing).
|
||||
config.Etcd.StorageConfig.Type = "etcd2"
|
||||
_, ipnet, err := net.ParseCIDR(clusterIPRange)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
config.ServiceClusterIPRange = *ipnet
|
||||
config.AllowPrivileged = true
|
||||
errCh := make(chan error)
|
||||
go func() {
|
||||
defer close(errCh)
|
||||
stopCh := make(chan struct{})
|
||||
defer close(stopCh)
|
||||
err := apiserver.Run(config, stopCh)
|
||||
if err != nil {
|
||||
errCh <- fmt.Errorf("run apiserver error: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
err = readinessCheck("apiserver", []string{apiserverHealthCheckURL}, errCh)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop stops the apiserver. Currently, there is no way to stop the apiserver.
|
||||
// The function is here only for completeness.
|
||||
func (a *APIServer) Stop() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
const apiserverName = "apiserver"
|
||||
|
||||
func (a *APIServer) Name() string {
|
||||
return apiserverName
|
||||
}
|
||||
|
||||
func getAPIServerClientURL() string {
|
||||
return apiserverClientURL
|
||||
}
|
||||
|
||||
func getAPIServerHealthCheckURL() string {
|
||||
return apiserverHealthCheckURL
|
||||
}
|
160
vendor/k8s.io/kubernetes/test/e2e_node/services/etcd.go
generated
vendored
Normal file
@ -0,0 +1,160 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package services
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/etcd/etcdserver"
|
||||
"github.com/coreos/etcd/etcdserver/api/v2http"
|
||||
"github.com/coreos/etcd/pkg/transport"
|
||||
"github.com/coreos/etcd/pkg/types"
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
// TODO: These tests should not be leveraging v2http
|
||||
// TODO(random-liu): Add service interface to manage services with the same behaviour.
|
||||
|
||||
// All of the following configuration values are taken from the etcd source code.
|
||||
// TODO(random-liu): Use embed.NewConfig after etcd3 is supported.
|
||||
const (
|
||||
etcdName = "etcd"
|
||||
clientURLStr = "http://localhost:4001" // clientURL has a listener created and handles etcd API traffic
peerURLStr = "http://localhost:7001" // peerURL doesn't have a listener created; it is only used to pass etcd validation
|
||||
snapCount = etcdserver.DefaultSnapCount
|
||||
maxSnapFiles = 5
|
||||
maxWALFiles = 5
|
||||
tickMs = 100
|
||||
electionTicks = 10
|
||||
etcdHealthCheckURL = clientURLStr + "/v2/keys/" // Trailing slash is required.
|
||||
)
|
||||
|
||||
// EtcdServer is a server which manages etcd.
|
||||
type EtcdServer struct {
|
||||
*etcdserver.EtcdServer
|
||||
config *etcdserver.ServerConfig
|
||||
clientListen net.Listener
|
||||
}
|
||||
|
||||
// NewEtcd creates a new default etcd server using 'dataDir' for persistence.
|
||||
func NewEtcd(dataDir string) *EtcdServer {
|
||||
clientURLs, err := types.NewURLs([]string{clientURLStr})
|
||||
if err != nil {
|
||||
glog.Fatalf("Failed to parse client url %q: %v", clientURLStr, err)
|
||||
}
|
||||
peerURLs, err := types.NewURLs([]string{peerURLStr})
|
||||
if err != nil {
|
||||
glog.Fatalf("Failed to parse peer url %q: %v", peerURLStr, err)
|
||||
}
|
||||
|
||||
config := &etcdserver.ServerConfig{
|
||||
Name: etcdName,
|
||||
ClientURLs: clientURLs,
|
||||
PeerURLs: peerURLs,
|
||||
DataDir: dataDir,
|
||||
InitialPeerURLsMap: map[string]types.URLs{etcdName: peerURLs},
|
||||
NewCluster: true,
|
||||
SnapCount: snapCount,
|
||||
MaxSnapFiles: maxSnapFiles,
|
||||
MaxWALFiles: maxWALFiles,
|
||||
TickMs: tickMs,
|
||||
ElectionTicks: electionTicks,
|
||||
}
|
||||
|
||||
return &EtcdServer{
|
||||
config: config,
|
||||
}
|
||||
}
|
||||
|
||||
// Start starts the etcd server and begins listening for client connections
|
||||
func (e *EtcdServer) Start() error {
|
||||
var err error
|
||||
e.EtcdServer, err = etcdserver.NewServer(e.config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// create the client listener; there should be only one URL
|
||||
e.clientListen, err = createListener(e.config.ClientURLs[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// start etcd
|
||||
e.EtcdServer.Start()
|
||||
|
||||
// setup client listener
|
||||
ch := v2http.NewClientHandler(e.EtcdServer, e.config.ReqTimeout())
|
||||
errCh := make(chan error)
|
||||
go func(l net.Listener) {
|
||||
defer close(errCh)
|
||||
srv := &http.Server{
|
||||
Handler: ch,
|
||||
ReadTimeout: 5 * time.Minute,
|
||||
}
|
||||
// Serve always returns a non-nil error.
|
||||
errCh <- srv.Serve(l)
|
||||
}(e.clientListen)
|
||||
|
||||
err = readinessCheck("etcd", []string{etcdHealthCheckURL}, errCh)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop closes all connections and stops the Etcd server
|
||||
func (e *EtcdServer) Stop() error {
|
||||
if e.EtcdServer != nil {
|
||||
e.EtcdServer.Stop()
|
||||
}
|
||||
if e.clientListen != nil {
|
||||
err := e.clientListen.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Name returns the server's unique name
|
||||
func (e *EtcdServer) Name() string {
|
||||
return etcdName
|
||||
}
|
||||
|
||||
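// createListener opens a TCP listener on the URL's host and wraps it in a
// keep-alive listener for the URL's scheme.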
func createListener(url url.URL) (net.Listener, error) {
|
||||
l, err := net.Listen("tcp", url.Host)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
l, err = transport.NewKeepAliveListener(l, url.Scheme, &tls.Config{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return l, nil
|
||||
}
|
||||
|
||||
func getEtcdClientURL() string {
|
||||
return clientURLStr
|
||||
}
|
||||
|
||||
func getEtcdHealthCheckURL() string {
|
||||
return etcdHealthCheckURL
|
||||
}
|
143
vendor/k8s.io/kubernetes/test/e2e_node/services/internal_services.go
generated
vendored
Normal file
@ -0,0 +1,143 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package services
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
// e2eServices manages the e2e services in the current process.
|
||||
type e2eServices struct {
|
||||
rmDirs []string
|
||||
// statically linked e2e services
|
||||
etcdServer *EtcdServer
|
||||
apiServer *APIServer
|
||||
nsController *NamespaceController
|
||||
}
|
||||
|
||||
func newE2EServices() *e2eServices {
|
||||
return &e2eServices{}
|
||||
}
|
||||
|
||||
// run starts all e2e services and waits for the termination signal. Once it receives the
// termination signal, it stops the e2e services gracefully.
|
||||
func (es *e2eServices) run() error {
|
||||
defer es.stop()
|
||||
if err := es.start(); err != nil {
|
||||
return err
|
||||
}
|
||||
// Wait until receiving a termination signal.
|
||||
waitForTerminationSignal()
|
||||
return nil
|
||||
}
|
||||
|
||||
// start starts the test's embedded services or returns an error.
|
||||
func (es *e2eServices) start() error {
|
||||
glog.Info("Starting e2e services...")
|
||||
err := es.startEtcd()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = es.startApiServer()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = es.startNamespaceController()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
glog.Info("E2E services started.")
|
||||
return nil
|
||||
}
|
||||
|
||||
// stop stops the embedded e2e services.
|
||||
func (es *e2eServices) stop() {
|
||||
glog.Info("Stopping e2e services...")
|
||||
// TODO(random-liu): Use a loop to stop all services after introducing
|
||||
// service interface.
|
||||
glog.Info("Stopping namespace controller")
|
||||
if es.nsController != nil {
|
||||
if err := es.nsController.Stop(); err != nil {
|
||||
glog.Errorf("Failed to stop %q: %v", es.nsController.Name(), err)
|
||||
}
|
||||
}
|
||||
|
||||
glog.Info("Stopping API server")
|
||||
if es.apiServer != nil {
|
||||
if err := es.apiServer.Stop(); err != nil {
|
||||
glog.Errorf("Failed to stop %q: %v", es.apiServer.Name(), err)
|
||||
}
|
||||
}
|
||||
|
||||
glog.Info("Stopping etcd")
|
||||
if es.etcdServer != nil {
|
||||
if err := es.etcdServer.Stop(); err != nil {
|
||||
glog.Errorf("Failed to stop %q: %v", es.etcdServer.Name(), err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, d := range es.rmDirs {
|
||||
glog.Infof("Deleting directory %v", d)
|
||||
err := os.RemoveAll(d)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to delete directory %s.\n%v", d, err)
|
||||
}
|
||||
}
|
||||
|
||||
glog.Info("E2E services stopped.")
|
||||
}
|
||||
|
||||
// startEtcd starts the embedded etcd instance or returns an error.
|
||||
func (es *e2eServices) startEtcd() error {
|
||||
glog.Info("Starting etcd")
|
||||
// Create the data directory in the current working directory.
|
||||
dataDir, err := ioutil.TempDir(".", "etcd")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Mark the dataDir as a directory to remove.
|
||||
es.rmDirs = append(es.rmDirs, dataDir)
|
||||
es.etcdServer = NewEtcd(dataDir)
|
||||
return es.etcdServer.Start()
|
||||
}
|
||||
|
||||
// startApiServer starts the embedded API server or returns an error.
|
||||
func (es *e2eServices) startApiServer() error {
|
||||
glog.Info("Starting API server")
|
||||
es.apiServer = NewAPIServer()
|
||||
return es.apiServer.Start()
|
||||
}
|
||||
|
||||
// startNamespaceController starts the embedded namespace controller or returns an error.
|
||||
func (es *e2eServices) startNamespaceController() error {
|
||||
glog.Info("Starting namespace controller")
|
||||
es.nsController = NewNamespaceController(framework.TestContext.Host)
|
||||
return es.nsController.Start()
|
||||
}
|
||||
|
||||
// getServicesHealthCheckURLs returns the health check urls for the internal services.
|
||||
func getServicesHealthCheckURLs() []string {
|
||||
return []string{
|
||||
getEtcdHealthCheckURL(),
|
||||
getAPIServerHealthCheckURL(),
|
||||
}
|
||||
}
|
331
vendor/k8s.io/kubernetes/test/e2e_node/services/kubelet.go
generated
vendored
Normal file
@ -0,0 +1,331 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package services
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e_node/builder"
|
||||
)
|
||||
|
||||
// TODO(random-liu): Replace this with standard kubelet launcher.
|
||||
|
||||
// args is the type used to accumulate args from the flags with the same name.
|
||||
type args []string
|
||||
|
||||
// String function of flag.Value
|
||||
func (a *args) String() string {
|
||||
return fmt.Sprint(*a)
|
||||
}
|
||||
|
||||
// Set function of flag.Value
|
||||
func (a *args) Set(value string) error {
|
||||
// Someone else is calling flag.Parse after the flags are parsed in the
|
||||
// test framework. Use this to avoid the flag being parsed twice.
|
||||
// TODO(random-liu): Figure out who is parsing the flags.
|
||||
if flag.Parsed() {
|
||||
return nil
|
||||
}
|
||||
// Note that we assume all white space in flag string is separating fields
|
||||
na := strings.Fields(value)
|
||||
*a = append(*a, na...)
|
||||
return nil
|
||||
}
|
||||
|
||||
// kubeletArgs is the override kubelet args specified by the test runner.
|
||||
var kubeletArgs args
|
||||
|
||||
func init() {
|
||||
flag.Var(&kubeletArgs, "kubelet-flags", "Kubelet flags passed to kubelet. These override the default kubelet flags in the test; flags specified in multiple kubelet-flags are concatenated.")
|
||||
}
|
||||
|
||||
// RunKubelet starts the kubelet and waits for a termination signal. Once it
// receives the termination signal, it stops the kubelet gracefully.
|
||||
func RunKubelet() {
|
||||
var err error
|
||||
// Enable monitorParent to make sure kubelet will receive termination signal
|
||||
// when test process exits.
|
||||
e := NewE2EServices(true /* monitorParent */)
|
||||
defer e.Stop()
|
||||
e.kubelet, err = e.startKubelet()
|
||||
if err != nil {
|
||||
glog.Fatalf("Failed to start kubelet: %v", err)
|
||||
}
|
||||
// Wait until receiving a termination signal.
|
||||
waitForTerminationSignal()
|
||||
}
|
||||
|
||||
const (
|
||||
// Ports of different e2e services.
|
||||
kubeletPort = "10250"
|
||||
kubeletReadOnlyPort = "10255"
|
||||
KubeletRootDirectory = "/var/lib/kubelet"
|
||||
// Health check url of kubelet
|
||||
kubeletHealthCheckURL = "http://127.0.0.1:" + kubeletReadOnlyPort + "/healthz"
|
||||
)
|
||||
|
||||
// startKubelet starts the Kubelet in a separate process or returns an error
|
||||
// if the Kubelet fails to start.
|
||||
func (e *E2EServices) startKubelet() (*server, error) {
|
||||
glog.Info("Starting kubelet")
|
||||
|
||||
// set feature gates so we can check which features are enabled and pass the appropriate flags
|
||||
utilfeature.DefaultFeatureGate.Set(framework.TestContext.FeatureGates)
|
||||
|
||||
// Build kubeconfig
|
||||
kubeconfigPath, err := createKubeconfigCWD()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Create pod manifest path
|
||||
manifestPath, err := createPodManifestDirectory()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
e.rmDirs = append(e.rmDirs, manifestPath)
|
||||
err = createRootDirectory(KubeletRootDirectory)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var killCommand, restartCommand *exec.Cmd
|
||||
var isSystemd bool
|
||||
// Apply default kubelet flags.
|
||||
cmdArgs := []string{}
|
||||
if systemdRun, err := exec.LookPath("systemd-run"); err == nil {
|
||||
// On systemd services, detection of a service / unit works reliably while
|
||||
// detection of a process started from an ssh session does not work.
|
||||
// Since kubelet will typically be run as a service it also makes more
|
||||
// sense to test it that way
|
||||
isSystemd = true
|
||||
unitName := fmt.Sprintf("kubelet-%d.service", rand.Int31())
|
||||
cmdArgs = append(cmdArgs, systemdRun, "--unit="+unitName, "--slice=runtime.slice", "--remain-after-exit", builder.GetKubeletServerBin())
|
||||
killCommand = exec.Command("systemctl", "kill", unitName)
|
||||
restartCommand = exec.Command("systemctl", "restart", unitName)
|
||||
e.logs["kubelet.log"] = LogFileData{
|
||||
Name: "kubelet.log",
|
||||
JournalctlCommand: []string{"-u", unitName},
|
||||
}
|
||||
cmdArgs = append(cmdArgs,
|
||||
"--kubelet-cgroups=/kubelet.slice",
|
||||
"--cgroup-root=/",
|
||||
)
|
||||
} else {
|
||||
cmdArgs = append(cmdArgs, builder.GetKubeletServerBin())
|
||||
cmdArgs = append(cmdArgs,
|
||||
// TODO(random-liu): Get rid of this docker specific thing.
|
||||
"--runtime-cgroups=/docker-daemon",
|
||||
"--kubelet-cgroups=/kubelet",
|
||||
"--cgroup-root=/",
|
||||
"--system-cgroups=/system",
|
||||
)
|
||||
}
|
||||
cmdArgs = append(cmdArgs,
|
||||
"--kubeconfig", kubeconfigPath,
|
||||
"--address", "0.0.0.0",
|
||||
"--port", kubeletPort,
|
||||
"--read-only-port", kubeletReadOnlyPort,
|
||||
"--root-dir", KubeletRootDirectory,
|
||||
"--volume-stats-agg-period", "10s", // Aggregate volumes frequently so tests don't need to wait as long
|
||||
"--allow-privileged", "true",
|
||||
"--serialize-image-pulls", "false",
|
||||
"--pod-manifest-path", manifestPath,
|
||||
"--file-check-frequency", "10s", // Check file frequently so tests won't wait too long
|
||||
"--docker-disable-shared-pid=false",
|
||||
// Assign a fixed CIDR to the node because there is no node controller.
|
||||
//
|
||||
// Note: this MUST be in sync with with the IP in
|
||||
// - cluster/gce/config-test.sh and
|
||||
// - test/e2e_node/conformance/run_test.sh.
|
||||
"--pod-cidr", "10.100.0.0/24",
|
||||
"--eviction-pressure-transition-period", "30s",
|
||||
// Apply test framework feature gates by default. This could also be overridden
|
||||
// by kubelet-flags.
|
||||
"--feature-gates", framework.TestContext.FeatureGates,
|
||||
"--eviction-hard", "memory.available<250Mi,nodefs.available<10%,nodefs.inodesFree<5%", // The hard eviction thresholds.
|
||||
"--eviction-minimum-reclaim", "nodefs.available=5%,nodefs.inodesFree=5%", // The minimum reclaimed resources after eviction.
|
||||
"--v", LOG_VERBOSITY_LEVEL, "--logtostderr",
|
||||
)
|
||||
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) {
|
||||
// Enable dynamic config if the feature gate is enabled
|
||||
dynamicConfigDir, err := getDynamicConfigDir()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cmdArgs = append(cmdArgs, "--dynamic-config-dir", dynamicConfigDir)
|
||||
}
|
||||
|
||||
// Enable kubenet by default.
|
||||
cniBinDir, err := getCNIBinDirectory()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cniConfDir, err := getCNIConfDirectory()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cmdArgs = append(cmdArgs,
|
||||
"--network-plugin=kubenet",
|
||||
"--cni-bin-dir", cniBinDir,
|
||||
"--cni-conf-dir", cniConfDir)
|
||||
|
||||
// Keep hostname override for convenience.
|
||||
if framework.TestContext.NodeName != "" { // If node name is specified, set hostname override.
|
||||
cmdArgs = append(cmdArgs, "--hostname-override", framework.TestContext.NodeName)
|
||||
}
|
||||
|
||||
// Override the default kubelet flags.
|
||||
cmdArgs = append(cmdArgs, kubeletArgs...)
|
||||
|
||||
// Adjust the args if we are running kubelet with systemd.
|
||||
if isSystemd {
|
||||
adjustArgsForSystemd(cmdArgs)
|
||||
}
|
||||
|
||||
cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...)
|
||||
server := newServer(
|
||||
"kubelet",
|
||||
cmd,
|
||||
killCommand,
|
||||
restartCommand,
|
||||
[]string{kubeletHealthCheckURL},
|
||||
"kubelet.log",
|
||||
e.monitorParent,
|
||||
true /* restartOnExit */)
|
||||
return server, server.start()
|
||||
}
|
||||
|
||||
// createPodManifestDirectory creates pod manifest directory.
|
||||
func createPodManifestDirectory() (string, error) {
|
||||
cwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get current working directory: %v", err)
|
||||
}
|
||||
path, err := ioutil.TempDir(cwd, "pod-manifest")
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to create static pod manifest directory: %v", err)
|
||||
}
|
||||
return path, nil
|
||||
}
|
||||
|
||||
// createKubeconfig creates a kubeconfig file at the fully qualified `path`. The parent dirs must exist.
|
||||
func createKubeconfig(path string) error {
|
||||
kubeconfig := []byte(`apiVersion: v1
|
||||
kind: Config
|
||||
users:
|
||||
- name: kubelet
|
||||
clusters:
|
||||
- cluster:
|
||||
server: ` + getAPIServerClientURL() + `
|
||||
insecure-skip-tls-verify: true
|
||||
name: local
|
||||
contexts:
|
||||
- context:
|
||||
cluster: local
|
||||
user: kubelet
|
||||
name: local-context
|
||||
current-context: local-context`)
|
||||
|
||||
if err := ioutil.WriteFile(path, kubeconfig, 0666); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func createRootDirectory(path string) error {
|
||||
if _, err := os.Stat(path); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return os.MkdirAll(path, os.FileMode(0755))
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func kubeconfigCWDPath() (string, error) {
|
||||
cwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get current working directory: %v", err)
|
||||
}
|
||||
return filepath.Join(cwd, "kubeconfig"), nil
|
||||
}
|
||||
|
||||
// like createKubeconfig, but creates kubeconfig at current-working-directory/kubeconfig
|
||||
// returns a fully-qualified path to the kubeconfig file
|
||||
func createKubeconfigCWD() (string, error) {
|
||||
kubeconfigPath, err := kubeconfigCWDPath()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if err = createKubeconfig(kubeconfigPath); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return kubeconfigPath, nil
|
||||
}
|
||||
|
||||
// getCNIBinDirectory returns CNI directory.
|
||||
func getCNIBinDirectory() (string, error) {
|
||||
cwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return filepath.Join(cwd, "cni", "bin"), nil
|
||||
}
|
||||
|
||||
// getCNIConfDirectory returns CNI Configuration directory.
|
||||
func getCNIConfDirectory() (string, error) {
|
||||
cwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return filepath.Join(cwd, "cni", "net.d"), nil
|
||||
}
|
||||
|
||||
// getDynamicConfigDir returns the directory for dynamic Kubelet configuration
|
||||
func getDynamicConfigDir() (string, error) {
|
||||
cwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return filepath.Join(cwd, "dynamic-kubelet-config"), nil
|
||||
}
|
||||
|
||||
// adjustArgsForSystemd escapes special characters in kubelet arguments for systemd. Systemd
// may try to auto-expand them if they are not escaped.
|
||||
func adjustArgsForSystemd(args []string) {
|
||||
for i := range args {
|
||||
args[i] = strings.Replace(args[i], "%", "%%", -1)
|
||||
args[i] = strings.Replace(args[i], "$", "$$", -1)
|
||||
}
|
||||
}
|
100
vendor/k8s.io/kubernetes/test/e2e_node/services/logs.go
generated
vendored
Normal file
@ -0,0 +1,100 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package services

import (
	"encoding/json"
	"flag"
	"fmt"
)

// LogFileData holds data about logfiles to fetch with a journalctl command or
// file from a node's file system.
type LogFileData struct {
	// Name of the log file.
	Name string `json:"name"`
	// Files are possible absolute paths of the log file.
	Files []string `json:"files"`
	// JournalctlCommand is the journalctl command to get log.
	JournalctlCommand []string `json:"journalctl"`
}

// logFiles is the type used to collect all log files. The key is the expected
// name of the log file after it is collected.
type logFiles map[string]LogFileData

// String function of flag.Value
func (l *logFiles) String() string {
	return fmt.Sprint(*l)
}

// Set function of flag.Value
func (l *logFiles) Set(value string) error {
	// Someone else is calling flag.Parse after the flags are parsed in the
	// test framework. Use this to avoid the flag being parsed twice.
	// TODO(random-liu): Figure out who is parsing the flags.
	if flag.Parsed() {
		return nil
	}
	var log LogFileData
	if err := json.Unmarshal([]byte(value), &log); err != nil {
		return err
	}
	logs := *l
	logs[log.Name] = log
	return nil
}

// extraLogs holds the extra logs specified by the test runner.
var extraLogs = make(logFiles)

func init() {
	flag.Var(&extraLogs, "extra-log", "Extra log to collect after the test, in the JSON format of LogFileData.")
}

// requiredLogs lists the required logs to collect after the test.
var requiredLogs = []LogFileData{
	{
		Name:              "kern.log",
		Files:             []string{"/var/log/kern.log"},
		JournalctlCommand: []string{"-k"},
	},
	{
		Name:              "cloud-init.log",
		Files:             []string{"/var/log/cloud-init.log"},
		JournalctlCommand: []string{"-u", "cloud*"},
	},
	// TODO(random-liu): Make docker.log non-required.
	{
		Name:              "docker.log",
		Files:             []string{"/var/log/docker.log", "/var/log/upstart/docker.log"},
		JournalctlCommand: []string{"-u", "docker"},
	},
}

// getLogFiles gets all logs to collect after the test.
func getLogFiles() logFiles {
	logs := make(logFiles)
	for _, l := range requiredLogs {
		logs[l.Name] = l
	}
	for _, l := range extraLogs {
		logs[l.Name] = l
	}
	return logs
}
83
vendor/k8s.io/kubernetes/test/e2e_node/services/namespace_controller.go
generated
vendored
Normal file
@ -0,0 +1,83 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package services

import (
	"time"

	"k8s.io/api/core/v1"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/informers"
	clientset "k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	namespacecontroller "k8s.io/kubernetes/pkg/controller/namespace"
)

const (
	// ncName is the name of namespace controller
	ncName = "namespace-controller"
	// ncResyncPeriod is resync period of the namespace controller
	ncResyncPeriod = 5 * time.Minute
	// ncConcurrency is concurrency of the namespace controller
	ncConcurrency = 2
)

// NamespaceController is a server which manages namespace controller.
type NamespaceController struct {
	host   string
	stopCh chan struct{}
}

// NewNamespaceController creates a new namespace controller.
func NewNamespaceController(host string) *NamespaceController {
	return &NamespaceController{host: host, stopCh: make(chan struct{})}
}

// Start starts the namespace controller.
func (n *NamespaceController) Start() error {
	// Use the default QPS
	config := restclient.AddUserAgent(&restclient.Config{Host: n.host}, ncName)
	client, err := clientset.NewForConfig(config)
	if err != nil {
		return err
	}
	clientPool := dynamic.NewClientPool(config, legacyscheme.Registry.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
	discoverResourcesFn := client.Discovery().ServerPreferredNamespacedResources
	informerFactory := informers.NewSharedInformerFactory(client, ncResyncPeriod)
	nc := namespacecontroller.NewNamespaceController(
		client,
		clientPool,
		discoverResourcesFn,
		informerFactory.Core().V1().Namespaces(),
		ncResyncPeriod, v1.FinalizerKubernetes,
	)
	informerFactory.Start(n.stopCh)
	go nc.Run(ncConcurrency, n.stopCh)
	return nil
}

// Stop stops the namespace controller.
func (n *NamespaceController) Stop() error {
	close(n.stopCh)
	return nil
}

// Name returns the name of namespace controller.
func (n *NamespaceController) Name() string {
	return ncName
}
361
vendor/k8s.io/kubernetes/test/e2e_node/services/server.go
generated
vendored
Normal file
@ -0,0 +1,361 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package services
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
)
|
||||
|
||||
var serverStartTimeout = flag.Duration("server-start-timeout", time.Second*120, "Time to wait for each server to become healthy.")
|
||||
|
||||
// A server manages a separate server process started and killed with
|
||||
// commands.
|
||||
type server struct {
|
||||
// name is the name of the server, it is only used for logging.
|
||||
name string
|
||||
// startCommand is the command used to start the server
|
||||
startCommand *exec.Cmd
|
||||
// killCommand is the command used to stop the server. It is not required. If it
|
||||
// is not specified, `kill` will be used to stop the server.
|
||||
killCommand *exec.Cmd
|
||||
// restartCommand is the command used to restart the server. If provided, it will be used
|
||||
// instead of startCommand when restarting the server.
|
||||
restartCommand *exec.Cmd
|
||||
// healthCheckUrls is the urls used to check whether the server is ready.
|
||||
healthCheckUrls []string
|
||||
// outFilename is the name of the log file. The stdout and stderr of the server
|
||||
// will be redirected to this file.
|
||||
outFilename string
|
||||
// monitorParent determines whether the server should watch its parent process and exit
|
||||
// if its parent is gone.
|
||||
monitorParent bool
|
||||
// restartOnExit determines whether a restart loop is launched with the server
|
||||
restartOnExit bool
|
||||
// Writing to this channel, if it is not nil, stops the restart loop.
|
||||
// When tearing down a server, you should check for this channel and write to it if it exists.
|
||||
stopRestartingCh chan<- bool
|
||||
// Read from this to confirm that the restart loop has stopped.
|
||||
ackStopRestartingCh <-chan bool
|
||||
}
|
||||
|
||||
// newServer returns a new server with the given name, commands, health check
|
||||
// URLs, etc.
|
||||
func newServer(name string, start, kill, restart *exec.Cmd, urls []string, outputFileName string, monitorParent, restartOnExit bool) *server {
|
||||
return &server{
|
||||
name: name,
|
||||
startCommand: start,
|
||||
killCommand: kill,
|
||||
restartCommand: restart,
|
||||
healthCheckUrls: urls,
|
||||
outFilename: outputFileName,
|
||||
monitorParent: monitorParent,
|
||||
restartOnExit: restartOnExit,
|
||||
}
|
||||
}
|
||||
|
||||
// commandToString formats a command as a string.
|
||||
func commandToString(c *exec.Cmd) string {
|
||||
if c == nil {
|
||||
return ""
|
||||
}
|
||||
return strings.Join(append([]string{c.Path}, c.Args[1:]...), " ")
|
||||
}
|
||||
|
||||
func (s *server) String() string {
|
||||
return fmt.Sprintf("server %q start-command: `%s`, kill-command: `%s`, restart-command: `%s`, health-check: %v, output-file: %q", s.name,
|
||||
commandToString(s.startCommand), commandToString(s.killCommand), commandToString(s.restartCommand), s.healthCheckUrls, s.outFilename)
|
||||
}
|
||||
|
||||
// readinessCheck checks whether services are ready via the supplied health
|
||||
// check URLs. Once there is an error in errCh, the function will stop waiting
|
||||
// and return the error.
|
||||
// TODO(random-liu): Move this to util
|
||||
func readinessCheck(name string, urls []string, errCh <-chan error) error {
|
||||
glog.Infof("Running readiness check for service %q", name)
|
||||
endTime := time.Now().Add(*serverStartTimeout)
|
||||
blockCh := make(chan error)
|
||||
defer close(blockCh)
|
||||
for endTime.After(time.Now()) {
|
||||
select {
|
||||
// We *always* want to run the health check if there is no error on the channel.
|
||||
// With systemd, reads from errCh report nil because cmd.Run() waits
|
||||
// on systemd-run, rather than the service process. systemd-run quickly
|
||||
// exits with status 0, causing the channel to be closed with no error. In
|
||||
// this case, you want to wait for the health check to complete, rather
|
||||
// than returning from readinessCheck as soon as the channel is closed.
|
||||
case err, ok := <-errCh:
|
||||
if ok { // The channel is not closed, this is a real error
|
||||
if err != nil { // If there is an error, return it
|
||||
return err
|
||||
}
|
||||
// If not, keep checking readiness.
|
||||
} else { // The channel is closed, this is only a zero value.
|
||||
// Replace the errCh with blockCh to avoid busy loop,
|
||||
// and keep checking readiness.
|
||||
errCh = blockCh
|
||||
}
|
||||
case <-time.After(time.Second):
|
||||
ready := true
|
||||
for _, url := range urls {
|
||||
resp, err := http.Head(url)
|
||||
if err != nil || resp.StatusCode != http.StatusOK {
|
||||
ready = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if ready {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("e2e service %q readiness check timeout %v", name, *serverStartTimeout)
|
||||
}
|
||||
|
||||
// start starts the server by running its commands, monitors it with a health
|
||||
// check, and ensures that it is restarted if applicable.
|
||||
//
|
||||
// Note: restartOnExit == true requires len(s.healthCheckUrls) > 0 to work properly.
|
||||
func (s *server) start() error {
|
||||
glog.Infof("Starting server %q with command %q", s.name, commandToString(s.startCommand))
|
||||
errCh := make(chan error)
|
||||
|
||||
// Set up restart channels if the server is configured for restart on exit.
|
||||
var stopRestartingCh, ackStopRestartingCh chan bool
|
||||
if s.restartOnExit {
|
||||
if len(s.healthCheckUrls) == 0 {
|
||||
return fmt.Errorf("Tried to start %s which has s.restartOnExit == true, but no health check urls provided.", s)
|
||||
}
|
||||
|
||||
stopRestartingCh = make(chan bool)
|
||||
ackStopRestartingCh = make(chan bool)
|
||||
|
||||
s.stopRestartingCh = stopRestartingCh
|
||||
s.ackStopRestartingCh = ackStopRestartingCh
|
||||
}
|
||||
|
||||
// This goroutine actually runs the start command for the server.
|
||||
go func() {
|
||||
defer close(errCh)
|
||||
|
||||
// Create the output filename
|
||||
outPath := path.Join(framework.TestContext.ReportDir, s.outFilename)
|
||||
outfile, err := os.Create(outPath)
|
||||
if err != nil {
|
||||
errCh <- fmt.Errorf("failed to create file %q for `%s` %v.", outPath, s, err)
|
||||
return
|
||||
} else {
|
||||
glog.Infof("Output file for server %q: %v", s.name, outfile.Name())
|
||||
}
|
||||
defer outfile.Close()
|
||||
defer outfile.Sync()
|
||||
|
||||
// Set the command to write the output file
|
||||
s.startCommand.Stdout = outfile
|
||||
s.startCommand.Stderr = outfile
|
||||
|
||||
// If monitorParent is set, set Pdeathsig when starting the server.
|
||||
if s.monitorParent {
|
||||
// Death of this test process should kill the server as well.
|
||||
attrs := &syscall.SysProcAttr{}
|
||||
// Hack to set linux-only field without build tags.
|
||||
deathSigField := reflect.ValueOf(attrs).Elem().FieldByName("Pdeathsig")
|
||||
if deathSigField.IsValid() {
|
||||
deathSigField.Set(reflect.ValueOf(syscall.SIGTERM))
|
||||
} else {
|
||||
errCh <- fmt.Errorf("failed to set Pdeathsig field (non-linux build)")
|
||||
return
|
||||
}
|
||||
s.startCommand.SysProcAttr = attrs
|
||||
}
|
||||
|
||||
// Start the command
|
||||
err = s.startCommand.Start()
|
||||
if err != nil {
|
||||
errCh <- fmt.Errorf("failed to run %s: %v", s, err)
|
||||
return
|
||||
}
|
||||
if !s.restartOnExit {
|
||||
glog.Infof("Waiting for server %q start command to complete", s.name)
|
||||
// If we aren't planning on restarting, ok to Wait() here to release resources.
|
||||
// Otherwise, we Wait() in the restart loop.
|
||||
err = s.startCommand.Wait()
|
||||
if err != nil {
|
||||
errCh <- fmt.Errorf("failed to run start command for server %q: %v", s.name, err)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
usedStartCmd := true
|
||||
for {
|
||||
glog.Infof("Running health check for service %q", s.name)
|
||||
// Wait for an initial health check to pass, so that we are sure the server started.
|
||||
err := readinessCheck(s.name, s.healthCheckUrls, nil)
|
||||
if err != nil {
|
||||
if usedStartCmd {
|
||||
glog.Infof("Waiting for server %q start command to complete after initial health check failed", s.name)
|
||||
s.startCommand.Wait() // Release resources if necessary.
|
||||
}
|
||||
// This should not happen, immediately stop the e2eService process.
|
||||
glog.Fatalf("Restart loop readinessCheck failed for %s", s)
|
||||
} else {
|
||||
glog.Infof("Initial health check passed for service %q", s.name)
|
||||
}
|
||||
|
||||
// Initial health check passed, wait until a health check fails again.
|
||||
stillAlive:
|
||||
for {
|
||||
select {
|
||||
case <-stopRestartingCh:
|
||||
ackStopRestartingCh <- true
|
||||
return
|
||||
case <-time.After(time.Second):
|
||||
for _, url := range s.healthCheckUrls {
|
||||
resp, err := http.Head(url)
|
||||
if err != nil || resp.StatusCode != http.StatusOK {
|
||||
break stillAlive
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if usedStartCmd {
|
||||
s.startCommand.Wait() // Release resources from last cmd
|
||||
usedStartCmd = false
|
||||
}
|
||||
if s.restartCommand != nil {
|
||||
// Always make a fresh copy of restartCommand before
|
||||
// running, we may have to restart multiple times
|
||||
s.restartCommand = &exec.Cmd{
|
||||
Path: s.restartCommand.Path,
|
||||
Args: s.restartCommand.Args,
|
||||
Env: s.restartCommand.Env,
|
||||
Dir: s.restartCommand.Dir,
|
||||
Stdin: s.restartCommand.Stdin,
|
||||
Stdout: s.restartCommand.Stdout,
|
||||
Stderr: s.restartCommand.Stderr,
|
||||
ExtraFiles: s.restartCommand.ExtraFiles,
|
||||
SysProcAttr: s.restartCommand.SysProcAttr,
|
||||
}
|
||||
// Run and wait for exit. This command is assumed to have
|
||||
// short duration, e.g. systemctl restart
|
||||
glog.Infof("Restarting server %q with restart command", s.name)
|
||||
err = s.restartCommand.Run()
|
||||
if err != nil {
|
||||
// This should not happen, immediately stop the e2eService process.
|
||||
glog.Fatalf("Restarting server %s with restartCommand failed. Error: %v.", s, err)
|
||||
}
|
||||
} else {
|
||||
s.startCommand = &exec.Cmd{
|
||||
Path: s.startCommand.Path,
|
||||
Args: s.startCommand.Args,
|
||||
Env: s.startCommand.Env,
|
||||
Dir: s.startCommand.Dir,
|
||||
Stdin: s.startCommand.Stdin,
|
||||
Stdout: s.startCommand.Stdout,
|
||||
Stderr: s.startCommand.Stderr,
|
||||
ExtraFiles: s.startCommand.ExtraFiles,
|
||||
SysProcAttr: s.startCommand.SysProcAttr,
|
||||
}
|
||||
glog.Infof("Restarting server %q with start command", s.name)
|
||||
err = s.startCommand.Start()
|
||||
usedStartCmd = true
|
||||
if err != nil {
|
||||
// This should not happen, immediately stop the e2eService process.
|
||||
glog.Fatalf("Restarting server %s with startCommand failed. Error: %v.", s, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return readinessCheck(s.name, s.healthCheckUrls, errCh)
|
||||
}
|
||||
|
||||
// kill runs the server's kill command.
|
||||
func (s *server) kill() error {
|
||||
glog.Infof("Kill server %q", s.name)
|
||||
name := s.name
|
||||
cmd := s.startCommand
|
||||
|
||||
// If s has a restart loop, turn it off.
|
||||
if s.restartOnExit {
|
||||
s.stopRestartingCh <- true
|
||||
<-s.ackStopRestartingCh
|
||||
}
|
||||
|
||||
if s.killCommand != nil {
|
||||
return s.killCommand.Run()
|
||||
}
|
||||
|
||||
if cmd == nil {
|
||||
return fmt.Errorf("could not kill %q because both `killCommand` and `startCommand` are nil", name)
|
||||
}
|
||||
|
||||
if cmd.Process == nil {
|
||||
glog.V(2).Infof("%q not running", name)
|
||||
return nil
|
||||
}
|
||||
pid := cmd.Process.Pid
|
||||
if pid <= 1 {
|
||||
return fmt.Errorf("invalid PID %d for %q", pid, name)
|
||||
}
|
||||
|
||||
// Attempt to shut down the process in a friendly manner before forcing it.
|
||||
waitChan := make(chan error)
|
||||
go func() {
|
||||
_, err := cmd.Process.Wait()
|
||||
waitChan <- err
|
||||
close(waitChan)
|
||||
}()
|
||||
|
||||
const timeout = 10 * time.Second
|
||||
for _, signal := range []string{"-TERM", "-KILL"} {
|
||||
glog.V(2).Infof("Killing process %d (%s) with %s", pid, name, signal)
|
||||
cmd := exec.Command("kill", signal, strconv.Itoa(pid))
|
||||
_, err := cmd.Output()
|
||||
if err != nil {
|
||||
glog.Errorf("Error signaling process %d (%s) with %s: %v", pid, name, signal, err)
|
||||
continue
|
||||
}
|
||||
|
||||
select {
|
||||
case err := <-waitChan:
|
||||
if err != nil {
|
||||
return fmt.Errorf("error stopping %q: %v", name, err)
|
||||
}
|
||||
// Success!
|
||||
return nil
|
||||
case <-time.After(timeout):
|
||||
// Continue.
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("unable to stop %q", name)
|
||||
}
|
195
vendor/k8s.io/kubernetes/test/e2e_node/services/services.go
generated
vendored
Normal file
@ -0,0 +1,195 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package services
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/kardianos/osext"
|
||||
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
)
|
||||
|
||||
// E2EServices starts and stops e2e services in a separate process. The test
|
||||
// uses it to start and stop all e2e services.
|
||||
type E2EServices struct {
|
||||
	rmDirs []string
	// monitorParent determines whether the sub-processes should watch and die with the current
	// process.
	monitorParent bool
	services      *server
	kubelet       *server
	logs          logFiles
}
|
||||
|
||||
// NewE2EServices returns a new E2EServices instance.
|
||||
func NewE2EServices(monitorParent bool) *E2EServices {
|
||||
return &E2EServices{
|
||||
monitorParent: monitorParent,
|
||||
// Special log files that need to be collected for additional debugging.
|
||||
logs: getLogFiles(),
|
||||
}
|
||||
}
|
||||
|
||||
// Start starts the e2e services in another process by calling back into the
|
||||
// test binary. Returns when all e2e services are ready or an error.
|
||||
//
|
||||
// We want to statically link e2e services into the test binary, but we don't
|
||||
// want their glog output to pollute the test result. So we run the binary in
|
||||
// run-services-mode to start e2e services in another process.
|
||||
// The function starts 2 processes:
|
||||
// * internal e2e services: services which statically linked in the test binary - apiserver, etcd and
|
||||
// namespace controller.
|
||||
// * kubelet: kubelet binary is outside. (We plan to move main kubelet start logic out when we have
|
||||
// standard kubelet launcher)
|
||||
func (e *E2EServices) Start() error {
|
||||
var err error
|
||||
if !framework.TestContext.NodeConformance {
|
||||
// Start kubelet
|
||||
e.kubelet, err = e.startKubelet()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to start kubelet: %v", err)
|
||||
}
|
||||
}
|
||||
e.services, err = e.startInternalServices()
|
||||
return err
|
||||
}
|
||||
|
||||
// Stop stops the e2e services.
|
||||
func (e *E2EServices) Stop() {
|
||||
defer func() {
|
||||
if !framework.TestContext.NodeConformance {
|
||||
// Collect log files.
|
||||
e.collectLogFiles()
|
||||
}
|
||||
}()
|
||||
if e.services != nil {
|
||||
if err := e.services.kill(); err != nil {
|
||||
glog.Errorf("Failed to stop services: %v", err)
|
||||
}
|
||||
}
|
||||
if e.kubelet != nil {
|
||||
if err := e.kubelet.kill(); err != nil {
|
||||
glog.Errorf("Failed to stop kubelet: %v", err)
|
||||
}
|
||||
}
|
||||
if e.rmDirs != nil {
|
||||
for _, d := range e.rmDirs {
|
||||
err := os.RemoveAll(d)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to delete directory %s: %v", d, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// RunE2EServices actually starts the e2e services. This function is used to
// start e2e services in the current process. This is only used in run-services-mode.
|
||||
func RunE2EServices() {
|
||||
// Populate global DefaultFeatureGate with value from TestContext.FeatureGates.
|
||||
// This way, statically-linked components see the same feature gate config as the test context.
|
||||
utilfeature.DefaultFeatureGate.Set(framework.TestContext.FeatureGates)
|
||||
e := newE2EServices()
|
||||
if err := e.run(); err != nil {
|
||||
glog.Fatalf("Failed to run e2e services: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
// services.log is the combined log of all services
|
||||
servicesLogFile = "services.log"
|
||||
// LOG_VERBOSITY_LEVEL is consistent with the level used in a cluster e2e test.
|
||||
LOG_VERBOSITY_LEVEL = "4"
|
||||
)
|
||||
|
||||
// startInternalServices starts the internal services in a separate process.
|
||||
func (e *E2EServices) startInternalServices() (*server, error) {
|
||||
testBin, err := osext.Executable()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can't get current binary: %v", err)
|
||||
}
|
||||
// Pass all flags into the child process, so that it will see the same flag set.
|
||||
startCmd := exec.Command(testBin, append([]string{"--run-services-mode"}, os.Args[1:]...)...)
|
||||
server := newServer("services", startCmd, nil, nil, getServicesHealthCheckURLs(), servicesLogFile, e.monitorParent, false)
|
||||
return server, server.start()
|
||||
}
|
||||
|
||||
// collectLogFiles collects logs of interest either via journalctl or by creating
// symlinks. Since we scp files from the remote directory, symlinks will be
// treated as normal files and file contents will be copied over.
|
||||
func (e *E2EServices) collectLogFiles() {
|
||||
// Nothing to do if report dir is not specified.
|
||||
if framework.TestContext.ReportDir == "" {
|
||||
return
|
||||
}
|
||||
glog.Info("Fetching log files...")
|
||||
journaldFound := isJournaldAvailable()
|
||||
for targetFileName, log := range e.logs {
|
||||
targetLink := path.Join(framework.TestContext.ReportDir, targetFileName)
|
||||
if journaldFound {
|
||||
// Skip log files that do not have an equivalent in journald-based machines.
|
||||
if len(log.JournalctlCommand) == 0 {
|
||||
continue
|
||||
}
|
||||
glog.Infof("Get log file %q with journalctl command %v.", targetFileName, log.JournalctlCommand)
|
||||
out, err := exec.Command("journalctl", log.JournalctlCommand...).CombinedOutput()
|
||||
if err != nil {
|
||||
glog.Errorf("failed to get %q from journald: %v, %v", targetFileName, string(out), err)
|
||||
} else {
|
||||
if err = ioutil.WriteFile(targetLink, out, 0644); err != nil {
|
||||
glog.Errorf("failed to write logs to %q: %v", targetLink, err)
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
for _, file := range log.Files {
|
||||
if _, err := os.Stat(file); err != nil {
|
||||
// Expected file not found on this distro.
|
||||
continue
|
||||
}
|
||||
if err := copyLogFile(file, targetLink); err != nil {
|
||||
glog.Error(err)
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// isJournaldAvailable returns whether the system executing the tests uses
|
||||
// journald.
|
||||
func isJournaldAvailable() bool {
|
||||
_, err := exec.LookPath("journalctl")
|
||||
return err == nil
|
||||
}
|
||||
|
||||
func copyLogFile(src, target string) error {
|
||||
// If not a journald based distro, then just symlink files.
|
||||
if out, err := exec.Command("cp", src, target).CombinedOutput(); err != nil {
|
||||
return fmt.Errorf("failed to copy %q to %q: %v, %v", src, target, out, err)
|
||||
}
|
||||
if out, err := exec.Command("chmod", "a+r", target).CombinedOutput(); err != nil {
|
||||
return fmt.Errorf("failed to make log file %q world readable: %v, %v", target, out, err)
|
||||
}
|
||||
return nil
|
||||
}
|
34
vendor/k8s.io/kubernetes/test/e2e_node/services/util.go
generated
vendored
Normal file
@ -0,0 +1,34 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package services

import (
	"os"
	"os/signal"
	"syscall"
)

// terminationSignals are signals that cause the program to exit in the
// supported platforms (linux, darwin, windows).
var terminationSignals = []os.Signal{syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT}

// waitForTerminationSignal waits for termination signal.
func waitForTerminationSignal() {
	sig := make(chan os.Signal, 1)
	signal.Notify(sig, terminationSignals...)
	<-sig
}
70
vendor/k8s.io/kubernetes/test/e2e_node/simple_mount.go
generated
vendored
Normal file
@ -0,0 +1,70 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e_node

import (
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/ginkgo"
)

var _ = framework.KubeDescribe("SimpleMount", func() {
	f := framework.NewDefaultFramework("simple-mount-test")

	// This is a very simple test that exercises the Kubelet's mounter code path.
	// If the mount fails, the pod will not be able to run, and CreateSync will timeout.
	It("should be able to mount an emptydir on a container", func() {
		pod := &v1.Pod{
			TypeMeta: metav1.TypeMeta{
				Kind:       "Pod",
				APIVersion: "v1",
			},
			ObjectMeta: metav1.ObjectMeta{
				Name: "simple-mount-pod",
			},
			Spec: v1.PodSpec{
				Containers: []v1.Container{
					{
						Name:  "simple-mount-container",
						Image: framework.GetPauseImageNameForHostArch(),
						VolumeMounts: []v1.VolumeMount{
							{
								Name:      "simply-mounted-volume",
								MountPath: "/opt/",
							},
						},
					},
				},
				Volumes: []v1.Volume{
					{
						Name: "simply-mounted-volume",
						VolumeSource: v1.VolumeSource{
							EmptyDir: &v1.EmptyDirVolumeSource{
								Medium: "Memory",
							},
						},
					},
				},
			},
		}
		podClient := f.PodClient()
		pod = podClient.CreateSync(pod)
	})
})
404
vendor/k8s.io/kubernetes/test/e2e_node/summary_test.go
generated
vendored
Normal file
@ -0,0 +1,404 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
systemdutil "github.com/coreos/go-systemd/util"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"github.com/onsi/gomega/gstruct"
|
||||
"github.com/onsi/gomega/types"
|
||||
)
|
||||
|
||||
var _ = framework.KubeDescribe("Summary API", func() {
|
||||
f := framework.NewDefaultFramework("summary-test")
|
||||
Context("when querying /stats/summary", func() {
|
||||
AfterEach(func() {
|
||||
if !CurrentGinkgoTestDescription().Failed {
|
||||
return
|
||||
}
|
||||
if framework.TestContext.DumpLogsOnFailure {
|
||||
framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf)
|
||||
}
|
||||
By("Recording processes in system cgroups")
|
||||
recordSystemCgroupProcesses()
|
||||
})
|
||||
It("should report resource usage through the stats api", func() {
|
||||
const pod0 = "stats-busybox-0"
|
||||
const pod1 = "stats-busybox-1"
|
||||
|
||||
By("Creating test pods")
|
||||
numRestarts := int32(1)
|
||||
pods := getSummaryTestPods(f, numRestarts, pod0, pod1)
|
||||
f.PodClient().CreateBatch(pods)
|
||||
|
||||
Eventually(func() error {
|
||||
for _, pod := range pods {
|
||||
err := verifyPodRestartCount(f, pod.Name, len(pod.Spec.Containers), numRestarts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}, time.Minute, 5*time.Second).Should(BeNil())
|
||||
|
||||
// Wait for cAdvisor to collect 2 stats points
|
||||
time.Sleep(15 * time.Second)
|
||||
|
||||
// Setup expectations.
|
||||
const (
|
||||
maxStartAge = time.Hour * 24 * 365 // 1 year
|
||||
maxStatsAge = time.Minute
|
||||
)
|
||||
fsCapacityBounds := bounded(100*framework.Mb, 100*framework.Gb)
|
||||
// Expectations for system containers.
|
||||
sysContExpectations := func() types.GomegaMatcher {
|
||||
return gstruct.MatchAllFields(gstruct.Fields{
|
||||
"Name": gstruct.Ignore(),
|
||||
"StartTime": recent(maxStartAge),
|
||||
"CPU": ptrMatchAllFields(gstruct.Fields{
|
||||
"Time": recent(maxStatsAge),
|
||||
"UsageNanoCores": bounded(10000, 2E9),
|
||||
"UsageCoreNanoSeconds": bounded(10000000, 1E15),
|
||||
}),
|
||||
"Memory": ptrMatchAllFields(gstruct.Fields{
|
||||
"Time": recent(maxStatsAge),
|
||||
// We don't limit system container memory.
|
||||
"AvailableBytes": BeNil(),
|
||||
"UsageBytes": bounded(1*framework.Mb, 10*framework.Gb),
|
||||
"WorkingSetBytes": bounded(1*framework.Mb, 10*framework.Gb),
|
||||
// this now returns /sys/fs/cgroup/memory.stat total_rss
|
||||
"RSSBytes": bounded(1*framework.Mb, 1*framework.Gb),
|
||||
"PageFaults": bounded(1000, 1E9),
|
||||
"MajorPageFaults": bounded(0, 100000),
|
||||
}),
|
||||
"Accelerators": BeEmpty(),
|
||||
"Rootfs": BeNil(),
|
||||
"Logs": BeNil(),
|
||||
"UserDefinedMetrics": BeEmpty(),
|
||||
})
|
||||
}
|
||||
systemContainers := gstruct.Elements{
|
||||
"kubelet": sysContExpectations(),
|
||||
"runtime": sysContExpectations(),
|
||||
}
|
||||
// The Kubelet only manages the 'misc' system container if the host is not running systemd.
|
||||
if !systemdutil.IsRunningSystemd() {
|
||||
framework.Logf("Host not running systemd; expecting 'misc' system container.")
|
||||
miscContExpectations := sysContExpectations().(*gstruct.FieldsMatcher)
|
||||
// Misc processes are system-dependent, so relax the memory constraints.
|
||||
miscContExpectations.Fields["Memory"] = ptrMatchAllFields(gstruct.Fields{
|
||||
"Time": recent(maxStatsAge),
|
||||
// We don't limit system container memory.
|
||||
"AvailableBytes": BeNil(),
|
||||
"UsageBytes": bounded(100*framework.Kb, 10*framework.Gb),
|
||||
"WorkingSetBytes": bounded(100*framework.Kb, 10*framework.Gb),
|
||||
"RSSBytes": bounded(100*framework.Kb, 1*framework.Gb),
|
||||
"PageFaults": bounded(1000, 1E9),
|
||||
"MajorPageFaults": bounded(0, 100000),
|
||||
})
|
||||
systemContainers["misc"] = miscContExpectations
|
||||
}
|
||||
// Expectations for pods.
|
||||
podExpectations := gstruct.MatchAllFields(gstruct.Fields{
|
||||
"PodRef": gstruct.Ignore(),
|
||||
"StartTime": recent(maxStartAge),
|
||||
"Containers": gstruct.MatchAllElements(summaryObjectID, gstruct.Elements{
|
||||
"busybox-container": gstruct.MatchAllFields(gstruct.Fields{
|
||||
"Name": Equal("busybox-container"),
|
||||
"StartTime": recent(maxStartAge),
|
||||
"CPU": ptrMatchAllFields(gstruct.Fields{
|
||||
"Time": recent(maxStatsAge),
|
||||
"UsageNanoCores": bounded(100000, 1E9),
|
||||
"UsageCoreNanoSeconds": bounded(10000000, 1E11),
|
||||
}),
|
||||
"Memory": ptrMatchAllFields(gstruct.Fields{
|
||||
"Time": recent(maxStatsAge),
|
||||
"AvailableBytes": bounded(1*framework.Kb, 10*framework.Mb),
|
||||
"UsageBytes": bounded(10*framework.Kb, 20*framework.Mb),
|
||||
"WorkingSetBytes": bounded(10*framework.Kb, 20*framework.Mb),
|
||||
"RSSBytes": bounded(1*framework.Kb, framework.Mb),
|
||||
"PageFaults": bounded(100, 1000000),
|
||||
"MajorPageFaults": bounded(0, 10),
|
||||
}),
|
||||
"Accelerators": BeEmpty(),
|
||||
"Rootfs": ptrMatchAllFields(gstruct.Fields{
|
||||
"Time": recent(maxStatsAge),
|
||||
"AvailableBytes": fsCapacityBounds,
|
||||
"CapacityBytes": fsCapacityBounds,
|
||||
"UsedBytes": bounded(framework.Kb, 10*framework.Mb),
|
||||
"InodesFree": bounded(1E4, 1E8),
|
||||
"Inodes": bounded(1E4, 1E8),
|
||||
"InodesUsed": bounded(0, 1E8),
|
||||
}),
|
||||
"Logs": ptrMatchAllFields(gstruct.Fields{
|
||||
"Time": recent(maxStatsAge),
|
||||
"AvailableBytes": fsCapacityBounds,
|
||||
"CapacityBytes": fsCapacityBounds,
|
||||
"UsedBytes": bounded(framework.Kb, 10*framework.Mb),
|
||||
"InodesFree": bounded(1E4, 1E8),
|
||||
"Inodes": bounded(1E4, 1E8),
|
||||
"InodesUsed": bounded(0, 1E8),
|
||||
}),
|
||||
"UserDefinedMetrics": BeEmpty(),
|
||||
}),
|
||||
}),
|
||||
"Network": ptrMatchAllFields(gstruct.Fields{
|
||||
"Time": recent(maxStatsAge),
|
||||
"InterfaceStats": gstruct.MatchAllFields(gstruct.Fields{
|
||||
"Name": Equal("eth0"),
|
||||
"RxBytes": bounded(10, 10*framework.Mb),
|
||||
"RxErrors": bounded(0, 1000),
|
||||
"TxBytes": bounded(10, 10*framework.Mb),
|
||||
"TxErrors": bounded(0, 1000),
|
||||
}),
|
||||
"Interfaces": Not(BeNil()),
|
||||
}),
|
||||
"CPU": ptrMatchAllFields(gstruct.Fields{
|
||||
"Time": recent(maxStatsAge),
|
||||
"UsageNanoCores": bounded(100000, 1E9),
|
||||
"UsageCoreNanoSeconds": bounded(10000000, 1E11),
|
||||
}),
|
||||
"Memory": ptrMatchAllFields(gstruct.Fields{
|
||||
"Time": recent(maxStatsAge),
|
||||
"AvailableBytes": bounded(1*framework.Kb, 10*framework.Mb),
|
||||
"UsageBytes": bounded(10*framework.Kb, 20*framework.Mb),
|
||||
"WorkingSetBytes": bounded(10*framework.Kb, 20*framework.Mb),
|
||||
"RSSBytes": bounded(1*framework.Kb, framework.Mb),
|
||||
"PageFaults": bounded(0, 1000000),
|
||||
"MajorPageFaults": bounded(0, 10),
|
||||
}),
|
||||
"VolumeStats": gstruct.MatchAllElements(summaryObjectID, gstruct.Elements{
|
||||
"test-empty-dir": gstruct.MatchAllFields(gstruct.Fields{
|
||||
"Name": Equal("test-empty-dir"),
|
||||
"PVCRef": BeNil(),
|
||||
"FsStats": gstruct.MatchAllFields(gstruct.Fields{
|
||||
"Time": recent(maxStatsAge),
|
||||
"AvailableBytes": fsCapacityBounds,
|
||||
"CapacityBytes": fsCapacityBounds,
|
||||
"UsedBytes": bounded(framework.Kb, 1*framework.Mb),
|
||||
"InodesFree": bounded(1E4, 1E8),
|
||||
"Inodes": bounded(1E4, 1E8),
|
||||
"InodesUsed": bounded(0, 1E8),
|
||||
}),
|
||||
}),
|
||||
}),
|
||||
"EphemeralStorage": ptrMatchAllFields(gstruct.Fields{
|
||||
"Time": recent(maxStatsAge),
|
||||
"AvailableBytes": fsCapacityBounds,
|
||||
"CapacityBytes": fsCapacityBounds,
|
||||
"UsedBytes": bounded(framework.Kb, 21*framework.Mb),
|
||||
"InodesFree": bounded(1E4, 1E8),
|
||||
"Inodes": bounded(1E4, 1E8),
|
||||
"InodesUsed": bounded(0, 1E8),
|
||||
}),
|
||||
})
|
||||
|
||||
matchExpectations := ptrMatchAllFields(gstruct.Fields{
|
||||
"Node": gstruct.MatchAllFields(gstruct.Fields{
|
||||
"NodeName": Equal(framework.TestContext.NodeName),
|
||||
"StartTime": recent(maxStartAge),
|
||||
"SystemContainers": gstruct.MatchAllElements(summaryObjectID, systemContainers),
|
||||
"CPU": ptrMatchAllFields(gstruct.Fields{
|
||||
"Time": recent(maxStatsAge),
|
||||
"UsageNanoCores": bounded(100E3, 2E9),
|
||||
"UsageCoreNanoSeconds": bounded(1E9, 1E15),
|
||||
}),
|
||||
"Memory": ptrMatchAllFields(gstruct.Fields{
|
||||
"Time": recent(maxStatsAge),
|
||||
"AvailableBytes": bounded(100*framework.Mb, 100*framework.Gb),
|
||||
"UsageBytes": bounded(10*framework.Mb, 10*framework.Gb),
|
||||
"WorkingSetBytes": bounded(10*framework.Mb, 10*framework.Gb),
|
||||
// this now returns /sys/fs/cgroup/memory.stat total_rss
|
||||
"RSSBytes": bounded(1*framework.Kb, 1*framework.Gb),
|
||||
"PageFaults": bounded(1000, 1E9),
|
||||
"MajorPageFaults": bounded(0, 100000),
|
||||
}),
|
||||
// TODO(#28407): Handle non-eth0 network interface names.
|
||||
"Network": ptrMatchAllFields(gstruct.Fields{
|
||||
"Time": recent(maxStatsAge),
|
||||
"InterfaceStats": gstruct.MatchAllFields(gstruct.Fields{
|
||||
"Name": Or(BeEmpty(), Equal("eth0")),
|
||||
"RxBytes": Or(BeNil(), bounded(1*framework.Mb, 100*framework.Gb)),
|
||||
"RxErrors": Or(BeNil(), bounded(0, 100000)),
|
||||
"TxBytes": Or(BeNil(), bounded(10*framework.Kb, 10*framework.Gb)),
|
||||
"TxErrors": Or(BeNil(), bounded(0, 100000)),
|
||||
}),
|
||||
"Interfaces": Not(BeNil()),
|
||||
}),
|
||||
"Fs": ptrMatchAllFields(gstruct.Fields{
|
||||
"Time": recent(maxStatsAge),
|
||||
"AvailableBytes": fsCapacityBounds,
|
||||
"CapacityBytes": fsCapacityBounds,
|
||||
// we assume we are not running tests on machines < 10tb of disk
|
||||
"UsedBytes": bounded(framework.Kb, 10*framework.Tb),
|
||||
"InodesFree": bounded(1E4, 1E8),
|
||||
"Inodes": bounded(1E4, 1E8),
|
||||
"InodesUsed": bounded(0, 1E8),
|
||||
}),
|
||||
"Runtime": ptrMatchAllFields(gstruct.Fields{
|
||||
"ImageFs": ptrMatchAllFields(gstruct.Fields{
|
||||
"Time": recent(maxStatsAge),
|
||||
"AvailableBytes": fsCapacityBounds,
|
||||
"CapacityBytes": fsCapacityBounds,
|
||||
// we assume we are not running tests on machines < 10tb of disk
|
||||
"UsedBytes": bounded(framework.Kb, 10*framework.Tb),
|
||||
"InodesFree": bounded(1E4, 1E8),
|
||||
"Inodes": bounded(1E4, 1E8),
|
||||
"InodesUsed": bounded(0, 1E8),
|
||||
}),
|
||||
}),
|
||||
}),
|
||||
// Ignore extra pods since the tests run in parallel.
|
||||
"Pods": gstruct.MatchElements(summaryObjectID, gstruct.IgnoreExtras, gstruct.Elements{
|
||||
fmt.Sprintf("%s::%s", f.Namespace.Name, pod0): podExpectations,
|
||||
fmt.Sprintf("%s::%s", f.Namespace.Name, pod1): podExpectations,
|
||||
}),
|
||||
})
|
||||
|
||||
By("Validating /stats/summary")
|
||||
// Give pods a minute to actually start up.
|
||||
Eventually(getNodeSummary, 1*time.Minute, 15*time.Second).Should(matchExpectations)
|
||||
// Then the summary should match the expectations a few more times.
|
||||
Consistently(getNodeSummary, 30*time.Second, 15*time.Second).Should(matchExpectations)
|
||||
})
|
||||
})
|
||||
})

func getSummaryTestPods(f *framework.Framework, numRestarts int32, names ...string) []*v1.Pod {
	pods := make([]*v1.Pod, 0, len(names))
	for _, name := range names {
		pods = append(pods, &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
			},
			Spec: v1.PodSpec{
				RestartPolicy: v1.RestartPolicyAlways,
				Containers: []v1.Container{
					{
						Name:    "busybox-container",
						Image:   busyboxImage,
						Command: getRestartingContainerCommand("/test-empty-dir-mnt", 0, numRestarts, "ping -c 1 google.com; echo 'hello world' >> /test-empty-dir-mnt/file;"),
						Resources: v1.ResourceRequirements{
							Limits: v1.ResourceList{
								// Must set memory limit to get MemoryStats.AvailableBytes
								v1.ResourceMemory: resource.MustParse("10M"),
							},
						},
						VolumeMounts: []v1.VolumeMount{
							{MountPath: "/test-empty-dir-mnt", Name: "test-empty-dir"},
						},
					},
				},
				SecurityContext: &v1.PodSecurityContext{
					SELinuxOptions: &v1.SELinuxOptions{
						Level: "s0",
					},
				},
				Volumes: []v1.Volume{
					// TODO(#28393): Test secret volumes
					// TODO(#28394): Test hostpath volumes
					{Name: "test-empty-dir", VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}},
				},
			},
		})
	}
	return pods
}
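
// Illustrative usage (an assumption, not shown in this excerpt): the It block above is
// expected to create its workload with something along the lines of
//
//	f.PodClient().CreateBatch(getSummaryTestPods(f, 1, pod0, pod1))
//
// where pod0/pod1 are the pod names that the "Pods" expectations above key on.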

// Mapping function for gstruct.MatchAllElements
func summaryObjectID(element interface{}) string {
	switch el := element.(type) {
	case stats.PodStats:
		return fmt.Sprintf("%s::%s", el.PodRef.Namespace, el.PodRef.Name)
	case stats.ContainerStats:
		return el.Name
	case stats.VolumeStats:
		return el.Name
	case stats.UserDefinedMetric:
		return el.Name
	default:
		framework.Failf("Unknown type: %T", el)
		return "???"
	}
}
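
// For example (illustrative values): a stats.PodStats whose PodRef names namespace
// "summary-test-1234" and pod "stats-busybox-0" maps to the key
// "summary-test-1234::stats-busybox-0", mirroring the "<namespace>::<name>" keys built with
// fmt.Sprintf in the expectations above; container, volume, and user-defined-metric
// elements map to their plain names.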

// Convenience functions for common matcher combinations.
func ptrMatchAllFields(fields gstruct.Fields) types.GomegaMatcher {
	return gstruct.PointTo(gstruct.MatchAllFields(fields))
}

func bounded(lower, upper interface{}) types.GomegaMatcher {
	return gstruct.PointTo(And(
		BeNumerically(">=", lower),
		BeNumerically("<=", upper)))
}

func recent(d time.Duration) types.GomegaMatcher {
	return WithTransform(func(t metav1.Time) time.Time {
		return t.Time
	}, And(
		BeTemporally(">=", time.Now().Add(-d)),
		// Now() is the test start time, not the match time, so permit a few extra minutes.
		BeTemporally("<", time.Now().Add(2*time.Minute))))
}
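
// A minimal sketch (illustrative, restating the CPU matcher used above) of how these
// helpers compose: recent matches a metav1.Time value field, while bounded and
// ptrMatchAllFields wrap their matchers in gstruct.PointTo, so they apply to pointer fields:
//
//	cpuExpectations := ptrMatchAllFields(gstruct.Fields{
//		"Time":                 recent(maxStatsAge),
//		"UsageNanoCores":       bounded(100E3, 2E9),
//		"UsageCoreNanoSeconds": bounded(1E9, 1E15),
//	})
//
// MatchAllFields requires every field of the struct to have a matcher, so a field added to
// the stats API without an expectation fails the assertion instead of being silently ignored.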

func recordSystemCgroupProcesses() {
	cfg, err := getCurrentKubeletConfig()
	if err != nil {
		framework.Logf("Failed to read kubelet config: %v", err)
		return
	}
	cgroups := map[string]string{
		"kubelet": cfg.KubeletCgroups,
		"misc":    cfg.SystemCgroups,
	}
	for name, cgroup := range cgroups {
		if cgroup == "" {
			framework.Logf("Skipping unconfigured cgroup %s", name)
			continue
		}

		pids, err := ioutil.ReadFile(fmt.Sprintf("/sys/fs/cgroup/cpu/%s/cgroup.procs", cgroup))
		if err != nil {
			framework.Logf("Failed to read processes in cgroup %s: %v", name, err)
			continue
		}

		framework.Logf("Processes in %s cgroup (%s):", name, cgroup)
		for _, pid := range strings.Fields(string(pids)) {
			path := fmt.Sprintf("/proc/%s/cmdline", pid)
			cmd, err := ioutil.ReadFile(path)
			if err != nil {
				framework.Logf("  Failed to read %s: %v", path, err)
			} else {
				framework.Logf("  %s", cmd)
			}
		}
	}
}
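
// Design note (assumption, not shown in this excerpt): recordSystemCgroupProcesses is a
// debugging aid, presumably invoked from the suite's setup or failure handling so that the
// processes charged to the kubelet and misc cgroups are logged alongside any summary mismatch.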