Fresh dep ensure
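"Fresh dep ensure" means the vendored files below were regenerated with the golang/dep tool rather than edited by hand. A minimal sketch of that workflow, assuming the repository carries the usual Gopkg.toml/Gopkg.lock manifests of a dep-managed project, is:

    # re-resolve the dependencies declared in Gopkg.toml and rewrite vendor/ to match Gopkg.lock
    dep ensure -v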
124  vendor/k8s.io/kubernetes/test/e2e_node/BUILD  (generated, vendored)
@ -5,38 +5,42 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"container.go",
|
||||
"benchmark_util.go",
|
||||
"device_plugin.go",
|
||||
"doc.go",
|
||||
"docker_util.go",
|
||||
"framework.go",
|
||||
"gpu_device_plugin.go",
|
||||
"image_list.go",
|
||||
"node_problem_detector_linux.go",
|
||||
"resource_collector.go",
|
||||
"util.go",
|
||||
] + select({
|
||||
"@io_bazel_rules_go//go/platform:linux": [
|
||||
"benchmark_util.go",
|
||||
"node_problem_detector_linux.go",
|
||||
"resource_collector.go",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/test/e2e_node",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//pkg/api/v1/pod:go_default_library",
|
||||
"//pkg/features:go_default_library",
|
||||
"//pkg/kubelet/apis/config:go_default_library",
|
||||
"//pkg/kubelet/apis/cri:go_default_library",
|
||||
"//pkg/kubelet/apis/cri/runtime/v1alpha2:go_default_library",
|
||||
"//pkg/kubelet/apis/deviceplugin/v1beta1:go_default_library",
|
||||
"//pkg/kubelet/apis/kubeletconfig:go_default_library",
|
||||
"//pkg/kubelet/apis/kubeletconfig/v1beta1:go_default_library",
|
||||
"//pkg/kubelet/apis/podresources:go_default_library",
|
||||
"//pkg/kubelet/apis/podresources/v1alpha1:go_default_library",
|
||||
"//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
|
||||
"//pkg/kubelet/cm:go_default_library",
|
||||
"//pkg/kubelet/cm/devicemanager:go_default_library",
|
||||
"//pkg/kubelet/kubeletconfig/util/codec:go_default_library",
|
||||
"//pkg/kubelet/metrics:go_default_library",
|
||||
"//pkg/kubelet/remote:go_default_library",
|
||||
"//pkg/kubelet/util:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
|
||||
"//staging/src/k8s.io/kubelet/config/v1beta1:go_default_library",
|
||||
"//test/e2e/common:go_default_library",
|
||||
"//test/e2e/framework:go_default_library",
|
||||
"//test/e2e/framework/metrics:go_default_library",
|
||||
@ -45,34 +49,27 @@ go_library(
|
||||
"//vendor/github.com/coreos/go-systemd/util:go_default_library",
|
||||
"//vendor/github.com/docker/docker/api/types:go_default_library",
|
||||
"//vendor/github.com/docker/docker/client:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo:go_default_library",
|
||||
"//vendor/github.com/onsi/gomega:go_default_library",
|
||||
"//vendor/github.com/prometheus/common/model:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
|
||||
"//vendor/golang.org/x/net/context:go_default_library",
|
||||
"//vendor/k8s.io/klog:go_default_library",
|
||||
] + select({
|
||||
"@io_bazel_rules_go//go/platform:linux": [
|
||||
"//pkg/api/v1/node:go_default_library",
|
||||
"//pkg/util/procfs:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
|
||||
"//test/e2e/perftype:go_default_library",
|
||||
"//test/e2e_node/perftype:go_default_library",
|
||||
"//vendor/github.com/google/cadvisor/client/v2:go_default_library",
|
||||
"//vendor/github.com/google/cadvisor/info/v2:go_default_library",
|
||||
"//vendor/github.com/opencontainers/runc/libcontainer/cgroups:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
@ -83,36 +80,31 @@ go_test(
|
||||
srcs = [
|
||||
"apparmor_test.go",
|
||||
"container_log_rotation_test.go",
|
||||
"container_manager_test.go",
|
||||
"cpu_manager_test.go",
|
||||
"critical_pod_test.go",
|
||||
"density_test.go",
|
||||
"docker_test.go",
|
||||
"dockershim_checkpoint_test.go",
|
||||
"dynamic_kubelet_config_test.go",
|
||||
"e2e_node_suite_test.go",
|
||||
"eviction_test.go",
|
||||
"garbage_collector_test.go",
|
||||
"gke_environment_test.go",
|
||||
"hugepages_test.go",
|
||||
"image_id_test.go",
|
||||
"kubelet_test.go",
|
||||
"lifecycle_hook_test.go",
|
||||
"log_path_test.go",
|
||||
"mirror_pod_test.go",
|
||||
"node_container_manager_test.go",
|
||||
"node_perf_test.go",
|
||||
"pods_container_manager_test.go",
|
||||
"resource_usage_test.go",
|
||||
"restart_test.go",
|
||||
"runtime_conformance_test.go",
|
||||
"security_context_test.go",
|
||||
"summary_test.go",
|
||||
"volume_manager_test.go",
|
||||
] + select({
|
||||
"@io_bazel_rules_go//go/platform:linux": [
|
||||
"container_manager_test.go",
|
||||
"density_test.go",
|
||||
"e2e_node_suite_test.go",
|
||||
"node_container_manager_test.go",
|
||||
"resource_usage_test.go",
|
||||
"restart_test.go",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
tags = ["e2e"],
|
||||
deps = [
|
||||
@ -120,9 +112,9 @@ go_test(
|
||||
"//pkg/apis/core:go_default_library",
|
||||
"//pkg/features:go_default_library",
|
||||
"//pkg/kubelet:go_default_library",
|
||||
"//pkg/kubelet/apis/config:go_default_library",
|
||||
"//pkg/kubelet/apis/cri:go_default_library",
|
||||
"//pkg/kubelet/apis/cri/runtime/v1alpha2:go_default_library",
|
||||
"//pkg/kubelet/apis/kubeletconfig:go_default_library",
|
||||
"//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
|
||||
"//pkg/kubelet/cm:go_default_library",
|
||||
"//pkg/kubelet/cm/cpumanager:go_default_library",
|
||||
@ -136,47 +128,49 @@ go_test(
|
||||
"//pkg/kubelet/metrics:go_default_library",
|
||||
"//pkg/kubelet/types:go_default_library",
|
||||
"//pkg/security/apparmor:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/api/scheduling/v1beta1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
|
||||
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/tools/watch:go_default_library",
|
||||
"//test/e2e/common:go_default_library",
|
||||
"//test/e2e/framework:go_default_library",
|
||||
"//test/e2e/framework/metrics:go_default_library",
|
||||
"//test/e2e_node/perf/workloads:go_default_library",
|
||||
"//test/e2e_node/services:go_default_library",
|
||||
"//test/utils/image:go_default_library",
|
||||
"//vendor/github.com/blang/semver:go_default_library",
|
||||
"//vendor/github.com/coreos/go-systemd/util:go_default_library",
|
||||
"//vendor/github.com/davecgh/go-spew/spew:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo:go_default_library",
|
||||
"//vendor/github.com/onsi/gomega:go_default_library",
|
||||
"//vendor/github.com/onsi/gomega/gstruct:go_default_library",
|
||||
"//vendor/github.com/onsi/gomega/types:go_default_library",
|
||||
"//vendor/github.com/prometheus/common/model:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//vendor/k8s.io/klog:go_default_library",
|
||||
] + select({
|
||||
"@io_bazel_rules_go//go/platform:linux": [
|
||||
"//test/e2e/common:go_default_library",
|
||||
"//test/e2e_node/system:go_default_library",
|
||||
"//cmd/kubeadm/app/util/system:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/yaml:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
|
||||
"//test/utils:go_default_library",
|
||||
"//vendor/github.com/kardianos/osext:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo/config:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo/reporters:go_default_library",
|
||||
"//vendor/github.com/spf13/pflag:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/yaml:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
@ -204,12 +198,12 @@ filegroup(
|
||||
":package-srcs",
|
||||
"//test/e2e_node/builder:all-srcs",
|
||||
"//test/e2e_node/environment:all-srcs",
|
||||
"//test/e2e_node/perf/workloads:all-srcs",
|
||||
"//test/e2e_node/perftype:all-srcs",
|
||||
"//test/e2e_node/remote:all-srcs",
|
||||
"//test/e2e_node/runner/local:all-srcs",
|
||||
"//test/e2e_node/runner/remote:all-srcs",
|
||||
"//test/e2e_node/services:all-srcs",
|
||||
"//test/e2e_node/system:all-srcs",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
|
16  vendor/k8s.io/kubernetes/test/e2e_node/apparmor_test.go  (generated, vendored)
@ -18,6 +18,7 @@ package e2e_node
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
@ -31,13 +32,14 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
watchtools "k8s.io/client-go/tools/watch"
|
||||
"k8s.io/kubernetes/pkg/security/apparmor"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
"github.com/golang/glog"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"k8s.io/klog"
|
||||
)
|
||||
|
||||
var _ = framework.KubeDescribe("AppArmor [Feature:AppArmor][NodeFeature:AppArmor]", func() {
|
||||
@ -130,14 +132,14 @@ func loadTestProfiles() error {
|
||||
// apparmor_parser does not always return an error code, so consider any stderr output an error.
|
||||
if err != nil || stderr.Len() > 0 {
|
||||
if stderr.Len() > 0 {
|
||||
glog.Warning(stderr.String())
|
||||
klog.Warning(stderr.String())
|
||||
}
|
||||
if len(out) > 0 {
|
||||
glog.Infof("apparmor_parser: %s", out)
|
||||
klog.Infof("apparmor_parser: %s", out)
|
||||
}
|
||||
return fmt.Errorf("failed to load profiles: %v", err)
|
||||
}
|
||||
glog.V(2).Infof("Loaded profiles: %v", out)
|
||||
klog.V(2).Infof("Loaded profiles: %v", out)
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -151,7 +153,9 @@ func runAppArmorTest(f *framework.Framework, shouldRun bool, profile string) v1.
|
||||
// Pod should remain in the pending state. Wait for the Reason to be set to "AppArmor".
|
||||
w, err := f.PodClient().Watch(metav1.SingleObject(metav1.ObjectMeta{Name: pod.Name}))
|
||||
framework.ExpectNoError(err)
|
||||
_, err = watch.Until(framework.PodStartTimeout, w, func(e watch.Event) (bool, error) {
|
||||
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
|
||||
defer cancel()
|
||||
_, err = watchtools.UntilWithoutRetry(ctx, w, func(e watch.Event) (bool, error) {
|
||||
switch e.Type {
|
||||
case watch.Deleted:
|
||||
return false, errors.NewNotFound(schema.GroupResource{Resource: "pods"}, pod.Name)
|
||||
@ -207,7 +211,7 @@ func isAppArmorEnabled() bool {
|
||||
if len(matches) == 2 {
|
||||
version, err := strconv.Atoi(matches[1])
|
||||
if err != nil {
|
||||
glog.Errorf("Error parsing GCI version from NodeName %q: %v", framework.TestContext.NodeName, err)
|
||||
klog.Errorf("Error parsing GCI version from NodeName %q: %v", framework.TestContext.NodeName, err)
|
||||
return false
|
||||
}
|
||||
return version >= 54
|
||||
|
2  vendor/k8s.io/kubernetes/test/e2e_node/builder/BUILD  (generated, vendored)
@ -11,7 +11,7 @@ go_library(
|
||||
importpath = "k8s.io/kubernetes/test/e2e_node/builder",
|
||||
deps = [
|
||||
"//test/utils:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/k8s.io/klog:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
|
6  vendor/k8s.io/kubernetes/test/e2e_node/builder/build.go  (generated, vendored)
@ -24,7 +24,7 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/klog"
|
||||
"k8s.io/kubernetes/test/utils"
|
||||
)
|
||||
|
||||
@ -38,7 +38,7 @@ var buildTargets = []string{
|
||||
}
|
||||
|
||||
func BuildGo() error {
|
||||
glog.Infof("Building k8s binaries...")
|
||||
klog.Infof("Building k8s binaries...")
|
||||
k8sRoot, err := utils.GetK8sRootDir()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to locate kubernetes root directory %v.", err)
|
||||
@ -90,7 +90,7 @@ func getK8sBin(bin string) (string, error) {
|
||||
func GetKubeletServerBin() string {
|
||||
bin, err := getK8sBin("kubelet")
|
||||
if err != nil {
|
||||
glog.Fatalf("Could not locate kubelet binary %v.", err)
|
||||
klog.Fatalf("Could not locate kubelet binary %v.", err)
|
||||
}
|
||||
return bin
|
||||
}
|
||||
|
128  vendor/k8s.io/kubernetes/test/e2e_node/container.go  (generated, vendored)
@ -1,128 +0,0 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
)
|
||||
|
||||
// One pod one container
|
||||
type ConformanceContainer struct {
|
||||
Container v1.Container
|
||||
RestartPolicy v1.RestartPolicy
|
||||
Volumes []v1.Volume
|
||||
ImagePullSecrets []string
|
||||
|
||||
PodClient *framework.PodClient
|
||||
podName string
|
||||
PodSecurityContext *v1.PodSecurityContext
|
||||
}
|
||||
|
||||
func (cc *ConformanceContainer) Create() {
|
||||
cc.podName = cc.Container.Name + string(uuid.NewUUID())
|
||||
imagePullSecrets := []v1.LocalObjectReference{}
|
||||
for _, s := range cc.ImagePullSecrets {
|
||||
imagePullSecrets = append(imagePullSecrets, v1.LocalObjectReference{Name: s})
|
||||
}
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: cc.podName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
RestartPolicy: cc.RestartPolicy,
|
||||
Containers: []v1.Container{
|
||||
cc.Container,
|
||||
},
|
||||
SecurityContext: cc.PodSecurityContext,
|
||||
Volumes: cc.Volumes,
|
||||
ImagePullSecrets: imagePullSecrets,
|
||||
},
|
||||
}
|
||||
cc.PodClient.Create(pod)
|
||||
}
|
||||
|
||||
func (cc *ConformanceContainer) Delete() error {
|
||||
return cc.PodClient.Delete(cc.podName, metav1.NewDeleteOptions(0))
|
||||
}
|
||||
|
||||
func (cc *ConformanceContainer) IsReady() (bool, error) {
|
||||
pod, err := cc.PodClient.Get(cc.podName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return podutil.IsPodReady(pod), nil
|
||||
}
|
||||
|
||||
func (cc *ConformanceContainer) GetPhase() (v1.PodPhase, error) {
|
||||
pod, err := cc.PodClient.Get(cc.podName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return v1.PodUnknown, err
|
||||
}
|
||||
return pod.Status.Phase, nil
|
||||
}
|
||||
|
||||
func (cc *ConformanceContainer) GetStatus() (v1.ContainerStatus, error) {
|
||||
pod, err := cc.PodClient.Get(cc.podName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return v1.ContainerStatus{}, err
|
||||
}
|
||||
statuses := pod.Status.ContainerStatuses
|
||||
if len(statuses) != 1 || statuses[0].Name != cc.Container.Name {
|
||||
return v1.ContainerStatus{}, fmt.Errorf("unexpected container statuses %v", statuses)
|
||||
}
|
||||
return statuses[0], nil
|
||||
}
|
||||
|
||||
func (cc *ConformanceContainer) Present() (bool, error) {
|
||||
_, err := cc.PodClient.Get(cc.podName, metav1.GetOptions{})
|
||||
if err == nil {
|
||||
return true, nil
|
||||
}
|
||||
if errors.IsNotFound(err) {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
type ContainerState string
|
||||
|
||||
const (
|
||||
ContainerStateWaiting ContainerState = "Waiting"
|
||||
ContainerStateRunning ContainerState = "Running"
|
||||
ContainerStateTerminated ContainerState = "Terminated"
|
||||
ContainerStateUnknown ContainerState = "Unknown"
|
||||
)
|
||||
|
||||
func GetContainerState(state v1.ContainerState) ContainerState {
|
||||
if state.Waiting != nil {
|
||||
return ContainerStateWaiting
|
||||
}
|
||||
if state.Running != nil {
|
||||
return ContainerStateRunning
|
||||
}
|
||||
if state.Terminated != nil {
|
||||
return ContainerStateTerminated
|
||||
}
|
||||
return ContainerStateUnknown
|
||||
}
|
2  vendor/k8s.io/kubernetes/test/e2e_node/container_log_rotation_test.go  (generated, vendored)
@ -22,7 +22,7 @@ import (
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
|
||||
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
kubelogs "k8s.io/kubernetes/pkg/kubelet/logs"
|
||||
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
|
2  vendor/k8s.io/kubernetes/test/e2e_node/container_manager_test.go  (generated, vendored)
@ -178,7 +178,7 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
|
||||
Image: imageutils.GetE2EImage(imageutils.Nginx),
|
||||
Name: podName,
|
||||
Resources: v1.ResourceRequirements{
|
||||
Limits: v1.ResourceList{
|
||||
|
2  vendor/k8s.io/kubernetes/test/e2e_node/cpu_manager_test.go  (generated, vendored)
@ -26,8 +26,8 @@ import (
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
|
||||
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
|
||||
"k8s.io/kubernetes/pkg/kubelet/types"
|
||||
|
9  vendor/k8s.io/kubernetes/test/e2e_node/critical_pod_test.go  (generated, vendored)
@ -24,7 +24,7 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
kubeapi "k8s.io/kubernetes/pkg/apis/core"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
|
||||
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
|
||||
kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
@ -45,6 +45,9 @@ var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:C
|
||||
|
||||
Context("when we need to admit a critical pod", func() {
|
||||
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
|
||||
if initialConfig.FeatureGates == nil {
|
||||
initialConfig.FeatureGates = make(map[string]bool)
|
||||
}
|
||||
initialConfig.FeatureGates[string(features.ExperimentalCriticalPodAnnotation)] = true
|
||||
})
|
||||
|
||||
@ -142,9 +145,9 @@ func getTestPod(critical bool, name string, resources v1.ResourceRequirements) *
|
||||
pod.ObjectMeta.Annotations = map[string]string{
|
||||
kubelettypes.CriticalPodAnnotationKey: "",
|
||||
}
|
||||
Expect(kubelettypes.IsCriticalPod(pod)).To(BeTrue(), "pod should be a critical pod")
|
||||
Expect(kubelettypes.IsCritical(pod.Namespace, pod.Annotations)).To(BeTrue(), "pod should be a critical pod")
|
||||
} else {
|
||||
Expect(kubelettypes.IsCriticalPod(pod)).To(BeFalse(), "pod should not be a critical pod")
|
||||
Expect(kubelettypes.IsCritical(pod.Namespace, pod.Annotations)).To(BeFalse(), "pod should not be a critical pod")
|
||||
}
|
||||
return pod
|
||||
}
|
||||
|
2  vendor/k8s.io/kubernetes/test/e2e_node/density_test.go  (generated, vendored)
@ -31,7 +31,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
|
||||
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
|
||||
stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
|
||||
kubemetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
98  vendor/k8s.io/kubernetes/test/e2e_node/device_plugin.go  (generated, vendored)
@ -26,9 +26,10 @@ import (
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1"
|
||||
@ -40,14 +41,31 @@ import (
|
||||
|
||||
const (
|
||||
// fake resource name
|
||||
resourceName = "fake.com/resource"
|
||||
resourceName = "fake.com/resource"
|
||||
resourceNameWithProbeSupport = "fake.com/resource2"
|
||||
)
|
||||
|
||||
// Serial because the test restarts Kubelet
|
||||
var _ = framework.KubeDescribe("Device Plugin [Feature:DevicePlugin][NodeFeature:DevicePlugin][Serial]", func() {
|
||||
f := framework.NewDefaultFramework("device-plugin-errors")
|
||||
testDevicePlugin(f, false, pluginapi.DevicePluginPath)
|
||||
})
|
||||
|
||||
var _ = framework.KubeDescribe("Device Plugin [Feature:DevicePluginProbe][NodeFeature:DevicePluginProbe][Serial]", func() {
|
||||
f := framework.NewDefaultFramework("device-plugin-errors")
|
||||
testDevicePlugin(f, true, "/var/lib/kubelet/plugins_registry")
|
||||
})
|
||||
|
||||
func testDevicePlugin(f *framework.Framework, enablePluginWatcher bool, pluginSockDir string) {
|
||||
Context("DevicePlugin", func() {
|
||||
By("Enabling support for Kubelet Plugins Watcher")
|
||||
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
|
||||
if initialConfig.FeatureGates == nil {
|
||||
initialConfig.FeatureGates = map[string]bool{}
|
||||
}
|
||||
initialConfig.FeatureGates[string(features.KubeletPluginsWatcher)] = enablePluginWatcher
|
||||
initialConfig.FeatureGates[string(features.KubeletPodResources)] = true
|
||||
})
|
||||
It("Verifies the Kubelet device plugin functionality.", func() {
|
||||
By("Start stub device plugin")
|
||||
// fake devices for e2e test
|
||||
@ -56,15 +74,16 @@ var _ = framework.KubeDescribe("Device Plugin [Feature:DevicePlugin][NodeFeature
|
||||
{ID: "Dev-2", Health: pluginapi.Healthy},
|
||||
}
|
||||
|
||||
socketPath := pluginapi.DevicePluginPath + "dp." + fmt.Sprintf("%d", time.Now().Unix())
|
||||
socketPath := pluginSockDir + "dp." + fmt.Sprintf("%d", time.Now().Unix())
|
||||
framework.Logf("socketPath %v", socketPath)
|
||||
|
||||
dp1 := dm.NewDevicePluginStub(devs, socketPath)
|
||||
dp1 := dm.NewDevicePluginStub(devs, socketPath, resourceName, false)
|
||||
dp1.SetAllocFunc(stubAllocFunc)
|
||||
err := dp1.Start()
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("Register resources")
|
||||
err = dp1.Register(pluginapi.KubeletSocket, resourceName, false)
|
||||
err = dp1.Register(pluginapi.KubeletSocket, resourceName, pluginSockDir)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("Waiting for the resource exported by the stub device plugin to become available on the local node")
|
||||
@ -83,6 +102,17 @@ var _ = framework.KubeDescribe("Device Plugin [Feature:DevicePlugin][NodeFeature
|
||||
devId1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
|
||||
Expect(devId1).To(Not(Equal("")))
|
||||
|
||||
podResources, err := getNodeDevices()
|
||||
Expect(err).To(BeNil())
|
||||
Expect(len(podResources.PodResources)).To(Equal(1))
|
||||
Expect(podResources.PodResources[0].Name).To(Equal(pod1.Name))
|
||||
Expect(podResources.PodResources[0].Namespace).To(Equal(pod1.Namespace))
|
||||
Expect(len(podResources.PodResources[0].Containers)).To(Equal(1))
|
||||
Expect(podResources.PodResources[0].Containers[0].Name).To(Equal(pod1.Spec.Containers[0].Name))
|
||||
Expect(len(podResources.PodResources[0].Containers[0].Devices)).To(Equal(1))
|
||||
Expect(podResources.PodResources[0].Containers[0].Devices[0].ResourceName).To(Equal(resourceName))
|
||||
Expect(len(podResources.PodResources[0].Containers[0].Devices[0].DeviceIds)).To(Equal(1))
|
||||
|
||||
pod1, err = f.PodClient().Get(pod1.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
@ -92,26 +122,38 @@ var _ = framework.KubeDescribe("Device Plugin [Feature:DevicePlugin][NodeFeature
|
||||
devIdAfterRestart := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
|
||||
Expect(devIdAfterRestart).To(Equal(devId1))
|
||||
|
||||
restartTime := time.Now()
|
||||
By("Restarting Kubelet")
|
||||
restartKubelet()
|
||||
|
||||
// We need to wait for node to be ready before re-registering stub device plugin.
|
||||
// Otherwise, Kubelet DeviceManager may remove the re-registered sockets after it starts.
|
||||
By("Wait for node is ready")
|
||||
Eventually(func() bool {
|
||||
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
for _, cond := range node.Status.Conditions {
|
||||
if cond.Type == v1.NodeReady && cond.Status == v1.ConditionTrue && cond.LastHeartbeatTime.After(restartTime) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}, 5*time.Minute, framework.Poll).Should(BeTrue())
|
||||
|
||||
By("Re-Register resources")
|
||||
dp1 = dm.NewDevicePluginStub(devs, socketPath, resourceName, false)
|
||||
dp1.SetAllocFunc(stubAllocFunc)
|
||||
err = dp1.Start()
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
err = dp1.Register(pluginapi.KubeletSocket, resourceName, pluginSockDir)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ensurePodContainerRestart(f, pod1.Name, pod1.Name)
|
||||
By("Confirming that after a kubelet restart, fake-device assignement is kept")
|
||||
devIdRestart1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
|
||||
Expect(devIdRestart1).To(Equal(devId1))
|
||||
|
||||
By("Wait for node is ready")
|
||||
framework.WaitForAllNodesSchedulable(f.ClientSet, framework.TestContext.NodeSchedulableTimeout)
|
||||
|
||||
By("Re-Register resources after kubelet restart")
|
||||
dp1 = dm.NewDevicePluginStub(devs, socketPath)
|
||||
dp1.SetAllocFunc(stubAllocFunc)
|
||||
err = dp1.Start()
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
err = dp1.Register(pluginapi.KubeletSocket, resourceName, false)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("Waiting for resource to become available on the local node after re-registration")
|
||||
Eventually(func() bool {
|
||||
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
|
||||
@ -149,12 +191,12 @@ var _ = framework.KubeDescribe("Device Plugin [Feature:DevicePlugin][NodeFeature
|
||||
Expect(devIdRestart2).To(Equal(devId2))
|
||||
|
||||
By("Re-register resources")
|
||||
dp1 = dm.NewDevicePluginStub(devs, socketPath)
|
||||
dp1 = dm.NewDevicePluginStub(devs, socketPath, resourceName, false)
|
||||
dp1.SetAllocFunc(stubAllocFunc)
|
||||
err = dp1.Start()
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
err = dp1.Register(pluginapi.KubeletSocket, resourceName, false)
|
||||
err = dp1.Register(pluginapi.KubeletSocket, resourceName, pluginSockDir)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("Waiting for the resource exported by the stub device plugin to become healthy on the local node")
|
||||
@ -175,24 +217,12 @@ var _ = framework.KubeDescribe("Device Plugin [Feature:DevicePlugin][NodeFeature
|
||||
return numberOfDevicesCapacity(node, resourceName) <= 0
|
||||
}, 10*time.Minute, framework.Poll).Should(BeTrue())
|
||||
|
||||
By("Restarting Kubelet second time.")
|
||||
restartKubelet()
|
||||
|
||||
By("Checking that scheduled pods can continue to run even after we delete device plugin and restart Kubelet Eventually.")
|
||||
ensurePodContainerRestart(f, pod1.Name, pod1.Name)
|
||||
devIdRestart1 = parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
|
||||
Expect(devIdRestart1).To(Equal(devId1))
|
||||
|
||||
ensurePodContainerRestart(f, pod2.Name, pod2.Name)
|
||||
devIdRestart2 = parseLog(f, pod2.Name, pod2.Name, deviceIDRE)
|
||||
Expect(devIdRestart2).To(Equal(devId2))
|
||||
|
||||
// Cleanup
|
||||
f.PodClient().DeleteSync(pod1.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
|
||||
f.PodClient().DeleteSync(pod2.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
|
||||
})
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// makeBusyboxPod returns a simple Pod spec with a busybox container
|
||||
// that requests resourceName and runs the specified command.
|
||||
@ -235,7 +265,7 @@ func ensurePodContainerRestart(f *framework.Framework, podName string, contName
|
||||
currentCount = p.Status.ContainerStatuses[0].RestartCount
|
||||
framework.Logf("initial %v, current %v", initialCount, currentCount)
|
||||
return currentCount > initialCount
|
||||
}, 2*time.Minute, framework.Poll).Should(BeTrue())
|
||||
}, 5*time.Minute, framework.Poll).Should(BeTrue())
|
||||
}
|
||||
|
||||
// parseLog returns the matching string for the specified regular expression parsed from the container logs.
|
||||
|
2  vendor/k8s.io/kubernetes/test/e2e_node/docker_test.go  (generated, vendored)
@ -61,7 +61,7 @@ var _ = framework.KubeDescribe("Docker features [Feature:Docker][Legacy:Docker]"
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{
|
||||
Name: containerName,
|
||||
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
|
||||
Image: imageutils.GetE2EImage(imageutils.Nginx),
|
||||
}},
|
||||
},
|
||||
})
|
||||
|
66  vendor/k8s.io/kubernetes/test/e2e_node/dynamic_kubelet_config_test.go  (generated, vendored)
@ -29,7 +29,7 @@ import (
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
|
||||
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
|
||||
controller "k8s.io/kubernetes/pkg/kubelet/kubeletconfig"
|
||||
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status"
|
||||
"k8s.io/kubernetes/pkg/kubelet/metrics"
|
||||
@ -418,6 +418,70 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
|
||||
})
|
||||
})
|
||||
|
||||
// previously, we missed a panic because we were not exercising this path
|
||||
Context("update Node.Spec.ConfigSource: non-nil last-known-good to a new non-nil last-known-good", func() {
|
||||
It(itDescription, func() {
|
||||
var err error
|
||||
// we base the "lkg" configmap off of the configuration from before the test
|
||||
lkgKC := beforeKC.DeepCopy()
|
||||
lkgConfigMap1 := newKubeletConfigMap("dynamic-kubelet-config-test-lkg-1", lkgKC)
|
||||
lkgConfigMap1, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(lkgConfigMap1)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
lkgSource1 := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
|
||||
Namespace: lkgConfigMap1.Namespace,
|
||||
Name: lkgConfigMap1.Name,
|
||||
KubeletConfigKey: "kubelet",
|
||||
}}
|
||||
lkgStatus1 := lkgSource1.DeepCopy()
|
||||
lkgStatus1.ConfigMap.UID = lkgConfigMap1.UID
|
||||
lkgStatus1.ConfigMap.ResourceVersion = lkgConfigMap1.ResourceVersion
|
||||
|
||||
lkgConfigMap2 := newKubeletConfigMap("dynamic-kubelet-config-test-lkg-2", lkgKC)
|
||||
lkgConfigMap2, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(lkgConfigMap2)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
lkgSource2 := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
|
||||
Namespace: lkgConfigMap2.Namespace,
|
||||
Name: lkgConfigMap2.Name,
|
||||
KubeletConfigKey: "kubelet",
|
||||
}}
|
||||
lkgStatus2 := lkgSource2.DeepCopy()
|
||||
lkgStatus2.ConfigMap.UID = lkgConfigMap2.UID
|
||||
lkgStatus2.ConfigMap.ResourceVersion = lkgConfigMap2.ResourceVersion
|
||||
|
||||
// cases
|
||||
first := nodeConfigTestCase{
|
||||
desc: "last-known-good-1",
|
||||
configSource: lkgSource1,
|
||||
configMap: lkgConfigMap1,
|
||||
expectConfigStatus: expectNodeConfigStatus{
|
||||
lastKnownGood: lkgStatus1,
|
||||
},
|
||||
expectConfig: lkgKC,
|
||||
event: true,
|
||||
}
|
||||
|
||||
second := nodeConfigTestCase{
|
||||
desc: "last-known-good-2",
|
||||
configSource: lkgSource2,
|
||||
configMap: lkgConfigMap2,
|
||||
expectConfigStatus: expectNodeConfigStatus{
|
||||
lastKnownGood: lkgStatus2,
|
||||
},
|
||||
expectConfig: lkgKC,
|
||||
event: true,
|
||||
}
|
||||
|
||||
// Manually actuate this to ensure we wait for each case to become the last-known-good
|
||||
const lkgDuration = 12 * time.Minute
|
||||
By(fmt.Sprintf("setting initial state %q", first.desc))
|
||||
first.run(f, setConfigSourceFunc, true, lkgDuration)
|
||||
By(fmt.Sprintf("from %q to %q", first.desc, second.desc))
|
||||
second.run(f, setConfigSourceFunc, true, lkgDuration)
|
||||
})
|
||||
})
|
||||
|
||||
// exposes resource leaks across config changes
|
||||
Context("update Node.Spec.ConfigSource: 100 update stress test:", func() {
|
||||
It(itDescription, func() {
|
||||
|
33  vendor/k8s.io/kubernetes/test/e2e_node/e2e_node_suite_test.go  (generated, vendored)
@ -38,19 +38,19 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
utilyaml "k8s.io/apimachinery/pkg/util/yaml"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/cmd/kubeadm/app/util/system"
|
||||
nodeutil "k8s.io/kubernetes/pkg/api/v1/node"
|
||||
commontest "k8s.io/kubernetes/test/e2e/common"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e_node/services"
|
||||
"k8s.io/kubernetes/test/e2e_node/system"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/kardianos/osext"
|
||||
. "github.com/onsi/ginkgo"
|
||||
"github.com/onsi/ginkgo/config"
|
||||
morereporters "github.com/onsi/ginkgo/reporters"
|
||||
. "github.com/onsi/gomega"
|
||||
"github.com/spf13/pflag"
|
||||
"k8s.io/klog"
|
||||
)
|
||||
|
||||
var e2es *services.E2EServices
|
||||
@ -71,9 +71,11 @@ func init() {
|
||||
// It seems that someone is using flag.Parse() after init() and TestMain().
|
||||
// TODO(random-liu): Find who is using flag.Parse() and cause errors and move the following logic
|
||||
// into TestContext.
|
||||
// TODO(pohly): remove RegisterNodeFlags from test_context.go enable Viper config support here?
|
||||
}
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
pflag.Parse()
|
||||
framework.AfterReadingAllFlags(&framework.TestContext)
|
||||
os.Exit(m.Run())
|
||||
@ -86,7 +88,7 @@ const rootfs = "/rootfs"
|
||||
func TestE2eNode(t *testing.T) {
|
||||
if *runServicesMode {
|
||||
// If run-services-mode is specified, only run services in current process.
|
||||
services.RunE2EServices()
|
||||
services.RunE2EServices(t)
|
||||
return
|
||||
}
|
||||
if *runKubeletMode {
|
||||
@ -101,7 +103,7 @@ func TestE2eNode(t *testing.T) {
|
||||
var err error
|
||||
spec, err = loadSystemSpecFromFile(*systemSpecFile)
|
||||
if err != nil {
|
||||
glog.Exitf("Failed to load system spec: %v", err)
|
||||
klog.Exitf("Failed to load system spec: %v", err)
|
||||
}
|
||||
}
|
||||
if framework.TestContext.NodeConformance {
|
||||
@ -110,23 +112,22 @@ func TestE2eNode(t *testing.T) {
|
||||
// TODO(random-liu): Consider to chroot the whole test process to make writing
|
||||
// test easier.
|
||||
if err := syscall.Chroot(rootfs); err != nil {
|
||||
glog.Exitf("chroot %q failed: %v", rootfs, err)
|
||||
klog.Exitf("chroot %q failed: %v", rootfs, err)
|
||||
}
|
||||
}
|
||||
if _, err := system.ValidateSpec(*spec, framework.TestContext.ContainerRuntime); err != nil {
|
||||
glog.Exitf("system validation failed: %v", err)
|
||||
klog.Exitf("system validation failed: %v", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
// If run-services-mode is not specified, run test.
|
||||
rand.Seed(time.Now().UTC().UnixNano())
|
||||
RegisterFailHandler(Fail)
|
||||
reporters := []Reporter{}
|
||||
reportDir := framework.TestContext.ReportDir
|
||||
if reportDir != "" {
|
||||
// Create the directory if it doesn't already exists
|
||||
if err := os.MkdirAll(reportDir, 0755); err != nil {
|
||||
glog.Errorf("Failed creating report directory: %v", err)
|
||||
klog.Errorf("Failed creating report directory: %v", err)
|
||||
} else {
|
||||
// Configure a junit reporter to write to the directory
|
||||
junitFile := fmt.Sprintf("junit_%s_%02d.xml", framework.TestContext.ReportPrefix, config.GinkgoConfig.ParallelNode)
|
||||
@ -145,7 +146,7 @@ var _ = SynchronizedBeforeSuite(func() []byte {
|
||||
// Pre-pull the images tests depend on so we can fail immediately if there is an image pull issue
|
||||
// This helps with debugging test flakes since it is hard to tell when a test failure is due to image pulling.
|
||||
if framework.TestContext.PrepullImages {
|
||||
glog.Infof("Pre-pulling images so that they are cached for the tests.")
|
||||
klog.Infof("Pre-pulling images so that they are cached for the tests.")
|
||||
err := PrePullAllImages()
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
}
|
||||
@ -160,12 +161,12 @@ var _ = SynchronizedBeforeSuite(func() []byte {
|
||||
// If the services are expected to keep running after test, they should not monitor the test process.
|
||||
e2es = services.NewE2EServices(*stopServices)
|
||||
Expect(e2es.Start()).To(Succeed(), "should be able to start node services.")
|
||||
glog.Infof("Node services started. Running tests...")
|
||||
klog.Infof("Node services started. Running tests...")
|
||||
} else {
|
||||
glog.Infof("Running tests without starting services.")
|
||||
klog.Infof("Running tests without starting services.")
|
||||
}
|
||||
|
||||
glog.Infof("Wait for the node to be ready")
|
||||
klog.Infof("Wait for the node to be ready")
|
||||
waitForNodeReady()
|
||||
|
||||
// Reference common test to make the import valid.
|
||||
@ -181,12 +182,12 @@ var _ = SynchronizedBeforeSuite(func() []byte {
|
||||
var _ = SynchronizedAfterSuite(func() {}, func() {
|
||||
if e2es != nil {
|
||||
if *startServices && *stopServices {
|
||||
glog.Infof("Stopping node services...")
|
||||
klog.Infof("Stopping node services...")
|
||||
e2es.Stop()
|
||||
}
|
||||
}
|
||||
|
||||
glog.Infof("Tests Finished")
|
||||
klog.Infof("Tests Finished")
|
||||
})
|
||||
|
||||
// validateSystem runs system validation in a separate process and returns error if validation fails.
|
||||
@ -209,13 +210,13 @@ func maskLocksmithdOnCoreos() {
|
||||
data, err := ioutil.ReadFile("/etc/os-release")
|
||||
if err != nil {
|
||||
// Not all distros contain this file.
|
||||
glog.Infof("Could not read /etc/os-release: %v", err)
|
||||
klog.Infof("Could not read /etc/os-release: %v", err)
|
||||
return
|
||||
}
|
||||
if bytes.Contains(data, []byte("ID=coreos")) {
|
||||
output, err := exec.Command("systemctl", "mask", "--now", "locksmithd").CombinedOutput()
|
||||
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("should be able to mask locksmithd - output: %q", string(output)))
|
||||
glog.Infof("Locksmithd is masked successfully")
|
||||
klog.Infof("Locksmithd is masked successfully")
|
||||
}
|
||||
}
|
||||
|
||||
|
2  vendor/k8s.io/kubernetes/test/e2e_node/environment/conformance.go  (generated, vendored)
@ -99,7 +99,7 @@ func containerRuntime() error {
|
||||
}
|
||||
|
||||
// Setup cadvisor to check the container environment
|
||||
c, err := cadvisor.New("", 0 /*don't start the http server*/, cadvisor.NewImageFsInfoProvider("docker", ""), "/var/lib/kubelet", false)
|
||||
c, err := cadvisor.New(cadvisor.NewImageFsInfoProvider("docker", ""), "/var/lib/kubelet", false)
|
||||
if err != nil {
|
||||
return printError("Container Runtime Check: %s Could not start cadvisor %v", failed, err)
|
||||
}
|
||||
|
41  vendor/k8s.io/kubernetes/test/e2e_node/eviction_test.go  (generated, vendored)
@ -24,12 +24,13 @@ import (
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
schedulerapi "k8s.io/api/scheduling/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
nodeutil "k8s.io/kubernetes/pkg/api/v1/node"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
|
||||
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
|
||||
stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
|
||||
"k8s.io/kubernetes/pkg/kubelet/eviction"
|
||||
kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
|
||||
@ -42,7 +43,7 @@ import (
|
||||
)
|
||||
|
||||
// Eviction Policy is described here:
|
||||
// https://github.com/kubernetes/community/blob/master/contributors/design-proposals/kubelet-eviction.md
|
||||
// https://github.com/kubernetes/community/blob/master/contributors/design-proposals/node/kubelet-eviction.md
|
||||
|
||||
const (
|
||||
postTestConditionMonitoringPeriod = 1 * time.Minute
|
||||
@ -232,7 +233,6 @@ var _ = framework.KubeDescribe("LocalStorageCapacityIsolationEviction [Slow] [Se
|
||||
evictionTestTimeout := 10 * time.Minute
|
||||
Context(fmt.Sprintf(testContextFmt, "evictions due to pod local storage violations"), func() {
|
||||
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
|
||||
initialConfig.FeatureGates[string(features.LocalStorageCapacityIsolation)] = true
|
||||
// setting a threshold to 0% disables; non-empty map overrides default value (necessary due to omitempty)
|
||||
initialConfig.EvictionHard = map[string]string{"memory.available": "0%"}
|
||||
})
|
||||
@ -285,9 +285,12 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [
|
||||
expectedNodeCondition := v1.NodeMemoryPressure
|
||||
expectedStarvedResource := v1.ResourceMemory
|
||||
pressureTimeout := 10 * time.Minute
|
||||
|
||||
highPriorityClassName := f.BaseName + "-high-priority"
|
||||
highPriority := int32(999999999)
|
||||
|
||||
Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
|
||||
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
|
||||
initialConfig.FeatureGates[string(features.PodPriority)] = true
|
||||
memoryConsumed := resource.MustParse("600Mi")
|
||||
summary := eventuallyGetSummary()
|
||||
availableBytes := *(summary.Node.Memory.AvailableBytes)
|
||||
@ -297,6 +300,14 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [
|
||||
initialConfig.EvictionHard = map[string]string{"memory.available": fmt.Sprintf("%d", availableBytes-uint64(memoryConsumed.Value()))}
|
||||
initialConfig.EvictionMinimumReclaim = map[string]string{}
|
||||
})
|
||||
BeforeEach(func() {
|
||||
_, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
|
||||
Expect(err == nil || errors.IsAlreadyExists(err)).To(BeTrue())
|
||||
})
|
||||
AfterEach(func() {
|
||||
err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
specs := []podEvictSpec{
|
||||
{
|
||||
evictionPriority: 2,
|
||||
@ -318,8 +329,7 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [
|
||||
}),
|
||||
},
|
||||
}
|
||||
systemPriority := int32(2147483647)
|
||||
specs[1].pod.Spec.Priority = &systemPriority
|
||||
specs[1].pod.Spec.PriorityClassName = highPriorityClassName
|
||||
runEvictionTest(f, pressureTimeout, expectedNodeCondition, expectedStarvedResource, logMemoryMetrics, specs)
|
||||
})
|
||||
})
|
||||
@ -332,10 +342,12 @@ var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Ser
|
||||
expectedNodeCondition := v1.NodeDiskPressure
|
||||
expectedStarvedResource := v1.ResourceEphemeralStorage
|
||||
pressureTimeout := 10 * time.Minute
|
||||
|
||||
highPriorityClassName := f.BaseName + "-high-priority"
|
||||
highPriority := int32(999999999)
|
||||
|
||||
Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
|
||||
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
|
||||
initialConfig.FeatureGates[string(features.PodPriority)] = true
|
||||
initialConfig.FeatureGates[string(features.LocalStorageCapacityIsolation)] = true
|
||||
diskConsumed := resource.MustParse("350Mi")
|
||||
summary := eventuallyGetSummary()
|
||||
availableBytes := *(summary.Node.Fs.AvailableBytes)
|
||||
@ -345,6 +357,14 @@ var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Ser
|
||||
initialConfig.EvictionHard = map[string]string{"nodefs.available": fmt.Sprintf("%d", availableBytes-uint64(diskConsumed.Value()))}
|
||||
initialConfig.EvictionMinimumReclaim = map[string]string{}
|
||||
})
|
||||
BeforeEach(func() {
|
||||
_, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
|
||||
Expect(err == nil || errors.IsAlreadyExists(err)).To(BeTrue())
|
||||
})
|
||||
AfterEach(func() {
|
||||
err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
specs := []podEvictSpec{
|
||||
{
|
||||
evictionPriority: 2,
|
||||
@ -367,8 +387,7 @@ var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Ser
|
||||
}),
|
||||
},
|
||||
}
|
||||
systemPriority := int32(2147483647)
|
||||
specs[1].pod.Spec.Priority = &systemPriority
|
||||
specs[1].pod.Spec.PriorityClassName = highPriorityClassName
|
||||
runEvictionTest(f, pressureTimeout, expectedNodeCondition, expectedStarvedResource, logDiskMetrics, specs)
|
||||
})
|
||||
})
|
||||
|
5  vendor/k8s.io/kubernetes/test/e2e_node/gke_environment_test.go  (generated, vendored)
@ -27,6 +27,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
"github.com/blang/semver"
|
||||
. "github.com/onsi/ginkgo"
|
||||
@ -170,7 +171,7 @@ func checkDockerConfig() error {
|
||||
// checkDockerNetworkClient checks client networking by pinging an external IP
|
||||
// address from a container.
|
||||
func checkDockerNetworkClient() error {
|
||||
const imageName = "k8s.gcr.io/busybox"
|
||||
imageName := imageutils.GetE2EImage(imageutils.BusyBox)
|
||||
output, err := runCommand("docker", "run", "--rm", imageName, "sh", "-c", "ping -w 5 -q google.com")
|
||||
if err != nil {
|
||||
return err
|
||||
@ -310,7 +311,7 @@ func checkDockerStorageDriver() error {
|
||||
return fmt.Errorf("failed to find storage driver")
|
||||
}
|
||||
|
||||
var _ = framework.KubeDescribe("GKE system requirements [Conformance][NodeConformance][Feature:GKEEnv][NodeFeature:GKEEnv]", func() {
|
||||
var _ = framework.KubeDescribe("GKE system requirements [NodeConformance][Feature:GKEEnv][NodeFeature:GKEEnv]", func() {
|
||||
BeforeEach(func() {
|
||||
framework.RunIfSystemSpecNameIs("gke")
|
||||
})
|
||||
|
10  vendor/k8s.io/kubernetes/test/e2e_node/gpu_device_plugin.go  (generated, vendored)
@ -42,6 +42,7 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
|
||||
|
||||
Context("DevicePlugin", func() {
|
||||
var devicePluginPod *v1.Pod
|
||||
var err error
|
||||
BeforeEach(func() {
|
||||
By("Ensuring that Nvidia GPUs exists on the node")
|
||||
if !checkIfNvidiaGPUsExistOnNode() {
|
||||
@ -49,12 +50,13 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
|
||||
}
|
||||
|
||||
By("Creating the Google Device Plugin pod for NVIDIA GPU in GKE")
|
||||
devicePluginPod = f.PodClient().CreateSync(framework.NVIDIADevicePlugin(f.Namespace.Name))
|
||||
devicePluginPod, err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(framework.NVIDIADevicePlugin())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("Waiting for GPUs to become available on the local node")
|
||||
Eventually(func() bool {
|
||||
return framework.NumberOfNVIDIAGPUs(getLocalNode(f)) > 0
|
||||
}, 10*time.Second, framework.Poll).Should(BeTrue())
|
||||
}, 5*time.Minute, framework.Poll).Should(BeTrue())
|
||||
|
||||
if framework.NumberOfNVIDIAGPUs(getLocalNode(f)) < 2 {
|
||||
Skip("Not enough GPUs to execute this test (at least two needed)")
|
||||
@ -97,7 +99,7 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
|
||||
framework.WaitForAllNodesSchedulable(f.ClientSet, framework.TestContext.NodeSchedulableTimeout)
|
||||
Eventually(func() bool {
|
||||
return framework.NumberOfNVIDIAGPUs(getLocalNode(f)) > 0
|
||||
}, 10*time.Second, framework.Poll).Should(BeTrue())
|
||||
}, 5*time.Minute, framework.Poll).Should(BeTrue())
|
||||
p2 := f.PodClient().CreateSync(makeBusyboxPod(framework.NVIDIAGPUResourceName, podRECMD))
|
||||
|
||||
By("Checking that pods got a different GPU")
|
||||
@ -106,7 +108,7 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
|
||||
Expect(devId1).To(Not(Equal(devId2)))
|
||||
|
||||
By("Deleting device plugin.")
|
||||
f.PodClient().Delete(devicePluginPod.Name, &metav1.DeleteOptions{})
|
||||
f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(devicePluginPod.Name, &metav1.DeleteOptions{})
|
||||
By("Waiting for GPUs to become unavailable on the local node")
|
||||
Eventually(func() bool {
|
||||
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
|
||||
|
5  vendor/k8s.io/kubernetes/test/e2e_node/hugepages_test.go  (generated, vendored)
@ -28,9 +28,10 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
|
||||
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
|
||||
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
@ -168,7 +169,7 @@ func runHugePagesTests(f *framework.Framework) {
|
||||
Spec: apiv1.PodSpec{
|
||||
Containers: []apiv1.Container{
|
||||
{
|
||||
Image: framework.GetPauseImageName(f.ClientSet),
|
||||
Image: imageutils.GetPauseImageName(),
|
||||
Name: "container" + string(uuid.NewUUID()),
|
||||
Resources: apiv1.ResourceRequirements{
|
||||
Limits: apiv1.ResourceList{
|
||||
|
13  vendor/k8s.io/kubernetes/test/e2e_node/image_list.go  (generated, vendored)
@ -22,7 +22,7 @@ import (
|
||||
"os/user"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/klog"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri"
|
||||
@ -47,12 +47,15 @@ var NodeImageWhiteList = sets.NewString(
|
||||
busyboxImage,
|
||||
"k8s.gcr.io/busybox@sha256:4bdd623e848417d96127e16037743f0cd8b528c026e9175e22a84f639eca58ff",
|
||||
"k8s.gcr.io/node-problem-detector:v0.4.1",
|
||||
imageutils.GetE2EImage(imageutils.NginxSlim),
|
||||
imageutils.GetE2EImage(imageutils.Nginx),
|
||||
imageutils.GetE2EImage(imageutils.ServeHostname),
|
||||
imageutils.GetE2EImage(imageutils.Netexec),
|
||||
imageutils.GetE2EImage(imageutils.Nonewprivs),
|
||||
imageutils.GetPauseImageName(),
|
||||
framework.GetGPUDevicePluginImage(),
|
||||
"gcr.io/kubernetes-e2e-test-images/node-perf/npb-is-amd64:1.0",
|
||||
"gcr.io/kubernetes-e2e-test-images/node-perf/npb-ep-amd64:1.0",
|
||||
"gcr.io/kubernetes-e2e-test-images/node-perf/tf-wide-deep-amd64:1.0",
|
||||
)
|
||||
|
||||
func init() {
|
||||
@ -125,7 +128,7 @@ func PrePullAllImages() error {
|
||||
return err
|
||||
}
|
||||
images := framework.ImageWhiteList.List()
|
||||
glog.V(4).Infof("Pre-pulling images with %s %+v", puller.Name(), images)
|
||||
klog.V(4).Infof("Pre-pulling images with %s %+v", puller.Name(), images)
|
||||
for _, image := range images {
|
||||
var (
|
||||
err error
|
||||
@ -138,11 +141,11 @@ func PrePullAllImages() error {
|
||||
if output, err = puller.Pull(image); err == nil {
|
||||
break
|
||||
}
|
||||
glog.Warningf("Failed to pull %s as user %q, retrying in %s (%d of %d): %v",
|
||||
klog.Warningf("Failed to pull %s as user %q, retrying in %s (%d of %d): %v",
|
||||
image, usr.Username, imagePullRetryDelay.String(), i+1, maxImagePullRetries, err)
|
||||
}
|
||||
if err != nil {
|
||||
glog.Warningf("Could not pre-pull image %s %v output: %s", image, err, output)
|
||||
klog.Warningf("Could not pre-pull image %s %v output: %s", image, err, output)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
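Most of the remaining hunks in this commit are the mechanical glog-to-klog migration: the import path changes and the logging calls keep the same names. A minimal sketch of the replacement pattern, assuming the k8s.io/klog API of this vendoring era (the klog.InitFlags call is an assumption about that version and can be omitted if unavailable):

package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	// klog keeps glog's surface: V-levels plus Infof/Warningf/Errorf/Fatal.
	klog.InitFlags(nil) // registers -v, -logtostderr, etc. on the default FlagSet
	flag.Parse()

	images := []string{"busybox", "pause"}
	klog.V(4).Infof("Pre-pulling images %+v", images)
	klog.Warningf("Failed to pull %s, retrying", images[0])
}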
200
vendor/k8s.io/kubernetes/test/e2e_node/kubelet_test.go
generated
vendored
@ -1,200 +0,0 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = framework.KubeDescribe("Kubelet", func() {
|
||||
f := framework.NewDefaultFramework("kubelet-test")
|
||||
var podClient *framework.PodClient
|
||||
BeforeEach(func() {
|
||||
podClient = f.PodClient()
|
||||
})
|
||||
Context("when scheduling a busybox command in a pod", func() {
|
||||
podName := "busybox-scheduling-" + string(uuid.NewUUID())
|
||||
framework.ConformanceIt("it should print the output to logs [NodeConformance]", func() {
|
||||
podClient.CreateSync(&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
// Don't restart the Pod since it is expected to exit
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: busyboxImage,
|
||||
Name: podName,
|
||||
Command: []string{"sh", "-c", "echo 'Hello World' ; sleep 240"},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
Eventually(func() string {
|
||||
sinceTime := metav1.NewTime(time.Now().Add(time.Duration(-1 * time.Hour)))
|
||||
rc, err := podClient.GetLogs(podName, &v1.PodLogOptions{SinceTime: &sinceTime}).Stream()
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
defer rc.Close()
|
||||
buf := new(bytes.Buffer)
|
||||
buf.ReadFrom(rc)
|
||||
return buf.String()
|
||||
}, time.Minute, time.Second*4).Should(Equal("Hello World\n"))
|
||||
})
|
||||
})
|
||||
Context("when scheduling a busybox command that always fails in a pod", func() {
|
||||
var podName string
|
||||
|
||||
BeforeEach(func() {
|
||||
podName = "bin-false" + string(uuid.NewUUID())
|
||||
podClient.Create(&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
// Don't restart the Pod since it is expected to exit
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: busyboxImage,
|
||||
Name: podName,
|
||||
Command: []string{"/bin/false"},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
It("should have an error terminated reason [NodeConformance]", func() {
|
||||
Eventually(func() error {
|
||||
podData, err := podClient.Get(podName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(podData.Status.ContainerStatuses) != 1 {
|
||||
return fmt.Errorf("expected only one container in the pod %q", podName)
|
||||
}
|
||||
contTerminatedState := podData.Status.ContainerStatuses[0].State.Terminated
|
||||
if contTerminatedState == nil {
|
||||
return fmt.Errorf("expected state to be terminated. Got pod status: %+v", podData.Status)
|
||||
}
|
||||
if contTerminatedState.Reason != "Error" {
|
||||
return fmt.Errorf("expected terminated state reason to be error. Got %+v", contTerminatedState)
|
||||
}
|
||||
return nil
|
||||
}, time.Minute, time.Second*4).Should(BeNil())
|
||||
})
|
||||
|
||||
It("should be possible to delete [NodeConformance]", func() {
|
||||
err := podClient.Delete(podName, &metav1.DeleteOptions{})
|
||||
Expect(err).To(BeNil(), fmt.Sprintf("Error deleting Pod %v", err))
|
||||
})
|
||||
})
|
||||
Context("when scheduling a busybox Pod with hostAliases", func() {
|
||||
podName := "busybox-host-aliases" + string(uuid.NewUUID())
|
||||
|
||||
It("it should write entries to /etc/hosts [NodeConformance]", func() {
|
||||
podClient.CreateSync(&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
// Don't restart the Pod since it is expected to exit
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: busyboxImage,
|
||||
Name: podName,
|
||||
Command: []string{"/bin/sh", "-c", "cat /etc/hosts; sleep 6000"},
|
||||
},
|
||||
},
|
||||
HostAliases: []v1.HostAlias{
|
||||
{
|
||||
IP: "123.45.67.89",
|
||||
Hostnames: []string{"foo", "bar"},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
Eventually(func() error {
|
||||
rc, err := podClient.GetLogs(podName, &v1.PodLogOptions{}).Stream()
|
||||
defer rc.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
buf := new(bytes.Buffer)
|
||||
buf.ReadFrom(rc)
|
||||
hostsFileContent := buf.String()
|
||||
|
||||
if !strings.Contains(hostsFileContent, "123.45.67.89\tfoo") || !strings.Contains(hostsFileContent, "123.45.67.89\tbar") {
|
||||
return fmt.Errorf("expected hosts file to contain entries from HostAliases. Got:\n%+v", hostsFileContent)
|
||||
}
|
||||
|
||||
return nil
|
||||
}, time.Minute, time.Second*4).Should(BeNil())
|
||||
})
|
||||
})
|
||||
Context("when scheduling a read only busybox container", func() {
|
||||
podName := "busybox-readonly-fs" + string(uuid.NewUUID())
|
||||
framework.ConformanceIt("it should not write to root filesystem [NodeConformance]", func() {
|
||||
isReadOnly := true
|
||||
podClient.CreateSync(&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
// Don't restart the Pod since it is expected to exit
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: busyboxImage,
|
||||
Name: podName,
|
||||
Command: []string{"/bin/sh", "-c", "echo test > /file; sleep 240"},
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
ReadOnlyRootFilesystem: &isReadOnly,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
Eventually(func() string {
|
||||
rc, err := podClient.GetLogs(podName, &v1.PodLogOptions{}).Stream()
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
defer rc.Close()
|
||||
buf := new(bytes.Buffer)
|
||||
buf.ReadFrom(rc)
|
||||
return buf.String()
|
||||
}, time.Minute, time.Second*4).Should(Equal("/bin/sh: can't create /file: Read-only file system\n"))
|
||||
})
|
||||
})
|
||||
})
|
153
vendor/k8s.io/kubernetes/test/e2e_node/lifecycle_hook_test.go
generated
vendored
@ -1,153 +0,0 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
|
||||
f := framework.NewDefaultFramework("container-lifecycle-hook")
|
||||
var podClient *framework.PodClient
|
||||
const (
|
||||
podCheckInterval = 1 * time.Second
|
||||
postStartWaitTimeout = 2 * time.Minute
|
||||
preStopWaitTimeout = 30 * time.Second
|
||||
)
|
||||
Context("when create a pod with lifecycle hook", func() {
|
||||
var targetIP string
|
||||
podHandleHookRequest := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod-handle-http-request",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "pod-handle-http-request",
|
||||
Image: imageutils.GetE2EImage(imageutils.Netexec),
|
||||
Ports: []v1.ContainerPort{
|
||||
{
|
||||
ContainerPort: 8080,
|
||||
Protocol: v1.ProtocolTCP,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
BeforeEach(func() {
|
||||
podClient = f.PodClient()
|
||||
By("create the container to handle the HTTPGet hook request.")
|
||||
newPod := podClient.CreateSync(podHandleHookRequest)
|
||||
targetIP = newPod.Status.PodIP
|
||||
})
|
||||
testPodWithHook := func(podWithHook *v1.Pod) {
|
||||
By("create the pod with lifecycle hook")
|
||||
podClient.CreateSync(podWithHook)
|
||||
if podWithHook.Spec.Containers[0].Lifecycle.PostStart != nil {
|
||||
By("check poststart hook")
|
||||
Eventually(func() error {
|
||||
return podClient.MatchContainerOutput(podHandleHookRequest.Name, podHandleHookRequest.Spec.Containers[0].Name,
|
||||
`GET /echo\?msg=poststart`)
|
||||
}, postStartWaitTimeout, podCheckInterval).Should(BeNil())
|
||||
}
|
||||
By("delete the pod with lifecycle hook")
|
||||
podClient.DeleteSync(podWithHook.Name, metav1.NewDeleteOptions(15), framework.DefaultPodDeletionTimeout)
|
||||
if podWithHook.Spec.Containers[0].Lifecycle.PreStop != nil {
|
||||
By("check prestop hook")
|
||||
Eventually(func() error {
|
||||
return podClient.MatchContainerOutput(podHandleHookRequest.Name, podHandleHookRequest.Spec.Containers[0].Name,
|
||||
`GET /echo\?msg=prestop`)
|
||||
}, preStopWaitTimeout, podCheckInterval).Should(BeNil())
|
||||
}
|
||||
}
|
||||
framework.ConformanceIt("should execute poststart exec hook properly [NodeConformance]", func() {
|
||||
lifecycle := &v1.Lifecycle{
|
||||
PostStart: &v1.Handler{
|
||||
Exec: &v1.ExecAction{
|
||||
Command: []string{"sh", "-c", "curl http://" + targetIP + ":8080/echo?msg=poststart"},
|
||||
},
|
||||
},
|
||||
}
|
||||
podWithHook := getPodWithHook("pod-with-poststart-exec-hook", imageutils.GetE2EImage(imageutils.Hostexec), lifecycle)
|
||||
testPodWithHook(podWithHook)
|
||||
})
|
||||
framework.ConformanceIt("should execute prestop exec hook properly [NodeConformance]", func() {
|
||||
lifecycle := &v1.Lifecycle{
|
||||
PreStop: &v1.Handler{
|
||||
Exec: &v1.ExecAction{
|
||||
Command: []string{"sh", "-c", "curl http://" + targetIP + ":8080/echo?msg=prestop"},
|
||||
},
|
||||
},
|
||||
}
|
||||
podWithHook := getPodWithHook("pod-with-prestop-exec-hook", imageutils.GetE2EImage(imageutils.Hostexec), lifecycle)
|
||||
testPodWithHook(podWithHook)
|
||||
})
|
||||
framework.ConformanceIt("should execute poststart http hook properly [NodeConformance]", func() {
|
||||
lifecycle := &v1.Lifecycle{
|
||||
PostStart: &v1.Handler{
|
||||
HTTPGet: &v1.HTTPGetAction{
|
||||
Path: "/echo?msg=poststart",
|
||||
Host: targetIP,
|
||||
Port: intstr.FromInt(8080),
|
||||
},
|
||||
},
|
||||
}
|
||||
podWithHook := getPodWithHook("pod-with-poststart-http-hook", imageutils.GetPauseImageName(), lifecycle)
|
||||
testPodWithHook(podWithHook)
|
||||
})
|
||||
framework.ConformanceIt("should execute prestop http hook properly [NodeConformance]", func() {
|
||||
lifecycle := &v1.Lifecycle{
|
||||
PreStop: &v1.Handler{
|
||||
HTTPGet: &v1.HTTPGetAction{
|
||||
Path: "/echo?msg=prestop",
|
||||
Host: targetIP,
|
||||
Port: intstr.FromInt(8080),
|
||||
},
|
||||
},
|
||||
}
|
||||
podWithHook := getPodWithHook("pod-with-prestop-http-hook", imageutils.GetPauseImageName(), lifecycle)
|
||||
testPodWithHook(podWithHook)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
func getPodWithHook(name string, image string, lifecycle *v1.Lifecycle) *v1.Pod {
|
||||
return &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: name,
|
||||
Image: image,
|
||||
Lifecycle: lifecycle,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
23
vendor/k8s.io/kubernetes/test/e2e_node/mirror_pod_test.go
generated
vendored
@ -49,7 +49,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
|
||||
|
||||
By("create the static pod")
|
||||
err := createStaticPod(podPath, staticPodName, ns,
|
||||
imageutils.GetE2EImage(imageutils.NginxSlim), v1.RestartPolicyAlways)
|
||||
imageutils.GetE2EImage(imageutils.Nginx), v1.RestartPolicyAlways)
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
|
||||
By("wait for the mirror pod to be running")
|
||||
@ -57,7 +57,12 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
|
||||
return checkMirrorPodRunning(f.ClientSet, mirrorPodName, ns)
|
||||
}, 2*time.Minute, time.Second*4).Should(BeNil())
|
||||
})
|
||||
framework.ConformanceIt("should be updated when static pod updated [NodeConformance]", func() {
|
||||
/*
|
||||
Release : v1.9
|
||||
Testname: Mirror Pod, update
|
||||
Description: Updating a static Pod MUST recreate an updated mirror Pod. Create a static pod, verify that a mirror pod is created. Update the static pod by changing the container image, the mirror pod MUST be re-created and updated with the new image.
|
||||
*/
|
||||
It("should be updated when static pod updated [NodeConformance]", func() {
|
||||
By("get mirror pod uid")
|
||||
pod, err := f.ClientSet.CoreV1().Pods(ns).Get(mirrorPodName, metav1.GetOptions{})
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
@ -79,7 +84,12 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
|
||||
Expect(len(pod.Spec.Containers)).Should(Equal(1))
|
||||
Expect(pod.Spec.Containers[0].Image).Should(Equal(image))
|
||||
})
|
||||
framework.ConformanceIt("should be recreated when mirror pod gracefully deleted [NodeConformance]", func() {
|
||||
/*
|
||||
Release : v1.9
|
||||
Testname: Mirror Pod, delete
|
||||
Description: When a mirror-Pod is deleted then the mirror pod MUST be re-created. Create a static pod, verify that a mirror pod is created. Delete the mirror pod, the mirror pod MUST be re-created and running.
|
||||
*/
|
||||
It("should be recreated when mirror pod gracefully deleted [NodeConformance]", func() {
|
||||
By("get mirror pod uid")
|
||||
pod, err := f.ClientSet.CoreV1().Pods(ns).Get(mirrorPodName, metav1.GetOptions{})
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
@ -94,7 +104,12 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
|
||||
return checkMirrorPodRecreatedAndRunnig(f.ClientSet, mirrorPodName, ns, uid)
|
||||
}, 2*time.Minute, time.Second*4).Should(BeNil())
|
||||
})
|
||||
framework.ConformanceIt("should be recreated when mirror pod forcibly deleted [NodeConformance]", func() {
|
||||
/*
|
||||
Release : v1.9
|
||||
Testname: Mirror Pod, force delete
|
||||
Description: When a mirror-Pod is deleted, forcibly, then the mirror pod MUST be re-created. Create a static pod, verify that a mirror pod is created. Delete the mirror pod with delete wait time set to zero forcing immediate deletion, the mirror pod MUST be re-created and running.
|
||||
*/
|
||||
It("should be recreated when mirror pod forcibly deleted [NodeConformance]", func() {
|
||||
By("get mirror pod uid")
|
||||
pod, err := f.ClientSet.CoreV1().Pods(ns).Get(mirrorPodName, metav1.GetOptions{})
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
|
2
vendor/k8s.io/kubernetes/test/e2e_node/node_container_manager_test.go
generated
vendored
@ -29,7 +29,7 @@ import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
"k8s.io/kubernetes/pkg/kubelet/cm"
"k8s.io/kubernetes/test/e2e/framework"
108
vendor/k8s.io/kubernetes/test/e2e_node/node_perf_test.go
generated
vendored
Normal file
@ -0,0 +1,108 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e_node
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e_node/perf/workloads"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
// makeNodePerfPod returns a pod with the information provided from the workload.
|
||||
func makeNodePerfPod(w workloads.NodePerfWorkload) *corev1.Pod {
|
||||
return &corev1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("%s-pod", w.Name()),
|
||||
},
|
||||
Spec: w.PodSpec(),
|
||||
}
|
||||
}
|
||||
|
||||
func setKubeletConfig(f *framework.Framework, cfg *kubeletconfig.KubeletConfiguration) {
|
||||
if cfg != nil {
|
||||
framework.ExpectNoError(setKubeletConfiguration(f, cfg))
|
||||
}
|
||||
|
||||
// Wait for the Kubelet to be ready.
|
||||
Eventually(func() bool {
|
||||
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
|
||||
return len(nodeList.Items) == 1
|
||||
}, time.Minute, time.Second).Should(BeTrue())
|
||||
}
|
||||
|
||||
// Serial because the test updates kubelet configuration.
|
||||
// Slow by design.
|
||||
var _ = SIGDescribe("Node Performance Testing [Serial] [Slow]", func() {
|
||||
f := framework.NewDefaultFramework("node-performance-testing")
|
||||
|
||||
Context("Run node performance testing with pre-defined workloads", func() {
|
||||
It("run each pre-defined workload", func() {
|
||||
By("running the workloads")
|
||||
for _, workload := range workloads.NodePerfWorkloads {
|
||||
By("running the pre test exec from the workload")
|
||||
err := workload.PreTestExec()
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("restarting kubelet with required configuration")
|
||||
// Get the Kubelet config required for this workload.
|
||||
oldCfg, err := getCurrentKubeletConfig()
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
newCfg, err := workload.KubeletConfig(oldCfg)
|
||||
framework.ExpectNoError(err)
|
||||
// Set the Kubelet config required for this workload.
|
||||
setKubeletConfig(f, newCfg)
|
||||
|
||||
By("running the workload and waiting for success")
|
||||
// Make the pod for the workload.
|
||||
pod := makeNodePerfPod(workload)
|
||||
|
||||
// Create the pod.
|
||||
pod = f.PodClient().CreateSync(pod)
|
||||
// Wait for pod success.
|
||||
f.PodClient().WaitForSuccess(pod.Name, workload.Timeout())
|
||||
podLogs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
|
||||
framework.ExpectNoError(err)
|
||||
perf, err := workload.ExtractPerformanceFromLogs(podLogs)
|
||||
framework.ExpectNoError(err)
|
||||
framework.Logf("Time to complete workload %s: %v", workload.Name(), perf)
|
||||
|
||||
// Delete the pod.
|
||||
gp := int64(0)
|
||||
delOpts := metav1.DeleteOptions{
|
||||
GracePeriodSeconds: &gp,
|
||||
}
|
||||
f.PodClient().DeleteSync(pod.Name, &delOpts, framework.DefaultPodDeletionTimeout)
|
||||
|
||||
By("running the post test exec from the workload")
|
||||
err = workload.PostTestExec()
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Set the Kubelet config back to the old one.
|
||||
setKubeletConfig(f, oldCfg)
|
||||
}
|
||||
})
|
||||
})
|
||||
})
|
35
vendor/k8s.io/kubernetes/test/e2e_node/perf/workloads/BUILD
generated
vendored
Normal file
@ -0,0 +1,35 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"npb_ep.go",
|
||||
"npb_is.go",
|
||||
"tf_wide_deep.go",
|
||||
"utils.go",
|
||||
"workloads.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/test/e2e_node/perf/workloads",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//pkg/kubelet/apis/config:go_default_library",
|
||||
"//pkg/kubelet/cm/cpumanager:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
7
vendor/k8s.io/kubernetes/test/e2e_node/perf/workloads/OWNERS
generated
vendored
Normal file
@ -0,0 +1,7 @@
approvers:
- vishh
- derekwaynecarr
- balajismaniam
- ConnorDoyle
reviewers:
- sig-node-reviewers
120
vendor/k8s.io/kubernetes/test/e2e_node/perf/workloads/npb_ep.go
generated
vendored
Normal file
@ -0,0 +1,120 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package workloads
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
|
||||
)
|
||||
|
||||
// npbEPWorkload defines a workload to run the Embarrassingly Parallel (EP) workload
|
||||
// from NAS parallel benchmark (NPB) suite.
|
||||
type npbEPWorkload struct{}
|
||||
|
||||
// Ensure npbEPWorkload implements NodePerfWorkload interface.
|
||||
var _ NodePerfWorkload = &npbEPWorkload{}
|
||||
|
||||
func (w npbEPWorkload) Name() string {
|
||||
return "npb-ep"
|
||||
}
|
||||
|
||||
func (w npbEPWorkload) PodSpec() corev1.PodSpec {
|
||||
var containers []corev1.Container
|
||||
ctn := corev1.Container{
|
||||
Name: fmt.Sprintf("%s-ctn", w.Name()),
|
||||
Image: "gcr.io/kubernetes-e2e-test-images/node-perf/npb-ep-amd64:1.0",
|
||||
Resources: corev1.ResourceRequirements{
|
||||
Requests: corev1.ResourceList{
|
||||
corev1.ResourceName(corev1.ResourceCPU): resource.MustParse("15000m"),
|
||||
corev1.ResourceName(corev1.ResourceMemory): resource.MustParse("48Gi"),
|
||||
},
|
||||
Limits: corev1.ResourceList{
|
||||
corev1.ResourceName(corev1.ResourceCPU): resource.MustParse("15000m"),
|
||||
corev1.ResourceName(corev1.ResourceMemory): resource.MustParse("48Gi"),
|
||||
},
|
||||
},
|
||||
Command: []string{"/bin/sh"},
|
||||
Args: []string{"-c", "/ep.D.x"},
|
||||
}
|
||||
containers = append(containers, ctn)
|
||||
|
||||
return corev1.PodSpec{
|
||||
RestartPolicy: corev1.RestartPolicyNever,
|
||||
Containers: containers,
|
||||
}
|
||||
}
|
||||
|
||||
func (w npbEPWorkload) Timeout() time.Duration {
|
||||
return 10 * time.Minute
|
||||
}
|
||||
|
||||
func (w npbEPWorkload) KubeletConfig(oldCfg *kubeletconfig.KubeletConfiguration) (newCfg *kubeletconfig.KubeletConfiguration, err error) {
|
||||
// Enable CPU Manager in Kubelet with static policy.
|
||||
newCfg = oldCfg.DeepCopy()
|
||||
// Set the CPU Manager policy to static.
|
||||
newCfg.CPUManagerPolicy = string(cpumanager.PolicyStatic)
|
||||
// Set the CPU Manager reconcile period to 10 seconds.
|
||||
newCfg.CPUManagerReconcilePeriod = metav1.Duration{Duration: 10 * time.Second}
|
||||
|
||||
// The Kubelet panics if either kube-reserved or system-reserved is not set
|
||||
// when static CPU Manager is enabled. Set cpu in kube-reserved > 0 so that
|
||||
// kubelet doesn't panic.
|
||||
if newCfg.KubeReserved == nil {
|
||||
newCfg.KubeReserved = map[string]string{}
|
||||
}
|
||||
|
||||
if _, ok := newCfg.KubeReserved["cpu"]; !ok {
|
||||
newCfg.KubeReserved["cpu"] = "200m"
|
||||
}
|
||||
|
||||
return newCfg, nil
|
||||
}
|
||||
|
||||
func (w npbEPWorkload) PreTestExec() error {
|
||||
cmd := "/bin/sh"
|
||||
args := []string{"-c", "rm -f /var/lib/kubelet/cpu_manager_state"}
|
||||
err := runCmd(cmd, args)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (w npbEPWorkload) PostTestExec() error {
|
||||
cmd := "/bin/sh"
|
||||
args := []string{"-c", "rm -f /var/lib/kubelet/cpu_manager_state"}
|
||||
err := runCmd(cmd, args)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (w npbEPWorkload) ExtractPerformanceFromLogs(logs string) (perf time.Duration, err error) {
|
||||
perfLine, err := getMatchingLineFromLog(logs, "Time in seconds =")
|
||||
if err != nil {
|
||||
return perf, err
|
||||
}
|
||||
perfStrings := strings.Split(perfLine, "=")
|
||||
perfString := fmt.Sprintf("%ss", strings.TrimSpace(perfStrings[1]))
|
||||
perf, err = time.ParseDuration(perfString)
|
||||
|
||||
return perf, err
|
||||
}
|
92
vendor/k8s.io/kubernetes/test/e2e_node/perf/workloads/npb_is.go
generated
vendored
Normal file
@ -0,0 +1,92 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package workloads
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
|
||||
)
|
||||
|
||||
// npbISWorkload defines a workload to run the integer sort (IS) workload
|
||||
// from NAS parallel benchmark (NPB) suite.
|
||||
type npbISWorkload struct{}
|
||||
|
||||
// Ensure npbISWorkload implements NodePerfWorkload interface.
|
||||
var _ NodePerfWorkload = &npbISWorkload{}
|
||||
|
||||
func (w npbISWorkload) Name() string {
|
||||
return "npb-is"
|
||||
}
|
||||
|
||||
func (w npbISWorkload) PodSpec() corev1.PodSpec {
|
||||
var containers []corev1.Container
|
||||
ctn := corev1.Container{
|
||||
Name: fmt.Sprintf("%s-ctn", w.Name()),
|
||||
Image: "gcr.io/kubernetes-e2e-test-images/node-perf/npb-is-amd64:1.0",
|
||||
Resources: corev1.ResourceRequirements{
|
||||
Requests: corev1.ResourceList{
|
||||
corev1.ResourceName(corev1.ResourceCPU): resource.MustParse("16000m"),
|
||||
corev1.ResourceName(corev1.ResourceMemory): resource.MustParse("48Gi"),
|
||||
},
|
||||
Limits: corev1.ResourceList{
|
||||
corev1.ResourceName(corev1.ResourceCPU): resource.MustParse("16000m"),
|
||||
corev1.ResourceName(corev1.ResourceMemory): resource.MustParse("48Gi"),
|
||||
},
|
||||
},
|
||||
Command: []string{"/bin/sh"},
|
||||
Args: []string{"-c", "/is.D.x"},
|
||||
}
|
||||
containers = append(containers, ctn)
|
||||
|
||||
return corev1.PodSpec{
|
||||
RestartPolicy: corev1.RestartPolicyNever,
|
||||
Containers: containers,
|
||||
}
|
||||
}
|
||||
|
||||
func (w npbISWorkload) Timeout() time.Duration {
|
||||
return 4 * time.Minute
|
||||
}
|
||||
|
||||
func (w npbISWorkload) KubeletConfig(oldCfg *kubeletconfig.KubeletConfiguration) (newCfg *kubeletconfig.KubeletConfiguration, err error) {
|
||||
return oldCfg, nil
|
||||
}
|
||||
|
||||
func (w npbISWorkload) PreTestExec() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w npbISWorkload) PostTestExec() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w npbISWorkload) ExtractPerformanceFromLogs(logs string) (perf time.Duration, err error) {
|
||||
perfLine, err := getMatchingLineFromLog(logs, "Time in seconds =")
|
||||
if err != nil {
|
||||
return perf, err
|
||||
}
|
||||
perfStrings := strings.Split(perfLine, "=")
|
||||
perfString := fmt.Sprintf("%ss", strings.TrimSpace(perfStrings[1]))
|
||||
perf, err = time.ParseDuration(perfString)
|
||||
|
||||
return perf, err
|
||||
}
|
119
vendor/k8s.io/kubernetes/test/e2e_node/perf/workloads/tf_wide_deep.go
generated
vendored
Normal file
@ -0,0 +1,119 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package workloads
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
|
||||
)
|
||||
|
||||
// tfWideDeepWorkload defines a workload to run
|
||||
// https://github.com/tensorflow/models/tree/master/official/wide_deep.
|
||||
type tfWideDeepWorkload struct{}
|
||||
|
||||
// Ensure tfWideDeepWorkload implements NodePerfWorkload interface.
|
||||
var _ NodePerfWorkload = &tfWideDeepWorkload{}
|
||||
|
||||
func (w tfWideDeepWorkload) Name() string {
|
||||
return "tensorflow-wide-deep"
|
||||
}
|
||||
|
||||
func (w tfWideDeepWorkload) PodSpec() corev1.PodSpec {
|
||||
var containers []corev1.Container
|
||||
ctn := corev1.Container{
|
||||
Name: fmt.Sprintf("%s-ctn", w.Name()),
|
||||
Image: "gcr.io/kubernetes-e2e-test-images/node-perf/tf-wide-deep-amd64:1.0",
|
||||
Resources: corev1.ResourceRequirements{
|
||||
Requests: corev1.ResourceList{
|
||||
corev1.ResourceName(corev1.ResourceCPU): resource.MustParse("15000m"),
|
||||
corev1.ResourceName(corev1.ResourceMemory): resource.MustParse("16Gi"),
|
||||
},
|
||||
Limits: corev1.ResourceList{
|
||||
corev1.ResourceName(corev1.ResourceCPU): resource.MustParse("15000m"),
|
||||
corev1.ResourceName(corev1.ResourceMemory): resource.MustParse("16Gi"),
|
||||
},
|
||||
},
|
||||
Command: []string{"/bin/sh"},
|
||||
Args: []string{"-c", "python ./data_download.py && time -p python ./wide_deep.py --model_type=wide_deep --train_epochs=300 --epochs_between_evals=300 --batch_size=32561"},
|
||||
}
|
||||
containers = append(containers, ctn)
|
||||
|
||||
return corev1.PodSpec{
|
||||
RestartPolicy: corev1.RestartPolicyNever,
|
||||
Containers: containers,
|
||||
}
|
||||
}
|
||||
|
||||
func (w tfWideDeepWorkload) Timeout() time.Duration {
|
||||
return 15 * time.Minute
|
||||
}
|
||||
|
||||
func (w tfWideDeepWorkload) KubeletConfig(oldCfg *kubeletconfig.KubeletConfiguration) (newCfg *kubeletconfig.KubeletConfiguration, err error) {
|
||||
// Enable CPU Manager in Kubelet with static policy.
|
||||
newCfg = oldCfg.DeepCopy()
|
||||
// Set the CPU Manager policy to static.
|
||||
newCfg.CPUManagerPolicy = string(cpumanager.PolicyStatic)
|
||||
// Set the CPU Manager reconcile period to 10 seconds.
|
||||
newCfg.CPUManagerReconcilePeriod = metav1.Duration{Duration: 10 * time.Second}
|
||||
|
||||
// The Kubelet panics if either kube-reserved or system-reserved is not set
|
||||
// when static CPU Manager is enabled. Set cpu in kube-reserved > 0 so that
|
||||
// kubelet doesn't panic.
|
||||
if newCfg.KubeReserved == nil {
|
||||
newCfg.KubeReserved = map[string]string{}
|
||||
}
|
||||
|
||||
if _, ok := newCfg.KubeReserved["cpu"]; !ok {
|
||||
newCfg.KubeReserved["cpu"] = "200m"
|
||||
}
|
||||
|
||||
return newCfg, nil
|
||||
}
|
||||
|
||||
func (w tfWideDeepWorkload) PreTestExec() error {
|
||||
cmd := "/bin/sh"
|
||||
args := []string{"-c", "rm -f /var/lib/kubelet/cpu_manager_state"}
|
||||
err := runCmd(cmd, args)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (w tfWideDeepWorkload) PostTestExec() error {
|
||||
cmd := "/bin/sh"
|
||||
args := []string{"-c", "rm -f /var/lib/kubelet/cpu_manager_state"}
|
||||
err := runCmd(cmd, args)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (w tfWideDeepWorkload) ExtractPerformanceFromLogs(logs string) (perf time.Duration, err error) {
|
||||
perfLine, err := getMatchingLineFromLog(logs, "real")
|
||||
if err != nil {
|
||||
return perf, err
|
||||
}
|
||||
perfString := fmt.Sprintf("%ss", strings.TrimSpace(strings.TrimPrefix(perfLine, "real")))
|
||||
perf, err = time.ParseDuration(perfString)
|
||||
|
||||
return perf, err
|
||||
}
|
45
vendor/k8s.io/kubernetes/test/e2e_node/perf/workloads/utils.go
generated
vendored
Normal file
@ -0,0 +1,45 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package workloads

import (
"fmt"
"os/exec"
"regexp"
"strings"
)

func runCmd(cmd string, args []string) error {
err := exec.Command(cmd, args...).Run()
return err
}

func getMatchingLineFromLog(log string, pattern string) (line string, err error) {
regex, err := regexp.Compile(pattern)
if err != nil {
return line, fmt.Errorf("failed to compile regexp %v: %v", pattern, err)
}

logLines := strings.Split(log, "\n")
for _, line := range logLines {
if regex.MatchString(line) {
return line, nil
}
}

return line, fmt.Errorf("line with pattern %v not found in log", pattern)
}
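The two helpers above are what the NPB workloads in this package use to turn benchmark output into a time.Duration. A short, illustrative usage sketch follows; the function name and the log format it parses ("Time in seconds = N") are assumptions drawn from the workload files, not part of the vendored utils.go.

package workloads

import (
	"fmt"
	"strings"
	"time"
)

// parseNPBSeconds shows how getMatchingLineFromLog output is typically
// converted into a duration; it is an illustrative helper only.
func parseNPBSeconds(logs string) (time.Duration, error) {
	line, err := getMatchingLineFromLog(logs, "Time in seconds =")
	if err != nil {
		return 0, err
	}
	parts := strings.Split(line, "=")
	if len(parts) != 2 {
		return 0, fmt.Errorf("unexpected timing line %q", line)
	}
	// "12.34" becomes "12.34s", which time.ParseDuration understands.
	return time.ParseDuration(strings.TrimSpace(parts[1]) + "s")
}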
53
vendor/k8s.io/kubernetes/test/e2e_node/perf/workloads/workloads.go
generated
vendored
Normal file
@ -0,0 +1,53 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package workloads

import (
"time"

corev1 "k8s.io/api/core/v1"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
)

// NodePerfWorkload provides the necessary information to run a workload for
// node performance testing.
type NodePerfWorkload interface {
// Name of the workload.
Name() string
// PodSpec used to run this workload.
PodSpec() corev1.PodSpec
// Timeout provides the expected time to completion
// for this workload.
Timeout() time.Duration
// KubeletConfig specifies the Kubelet configuration
// required for this workload.
KubeletConfig(old *kubeletconfig.KubeletConfiguration) (new *kubeletconfig.KubeletConfiguration, err error)
// PreTestExec is used for defining logic that needs
// to be run before restarting the Kubelet with the new Kubelet
// configuration required for the workload.
PreTestExec() error
// PostTestExec is used for defining logic that needs
// to be run after the workload has completed.
PostTestExec() error
// ExtractPerformanceFromLogs is used to get the performance of the workload
// from pod logs. Currently, we support only performance reported in
// time.Duration format.
ExtractPerformanceFromLogs(logs string) (perf time.Duration, err error)
}

// NodePerfWorkloads is the collection of all node performance testing workloads.
var NodePerfWorkloads = []NodePerfWorkload{npbISWorkload{}, npbEPWorkload{}, tfWideDeepWorkload{}}
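To make the interface above concrete, here is a hedged sketch of a trivial additional workload. The type, image, and timing values are invented for illustration, and the workload is deliberately not added to the vendored NodePerfWorkloads slice.

package workloads

import (
	"strings"
	"time"

	corev1 "k8s.io/api/core/v1"
	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
)

// sleepWorkload is a hypothetical workload that just times a short sleep.
type sleepWorkload struct{}

// Ensure sleepWorkload implements the NodePerfWorkload interface.
var _ NodePerfWorkload = sleepWorkload{}

func (w sleepWorkload) Name() string { return "sleep" }

func (w sleepWorkload) PodSpec() corev1.PodSpec {
	return corev1.PodSpec{
		RestartPolicy: corev1.RestartPolicyNever,
		Containers: []corev1.Container{{
			Name:    "sleep-ctn",
			Image:   "busybox",
			Command: []string{"/bin/sh", "-c", "time -p sleep 5"},
		}},
	}
}

func (w sleepWorkload) Timeout() time.Duration { return 2 * time.Minute }

func (w sleepWorkload) KubeletConfig(old *kubeletconfig.KubeletConfiguration) (*kubeletconfig.KubeletConfiguration, error) {
	return old, nil // no special Kubelet configuration needed
}

func (w sleepWorkload) PreTestExec() error  { return nil }
func (w sleepWorkload) PostTestExec() error { return nil }

func (w sleepWorkload) ExtractPerformanceFromLogs(logs string) (time.Duration, error) {
	// `time -p` prints "real <seconds>"; reuse the suite's log helper.
	line, err := getMatchingLineFromLog(logs, "real")
	if err != nil {
		return 0, err
	}
	return time.ParseDuration(strings.TrimSpace(strings.TrimPrefix(line, "real")) + "s")
}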
4
vendor/k8s.io/kubernetes/test/e2e_node/pods_container_manager_test.go
generated
vendored
@ -27,9 +27,9 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"

"github.com/golang/glog"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/klog"
)

// getResourceList returns a ResourceList with the
@ -71,7 +71,7 @@ func makePodToVerifyCgroups(cgroupNames []string) *v1.Pod {
cgroupName := cm.NewCgroupName(rootCgroupName, cgroupComponents...)
cgroupFsNames = append(cgroupFsNames, toCgroupFsName(cgroupName))
}
glog.Infof("expecting %v cgroups to be found", cgroupFsNames)
klog.Infof("expecting %v cgroups to be found", cgroupFsNames)
// build the pod command to either verify cgroups exist
command := ""
for _, cgroupFsName := range cgroupFsNames {
4
vendor/k8s.io/kubernetes/test/e2e_node/remote/BUILD
generated
vendored
@ -18,10 +18,10 @@ go_library(
],
importpath = "k8s.io/kubernetes/test/e2e_node/remote",
deps = [
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//test/e2e_node/builder:go_default_library",
"//test/utils:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
4
vendor/k8s.io/kubernetes/test/e2e_node/remote/cadvisor_e2e.go
generated
vendored
@ -22,7 +22,7 @@ import (
"os/exec"
"time"

"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/kubernetes/test/utils"
)

@ -67,7 +67,7 @@ func (n *CAdvisorE2ERemote) RunTest(host, workspace, results, imageDesc, junitFi
// Kill any running node processes
cleanupNodeProcesses(host)

glog.V(2).Infof("Starting tests on %q", host)
klog.V(2).Infof("Starting tests on %q", host)
return SSH(host, "sh", "-c", getSSHCommand(" && ",
fmt.Sprintf("cd %s/cadvisor", workspace),
fmt.Sprintf("timeout -k 30s %fs ./build/integration.sh ../results/cadvisor.log",
18
vendor/k8s.io/kubernetes/test/e2e_node/remote/node_conformance.go
generated
vendored
@ -25,7 +25,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/klog"
|
||||
|
||||
"k8s.io/kubernetes/test/e2e_node/builder"
|
||||
"k8s.io/kubernetes/test/utils"
|
||||
@ -206,13 +206,13 @@ func launchKubelet(host, workspace, results, testArgs string) error {
|
||||
),
|
||||
}
|
||||
}
|
||||
glog.V(2).Infof("Launch kubelet with command: %v", cmd)
|
||||
klog.V(2).Infof("Launch kubelet with command: %v", cmd)
|
||||
output, err := SSH(host, cmd...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to launch kubelet with command %v: error - %v output - %q",
|
||||
cmd, err, output)
|
||||
}
|
||||
glog.Info("Successfully launch kubelet")
|
||||
klog.Info("Successfully launch kubelet")
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -221,12 +221,12 @@ const kubeletStopGracePeriod = 10 * time.Second
|
||||
|
||||
// stopKubelet stops kubelet launcher and kubelet gracefully.
|
||||
func stopKubelet(host, workspace string) error {
|
||||
glog.Info("Gracefully stop kubelet launcher")
|
||||
klog.Info("Gracefully stop kubelet launcher")
|
||||
if output, err := SSH(host, "pkill", conformanceTestBinary); err != nil {
|
||||
return fmt.Errorf("failed to gracefully stop kubelet launcher: error - %v output - %q",
|
||||
err, output)
|
||||
}
|
||||
glog.Info("Wait for kubelet launcher to stop")
|
||||
klog.Info("Wait for kubelet launcher to stop")
|
||||
stopped := false
|
||||
for start := time.Now(); time.Since(start) < kubeletStopGracePeriod; time.Sleep(time.Second) {
|
||||
// Check whether the process is still running.
|
||||
@ -242,13 +242,13 @@ func stopKubelet(host, workspace string) error {
|
||||
}
|
||||
}
|
||||
if !stopped {
|
||||
glog.Info("Forcibly stop kubelet")
|
||||
klog.Info("Forcibly stop kubelet")
|
||||
if output, err := SSH(host, "pkill", "-SIGKILL", conformanceTestBinary); err != nil {
|
||||
return fmt.Errorf("failed to forcibly stop kubelet: error - %v output - %q",
|
||||
err, output)
|
||||
}
|
||||
}
|
||||
glog.Info("Successfully stop kubelet")
|
||||
klog.Info("Successfully stop kubelet")
|
||||
// Clean up the pod manifest path
|
||||
podManifestPath := getPodPath(workspace)
|
||||
if output, err := SSH(host, "rm", "-f", filepath.Join(workspace, podManifestPath)); err != nil {
|
||||
@ -286,12 +286,12 @@ func (c *ConformanceRemote) RunTest(host, workspace, results, imageDesc, junitFi
|
||||
defer func() {
|
||||
if err := stopKubelet(host, workspace); err != nil {
|
||||
// Only log an error if failed to stop kubelet because it is not critical.
|
||||
glog.Errorf("failed to stop kubelet: %v", err)
|
||||
klog.Errorf("failed to stop kubelet: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Run the tests
|
||||
glog.V(2).Infof("Starting tests on %q", host)
|
||||
klog.V(2).Infof("Starting tests on %q", host)
|
||||
podManifestPath := getPodPath(workspace)
|
||||
cmd := fmt.Sprintf("'timeout -k 30s %fs docker run --rm --privileged=true --net=host -v /:/rootfs -v %s:%s -v %s:/var/result -e TEST_ARGS=--report-prefix=%s %s'",
|
||||
timeout.Seconds(), podManifestPath, podManifestPath, results, junitFilePrefix, getConformanceTestImageName(systemSpecName))
|
||||
|
8
vendor/k8s.io/kubernetes/test/e2e_node/remote/node_e2e.go
generated
vendored
@ -24,14 +24,14 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/klog"
|
||||
|
||||
"k8s.io/kubernetes/test/e2e_node/builder"
|
||||
"k8s.io/kubernetes/test/utils"
|
||||
)
|
||||
|
||||
const (
|
||||
systemSpecPath = "test/e2e_node/system/specs"
|
||||
systemSpecPath = "k8s.io/kubernetes/cmd/kubeadm/app/util/system/specs"
|
||||
)
|
||||
|
||||
// NodeE2ERemote contains the specific functions in the node e2e test suite.
|
||||
@ -108,7 +108,7 @@ func tarAddFile(tar, source, dest string) error {
|
||||
// prependCOSMounterFlag prepends the flag for setting the GCI mounter path to
|
||||
// args and returns the result.
|
||||
func prependCOSMounterFlag(args, host, workspace string) (string, error) {
|
||||
glog.V(2).Infof("GCI/COS node and GCI/COS mounter both detected, modifying --experimental-mounter-path accordingly")
|
||||
klog.V(2).Infof("GCI/COS node and GCI/COS mounter both detected, modifying --experimental-mounter-path accordingly")
|
||||
mounterPath := filepath.Join(workspace, "mounter")
|
||||
args = fmt.Sprintf("--kubelet-flags=--experimental-mounter-path=%s ", mounterPath) + args
|
||||
return args, nil
|
||||
@ -164,7 +164,7 @@ func (n *NodeE2ERemote) RunTest(host, workspace, results, imageDesc, junitFilePr
|
||||
}
|
||||
|
||||
// Run the tests
|
||||
glog.V(2).Infof("Starting tests on %q", host)
|
||||
klog.V(2).Infof("Starting tests on %q", host)
|
||||
cmd := getSSHCommand(" && ",
|
||||
fmt.Sprintf("cd %s", workspace),
|
||||
fmt.Sprintf("timeout -k 30s %fs ./ginkgo %s ./e2e_node.test -- --system-spec-name=%s --system-spec-file=%s --logtostderr --v 4 --node-name=%s --report-dir=%s --report-prefix=%s --image-description=\"%s\" %s",
|
||||
|
22
vendor/k8s.io/kubernetes/test/e2e_node/remote/remote.go
generated
vendored
@ -27,8 +27,8 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/klog"
|
||||
)
|
||||
|
||||
var testTimeoutSeconds = flag.Duration("test-timeout", 45*time.Minute, "How long (in golang duration format) to wait for ginkgo tests to complete.")
|
||||
@ -37,7 +37,7 @@ var resultsDir = flag.String("results-dir", "/tmp/", "Directory to scp test resu
|
||||
const archiveName = "e2e_node_test.tar.gz"
|
||||
|
||||
func CreateTestArchive(suite TestSuite, systemSpecName string) (string, error) {
|
||||
glog.V(2).Infof("Building archive...")
|
||||
klog.V(2).Infof("Building archive...")
|
||||
tardir, err := ioutil.TempDir("", "node-e2e-archive")
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to create temporary directory %v.", err)
|
||||
@ -67,7 +67,7 @@ func CreateTestArchive(suite TestSuite, systemSpecName string) (string, error) {
|
||||
// TODO(random-liu): junitFilePrefix is not prefix actually, the file name is junit-junitFilePrefix.xml. Change the variable name.
|
||||
func RunRemote(suite TestSuite, archive string, host string, cleanup bool, imageDesc, junitFilePrefix string, testArgs string, ginkgoArgs string, systemSpecName string) (string, bool, error) {
|
||||
// Create the temp staging directory
|
||||
glog.V(2).Infof("Staging test binaries on %q", host)
|
||||
klog.V(2).Infof("Staging test binaries on %q", host)
|
||||
workspace := newWorkspaceDir()
|
||||
// Do not sudo here, so that we can use scp to copy test archive to the directory.
|
||||
if output, err := SSHNoSudo(host, "mkdir", workspace); err != nil {
|
||||
@ -78,7 +78,7 @@ func RunRemote(suite TestSuite, archive string, host string, cleanup bool, image
|
||||
defer func() {
|
||||
output, err := SSH(host, "rm", "-rf", workspace)
|
||||
if err != nil {
|
||||
glog.Errorf("failed to cleanup workspace %q on host %q: %v. Output:\n%s", workspace, host, err, output)
|
||||
klog.Errorf("failed to cleanup workspace %q on host %q: %v. Output:\n%s", workspace, host, err, output)
|
||||
}
|
||||
}()
|
||||
}
|
||||
@ -94,7 +94,7 @@ func RunRemote(suite TestSuite, archive string, host string, cleanup bool, image
|
||||
fmt.Sprintf("cd %s", workspace),
|
||||
fmt.Sprintf("tar -xzvf ./%s", archiveName),
|
||||
)
|
||||
glog.V(2).Infof("Extracting tar on %q", host)
|
||||
klog.V(2).Infof("Extracting tar on %q", host)
|
||||
// Do not use sudo here, because `sudo tar -x` will recover the file ownership inside the tar ball, but
|
||||
// we want the extracted files to be owned by the current user.
|
||||
if output, err := SSHNoSudo(host, "sh", "-c", cmd); err != nil {
|
||||
@ -109,7 +109,7 @@ func RunRemote(suite TestSuite, archive string, host string, cleanup bool, image
|
||||
return "", false, fmt.Errorf("failed to create test result directory %q on host %q: %v output: %q", resultDir, host, err, output)
|
||||
}
|
||||
|
||||
glog.V(2).Infof("Running test on %q", host)
|
||||
klog.V(2).Infof("Running test on %q", host)
|
||||
output, err := suite.RunTest(host, workspace, resultDir, imageDesc, junitFilePrefix, testArgs, ginkgoArgs, systemSpecName, *testTimeoutSeconds)
|
||||
|
||||
aggErrs := []error{}
|
||||
@ -119,7 +119,7 @@ func RunRemote(suite TestSuite, archive string, host string, cleanup bool, image
|
||||
collectSystemLog(host)
|
||||
}
|
||||
|
||||
glog.V(2).Infof("Copying test artifacts from %q", host)
|
||||
klog.V(2).Infof("Copying test artifacts from %q", host)
|
||||
scpErr := getTestArtifacts(host, workspace)
|
||||
if scpErr != nil {
|
||||
aggErrs = append(aggErrs, scpErr)
|
||||
@ -194,17 +194,17 @@ func collectSystemLog(host string) {
|
||||
logPath = fmt.Sprintf("/tmp/%s-%s", getTimestamp(), logName)
|
||||
destPath = fmt.Sprintf("%s/%s-%s", *resultsDir, host, logName)
|
||||
)
|
||||
glog.V(2).Infof("Test failed unexpectedly. Attempting to retrieving system logs (only works for nodes with journald)")
|
||||
klog.V(2).Infof("Test failed unexpectedly. Attempting to retrieving system logs (only works for nodes with journald)")
|
||||
// Try getting the system logs from journald and store it to a file.
|
||||
// Don't reuse the original test directory on the remote host because
|
||||
// it could've been removed if the node was rebooted.
|
||||
if output, err := SSH(host, "sh", "-c", fmt.Sprintf("'journalctl --system --all > %s'", logPath)); err == nil {
|
||||
glog.V(2).Infof("Got the system logs from journald; copying it back...")
|
||||
klog.V(2).Infof("Got the system logs from journald; copying it back...")
|
||||
if output, err := runSSHCommand("scp", fmt.Sprintf("%s:%s", GetHostnameOrIp(host), logPath), destPath); err != nil {
|
||||
glog.V(2).Infof("Failed to copy the log: err: %v, output: %q", err, output)
|
||||
klog.V(2).Infof("Failed to copy the log: err: %v, output: %q", err, output)
|
||||
}
|
||||
} else {
|
||||
glog.V(2).Infof("Failed to run journactl (normal if it doesn't exist on the node): %v, output: %q", err, output)
|
||||
klog.V(2).Infof("Failed to run journactl (normal if it doesn't exist on the node): %v, output: %q", err, output)
|
||||
}
|
||||
}
|
||||
|
||||
|
4
vendor/k8s.io/kubernetes/test/e2e_node/remote/ssh.go
generated
vendored
@ -24,7 +24,7 @@ import (
"strings"
"sync"

"github.com/golang/glog"
"k8s.io/klog"
)

var sshOptions = flag.String("ssh-options", "", "Commandline options passed to ssh.")
@ -38,7 +38,7 @@ var sshDefaultKeyMap map[string]string

func init() {
usr, err := user.Current()
if err != nil {
glog.Fatal(err)
klog.Fatal(err)
}
sshOptionsMap = map[string]string{
"gce": "-o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o CheckHostIP=no -o StrictHostKeyChecking=no -o ServerAliveInterval=30 -o LogLevel=ERROR",
10
vendor/k8s.io/kubernetes/test/e2e_node/remote/utils.go
generated
vendored
@ -21,7 +21,7 @@ import (
"path/filepath"
"strings"

"github.com/golang/glog"
"k8s.io/klog"
)

// utils.go contains functions used across test suites.
@ -52,7 +52,7 @@ const cniConfig = `{
// Install the cni plugin and add basic bridge configuration to the
// configuration directory.
func setupCNI(host, workspace string) error {
glog.V(2).Infof("Install CNI on %q", host)
klog.V(2).Infof("Install CNI on %q", host)
cniPath := filepath.Join(workspace, cniDirectory)
cmd := getSSHCommand(" ; ",
fmt.Sprintf("mkdir -p %s", cniPath),
@ -65,7 +65,7 @@ func setupCNI(host, workspace string) error {
// The added CNI network config is not needed for kubenet. It is only
// used when testing the CNI network plugin, but is added in both cases
// for consistency and simplicity.
glog.V(2).Infof("Adding CNI configuration on %q", host)
klog.V(2).Infof("Adding CNI configuration on %q", host)
cniConfigPath := filepath.Join(workspace, cniConfDirectory)
cmd = getSSHCommand(" ; ",
fmt.Sprintf("mkdir -p %s", cniConfigPath),
@ -79,7 +79,7 @@ func setupCNI(host, workspace string) error {

// configureFirewall configures iptable firewall rules.
func configureFirewall(host string) error {
glog.V(2).Infof("Configure iptables firewall rules on %q", host)
klog.V(2).Infof("Configure iptables firewall rules on %q", host)
// TODO: consider calling bootstrap script to configure host based on OS
output, err := SSH(host, "iptables", "-L", "INPUT")
if err != nil {
@ -114,7 +114,7 @@ func configureFirewall(host string) error {

// cleanupNodeProcesses kills all running node processes may conflict with the test.
func cleanupNodeProcesses(host string) {
glog.V(2).Infof("Killing any existing node processes on %q", host)
klog.V(2).Infof("Killing any existing node processes on %q", host)
cmd := getSSHCommand(" ; ",
"pkill kubelet",
"pkill kube-apiserver",
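setupCNI, configureFirewall and cleanupNodeProcesses above all build one remote invocation out of several shell commands joined with " ; " (the suite's getSSHCommand helper) and run it through sh -c on the node. A small sketch of that joining pattern, assuming a plain ssh invocation in place of the suite's SSH helper and placeholder host/paths:

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// joinCommands plays the role of getSSHCommand: join the steps with a
// separator so they run as a single remote shell invocation (" ; " keeps
// going after failures, " && " would stop at the first error).
func joinCommands(sep string, cmds ...string) string {
	return strings.Join(cmds, sep)
}

func main() {
	workspace := "/tmp/node-e2e"
	cmd := joinCommands(" ; ",
		fmt.Sprintf("mkdir -p %s/cni/bin", workspace),
		fmt.Sprintf("mkdir -p %s/cni/net.d", workspace),
		"pkill kubelet || true",
	)
	// Equivalent of SSH(host, "sh", "-c", cmd) in the hunks above.
	out, err := exec.Command("ssh", "node-1", "sh", "-c", cmd).CombinedOutput()
	fmt.Printf("output: %s, err: %v\n", out, err)
}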
9
vendor/k8s.io/kubernetes/test/e2e_node/resource_collector.go
generated
vendored
@ -86,9 +86,9 @@ func NewResourceCollector(interval time.Duration) *ResourceCollector {
// then repeatedly runs collectStats.
func (r *ResourceCollector) Start() {
// Get the cgroup container names for kubelet and runtime
kubeletContainer, err := getContainerNameForProcess(kubeletProcessName, "")
runtimeContainer, err := getContainerNameForProcess(framework.TestContext.ContainerRuntimeProcessName, framework.TestContext.ContainerRuntimePidFile)
if err == nil {
kubeletContainer, err1 := getContainerNameForProcess(kubeletProcessName, "")
runtimeContainer, err2 := getContainerNameForProcess(framework.TestContext.ContainerRuntimeProcessName, framework.TestContext.ContainerRuntimePidFile)
if err1 == nil && err2 == nil {
systemContainers = map[string]string{
stats.SystemContainerKubelet: kubeletContainer,
stats.SystemContainerRuntime: runtimeContainer,
@ -165,9 +165,6 @@ func (r *ResourceCollector) collectStats(oldStatsMap map[string]*cadvisorapiv2.C
newStats := cStats.Stats[0]

if oldStats, ok := oldStatsMap[name]; ok && oldStats.Timestamp.Before(newStats.Timestamp) {
if oldStats.Timestamp.Equal(newStats.Timestamp) {
continue
}
r.buffers[name] = append(r.buffers[name], computeContainerResourceUsage(name, oldStats, newStats))
}
oldStatsMap[name] = newStats
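The Start() hunk above fixes a subtle bug: with a single err variable, the second getContainerNameForProcess call overwrote the first call's error, so a kubelet-side lookup failure could go unnoticed. A stripped-down illustration of the before/after shape, with a hypothetical lookup function standing in for the suite's helper:

package main

import (
	"errors"
	"fmt"
)

// lookup stands in for getContainerNameForProcess in the hunk above.
func lookup(name string) (string, error) {
	if name == "kubelet" {
		return "", errors.New("kubelet cgroup not found")
	}
	return "/system.slice/" + name, nil
}

func main() {
	// Buggy shape: the error from the first call is silently replaced.
	kubeletContainer, err := lookup("kubelet")
	runtimeContainer, err := lookup("containerd")
	if err == nil {
		fmt.Println("looks fine, but the kubelet error was lost:", kubeletContainer, runtimeContainer)
	}

	// Fixed shape: keep both errors and require both lookups to succeed.
	kubeletContainer, err1 := lookup("kubelet")
	runtimeContainer, err2 := lookup("containerd")
	if err1 == nil && err2 == nil {
		fmt.Println("both resolved:", kubeletContainer, runtimeContainer)
	} else {
		fmt.Println("lookup failed:", err1, err2)
	}
}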
2
vendor/k8s.io/kubernetes/test/e2e_node/runner/local/BUILD
generated
vendored
@ -18,7 +18,7 @@ go_library(
deps = [
"//test/e2e_node/builder:go_default_library",
"//test/utils:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
19
vendor/k8s.io/kubernetes/test/e2e_node/runner/local/run_local.go
generated
vendored
@ -27,34 +27,35 @@ import (
"k8s.io/kubernetes/test/e2e_node/builder"
"k8s.io/kubernetes/test/utils"

"github.com/golang/glog"
"k8s.io/klog"
)

var buildDependencies = flag.Bool("build-dependencies", true, "If true, build all dependencies.")
var ginkgoFlags = flag.String("ginkgo-flags", "", "Space-separated list of arguments to pass to Ginkgo test runner.")
var testFlags = flag.String("test-flags", "", "Space-separated list of arguments to pass to node e2e test.")
var systemSpecName = flag.String("system-spec-name", "", "The name of the system spec used for validating the image in the node conformance test. The specs are at test/e2e_node/system/specs/. If unspecified, the default built-in spec (system.DefaultSpec) will be used.")
var systemSpecName = flag.String("system-spec-name", "", "The name of the system spec used for validating the image in the node conformance test. The specs are at k8s.io/kubernetes/cmd/kubeadm/app/util/system/specs/. If unspecified, the default built-in spec (system.DefaultSpec) will be used.")

const (
systemSpecPath = "test/e2e_node/system/specs"
systemSpecPath = "k8s.io/kubernetes/cmd/kubeadm/app/util/system/specs"
)

func main() {
klog.InitFlags(nil)
flag.Parse()

// Build dependencies - ginkgo, kubelet and apiserver.
if *buildDependencies {
if err := builder.BuildGo(); err != nil {
glog.Fatalf("Failed to build the dependencies: %v", err)
klog.Fatalf("Failed to build the dependencies: %v", err)
}
}

// Run node e2e test
outputDir, err := utils.GetK8sBuildOutputDir()
if err != nil {
glog.Fatalf("Failed to get build output directory: %v", err)
klog.Fatalf("Failed to get build output directory: %v", err)
}
glog.Infof("Got build output dir: %v", outputDir)
klog.Infof("Got build output dir: %v", outputDir)
ginkgo := filepath.Join(outputDir, "ginkgo")
test := filepath.Join(outputDir, "e2e_node.test")

@ -62,19 +63,19 @@ func main() {
if *systemSpecName != "" {
rootDir, err := utils.GetK8sRootDir()
if err != nil {
glog.Fatalf("Failed to get k8s root directory: %v", err)
klog.Fatalf("Failed to get k8s root directory: %v", err)
}
systemSpecFile := filepath.Join(rootDir, systemSpecPath, *systemSpecName+".yaml")
args = append(args, fmt.Sprintf("--system-spec-name=%s --system-spec-file=%s", *systemSpecName, systemSpecFile))
}
if err := runCommand(ginkgo, args...); err != nil {
glog.Exitf("Test failed: %v", err)
klog.Exitf("Test failed: %v", err)
}
return
}

func runCommand(name string, args ...string) error {
glog.Infof("Running command: %v %v", name, strings.Join(args, " "))
klog.Infof("Running command: %v %v", name, strings.Join(args, " "))
cmd := exec.Command("sudo", "sh", "-c", strings.Join(append([]string{name}, args...), " "))
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
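runCommand above wraps the ginkgo invocation in `sudo sh -c ...` and streams its output to the runner's own stdout/stderr. A minimal sketch of that exec pattern outside the test runner (sudo is dropped and the echo command is only a placeholder so the sketch runs anywhere):

package main

import (
	"fmt"
	"os"
	"os/exec"
	"strings"
)

// runCommand mirrors the helper above: join the binary and its arguments
// into one shell line, run it under `sh -c`, and stream output as it happens.
func runCommand(name string, args ...string) error {
	cmd := exec.Command("sh", "-c", strings.Join(append([]string{name}, args...), " "))
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("command %q failed: %v", name, err)
	}
	return nil
}

func main() {
	// Stand-in for `ginkgo <ginkgo-flags> e2e_node.test -- <test-flags>`.
	if err := runCommand("echo", "would run the node e2e suite here"); err != nil {
		os.Exit(1)
	}
}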
4
vendor/k8s.io/kubernetes/test/e2e_node/runner/remote/BUILD
generated
vendored
@ -17,12 +17,12 @@ go_library(
|
||||
importpath = "k8s.io/kubernetes/test/e2e_node/runner/remote",
|
||||
deps = [
|
||||
"//test/e2e_node/remote:go_default_library",
|
||||
"//vendor/github.com/ghodss/yaml:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/github.com/pborman/uuid:go_default_library",
|
||||
"//vendor/golang.org/x/oauth2:go_default_library",
|
||||
"//vendor/golang.org/x/oauth2/google:go_default_library",
|
||||
"//vendor/google.golang.org/api/compute/v0.beta:go_default_library",
|
||||
"//vendor/k8s.io/klog:go_default_library",
|
||||
"//vendor/sigs.k8s.io/yaml:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
|
49
vendor/k8s.io/kubernetes/test/e2e_node/runner/remote/run_remote.go
generated
vendored
49
vendor/k8s.io/kubernetes/test/e2e_node/runner/remote/run_remote.go
generated
vendored
@ -37,12 +37,12 @@ import (
|
||||
|
||||
"k8s.io/kubernetes/test/e2e_node/remote"
|
||||
|
||||
"github.com/ghodss/yaml"
|
||||
"github.com/golang/glog"
|
||||
"github.com/pborman/uuid"
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/google"
|
||||
compute "google.golang.org/api/compute/v0.beta"
|
||||
"k8s.io/klog"
|
||||
"sigs.k8s.io/yaml"
|
||||
)
|
||||
|
||||
var testArgs = flag.String("test_args", "", "Space-separated list of arguments to pass to Ginkgo test runner.")
|
||||
@ -61,7 +61,7 @@ var buildOnly = flag.Bool("build-only", false, "If true, build e2e_node_test.tar
|
||||
var instanceMetadata = flag.String("instance-metadata", "", "key/value metadata for instances separated by '=' or '<', 'k=v' means the key is 'k' and the value is 'v'; 'k<p' means the key is 'k' and the value is extracted from the local path 'p', e.g. k1=v1,k2<p2")
|
||||
var gubernator = flag.Bool("gubernator", false, "If true, output Gubernator link to view logs")
|
||||
var ginkgoFlags = flag.String("ginkgo-flags", "", "Passed to ginkgo to specify additional flags such as --skip=.")
|
||||
var systemSpecName = flag.String("system-spec-name", "", "The name of the system spec used for validating the image in the node conformance test. The specs are at test/e2e_node/system/specs/. If unspecified, the default built-in spec (system.DefaultSpec) will be used.")
|
||||
var systemSpecName = flag.String("system-spec-name", "", "The name of the system spec used for validating the image in the node conformance test. The specs are at k8s.io/kubernetes/cmd/kubeadm/app/util/system/specs/. If unspecified, the default built-in spec (system.DefaultSpec) will be used.")
|
||||
|
||||
// envs is the type used to collect all node envs. The key is the env name,
|
||||
// and the value is the env value
|
||||
@ -174,6 +174,7 @@ type internalGCEImage struct {
|
||||
}
|
||||
|
||||
func main() {
|
||||
klog.InitFlags(nil)
|
||||
flag.Parse()
|
||||
switch *testSuite {
|
||||
case "conformance":
|
||||
@ -185,10 +186,10 @@ func main() {
|
||||
// Use node e2e suite by default if no subcommand is specified.
|
||||
suite = remote.InitNodeE2ERemote()
|
||||
default:
|
||||
glog.Fatalf("--test-suite must be one of default or conformance")
|
||||
klog.Fatalf("--test-suite must be one of default or conformance")
|
||||
}
|
||||
|
||||
rand.Seed(time.Now().UTC().UnixNano())
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
if *buildOnly {
|
||||
// Build the archive and exit
|
||||
remote.CreateTestArchive(suite, *systemSpecName)
|
||||
@ -196,12 +197,12 @@ func main() {
|
||||
}
|
||||
|
||||
if *hosts == "" && *imageConfigFile == "" && *images == "" {
|
||||
glog.Fatalf("Must specify one of --image-config-file, --hosts, --images.")
|
||||
klog.Fatalf("Must specify one of --image-config-file, --hosts, --images.")
|
||||
}
|
||||
var err error
|
||||
computeService, err = getComputeClient()
|
||||
if err != nil {
|
||||
glog.Fatalf("Unable to create gcloud compute service using defaults. Make sure you are authenticated. %v", err)
|
||||
klog.Fatalf("Unable to create gcloud compute service using defaults. Make sure you are authenticated. %v", err)
|
||||
}
|
||||
|
||||
gceImages := &internalImageConfig{
|
||||
@ -216,12 +217,12 @@ func main() {
|
||||
// parse images
|
||||
imageConfigData, err := ioutil.ReadFile(configPath)
|
||||
if err != nil {
|
||||
glog.Fatalf("Could not read image config file provided: %v", err)
|
||||
klog.Fatalf("Could not read image config file provided: %v", err)
|
||||
}
|
||||
externalImageConfig := ImageConfig{Images: make(map[string]GCEImage)}
|
||||
err = yaml.Unmarshal(imageConfigData, &externalImageConfig)
|
||||
if err != nil {
|
||||
glog.Fatalf("Could not parse image config file: %v", err)
|
||||
klog.Fatalf("Could not parse image config file: %v", err)
|
||||
}
|
||||
for shortName, imageConfig := range externalImageConfig.Images {
|
||||
var images []string
|
||||
@ -230,7 +231,7 @@ func main() {
|
||||
isRegex = true
|
||||
images, err = getGCEImages(imageConfig.ImageRegex, imageConfig.Project, imageConfig.PreviousImages)
|
||||
if err != nil {
|
||||
glog.Fatalf("Could not retrieve list of images based on image prefix %q: %v", imageConfig.ImageRegex, err)
|
||||
klog.Fatalf("Could not retrieve list of images based on image prefix %q: %v", imageConfig.ImageRegex, err)
|
||||
}
|
||||
} else {
|
||||
images = []string{imageConfig.Image}
|
||||
@ -265,7 +266,7 @@ func main() {
|
||||
// convenience; merge in with config file
|
||||
if *images != "" {
|
||||
if *imageProject == "" {
|
||||
glog.Fatal("Must specify --image-project if you specify --images")
|
||||
klog.Fatal("Must specify --image-project if you specify --images")
|
||||
}
|
||||
cliImages := strings.Split(*images, ",")
|
||||
for _, img := range cliImages {
|
||||
@ -279,16 +280,16 @@ func main() {
|
||||
}
|
||||
|
||||
if len(gceImages.images) != 0 && *zone == "" {
|
||||
glog.Fatal("Must specify --zone flag")
|
||||
klog.Fatal("Must specify --zone flag")
|
||||
}
|
||||
for shortName, image := range gceImages.images {
|
||||
if image.project == "" {
|
||||
glog.Fatalf("Invalid config for %v; must specify a project", shortName)
|
||||
klog.Fatalf("Invalid config for %v; must specify a project", shortName)
|
||||
}
|
||||
}
|
||||
if len(gceImages.images) != 0 {
|
||||
if *project == "" {
|
||||
glog.Fatal("Must specify --project flag to launch images into")
|
||||
klog.Fatal("Must specify --project flag to launch images into")
|
||||
}
|
||||
}
|
||||
if *instanceNamePrefix == "" {
|
||||
@ -394,9 +395,9 @@ func getImageMetadata(input string) *compute.Metadata {
|
||||
if input == "" {
|
||||
return nil
|
||||
}
|
||||
glog.V(3).Infof("parsing instance metadata: %q", input)
|
||||
klog.V(3).Infof("parsing instance metadata: %q", input)
|
||||
raw := parseInstanceMetadata(input)
|
||||
glog.V(4).Infof("parsed instance metadata: %v", raw)
|
||||
klog.V(4).Infof("parsed instance metadata: %v", raw)
|
||||
metadataItems := []*compute.MetadataItems{}
|
||||
for k, v := range raw {
|
||||
val := v
|
||||
@ -482,7 +483,7 @@ func getGCEImages(imageRegex, project string, previousImages int) ([]string, err
|
||||
creationTime: creationTime,
|
||||
name: instance.Name,
|
||||
}
|
||||
glog.V(4).Infof("Found image %q based on regex %q in project %q", io.string(), imageRegex, project)
|
||||
klog.V(4).Infof("Found image %q based on regex %q in project %q", io.string(), imageRegex, project)
|
||||
imageObjs = append(imageObjs, io)
|
||||
}
|
||||
}
|
||||
@ -531,12 +532,12 @@ func testImage(imageConfig *internalGCEImage, junitFilePrefix string) *TestResul
|
||||
// TODO(random-liu): Extract out and unify log collection logic with cluste e2e.
|
||||
serialPortOutput, err := computeService.Instances.GetSerialPortOutput(*project, *zone, host).Port(1).Do()
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to collect serial output from node %q: %v", host, err)
|
||||
klog.Errorf("Failed to collect serial output from node %q: %v", host, err)
|
||||
} else {
|
||||
logFilename := "serial-1.log"
|
||||
err := remote.WriteLog(host, logFilename, serialPortOutput.Contents)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to write serial output from node %q to %q: %v", host, logFilename, err)
|
||||
klog.Errorf("Failed to write serial output from node %q to %q: %v", host, logFilename, err)
|
||||
}
|
||||
}
|
||||
return result
|
||||
@ -544,7 +545,7 @@ func testImage(imageConfig *internalGCEImage, junitFilePrefix string) *TestResul
|
||||
|
||||
// Provision a gce instance using image
|
||||
func createInstance(imageConfig *internalGCEImage) (string, error) {
|
||||
glog.V(1).Infof("Creating instance %+v", *imageConfig)
|
||||
klog.V(1).Infof("Creating instance %+v", *imageConfig)
|
||||
name := imageToInstanceName(imageConfig)
|
||||
i := &compute.Instance{
|
||||
Name: name,
|
||||
@ -712,10 +713,10 @@ func getComputeClient() (*compute.Service, error) {
|
||||
}
|
||||
|
||||
func deleteInstance(host string) {
|
||||
glog.Infof("Deleting instance %q", host)
|
||||
klog.Infof("Deleting instance %q", host)
|
||||
_, err := computeService.Instances.Delete(*project, *zone, host).Do()
|
||||
if err != nil {
|
||||
glog.Errorf("Error deleting instance %q: %v", host, err)
|
||||
klog.Errorf("Error deleting instance %q: %v", host, err)
|
||||
}
|
||||
}
|
||||
|
||||
@ -730,7 +731,7 @@ func parseInstanceMetadata(str string) map[string]string {
|
||||
}
|
||||
kp := strings.Split(s, "<")
|
||||
if len(kp) != 2 {
|
||||
glog.Fatalf("Invalid instance metadata: %q", s)
|
||||
klog.Fatalf("Invalid instance metadata: %q", s)
|
||||
continue
|
||||
}
|
||||
metaPath := kp[1]
|
||||
@ -739,7 +740,7 @@ func parseInstanceMetadata(str string) map[string]string {
|
||||
}
|
||||
v, err := ioutil.ReadFile(metaPath)
|
||||
if err != nil {
|
||||
glog.Fatalf("Failed to read metadata file %q: %v", metaPath, err)
|
||||
klog.Fatalf("Failed to read metadata file %q: %v", metaPath, err)
|
||||
continue
|
||||
}
|
||||
metadata[kp[0]] = string(v)
|
||||
|
291
vendor/k8s.io/kubernetes/test/e2e_node/runtime_conformance_test.go
generated
vendored
@ -20,226 +20,23 @@ import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/kubernetes/pkg/kubelet/images"
|
||||
"k8s.io/kubernetes/test/e2e/common"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e_node/services"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
gomegatypes "github.com/onsi/gomega/types"
|
||||
)
|
||||
|
||||
const (
|
||||
consistentCheckTimeout = time.Second * 5
|
||||
retryTimeout = time.Minute * 5
|
||||
pollInterval = time.Second * 1
|
||||
)
|
||||
|
||||
var _ = framework.KubeDescribe("Container Runtime Conformance Test", func() {
|
||||
f := framework.NewDefaultFramework("runtime-conformance")
|
||||
|
||||
Describe("container runtime conformance blackbox test", func() {
|
||||
Context("when starting a container that exits", func() {
|
||||
framework.ConformanceIt("it should run with the expected status [NodeConformance]", func() {
|
||||
restartCountVolumeName := "restart-count"
|
||||
restartCountVolumePath := "/restart-count"
|
||||
testContainer := v1.Container{
|
||||
Image: busyboxImage,
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
MountPath: restartCountVolumePath,
|
||||
Name: restartCountVolumeName,
|
||||
},
|
||||
},
|
||||
}
|
||||
testVolumes := []v1.Volume{
|
||||
{
|
||||
Name: restartCountVolumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumMemory},
|
||||
},
|
||||
},
|
||||
}
|
||||
testCases := []struct {
|
||||
Name string
|
||||
RestartPolicy v1.RestartPolicy
|
||||
Phase v1.PodPhase
|
||||
State ContainerState
|
||||
RestartCount int32
|
||||
Ready bool
|
||||
}{
|
||||
{"terminate-cmd-rpa", v1.RestartPolicyAlways, v1.PodRunning, ContainerStateRunning, 2, true},
|
||||
{"terminate-cmd-rpof", v1.RestartPolicyOnFailure, v1.PodSucceeded, ContainerStateTerminated, 1, false},
|
||||
{"terminate-cmd-rpn", v1.RestartPolicyNever, v1.PodFailed, ContainerStateTerminated, 0, false},
|
||||
}
|
||||
for _, testCase := range testCases {
|
||||
|
||||
// It failed at the 1st run, then succeeded at 2nd run, then run forever
|
||||
cmdScripts := `
|
||||
f=%s
|
||||
count=$(echo 'hello' >> $f ; wc -l $f | awk {'print $1'})
|
||||
if [ $count -eq 1 ]; then
|
||||
exit 1
|
||||
fi
|
||||
if [ $count -eq 2 ]; then
|
||||
exit 0
|
||||
fi
|
||||
while true; do sleep 1; done
|
||||
`
|
||||
tmpCmd := fmt.Sprintf(cmdScripts, path.Join(restartCountVolumePath, "restartCount"))
|
||||
testContainer.Name = testCase.Name
|
||||
testContainer.Command = []string{"sh", "-c", tmpCmd}
|
||||
terminateContainer := ConformanceContainer{
|
||||
PodClient: f.PodClient(),
|
||||
Container: testContainer,
|
||||
RestartPolicy: testCase.RestartPolicy,
|
||||
Volumes: testVolumes,
|
||||
PodSecurityContext: &v1.PodSecurityContext{
|
||||
SELinuxOptions: &v1.SELinuxOptions{
|
||||
Level: "s0",
|
||||
},
|
||||
},
|
||||
}
|
||||
terminateContainer.Create()
|
||||
defer terminateContainer.Delete()
|
||||
|
||||
By("it should get the expected 'RestartCount'")
|
||||
Eventually(func() (int32, error) {
|
||||
status, err := terminateContainer.GetStatus()
|
||||
return status.RestartCount, err
|
||||
}, retryTimeout, pollInterval).Should(Equal(testCase.RestartCount))
|
||||
|
||||
By("it should get the expected 'Phase'")
|
||||
Eventually(terminateContainer.GetPhase, retryTimeout, pollInterval).Should(Equal(testCase.Phase))
|
||||
|
||||
By("it should get the expected 'Ready' condition")
|
||||
Expect(terminateContainer.IsReady()).Should(Equal(testCase.Ready))
|
||||
|
||||
status, err := terminateContainer.GetStatus()
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
|
||||
By("it should get the expected 'State'")
|
||||
Expect(GetContainerState(status.State)).To(Equal(testCase.State))
|
||||
|
||||
By("it should be possible to delete [Conformance][NodeConformance]")
|
||||
Expect(terminateContainer.Delete()).To(Succeed())
|
||||
Eventually(terminateContainer.Present, retryTimeout, pollInterval).Should(BeFalse())
|
||||
}
|
||||
})
|
||||
|
||||
rootUser := int64(0)
|
||||
nonRootUser := int64(10000)
|
||||
for _, testCase := range []struct {
|
||||
name string
|
||||
container v1.Container
|
||||
phase v1.PodPhase
|
||||
message gomegatypes.GomegaMatcher
|
||||
}{
|
||||
{
|
||||
name: "if TerminationMessagePath is set [Conformance][NodeConformance]",
|
||||
container: v1.Container{
|
||||
Image: busyboxImage,
|
||||
Command: []string{"/bin/sh", "-c"},
|
||||
Args: []string{"/bin/echo -n DONE > /dev/termination-log"},
|
||||
TerminationMessagePath: "/dev/termination-log",
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
RunAsUser: &rootUser,
|
||||
},
|
||||
},
|
||||
phase: v1.PodSucceeded,
|
||||
message: Equal("DONE"),
|
||||
},
|
||||
|
||||
{
|
||||
name: "if TerminationMessagePath is set as non-root user and at a non-default path [Conformance][NodeConformance]",
|
||||
container: v1.Container{
|
||||
Image: busyboxImage,
|
||||
Command: []string{"/bin/sh", "-c"},
|
||||
Args: []string{"/bin/echo -n DONE > /dev/termination-custom-log"},
|
||||
TerminationMessagePath: "/dev/termination-custom-log",
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
RunAsUser: &nonRootUser,
|
||||
},
|
||||
},
|
||||
phase: v1.PodSucceeded,
|
||||
message: Equal("DONE"),
|
||||
},
|
||||
|
||||
{
|
||||
name: "from log output if TerminationMessagePolicy FallbackToLogOnError is set [Conformance][NodeConformance]",
|
||||
container: v1.Container{
|
||||
Image: busyboxImage,
|
||||
Command: []string{"/bin/sh", "-c"},
|
||||
Args: []string{"/bin/echo -n DONE; /bin/false"},
|
||||
TerminationMessagePath: "/dev/termination-log",
|
||||
TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError,
|
||||
},
|
||||
phase: v1.PodFailed,
|
||||
message: Equal("DONE\n"),
|
||||
},
|
||||
|
||||
{
|
||||
name: "as empty when pod succeeds and TerminationMessagePolicy FallbackToLogOnError is set [NodeConformance]",
|
||||
container: v1.Container{
|
||||
Image: busyboxImage,
|
||||
Command: []string{"/bin/sh", "-c"},
|
||||
Args: []string{"/bin/echo DONE; /bin/true"},
|
||||
TerminationMessagePath: "/dev/termination-log",
|
||||
TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError,
|
||||
},
|
||||
phase: v1.PodSucceeded,
|
||||
message: Equal(""),
|
||||
},
|
||||
|
||||
{
|
||||
name: "from file when pod succeeds and TerminationMessagePolicy FallbackToLogOnError is set [Conformance][NodeConformance]",
|
||||
container: v1.Container{
|
||||
Image: busyboxImage,
|
||||
Command: []string{"/bin/sh", "-c"},
|
||||
Args: []string{"/bin/echo -n OK > /dev/termination-log; /bin/echo DONE; /bin/true"},
|
||||
TerminationMessagePath: "/dev/termination-log",
|
||||
TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError,
|
||||
},
|
||||
phase: v1.PodSucceeded,
|
||||
message: Equal("OK"),
|
||||
},
|
||||
} {
|
||||
It(fmt.Sprintf("should report termination message %s", testCase.name), func() {
|
||||
testCase.container.Name = "termination-message-container"
|
||||
c := ConformanceContainer{
|
||||
PodClient: f.PodClient(),
|
||||
Container: testCase.container,
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
}
|
||||
|
||||
By("create the container")
|
||||
c.Create()
|
||||
defer c.Delete()
|
||||
|
||||
By(fmt.Sprintf("wait for the container to reach %s", testCase.phase))
|
||||
Eventually(c.GetPhase, retryTimeout, pollInterval).Should(Equal(testCase.phase))
|
||||
|
||||
By("get the container status")
|
||||
status, err := c.GetStatus()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("the container should be terminated")
|
||||
Expect(GetContainerState(status.State)).To(Equal(ContainerStateTerminated))
|
||||
|
||||
By("the termination message should be set")
|
||||
Expect(status.State.Terminated.Message).Should(testCase.message)
|
||||
|
||||
By("delete the container")
|
||||
Expect(c.Delete()).To(Succeed())
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
Context("when running a container with a new image", func() {
|
||||
// The service account only has pull permission
|
||||
@ -252,71 +49,27 @@ while true; do sleep 1; done
|
||||
}
|
||||
}
|
||||
}`
|
||||
secret := &v1.Secret{
|
||||
Data: map[string][]byte{v1.DockerConfigJsonKey: []byte(auth)},
|
||||
Type: v1.SecretTypeDockerConfigJson,
|
||||
}
|
||||
// The following images are not added into NodeImageWhiteList, because this test is
|
||||
// testing image pulling, these images don't need to be prepulled. The ImagePullPolicy
|
||||
// is v1.PullAlways, so it won't be blocked by framework image white list check.
|
||||
for _, testCase := range []struct {
|
||||
description string
|
||||
image string
|
||||
secret bool
|
||||
credentialProvider bool
|
||||
phase v1.PodPhase
|
||||
waiting bool
|
||||
description string
|
||||
image string
|
||||
phase v1.PodPhase
|
||||
waiting bool
|
||||
}{
|
||||
{
|
||||
description: "should not be able to pull image from invalid registry",
|
||||
image: "invalid.com/invalid/alpine:3.1",
|
||||
phase: v1.PodPending,
|
||||
waiting: true,
|
||||
},
|
||||
{
|
||||
description: "should not be able to pull non-existing image from gcr.io",
|
||||
image: "k8s.gcr.io/invalid-image:invalid-tag",
|
||||
phase: v1.PodPending,
|
||||
waiting: true,
|
||||
},
|
||||
{
|
||||
description: "should be able to pull image from gcr.io",
|
||||
image: "k8s.gcr.io/alpine-with-bash:1.0",
|
||||
description: "should be able to pull from private registry with credential provider",
|
||||
image: "gcr.io/authenticated-image-pulling/alpine:3.7",
|
||||
phase: v1.PodRunning,
|
||||
waiting: false,
|
||||
},
|
||||
{
|
||||
description: "should be able to pull image from docker hub",
|
||||
image: "alpine:3.1",
|
||||
phase: v1.PodRunning,
|
||||
waiting: false,
|
||||
},
|
||||
{
|
||||
description: "should not be able to pull from private registry without secret",
|
||||
image: "gcr.io/authenticated-image-pulling/alpine:3.1",
|
||||
phase: v1.PodPending,
|
||||
waiting: true,
|
||||
},
|
||||
{
|
||||
description: "should be able to pull from private registry with secret",
|
||||
image: "gcr.io/authenticated-image-pulling/alpine:3.1",
|
||||
secret: true,
|
||||
phase: v1.PodRunning,
|
||||
waiting: false,
|
||||
},
|
||||
{
|
||||
description: "should be able to pull from private registry with credential provider",
|
||||
image: "gcr.io/authenticated-image-pulling/alpine:3.1",
|
||||
credentialProvider: true,
|
||||
phase: v1.PodRunning,
|
||||
waiting: false,
|
||||
},
|
||||
} {
|
||||
testCase := testCase
|
||||
It(testCase.description+" [Conformance][NodeConformance]", func() {
|
||||
It(testCase.description+" [NodeConformance]", func() {
|
||||
name := "image-pull-test"
|
||||
command := []string{"/bin/sh", "-c", "while true; do sleep 1; done"}
|
||||
container := ConformanceContainer{
|
||||
container := common.ConformanceContainer{
|
||||
PodClient: f.PodClient(),
|
||||
Container: v1.Container{
|
||||
Name: name,
|
||||
@ -327,20 +80,12 @@ while true; do sleep 1; done
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
}
|
||||
if testCase.secret {
|
||||
secret.Name = "image-pull-secret-" + string(uuid.NewUUID())
|
||||
By("create image pull secret")
|
||||
_, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
defer f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(secret.Name, nil)
|
||||
container.ImagePullSecrets = []string{secret.Name}
|
||||
}
|
||||
if testCase.credentialProvider {
|
||||
configFile := filepath.Join(services.KubeletRootDirectory, "config.json")
|
||||
err := ioutil.WriteFile(configFile, []byte(auth), 0644)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
defer os.Remove(configFile)
|
||||
}
|
||||
|
||||
configFile := filepath.Join(services.KubeletRootDirectory, "config.json")
|
||||
err := ioutil.WriteFile(configFile, []byte(auth), 0644)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
defer os.Remove(configFile)
|
||||
|
||||
// checkContainerStatus checks whether the container status matches expectation.
|
||||
checkContainerStatus := func() error {
|
||||
status, err := container.GetStatus()
|
||||
@ -354,13 +99,13 @@ while true; do sleep 1; done
|
||||
if !testCase.waiting {
|
||||
if status.State.Running == nil {
|
||||
return fmt.Errorf("expected container state: Running, got: %q",
|
||||
GetContainerState(status.State))
|
||||
common.GetContainerState(status.State))
|
||||
}
|
||||
}
|
||||
if testCase.waiting {
|
||||
if status.State.Waiting == nil {
|
||||
return fmt.Errorf("expected container state: Waiting, got: %q",
|
||||
GetContainerState(status.State))
|
||||
common.GetContainerState(status.State))
|
||||
}
|
||||
reason := status.State.Waiting.Reason
|
||||
if reason != images.ErrImagePull.Error() &&
|
||||
@ -386,7 +131,7 @@ while true; do sleep 1; done
|
||||
By("create the container")
|
||||
container.Create()
|
||||
By("check the container status")
|
||||
for start := time.Now(); time.Since(start) < retryTimeout; time.Sleep(pollInterval) {
|
||||
for start := time.Now(); time.Since(start) < common.ContainerStatusRetryTimeout; time.Sleep(common.ContainerStatusPollInterval) {
|
||||
if err = checkContainerStatus(); err == nil {
|
||||
break
|
||||
}
|
||||
|
197
vendor/k8s.io/kubernetes/test/e2e_node/security_context_test.go
generated
vendored
@ -50,12 +50,12 @@ var _ = framework.KubeDescribe("Security Context", func() {
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "test-container-1",
|
||||
Image: "busybox",
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
Command: []string{"/bin/top"},
|
||||
},
|
||||
{
|
||||
Name: "test-container-2",
|
||||
Image: "busybox",
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
Command: []string{"/bin/sleep"},
|
||||
Args: []string{"10000"},
|
||||
},
|
||||
@ -91,12 +91,12 @@ var _ = framework.KubeDescribe("Security Context", func() {
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "test-container-1",
|
||||
Image: "busybox",
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
Command: []string{"/bin/top"},
|
||||
},
|
||||
{
|
||||
Name: "test-container-2",
|
||||
Image: "busybox",
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
Command: []string{"/bin/sleep"},
|
||||
Args: []string{"10000"},
|
||||
},
|
||||
@ -146,7 +146,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
|
||||
BeforeEach(func() {
|
||||
nginxPodName := "nginx-hostpid-" + string(uuid.NewUUID())
|
||||
podClient.CreateSync(makeHostPidPod(nginxPodName,
|
||||
imageutils.GetE2EImage(imageutils.NginxSlim),
|
||||
imageutils.GetE2EImage(imageutils.Nginx),
|
||||
nil,
|
||||
true,
|
||||
))
|
||||
@ -350,177 +350,6 @@ var _ = framework.KubeDescribe("Security Context", func() {
|
||||
})
|
||||
})
|
||||
|
||||
Context("When creating a container with runAsUser", func() {
|
||||
makeUserPod := func(podName, image string, command []string, userid int64) *v1.Pod {
|
||||
return &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: image,
|
||||
Name: podName,
|
||||
Command: command,
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
RunAsUser: &userid,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
createAndWaitUserPod := func(userid int64) {
|
||||
podName := fmt.Sprintf("busybox-user-%d-%s", userid, uuid.NewUUID())
|
||||
podClient.Create(makeUserPod(podName,
|
||||
busyboxImage,
|
||||
[]string{"sh", "-c", fmt.Sprintf("test $(id -u) -eq %d", userid)},
|
||||
userid,
|
||||
))
|
||||
|
||||
podClient.WaitForSuccess(podName, framework.PodStartTimeout)
|
||||
}
|
||||
|
||||
It("should run the container with uid 65534 [NodeConformance]", func() {
|
||||
createAndWaitUserPod(65534)
|
||||
})
|
||||
|
||||
It("should run the container with uid 0 [NodeConformance]", func() {
|
||||
createAndWaitUserPod(0)
|
||||
})
|
||||
})
|
||||
|
||||
Context("When creating a pod with readOnlyRootFilesystem", func() {
|
||||
makeUserPod := func(podName, image string, command []string, readOnlyRootFilesystem bool) *v1.Pod {
|
||||
return &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: image,
|
||||
Name: podName,
|
||||
Command: command,
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
ReadOnlyRootFilesystem: &readOnlyRootFilesystem,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
createAndWaitUserPod := func(readOnlyRootFilesystem bool) string {
|
||||
podName := fmt.Sprintf("busybox-readonly-%v-%s", readOnlyRootFilesystem, uuid.NewUUID())
|
||||
podClient.Create(makeUserPod(podName,
|
||||
"busybox",
|
||||
[]string{"sh", "-c", "touch checkfile"},
|
||||
readOnlyRootFilesystem,
|
||||
))
|
||||
|
||||
if readOnlyRootFilesystem {
|
||||
podClient.WaitForFailure(podName, framework.PodStartTimeout)
|
||||
} else {
|
||||
podClient.WaitForSuccess(podName, framework.PodStartTimeout)
|
||||
}
|
||||
|
||||
return podName
|
||||
}
|
||||
|
||||
It("should run the container with readonly rootfs when readOnlyRootFilesystem=true [NodeConformance]", func() {
|
||||
createAndWaitUserPod(true)
|
||||
})
|
||||
|
||||
It("should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance]", func() {
|
||||
createAndWaitUserPod(false)
|
||||
})
|
||||
})
|
||||
|
||||
Context("when creating containers with AllowPrivilegeEscalation", func() {
|
||||
|
||||
BeforeEach(func() {
|
||||
if framework.TestContext.ContainerRuntime == "docker" {
|
||||
isSupported, err := isDockerNoNewPrivilegesSupported()
|
||||
framework.ExpectNoError(err)
|
||||
if !isSupported {
|
||||
framework.Skipf("Skipping because no_new_privs is not supported in this docker")
|
||||
}
|
||||
// It turns out SELinux policy in RHEL 7 does not play well with
|
||||
// the "NoNewPrivileges" flag. So let's skip this test when running
|
||||
// with SELinux support enabled.
|
||||
//
|
||||
// TODO(filbranden): Remove this after the fix for
|
||||
// https://github.com/projectatomic/container-selinux/issues/45
|
||||
// has been backported to RHEL 7 (expected on RHEL 7.5)
|
||||
selinuxEnabled, err := isDockerSELinuxSupportEnabled()
|
||||
framework.ExpectNoError(err)
|
||||
if selinuxEnabled {
|
||||
framework.Skipf("Skipping because Docker daemon is running with SELinux support enabled")
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
makeAllowPrivilegeEscalationPod := func(podName string, allowPrivilegeEscalation *bool, uid int64) *v1.Pod {
|
||||
return &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: imageutils.GetE2EImage(imageutils.Nonewprivs),
|
||||
Name: podName,
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
AllowPrivilegeEscalation: allowPrivilegeEscalation,
|
||||
RunAsUser: &uid,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
createAndMatchOutput := func(podName, output string, allowPrivilegeEscalation *bool, uid int64) error {
|
||||
podClient.Create(makeAllowPrivilegeEscalationPod(podName,
|
||||
allowPrivilegeEscalation,
|
||||
uid,
|
||||
))
|
||||
|
||||
podClient.WaitForSuccess(podName, framework.PodStartTimeout)
|
||||
|
||||
if err := podClient.MatchContainerOutput(podName, podName, output); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
It("should allow privilege escalation when not explicitly set and uid != 0 [NodeConformance]", func() {
|
||||
podName := "alpine-nnp-nil-" + string(uuid.NewUUID())
|
||||
if err := createAndMatchOutput(podName, "Effective uid: 0", nil, 1000); err != nil {
|
||||
framework.Failf("Match output for pod %q failed: %v", podName, err)
|
||||
}
|
||||
})
|
||||
|
||||
It("should not allow privilege escalation when false [NodeConformance]", func() {
|
||||
podName := "alpine-nnp-false-" + string(uuid.NewUUID())
|
||||
apeFalse := false
|
||||
if err := createAndMatchOutput(podName, "Effective uid: 1000", &apeFalse, 1000); err != nil {
|
||||
framework.Failf("Match output for pod %q failed: %v", podName, err)
|
||||
}
|
||||
})
|
||||
|
||||
It("should allow privilege escalation when true [NodeConformance]", func() {
|
||||
podName := "alpine-nnp-true-" + string(uuid.NewUUID())
|
||||
apeTrue := true
|
||||
if err := createAndMatchOutput(podName, "Effective uid: 0", &apeTrue, 1000); err != nil {
|
||||
framework.Failf("Match output for pod %q failed: %v", podName, err)
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
Context("When creating a pod with privileged", func() {
|
||||
makeUserPod := func(podName, image string, command []string, privileged bool) *v1.Pod {
|
||||
return &v1.Pod{
|
||||
@ -549,9 +378,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
|
||||
[]string{"sh", "-c", "ip link add dummy0 type dummy || true"},
|
||||
privileged,
|
||||
))
|
||||
|
||||
podClient.WaitForSuccess(podName, framework.PodStartTimeout)
|
||||
|
||||
return podName
|
||||
}
|
||||
|
||||
@ -567,19 +394,5 @@ var _ = framework.KubeDescribe("Security Context", func() {
|
||||
framework.Failf("privileged container should be able to create dummy device")
|
||||
}
|
||||
})
|
||||
|
||||
It("should run the container as unprivileged when false [NodeConformance]", func() {
|
||||
podName := createAndWaitUserPod(false)
|
||||
logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName)
|
||||
if err != nil {
|
||||
framework.Failf("GetPodLogs for pod %q failed: %v", podName, err)
|
||||
}
|
||||
|
||||
framework.Logf("Got logs for pod %q: %q", podName, logs)
|
||||
if !strings.Contains(logs, "Operation not permitted") {
|
||||
framework.Failf("unprivileged container shouldn't be able to create dummy device")
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
})
|
||||
|
29
vendor/k8s.io/kubernetes/test/e2e_node/services/BUILD
generated
vendored
@ -9,7 +9,6 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"apiserver.go",
|
||||
"etcd.go",
|
||||
"internal_services.go",
|
||||
"kubelet.go",
|
||||
"logs.go",
|
||||
@ -25,27 +24,25 @@ go_library(
|
||||
"//cmd/kubelet/app/options:go_default_library",
|
||||
"//pkg/controller/namespace:go_default_library",
|
||||
"//pkg/features:go_default_library",
|
||||
"//pkg/kubelet/apis/kubeletconfig:go_default_library",
|
||||
"//pkg/kubelet/apis/kubeletconfig/v1beta1:go_default_library",
|
||||
"//pkg/kubelet/apis/config:go_default_library",
|
||||
"//pkg/kubelet/kubeletconfig/util/codec:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apiserver/pkg/storage/etcd/testing:go_default_library",
|
||||
"//staging/src/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library",
|
||||
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
"//staging/src/k8s.io/apiserver/pkg/util/flag:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/informers:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/rest:go_default_library",
|
||||
"//staging/src/k8s.io/kubelet/config/v1beta1:go_default_library",
|
||||
"//test/e2e/framework:go_default_library",
|
||||
"//test/e2e_node/builder:go_default_library",
|
||||
"//test/e2e_node/remote:go_default_library",
|
||||
"//vendor/github.com/coreos/etcd/etcdserver:go_default_library",
|
||||
"//vendor/github.com/coreos/etcd/etcdserver/api/v2http:go_default_library",
|
||||
"//vendor/github.com/coreos/etcd/pkg/transport:go_default_library",
|
||||
"//vendor/github.com/coreos/etcd/pkg/types:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/github.com/kardianos/osext:go_default_library",
|
||||
"//vendor/github.com/spf13/pflag:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/util/flag:go_default_library",
|
||||
"//vendor/k8s.io/client-go/dynamic:go_default_library",
|
||||
"//vendor/k8s.io/client-go/informers:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//vendor/k8s.io/client-go/rest:go_default_library",
|
||||
"//vendor/k8s.io/klog:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
|
27
vendor/k8s.io/kubernetes/test/e2e_node/services/apiserver.go
generated
vendored
27
vendor/k8s.io/kubernetes/test/e2e_node/services/apiserver.go
generated
vendored
@ -20,6 +20,7 @@ import (
|
||||
"fmt"
|
||||
"net"
|
||||
|
||||
"k8s.io/apiserver/pkg/storage/storagebackend"
|
||||
apiserver "k8s.io/kubernetes/cmd/kube-apiserver/app"
|
||||
"k8s.io/kubernetes/cmd/kube-apiserver/app/options"
|
||||
)
|
||||
@ -31,21 +32,23 @@ const (
|
||||
)
|
||||
|
||||
// APIServer is a server which manages apiserver.
|
||||
type APIServer struct{}
|
||||
type APIServer struct {
|
||||
storageConfig storagebackend.Config
|
||||
stopCh chan struct{}
|
||||
}
|
||||
|
||||
// NewAPIServer creates an apiserver.
|
||||
func NewAPIServer() *APIServer {
|
||||
return &APIServer{}
|
||||
func NewAPIServer(storageConfig storagebackend.Config) *APIServer {
|
||||
return &APIServer{
|
||||
storageConfig: storageConfig,
|
||||
stopCh: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
// Start starts the apiserver, returns when apiserver is ready.
|
||||
func (a *APIServer) Start() error {
|
||||
o := options.NewServerRunOptions()
|
||||
o.Etcd.StorageConfig.ServerList = []string{getEtcdClientURL()}
|
||||
// TODO: Current setup of etcd in e2e-node tests doesn't support etcd v3
|
||||
// protocol. We should migrate it to use the same infrastructure as all
|
||||
// other tests (pkg/storage/etcd/testing).
|
||||
o.Etcd.StorageConfig.Type = "etcd2"
|
||||
o.Etcd.StorageConfig = a.storageConfig
|
||||
_, ipnet, err := net.ParseCIDR(clusterIPRange)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -56,14 +59,12 @@ func (a *APIServer) Start() error {
|
||||
errCh := make(chan error)
|
||||
go func() {
|
||||
defer close(errCh)
|
||||
stopCh := make(chan struct{})
|
||||
defer close(stopCh)
|
||||
completedOptions, err := apiserver.Complete(o)
|
||||
if err != nil {
|
||||
errCh <- fmt.Errorf("set apiserver default options error: %v", err)
|
||||
return
|
||||
}
|
||||
err = apiserver.Run(completedOptions, stopCh)
|
||||
err = apiserver.Run(completedOptions, a.stopCh)
|
||||
if err != nil {
|
||||
errCh <- fmt.Errorf("run apiserver error: %v", err)
|
||||
return
|
||||
@ -80,6 +81,10 @@ func (a *APIServer) Start() error {
|
||||
// Stop stops the apiserver. Currently, there is no way to stop the apiserver.
|
||||
// The function is here only for completion.
|
||||
func (a *APIServer) Stop() error {
|
||||
if a.stopCh != nil {
|
||||
close(a.stopCh)
|
||||
a.stopCh = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
161
vendor/k8s.io/kubernetes/test/e2e_node/services/etcd.go
generated
vendored
161
vendor/k8s.io/kubernetes/test/e2e_node/services/etcd.go
generated
vendored
@ -1,161 +0,0 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package services
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/etcd/etcdserver"
|
||||
"github.com/coreos/etcd/etcdserver/api/v2http"
|
||||
"github.com/coreos/etcd/pkg/transport"
|
||||
"github.com/coreos/etcd/pkg/types"
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
// TODO: These tests should not be leveraging v2http
|
||||
// TODO(random-liu): Add service interface to manage services with the same behaviour.
|
||||
|
||||
// All following configurations are got from etcd source code.
|
||||
// TODO(random-liu): Use embed.NewConfig after etcd3 is supported.
|
||||
const (
|
||||
etcdName = "etcd"
|
||||
clientURLStr = "http://localhost:4001" // clientURL has listener created and handles etcd API traffic
|
||||
peerURLStr = "http://localhost:7001" // peerURL does't have listener created, it is used to pass Etcd validation
|
||||
snapCount = etcdserver.DefaultSnapCount
|
||||
maxSnapFiles = 5
|
||||
maxWALFiles = 5
|
||||
tickMs = 100
|
||||
electionTicks = 10
|
||||
etcdHealthCheckURL = clientURLStr + "/v2/keys/" // Trailing slash is required,
|
||||
)
|
||||
|
||||
// EtcdServer is a server which manages etcd.
|
||||
type EtcdServer struct {
|
||||
*etcdserver.EtcdServer
|
||||
config *etcdserver.ServerConfig
|
||||
clientListen net.Listener
|
||||
}
|
||||
|
||||
// NewEtcd creates a new default etcd server using 'dataDir' for persistence.
|
||||
func NewEtcd(dataDir string) *EtcdServer {
|
||||
clientURLs, err := types.NewURLs([]string{clientURLStr})
|
||||
if err != nil {
|
||||
glog.Fatalf("Failed to parse client url %q: %v", clientURLStr, err)
|
||||
}
|
||||
peerURLs, err := types.NewURLs([]string{peerURLStr})
|
||||
if err != nil {
|
||||
glog.Fatalf("Failed to parse peer url %q: %v", peerURLStr, err)
|
||||
}
|
||||
|
||||
config := &etcdserver.ServerConfig{
|
||||
Name: etcdName,
|
||||
ClientURLs: clientURLs,
|
||||
PeerURLs: peerURLs,
|
||||
DataDir: dataDir,
|
||||
InitialPeerURLsMap: map[string]types.URLs{etcdName: peerURLs},
|
||||
NewCluster: true,
|
||||
SnapCount: snapCount,
|
||||
MaxSnapFiles: maxSnapFiles,
|
||||
MaxWALFiles: maxWALFiles,
|
||||
TickMs: tickMs,
|
||||
ElectionTicks: electionTicks,
|
||||
AuthToken: "simple",
|
||||
}
|
||||
|
||||
return &EtcdServer{
|
||||
config: config,
|
||||
}
|
||||
}
|
||||
|
||||
// Start starts the etcd server and listening for client connections
|
||||
func (e *EtcdServer) Start() error {
|
||||
var err error
|
||||
e.EtcdServer, err = etcdserver.NewServer(e.config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// create client listener, there should be only one url
|
||||
e.clientListen, err = createListener(e.config.ClientURLs[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// start etcd
|
||||
e.EtcdServer.Start()
|
||||
|
||||
// setup client listener
|
||||
ch := v2http.NewClientHandler(e.EtcdServer, e.config.ReqTimeout())
|
||||
errCh := make(chan error)
|
||||
go func(l net.Listener) {
|
||||
defer close(errCh)
|
||||
srv := &http.Server{
|
||||
Handler: ch,
|
||||
ReadTimeout: 5 * time.Minute,
|
||||
}
|
||||
// Serve always returns a non-nil error.
|
||||
errCh <- srv.Serve(l)
|
||||
}(e.clientListen)
|
||||
|
||||
err = readinessCheck("etcd", []string{etcdHealthCheckURL}, errCh)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop closes all connections and stops the Etcd server
|
||||
func (e *EtcdServer) Stop() error {
|
||||
if e.EtcdServer != nil {
|
||||
e.EtcdServer.Stop()
|
||||
}
|
||||
if e.clientListen != nil {
|
||||
err := e.clientListen.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Name returns the server's unique name
|
||||
func (e *EtcdServer) Name() string {
|
||||
return etcdName
|
||||
}
|
||||
|
||||
func createListener(url url.URL) (net.Listener, error) {
|
||||
l, err := net.Listen("tcp", url.Host)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
l, err = transport.NewKeepAliveListener(l, url.Scheme, &tls.Config{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return l, nil
|
||||
}
|
||||
|
||||
func getEtcdClientURL() string {
|
||||
return clientURLStr
|
||||
}
|
||||
|
||||
func getEtcdHealthCheckURL() string {
|
||||
return etcdHealthCheckURL
|
||||
}
|
75
vendor/k8s.io/kubernetes/test/e2e_node/services/internal_services.go
generated
vendored
75
vendor/k8s.io/kubernetes/test/e2e_node/services/internal_services.go
generated
vendored
@ -17,19 +17,22 @@ limitations under the License.
|
||||
package services
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
etcdtesting "k8s.io/apiserver/pkg/storage/etcd/testing"
|
||||
"k8s.io/apiserver/pkg/storage/storagebackend"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/klog"
|
||||
)
|
||||
|
||||
// e2eService manages e2e services in current process.
|
||||
type e2eServices struct {
|
||||
rmDirs []string
|
||||
// statically linked e2e services
|
||||
etcdServer *EtcdServer
|
||||
etcdServer *etcdtesting.EtcdTestServer
|
||||
etcdStorage *storagebackend.Config
|
||||
apiServer *APIServer
|
||||
nsController *NamespaceController
|
||||
}
|
||||
@ -40,9 +43,9 @@ func newE2EServices() *e2eServices {
|
||||
|
||||
// run starts all e2e services and wait for the termination signal. Once receives the
|
||||
// termination signal, it will stop the e2e services gracefully.
|
||||
func (es *e2eServices) run() error {
|
||||
defer es.stop()
|
||||
if err := es.start(); err != nil {
|
||||
func (es *e2eServices) run(t *testing.T) error {
|
||||
defer es.stop(t)
|
||||
if err := es.start(t); err != nil {
|
||||
return err
|
||||
}
|
||||
// Wait until receiving a termination signal.
|
||||
@ -51,13 +54,13 @@ func (es *e2eServices) run() error {
|
||||
}
|
||||
|
||||
// start starts the tests embedded services or returns an error.
|
||||
func (es *e2eServices) start() error {
|
||||
glog.Info("Starting e2e services...")
|
||||
err := es.startEtcd()
|
||||
func (es *e2eServices) start(t *testing.T) error {
|
||||
klog.Info("Starting e2e services...")
|
||||
err := es.startEtcd(t)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = es.startApiServer()
|
||||
err = es.startApiServer(es.etcdStorage)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -65,71 +68,64 @@ func (es *e2eServices) start() error {
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
glog.Info("E2E services started.")
|
||||
klog.Info("E2E services started.")
|
||||
return nil
|
||||
}
|
||||
|
||||
// stop stops the embedded e2e services.
|
||||
func (es *e2eServices) stop() {
|
||||
glog.Info("Stopping e2e services...")
|
||||
func (es *e2eServices) stop(t *testing.T) {
|
||||
klog.Info("Stopping e2e services...")
|
||||
// TODO(random-liu): Use a loop to stop all services after introducing
|
||||
// service interface.
|
||||
glog.Info("Stopping namespace controller")
|
||||
klog.Info("Stopping namespace controller")
|
||||
if es.nsController != nil {
|
||||
if err := es.nsController.Stop(); err != nil {
|
||||
glog.Errorf("Failed to stop %q: %v", es.nsController.Name(), err)
|
||||
klog.Errorf("Failed to stop %q: %v", es.nsController.Name(), err)
|
||||
}
|
||||
}
|
||||
|
||||
glog.Info("Stopping API server")
|
||||
klog.Info("Stopping API server")
|
||||
if es.apiServer != nil {
|
||||
if err := es.apiServer.Stop(); err != nil {
|
||||
glog.Errorf("Failed to stop %q: %v", es.apiServer.Name(), err)
|
||||
klog.Errorf("Failed to stop %q: %v", es.apiServer.Name(), err)
|
||||
}
|
||||
}
|
||||
|
||||
glog.Info("Stopping etcd")
|
||||
klog.Info("Stopping etcd")
|
||||
if es.etcdServer != nil {
|
||||
if err := es.etcdServer.Stop(); err != nil {
|
||||
glog.Errorf("Failed to stop %q: %v", es.etcdServer.Name(), err)
|
||||
}
|
||||
es.etcdServer.Terminate(t)
|
||||
}
|
||||
|
||||
for _, d := range es.rmDirs {
|
||||
glog.Infof("Deleting directory %v", d)
|
||||
klog.Infof("Deleting directory %v", d)
|
||||
err := os.RemoveAll(d)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to delete directory %s.\n%v", d, err)
|
||||
klog.Errorf("Failed to delete directory %s.\n%v", d, err)
|
||||
}
|
||||
}
|
||||
|
||||
glog.Info("E2E services stopped.")
|
||||
klog.Info("E2E services stopped.")
|
||||
}
|
||||
|
||||
// startEtcd starts the embedded etcd instance or returns an error.
|
||||
func (es *e2eServices) startEtcd() error {
|
||||
glog.Info("Starting etcd")
|
||||
// Create data directory in current working space.
|
||||
dataDir, err := ioutil.TempDir(".", "etcd")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Mark the dataDir as directories to remove.
|
||||
es.rmDirs = append(es.rmDirs, dataDir)
|
||||
es.etcdServer = NewEtcd(dataDir)
|
||||
return es.etcdServer.Start()
|
||||
func (es *e2eServices) startEtcd(t *testing.T) error {
|
||||
klog.Info("Starting etcd")
|
||||
server, etcdStorage := etcdtesting.NewUnsecuredEtcd3TestClientServer(t)
|
||||
es.etcdServer = server
|
||||
es.etcdStorage = etcdStorage
|
||||
return nil
|
||||
}
|
||||
|
||||
// startApiServer starts the embedded API server or returns an error.
|
||||
func (es *e2eServices) startApiServer() error {
|
||||
glog.Info("Starting API server")
|
||||
es.apiServer = NewAPIServer()
|
||||
func (es *e2eServices) startApiServer(etcdStorage *storagebackend.Config) error {
|
||||
klog.Info("Starting API server")
|
||||
es.apiServer = NewAPIServer(*etcdStorage)
|
||||
return es.apiServer.Start()
|
||||
}
|
||||
|
||||
// startNamespaceController starts the embedded namespace controller or returns an error.
|
||||
func (es *e2eServices) startNamespaceController() error {
|
||||
glog.Info("Starting namespace controller")
|
||||
klog.Info("Starting namespace controller")
|
||||
es.nsController = NewNamespaceController(framework.TestContext.Host)
|
||||
return es.nsController.Start()
|
||||
}
|
||||
@ -137,7 +133,6 @@ func (es *e2eServices) startNamespaceController() error {
|
||||
// getServicesHealthCheckURLs returns the health check urls for the internal services.
|
||||
func getServicesHealthCheckURLs() []string {
|
||||
return []string{
|
||||
getEtcdHealthCheckURL(),
|
||||
getAPIServerHealthCheckURL(),
|
||||
}
|
||||
}
|
||||
|
14
vendor/k8s.io/kubernetes/test/e2e_node/services/kubelet.go
generated
vendored
@ -26,16 +26,16 @@ import (
"strings"
"time"

"github.com/golang/glog"
"github.com/spf13/pflag"
"k8s.io/klog"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilflag "k8s.io/apiserver/pkg/util/flag"
kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1"
"k8s.io/kubernetes/cmd/kubelet/app/options"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
kubeletconfigv1beta1 "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1beta1"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
kubeletconfigcodec "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e_node/builder"
@ -83,7 +83,7 @@ func RunKubelet() {
defer e.Stop()
e.kubelet, err = e.startKubelet()
if err != nil {
glog.Fatalf("Failed to start kubelet: %v", err)
klog.Fatalf("Failed to start kubelet: %v", err)
}
// Wait until receiving a termination signal.
waitForTerminationSignal()
@ -105,7 +105,7 @@ func (e *E2EServices) startKubelet() (*server, error) {
return nil, fmt.Errorf("the --hyperkube-image option must be set")
}

glog.Info("Starting kubelet")
klog.Info("Starting kubelet")

// set feature gates so we can check which features are enabled and pass the appropriate flags
utilfeature.DefaultFeatureGate.SetFromMap(framework.TestContext.FeatureGates)
@ -159,7 +159,7 @@ func (e *E2EServices) startKubelet() (*server, error) {
kubeletConfigFlags = append(kubeletConfigFlags, "file-check-frequency")

// Assign a fixed CIDR to the node because there is no node controller.
// Note: this MUST be in sync with with the IP in
// Note: this MUST be in sync with the IP in
// - cluster/gce/config-test.sh and
// - test/e2e_node/conformance/run_test.sh.
kc.PodCIDR = "10.100.0.0/24"
@ -430,7 +430,7 @@ func kubeletConfigCWDPath() (string, error) {
if err != nil {
return "", fmt.Errorf("failed to get current working directory: %v", err)
}
// DO NOT name this file "kubelet" - you will overwrite the the kubelet binary and be very confused :)
// DO NOT name this file "kubelet" - you will overwrite the kubelet binary and be very confused :)
return filepath.Join(cwd, "kubelet-config"), nil
}

32
vendor/k8s.io/kubernetes/test/e2e_node/services/server.go
generated
vendored
@ -29,7 +29,7 @@ import (
"syscall"
"time"

"github.com/golang/glog"
"k8s.io/klog"

"k8s.io/kubernetes/test/e2e/framework"
)
@ -99,7 +99,7 @@ func (s *server) String() string {
//
// Note: restartOnExit == true requires len(s.healthCheckUrls) > 0 to work properly.
func (s *server) start() error {
glog.Infof("Starting server %q with command %q", s.name, commandToString(s.startCommand))
klog.Infof("Starting server %q with command %q", s.name, commandToString(s.startCommand))
errCh := make(chan error)

// Set up restart channels if the server is configured for restart on exit.
@ -127,7 +127,7 @@ func (s *server) start() error {
errCh <- fmt.Errorf("failed to create file %q for `%s` %v.", outPath, s, err)
return
} else {
glog.Infof("Output file for server %q: %v", s.name, outfile.Name())
klog.Infof("Output file for server %q: %v", s.name, outfile.Name())
}
defer outfile.Close()
defer outfile.Sync()
@ -158,7 +158,7 @@ func (s *server) start() error {
return
}
if !s.restartOnExit {
glog.Infof("Waiting for server %q start command to complete", s.name)
klog.Infof("Waiting for server %q start command to complete", s.name)
// If we aren't planning on restarting, ok to Wait() here to release resources.
// Otherwise, we Wait() in the restart loop.
err = s.startCommand.Wait()
@ -169,18 +169,18 @@ func (s *server) start() error {
} else {
usedStartCmd := true
for {
glog.Infof("Running health check for service %q", s.name)
klog.Infof("Running health check for service %q", s.name)
// Wait for an initial health check to pass, so that we are sure the server started.
err := readinessCheck(s.name, s.healthCheckUrls, nil)
if err != nil {
if usedStartCmd {
glog.Infof("Waiting for server %q start command to complete after initial health check failed", s.name)
klog.Infof("Waiting for server %q start command to complete after initial health check failed", s.name)
s.startCommand.Wait() // Release resources if necessary.
}
// This should not happen, immediately stop the e2eService process.
glog.Fatalf("Restart loop readinessCheck failed for %s", s)
klog.Fatalf("Restart loop readinessCheck failed for %s", s)
} else {
glog.Infof("Initial health check passed for service %q", s.name)
klog.Infof("Initial health check passed for service %q", s.name)
}

// Initial health check passed, wait until a health check fails again.
@ -220,11 +220,11 @@ func (s *server) start() error {
}
// Run and wait for exit. This command is assumed to have
// short duration, e.g. systemctl restart
glog.Infof("Restarting server %q with restart command", s.name)
klog.Infof("Restarting server %q with restart command", s.name)
err = s.restartCommand.Run()
if err != nil {
// This should not happen, immediately stop the e2eService process.
glog.Fatalf("Restarting server %s with restartCommand failed. Error: %v.", s, err)
klog.Fatalf("Restarting server %s with restartCommand failed. Error: %v.", s, err)
}
} else {
s.startCommand = &exec.Cmd{
@ -238,12 +238,12 @@ func (s *server) start() error {
ExtraFiles: s.startCommand.ExtraFiles,
SysProcAttr: s.startCommand.SysProcAttr,
}
glog.Infof("Restarting server %q with start command", s.name)
klog.Infof("Restarting server %q with start command", s.name)
err = s.startCommand.Start()
usedStartCmd = true
if err != nil {
// This should not happen, immediately stop the e2eService process.
glog.Fatalf("Restarting server %s with startCommand failed. Error: %v.", s, err)
klog.Fatalf("Restarting server %s with startCommand failed. Error: %v.", s, err)
}
}
}
@ -255,7 +255,7 @@ func (s *server) start() error {

// kill runs the server's kill command.
func (s *server) kill() error {
glog.Infof("Kill server %q", s.name)
klog.Infof("Kill server %q", s.name)
name := s.name
cmd := s.startCommand

@ -274,7 +274,7 @@ func (s *server) kill() error {
}

if cmd.Process == nil {
glog.V(2).Infof("%q not running", name)
klog.V(2).Infof("%q not running", name)
return nil
}
pid := cmd.Process.Pid
@ -292,11 +292,11 @@ func (s *server) kill() error {

const timeout = 10 * time.Second
for _, signal := range []string{"-TERM", "-KILL"} {
glog.V(2).Infof("Killing process %d (%s) with %s", pid, name, signal)
klog.V(2).Infof("Killing process %d (%s) with %s", pid, name, signal)
cmd := exec.Command("kill", signal, strconv.Itoa(pid))
_, err := cmd.Output()
if err != nil {
glog.Errorf("Error signaling process %d (%s) with %s: %v", pid, name, signal, err)
klog.Errorf("Error signaling process %d (%s) with %s: %v", pid, name, signal, err)
continue
}

25
vendor/k8s.io/kubernetes/test/e2e_node/services/services.go
generated
vendored
@ -22,9 +22,10 @@ import (
"os"
"os/exec"
"path"
"testing"

"github.com/golang/glog"
"github.com/kardianos/osext"
"k8s.io/klog"

utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/test/e2e/framework"
@ -85,19 +86,19 @@ func (e *E2EServices) Stop() {
}()
if e.services != nil {
if err := e.services.kill(); err != nil {
glog.Errorf("Failed to stop services: %v", err)
klog.Errorf("Failed to stop services: %v", err)
}
}
if e.kubelet != nil {
if err := e.kubelet.kill(); err != nil {
glog.Errorf("Failed to stop kubelet: %v", err)
klog.Errorf("Failed to stop kubelet: %v", err)
}
}
if e.rmDirs != nil {
for _, d := range e.rmDirs {
err := os.RemoveAll(d)
if err != nil {
glog.Errorf("Failed to delete directory %s: %v", d, err)
klog.Errorf("Failed to delete directory %s: %v", d, err)
}
}
}
@ -105,13 +106,13 @@ func (e *E2EServices) Stop() {

// RunE2EServices actually start the e2e services. This function is used to
// start e2e services in current process. This is only used in run-services-mode.
func RunE2EServices() {
func RunE2EServices(t *testing.T) {
// Populate global DefaultFeatureGate with value from TestContext.FeatureGates.
// This way, statically-linked components see the same feature gate config as the test context.
utilfeature.DefaultFeatureGate.SetFromMap(framework.TestContext.FeatureGates)
e := newE2EServices()
if err := e.run(); err != nil {
glog.Fatalf("Failed to run e2e services: %v", err)
if err := e.run(t); err != nil {
klog.Fatalf("Failed to run e2e services: %v", err)
}
}

@ -142,7 +143,7 @@ func (e *E2EServices) collectLogFiles() {
if framework.TestContext.ReportDir == "" {
return
}
glog.Info("Fetching log files...")
klog.Info("Fetching log files...")
journaldFound := isJournaldAvailable()
for targetFileName, log := range e.logs {
targetLink := path.Join(framework.TestContext.ReportDir, targetFileName)
@ -151,13 +152,13 @@ func (e *E2EServices) collectLogFiles() {
if len(log.JournalctlCommand) == 0 {
continue
}
glog.Infof("Get log file %q with journalctl command %v.", targetFileName, log.JournalctlCommand)
klog.Infof("Get log file %q with journalctl command %v.", targetFileName, log.JournalctlCommand)
out, err := exec.Command("journalctl", log.JournalctlCommand...).CombinedOutput()
if err != nil {
glog.Errorf("failed to get %q from journald: %v, %v", targetFileName, string(out), err)
klog.Errorf("failed to get %q from journald: %v, %v", targetFileName, string(out), err)
} else {
if err = ioutil.WriteFile(targetLink, out, 0644); err != nil {
glog.Errorf("failed to write logs to %q: %v", targetLink, err)
klog.Errorf("failed to write logs to %q: %v", targetLink, err)
}
}
continue
@ -168,7 +169,7 @@ func (e *E2EServices) collectLogFiles() {
continue
}
if err := copyLogFile(file, targetLink); err != nil {
glog.Error(err)
klog.Error(err)
} else {
break
}

4
vendor/k8s.io/kubernetes/test/e2e_node/services/util.go
generated
vendored
@ -18,7 +18,7 @@ package services

import (
"fmt"
"github.com/golang/glog"
"k8s.io/klog"
"net/http"
"os"
"os/signal"
@ -41,7 +41,7 @@ func waitForTerminationSignal() {
// check URLs. Once there is an error in errCh, the function will stop waiting
// and return the error.
func readinessCheck(name string, urls []string, errCh <-chan error) error {
glog.Infof("Running readiness check for service %q", name)
klog.Infof("Running readiness check for service %q", name)
endTime := time.Now().Add(*serverStartTimeout)
blockCh := make(chan error)
defer close(blockCh)

95
vendor/k8s.io/kubernetes/test/e2e_node/system/BUILD
generated
vendored
@ -1,95 +0,0 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"cgroup_validator.go",
|
||||
"docker_validator.go",
|
||||
"kernel_validator.go",
|
||||
"kernel_validator_helper.go",
|
||||
"os_validator.go",
|
||||
"package_validator.go",
|
||||
"report.go",
|
||||
"types.go",
|
||||
"validators.go",
|
||||
] + select({
|
||||
"@io_bazel_rules_go//go/platform:android": [
|
||||
"types_unix.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:darwin": [
|
||||
"types_unix.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:dragonfly": [
|
||||
"types_unix.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:freebsd": [
|
||||
"types_unix.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:linux": [
|
||||
"types_unix.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:nacl": [
|
||||
"types_unix.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:netbsd": [
|
||||
"types_unix.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:openbsd": [
|
||||
"types_unix.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:plan9": [
|
||||
"types_unix.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:solaris": [
|
||||
"types_unix.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:windows": [
|
||||
"types_windows.go",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
importpath = "k8s.io/kubernetes/test/e2e_node/system",
|
||||
deps = [
|
||||
"//vendor/github.com/blang/semver:go_default_library",
|
||||
"//vendor/github.com/docker/docker/api/types:go_default_library",
|
||||
"//vendor/github.com/docker/docker/client:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"cgroup_validator_test.go",
|
||||
"docker_validator_test.go",
|
||||
"kernel_validator_test.go",
|
||||
"os_validator_test.go",
|
||||
"package_validator_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
tags = ["e2e"],
|
||||
deps = [
|
||||
"//vendor/github.com/docker/docker/api/types:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/assert:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
95
vendor/k8s.io/kubernetes/test/e2e_node/system/cgroup_validator.go
generated
vendored
@ -1,95 +0,0 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package system
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var _ Validator = &CgroupsValidator{}
|
||||
|
||||
type CgroupsValidator struct {
|
||||
Reporter Reporter
|
||||
}
|
||||
|
||||
func (c *CgroupsValidator) Name() string {
|
||||
return "cgroups"
|
||||
}
|
||||
|
||||
const (
|
||||
cgroupsConfigPrefix = "CGROUPS_"
|
||||
)
|
||||
|
||||
func (c *CgroupsValidator) Validate(spec SysSpec) (error, error) {
|
||||
subsystems, err := c.getCgroupSubsystems()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get cgroup subsystems: %v", err)
|
||||
}
|
||||
return nil, c.validateCgroupSubsystems(spec.Cgroups, subsystems)
|
||||
}
|
||||
|
||||
func (c *CgroupsValidator) validateCgroupSubsystems(cgroupSpec, subsystems []string) error {
|
||||
missing := []string{}
|
||||
for _, cgroup := range cgroupSpec {
|
||||
found := false
|
||||
for _, subsystem := range subsystems {
|
||||
if cgroup == subsystem {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
item := cgroupsConfigPrefix + strings.ToUpper(cgroup)
|
||||
if found {
|
||||
c.Reporter.Report(item, "enabled", good)
|
||||
} else {
|
||||
c.Reporter.Report(item, "missing", bad)
|
||||
missing = append(missing, cgroup)
|
||||
}
|
||||
}
|
||||
if len(missing) > 0 {
|
||||
return fmt.Errorf("missing cgroups: %s", strings.Join(missing, " "))
|
||||
}
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
func (c *CgroupsValidator) getCgroupSubsystems() ([]string, error) {
|
||||
f, err := os.Open("/proc/cgroups")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
subsystems := []string{}
|
||||
s := bufio.NewScanner(f)
|
||||
for s.Scan() {
|
||||
if err := s.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
text := s.Text()
|
||||
if text[0] != '#' {
|
||||
parts := strings.Fields(text)
|
||||
if len(parts) >= 4 && parts[3] != "0" {
|
||||
subsystems = append(subsystems, parts[0])
|
||||
}
|
||||
}
|
||||
}
|
||||
return subsystems, nil
|
||||
}
|
56
vendor/k8s.io/kubernetes/test/e2e_node/system/cgroup_validator_test.go
generated
vendored
@ -1,56 +0,0 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package system
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestValidateCgroupSubsystem(t *testing.T) {
|
||||
v := &CgroupsValidator{
|
||||
Reporter: DefaultReporter,
|
||||
}
|
||||
cgroupSpec := []string{"system1", "system2"}
|
||||
for desc, test := range map[string]struct {
|
||||
cgroupSpec []string
|
||||
subsystems []string
|
||||
err bool
|
||||
}{
|
||||
"missing cgroup subsystem should report error": {
|
||||
subsystems: []string{"system1"},
|
||||
err: true,
|
||||
},
|
||||
"extra cgroup subsystems should not report error": {
|
||||
subsystems: []string{"system1", "system2", "system3"},
|
||||
err: false,
|
||||
},
|
||||
"subsystems the same with spec should not report error": {
|
||||
subsystems: []string{"system1", "system2"},
|
||||
err: false,
|
||||
},
|
||||
} {
|
||||
err := v.validateCgroupSubsystems(cgroupSpec, test.subsystems)
|
||||
if !test.err {
|
||||
assert.Nil(t, err, "%q: Expect error not to occur with cgroup", desc)
|
||||
} else {
|
||||
assert.NotNil(t, err, "%q: Expect error to occur with docker info", desc)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
100
vendor/k8s.io/kubernetes/test/e2e_node/system/docker_validator.go
generated
vendored
@ -1,100 +0,0 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package system
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"regexp"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/client"
|
||||
)
|
||||
|
||||
var _ Validator = &DockerValidator{}
|
||||
|
||||
// DockerValidator validates docker configuration.
|
||||
type DockerValidator struct {
|
||||
Reporter Reporter
|
||||
}
|
||||
|
||||
func (d *DockerValidator) Name() string {
|
||||
return "docker"
|
||||
}
|
||||
|
||||
const (
|
||||
dockerConfigPrefix = "DOCKER_"
|
||||
maxDockerValidatedVersion = "17.03"
|
||||
)
|
||||
|
||||
// TODO(random-liu): Add more validating items.
|
||||
func (d *DockerValidator) Validate(spec SysSpec) (error, error) {
|
||||
if spec.RuntimeSpec.DockerSpec == nil {
|
||||
// If DockerSpec is not specified, assume current runtime is not
|
||||
// docker, skip the docker configuration validation.
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
c, err := client.NewClient(dockerEndpoint, "", nil, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create docker client: %v", err)
|
||||
}
|
||||
info, err := c.Info(context.Background())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get docker info: %v", err)
|
||||
}
|
||||
return d.validateDockerInfo(spec.RuntimeSpec.DockerSpec, info)
|
||||
}
|
||||
|
||||
func (d *DockerValidator) validateDockerInfo(spec *DockerSpec, info types.Info) (error, error) {
|
||||
// Validate docker version.
|
||||
matched := false
|
||||
for _, v := range spec.Version {
|
||||
r := regexp.MustCompile(v)
|
||||
if r.MatchString(info.ServerVersion) {
|
||||
d.Reporter.Report(dockerConfigPrefix+"VERSION", info.ServerVersion, good)
|
||||
matched = true
|
||||
}
|
||||
}
|
||||
if !matched {
|
||||
// If it's of the new Docker version scheme but didn't match above, it
|
||||
// must be a newer version than the most recently validated one.
|
||||
ver := `\d{2}\.\d+\.\d+-[a-z]{2}`
|
||||
r := regexp.MustCompile(ver)
|
||||
if r.MatchString(info.ServerVersion) {
|
||||
d.Reporter.Report(dockerConfigPrefix+"VERSION", info.ServerVersion, good)
|
||||
w := fmt.Errorf(
|
||||
"docker version is greater than the most recently validated version. Docker version: %s. Max validated version: %s",
|
||||
info.ServerVersion,
|
||||
maxDockerValidatedVersion,
|
||||
)
|
||||
return w, nil
|
||||
}
|
||||
d.Reporter.Report(dockerConfigPrefix+"VERSION", info.ServerVersion, bad)
|
||||
return nil, fmt.Errorf("unsupported docker version: %s", info.ServerVersion)
|
||||
}
|
||||
// Validate graph driver.
|
||||
item := dockerConfigPrefix + "GRAPH_DRIVER"
|
||||
for _, gd := range spec.GraphDriver {
|
||||
if info.Driver == gd {
|
||||
d.Reporter.Report(item, info.Driver, good)
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
d.Reporter.Report(item, info.Driver, bad)
|
||||
return nil, fmt.Errorf("unsupported graph driver: %s", info.Driver)
|
||||
}
|
88
vendor/k8s.io/kubernetes/test/e2e_node/system/docker_validator_test.go
generated
vendored
@ -1,88 +0,0 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package system
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestValidateDockerInfo(t *testing.T) {
|
||||
v := &DockerValidator{
|
||||
Reporter: DefaultReporter,
|
||||
}
|
||||
spec := &DockerSpec{
|
||||
Version: []string{`1\.1[1-3]\..*`, `17\.03\..*`}, // Requires [1.11, 17.03].
|
||||
GraphDriver: []string{"driver_1", "driver_2"},
|
||||
}
|
||||
for _, test := range []struct {
|
||||
info types.Info
|
||||
err bool
|
||||
warn bool
|
||||
}{
|
||||
{
|
||||
info: types.Info{Driver: "driver_1", ServerVersion: "1.10.1"},
|
||||
err: true,
|
||||
warn: false,
|
||||
},
|
||||
{
|
||||
info: types.Info{Driver: "bad_driver", ServerVersion: "1.11.1"},
|
||||
err: true,
|
||||
warn: false,
|
||||
},
|
||||
{
|
||||
info: types.Info{Driver: "driver_1", ServerVersion: "1.11.1"},
|
||||
err: false,
|
||||
warn: false,
|
||||
},
|
||||
{
|
||||
info: types.Info{Driver: "driver_2", ServerVersion: "1.12.1"},
|
||||
err: false,
|
||||
warn: false,
|
||||
},
|
||||
{
|
||||
info: types.Info{Driver: "driver_2", ServerVersion: "1.13.1"},
|
||||
err: false,
|
||||
warn: false,
|
||||
},
|
||||
{
|
||||
info: types.Info{Driver: "driver_2", ServerVersion: "17.03.0-ce"},
|
||||
err: false,
|
||||
warn: false,
|
||||
},
|
||||
{
|
||||
info: types.Info{Driver: "driver_2", ServerVersion: "17.06.0-ce"},
|
||||
err: false,
|
||||
warn: true,
|
||||
},
|
||||
} {
|
||||
warn, err := v.validateDockerInfo(spec, test.info)
|
||||
if !test.err {
|
||||
assert.Nil(t, err, "Expect error not to occur with docker info %+v", test.info)
|
||||
} else {
|
||||
assert.NotNil(t, err, "Expect error to occur with docker info %+v", test.info)
|
||||
}
|
||||
if !test.warn {
|
||||
assert.Nil(t, warn, "Expect error not to occur with docker info %+v", test.info)
|
||||
} else {
|
||||
assert.NotNil(t, warn, "Expect error to occur with docker info %+v", test.info)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
259
vendor/k8s.io/kubernetes/test/e2e_node/system/kernel_validator.go
generated
vendored
@ -1,259 +0,0 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package system
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/util/errors"
|
||||
)
|
||||
|
||||
var _ Validator = &KernelValidator{}
|
||||
|
||||
// KernelValidator validates kernel. Currently only validate kernel version
|
||||
// and kernel configuration.
|
||||
type KernelValidator struct {
|
||||
kernelRelease string
|
||||
Reporter Reporter
|
||||
}
|
||||
|
||||
func (k *KernelValidator) Name() string {
|
||||
return "kernel"
|
||||
}
|
||||
|
||||
// kConfigOption is the possible kernel config option.
|
||||
type kConfigOption string
|
||||
|
||||
const (
|
||||
builtIn kConfigOption = "y"
|
||||
asModule kConfigOption = "m"
|
||||
leftOut kConfigOption = "n"
|
||||
|
||||
// validKConfigRegex is the regex matching kernel configuration line.
|
||||
validKConfigRegex = "^CONFIG_[A-Z0-9_]+=[myn]"
|
||||
// kConfigPrefix is the prefix of kernel configuration.
|
||||
kConfigPrefix = "CONFIG_"
|
||||
)
|
||||
|
||||
func (k *KernelValidator) Validate(spec SysSpec) (error, error) {
|
||||
helper := KernelValidatorHelperImpl{}
|
||||
release, err := helper.GetKernelReleaseVersion()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get kernel release: %v", err)
|
||||
}
|
||||
k.kernelRelease = release
|
||||
var errs []error
|
||||
errs = append(errs, k.validateKernelVersion(spec.KernelSpec))
|
||||
// only validate kernel config when necessary (currently no kernel config for windows)
|
||||
if len(spec.KernelSpec.Required) > 0 || len(spec.KernelSpec.Forbidden) > 0 || len(spec.KernelSpec.Optional) > 0 {
|
||||
errs = append(errs, k.validateKernelConfig(spec.KernelSpec))
|
||||
}
|
||||
return nil, errors.NewAggregate(errs)
|
||||
}
|
||||
|
||||
// validateKernelVersion validates the kernel version.
|
||||
func (k *KernelValidator) validateKernelVersion(kSpec KernelSpec) error {
|
||||
glog.Infof("Validating kernel version")
|
||||
versionRegexps := kSpec.Versions
|
||||
for _, versionRegexp := range versionRegexps {
|
||||
r := regexp.MustCompile(versionRegexp)
|
||||
if r.MatchString(k.kernelRelease) {
|
||||
k.Reporter.Report("KERNEL_VERSION", k.kernelRelease, good)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
k.Reporter.Report("KERNEL_VERSION", k.kernelRelease, bad)
|
||||
return fmt.Errorf("unsupported kernel release: %s", k.kernelRelease)
|
||||
}
|
||||
|
||||
// validateKernelConfig validates the kernel configurations.
|
||||
func (k *KernelValidator) validateKernelConfig(kSpec KernelSpec) error {
|
||||
glog.Infof("Validating kernel config")
|
||||
allConfig, err := k.getKernelConfig()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse kernel config: %v", err)
|
||||
}
|
||||
return k.validateCachedKernelConfig(allConfig, kSpec)
|
||||
}
|
||||
|
||||
// validateCachedKernelConfig validates the kernel confgiurations cached in internal data type.
|
||||
func (k *KernelValidator) validateCachedKernelConfig(allConfig map[string]kConfigOption, kSpec KernelSpec) error {
|
||||
badConfigs := []string{}
|
||||
// reportAndRecord is a helper function to record bad config when
|
||||
// report.
|
||||
reportAndRecord := func(name, msg, desc string, result ValidationResultType) {
|
||||
if result == bad {
|
||||
badConfigs = append(badConfigs, name)
|
||||
}
|
||||
// report description when the config is bad or warn.
|
||||
if result != good && desc != "" {
|
||||
msg = msg + " - " + desc
|
||||
}
|
||||
k.Reporter.Report(name, msg, result)
|
||||
}
|
||||
const (
|
||||
required = iota
|
||||
optional
|
||||
forbidden
|
||||
)
|
||||
validateOpt := func(config KernelConfig, expect int) {
|
||||
var found, missing ValidationResultType
|
||||
switch expect {
|
||||
case required:
|
||||
found, missing = good, bad
|
||||
case optional:
|
||||
found, missing = good, warn
|
||||
case forbidden:
|
||||
found, missing = bad, good
|
||||
}
|
||||
var name string
|
||||
var opt kConfigOption
|
||||
var ok bool
|
||||
for _, name = range append([]string{config.Name}, config.Aliases...) {
|
||||
name = kConfigPrefix + name
|
||||
if opt, ok = allConfig[name]; ok {
|
||||
break
|
||||
}
|
||||
}
|
||||
if !ok {
|
||||
reportAndRecord(name, "not set", config.Description, missing)
|
||||
return
|
||||
}
|
||||
switch opt {
|
||||
case builtIn:
|
||||
reportAndRecord(name, "enabled", config.Description, found)
|
||||
case asModule:
|
||||
reportAndRecord(name, "enabled (as module)", config.Description, found)
|
||||
case leftOut:
|
||||
reportAndRecord(name, "disabled", config.Description, missing)
|
||||
default:
|
||||
reportAndRecord(name, fmt.Sprintf("unknown option: %s", opt), config.Description, missing)
|
||||
}
|
||||
}
|
||||
for _, config := range kSpec.Required {
|
||||
validateOpt(config, required)
|
||||
}
|
||||
for _, config := range kSpec.Optional {
|
||||
validateOpt(config, optional)
|
||||
}
|
||||
for _, config := range kSpec.Forbidden {
|
||||
validateOpt(config, forbidden)
|
||||
}
|
||||
if len(badConfigs) > 0 {
|
||||
return fmt.Errorf("unexpected kernel config: %s", strings.Join(badConfigs, " "))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// getKernelConfigReader search kernel config file in a predefined list. Once the kernel config
|
||||
// file is found it will read the configurations into a byte buffer and return. If the kernel
|
||||
// config file is not found, it will try to load kernel config module and retry again.
|
||||
func (k *KernelValidator) getKernelConfigReader() (io.Reader, error) {
|
||||
possibePaths := []string{
|
||||
"/proc/config.gz",
|
||||
"/boot/config-" + k.kernelRelease,
|
||||
"/usr/src/linux-" + k.kernelRelease + "/.config",
|
||||
"/usr/src/linux/.config",
|
||||
}
|
||||
configsModule := "configs"
|
||||
modprobeCmd := "modprobe"
|
||||
// loadModule indicates whether we've tried to load kernel config module ourselves.
|
||||
loadModule := false
|
||||
for {
|
||||
for _, path := range possibePaths {
|
||||
_, err := os.Stat(path)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
// Buffer the whole file, so that we can close the file and unload
|
||||
// kernel config module in this function.
|
||||
b, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var r io.Reader
|
||||
r = bytes.NewReader(b)
|
||||
// This is a gzip file (config.gz), unzip it.
|
||||
if filepath.Ext(path) == ".gz" {
|
||||
r, err = gzip.NewReader(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return r, nil
|
||||
}
|
||||
// If we've tried to load kernel config module, break and return error.
|
||||
if loadModule {
|
||||
break
|
||||
}
|
||||
// If the kernel config file is not found, try to load the kernel
|
||||
// config module and check again.
|
||||
output, err := exec.Command(modprobeCmd, configsModule).CombinedOutput()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to load kernel module %q: output - %q, err - %v",
|
||||
configsModule, output, err)
|
||||
}
|
||||
// Unload the kernel config module to make sure the validation have no side effect.
|
||||
defer exec.Command(modprobeCmd, "-r", configsModule).Run()
|
||||
loadModule = true
|
||||
}
|
||||
return nil, fmt.Errorf("no config path in %v is available", possibePaths)
|
||||
}
|
||||
|
||||
// getKernelConfig gets kernel config from kernel config file and convert kernel config to internal type.
|
||||
func (k *KernelValidator) getKernelConfig() (map[string]kConfigOption, error) {
|
||||
r, err := k.getKernelConfigReader()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return k.parseKernelConfig(r)
|
||||
}
|
||||
|
||||
// parseKernelConfig converts kernel config to internal type.
|
||||
func (k *KernelValidator) parseKernelConfig(r io.Reader) (map[string]kConfigOption, error) {
|
||||
config := map[string]kConfigOption{}
|
||||
regex := regexp.MustCompile(validKConfigRegex)
|
||||
s := bufio.NewScanner(r)
|
||||
for s.Scan() {
|
||||
if err := s.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
line := strings.TrimSpace(s.Text())
|
||||
if !regex.MatchString(line) {
|
||||
continue
|
||||
}
|
||||
fields := strings.Split(line, "=")
|
||||
if len(fields) != 2 {
|
||||
glog.Errorf("Unexpected fields number in config %q", line)
|
||||
continue
|
||||
}
|
||||
config[fields[0]] = kConfigOption(fields[1])
|
||||
}
|
||||
return config, nil
|
||||
|
||||
}
|
23
vendor/k8s.io/kubernetes/test/e2e_node/system/kernel_validator_helper.go
generated
vendored
@ -1,23 +0,0 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package system
|
||||
|
||||
// KernelValidatorHelper is an interface intended to help with os specific kernel validation
|
||||
type KernelValidatorHelper interface {
|
||||
// GetKernelReleaseVersion gets the current kernel release version of the system
|
||||
GetKernelReleaseVersion() (string, error)
|
||||
}
|
197
vendor/k8s.io/kubernetes/test/e2e_node/system/kernel_validator_test.go
generated
vendored
@ -1,197 +0,0 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package system
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestValidateKernelVersion(t *testing.T) {
|
||||
v := &KernelValidator{
|
||||
Reporter: DefaultReporter,
|
||||
}
|
||||
// Currently, testRegex is align with DefaultSysSpec.KernelVersion, but in the future
|
||||
// they may be different.
|
||||
// This is fine, because the test mainly tests the kernel version validation logic,
|
||||
// not the DefaultSysSpec. The DefaultSysSpec should be tested with node e2e.
|
||||
testRegex := []string{`3\.[1-9][0-9].*`, `4\..*`}
|
||||
for _, test := range []struct {
|
||||
version string
|
||||
err bool
|
||||
}{
|
||||
// first version regex matches
|
||||
{
|
||||
version: "3.19.9-99-test",
|
||||
err: false,
|
||||
},
|
||||
// one of version regexes matches
|
||||
{
|
||||
version: "4.4.14+",
|
||||
err: false,
|
||||
},
|
||||
// no version regex matches
|
||||
{
|
||||
version: "2.0.0",
|
||||
err: true,
|
||||
},
|
||||
{
|
||||
version: "5.0.0",
|
||||
err: true,
|
||||
},
|
||||
{
|
||||
version: "3.9.0",
|
||||
err: true,
|
||||
},
|
||||
} {
|
||||
v.kernelRelease = test.version
|
||||
err := v.validateKernelVersion(KernelSpec{Versions: testRegex})
|
||||
if !test.err {
|
||||
assert.Nil(t, err, "Expect error not to occur with kernel version %q", test.version)
|
||||
} else {
|
||||
assert.NotNil(t, err, "Expect error to occur with kenrel version %q", test.version)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateCachedKernelConfig(t *testing.T) {
|
||||
v := &KernelValidator{
|
||||
Reporter: DefaultReporter,
|
||||
}
|
||||
testKernelSpec := KernelSpec{
|
||||
Required: []KernelConfig{{Name: "REQUIRED_1"}, {Name: "REQUIRED_2", Aliases: []string{"ALIASE_REQUIRED_2"}}},
|
||||
Optional: []KernelConfig{{Name: "OPTIONAL_1"}, {Name: "OPTIONAL_2"}},
|
||||
Forbidden: []KernelConfig{
|
||||
{Name: "FORBIDDEN_1", Description: "TEST FORBIDDEN"},
|
||||
{Name: "FORBIDDEN_2", Aliases: []string{"ALIASE_FORBIDDEN_2"}},
|
||||
},
|
||||
}
|
||||
for c, test := range []struct {
|
||||
desc string
|
||||
config map[string]kConfigOption
|
||||
err bool
|
||||
}{
|
||||
{
|
||||
desc: "meet all required configurations should not report error.",
|
||||
config: map[string]kConfigOption{
|
||||
"REQUIRED_1": builtIn,
|
||||
"REQUIRED_2": asModule,
|
||||
},
|
||||
err: false,
|
||||
},
|
||||
{
|
||||
desc: "one required configuration disabled should report error.",
|
||||
config: map[string]kConfigOption{
|
||||
"REQUIRED_1": leftOut,
|
||||
"REQUIRED_2": builtIn,
|
||||
},
|
||||
err: true,
|
||||
},
|
||||
{
|
||||
desc: "one required configuration missing should report error.",
|
||||
config: map[string]kConfigOption{
|
||||
"REQUIRED_1": builtIn,
|
||||
},
|
||||
err: true,
|
||||
},
|
||||
{
|
||||
desc: "alias of required configuration should not report error.",
|
||||
config: map[string]kConfigOption{
|
||||
"REQUIRED_1": builtIn,
|
||||
"ALIASE_REQUIRED_2": asModule,
|
||||
},
|
||||
err: false,
|
||||
},
|
||||
{
|
||||
desc: "optional configuration set or not should not report error.",
|
||||
config: map[string]kConfigOption{
|
||||
"REQUIRED_1": builtIn,
|
||||
"REQUIRED_2": asModule,
|
||||
"OPTIONAL_1": builtIn,
|
||||
},
|
||||
err: false,
|
||||
},
|
||||
{
|
||||
desc: "forbidden configuration disabled should not report error.",
|
||||
config: map[string]kConfigOption{
|
||||
"REQUIRED_1": builtIn,
|
||||
"REQUIRED_2": asModule,
|
||||
"FORBIDDEN_1": leftOut,
|
||||
},
|
||||
err: false,
|
||||
},
|
||||
{
|
||||
desc: "forbidden configuration built-in should report error.",
|
||||
config: map[string]kConfigOption{
|
||||
"REQUIRED_1": builtIn,
|
||||
"REQUIRED_2": asModule,
|
||||
"FORBIDDEN_1": builtIn,
|
||||
},
|
||||
err: true,
|
||||
},
|
||||
{
|
||||
desc: "forbidden configuration built as module should report error.",
|
||||
config: map[string]kConfigOption{
|
||||
"REQUIRED_1": builtIn,
|
||||
"REQUIRED_2": asModule,
|
||||
"FORBIDDEN_1": asModule,
|
||||
},
|
||||
err: true,
|
||||
},
|
||||
{
|
||||
desc: "alias of forbidden configuration should report error.",
|
||||
config: map[string]kConfigOption{
|
||||
"REQUIRED_1": builtIn,
|
||||
"REQUIRED_2": asModule,
|
||||
"ALIASE_FORBIDDEN_2": asModule,
|
||||
},
|
||||
err: true,
|
||||
},
|
||||
} {
|
||||
t.Logf("TestCase #%d %s", c, test.desc)
|
||||
// Add kernel config prefix.
|
||||
for k, v := range test.config {
|
||||
delete(test.config, k)
|
||||
test.config[kConfigPrefix+k] = v
|
||||
}
|
||||
err := v.validateCachedKernelConfig(test.config, testKernelSpec)
|
||||
if !test.err {
|
||||
assert.Nil(t, err, "Expect error not to occur with kernel config %q", test.config)
|
||||
} else {
|
||||
assert.NotNil(t, err, "Expect error to occur with kenrel config %q", test.config)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateParseKernelConfig(t *testing.T) {
|
||||
config := `CONFIG_1=y
|
||||
CONFIG_2=m
|
||||
CONFIG_3=n`
|
||||
expected := map[string]kConfigOption{
|
||||
"CONFIG_1": builtIn,
|
||||
"CONFIG_2": asModule,
|
||||
"CONFIG_3": leftOut,
|
||||
}
|
||||
v := &KernelValidator{
|
||||
Reporter: DefaultReporter,
|
||||
}
|
||||
got, err := v.parseKernelConfig(bytes.NewReader([]byte(config)))
|
||||
assert.Nil(t, err, "Expect error not to occur when parse kernel configuration %q", config)
|
||||
assert.Equal(t, expected, got)
|
||||
}
|
50
vendor/k8s.io/kubernetes/test/e2e_node/system/os_validator.go
generated
vendored
@ -1,50 +0,0 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package system
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var _ Validator = &OSValidator{}
|
||||
|
||||
type OSValidator struct {
|
||||
Reporter Reporter
|
||||
}
|
||||
|
||||
func (o *OSValidator) Name() string {
|
||||
return "os"
|
||||
}
|
||||
|
||||
func (o *OSValidator) Validate(spec SysSpec) (error, error) {
|
||||
os, err := exec.Command("uname").CombinedOutput()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get os name: %v", err)
|
||||
}
|
||||
return nil, o.validateOS(strings.TrimSpace(string(os)), spec.OS)
|
||||
}
|
||||
|
||||
func (o *OSValidator) validateOS(os, specOS string) error {
|
||||
if os != specOS {
|
||||
o.Reporter.Report("OS", os, bad)
|
||||
return fmt.Errorf("unsupported operating system: %s", os)
|
||||
}
|
||||
o.Reporter.Report("OS", os, good)
|
||||
return nil
|
||||
}
|
54
vendor/k8s.io/kubernetes/test/e2e_node/system/os_validator_test.go
generated
vendored
@ -1,54 +0,0 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package system
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestValidateOS(t *testing.T) {
|
||||
v := &OSValidator{
|
||||
Reporter: DefaultReporter,
|
||||
}
|
||||
specOS := "Linux"
|
||||
for _, test := range []struct {
|
||||
os string
|
||||
err bool
|
||||
}{
|
||||
{
|
||||
os: "Linux",
|
||||
err: false,
|
||||
},
|
||||
{
|
||||
os: "Windows",
|
||||
err: true,
|
||||
},
|
||||
{
|
||||
os: "Darwin",
|
||||
err: true,
|
||||
},
|
||||
} {
|
||||
err := v.validateOS(test.os, specOS)
|
||||
if !test.err {
|
||||
assert.Nil(t, err, "Expect error not to occur with os %q", test.os)
|
||||
} else {
|
||||
assert.NotNil(t, err, "Expect error to occur with os %q", test.os)
|
||||
}
|
||||
}
|
||||
}
|
325
vendor/k8s.io/kubernetes/test/e2e_node/system/package_validator.go
generated
vendored
@ -1,325 +0,0 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package system
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os/exec"
|
||||
"strings"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/errors"
|
||||
|
||||
"github.com/blang/semver"
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
// semVerDotsCount is the number of dots in a valid semantic version.
|
||||
const semVerDotsCount int = 2
|
||||
|
||||
// packageManager is an interface that abstracts the basic operations of a
|
||||
// package manager.
|
||||
type packageManager interface {
|
||||
// getPackageVersion returns the version of the package given the
|
||||
// packageName, or an error if no such package exists.
|
||||
getPackageVersion(packageName string) (string, error)
|
||||
}
|
||||
|
||||
// newPackageManager returns the package manager on the running machine, and an
|
||||
// error if no package managers is available.
|
||||
func newPackageManager() (packageManager, error) {
|
||||
if m, ok := newDPKG(); ok {
|
||||
return m, nil
|
||||
}
|
||||
return nil, fmt.Errorf("failed to find package manager")
|
||||
}
|
||||
|
||||
// dpkg implements packageManager. It uses "dpkg-query" to retrieve package
|
||||
// information.
|
||||
type dpkg struct{}
|
||||
|
||||
// newDPKG returns a Debian package manager. It returns (nil, false) if no such
|
||||
// package manager exists on the running machine.
|
||||
func newDPKG() (packageManager, bool) {
|
||||
_, err := exec.LookPath("dpkg-query")
|
||||
if err != nil {
|
||||
return nil, false
|
||||
}
|
||||
return dpkg{}, true
|
||||
}
|
||||
|
||||
// getPackageVersion returns the upstream package version for the package given
|
||||
// the packageName, and an error if no such package exists.
|
||||
func (_ dpkg) getPackageVersion(packageName string) (string, error) {
|
||||
output, err := exec.Command("dpkg-query", "--show", "--showformat='${Version}'", packageName).Output()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("dpkg-query failed: %s", err)
|
||||
}
|
||||
version := extractUpstreamVersion(string(output))
|
||||
if version == "" {
|
||||
return "", fmt.Errorf("no version information")
|
||||
}
|
||||
return version, nil
|
||||
}
|
||||
|
||||
// packageValidator implements the Validator interface. It validates packages
|
||||
// and their versions.
|
||||
type packageValidator struct {
|
||||
reporter Reporter
|
||||
kernelRelease string
|
||||
osDistro string
|
||||
}
|
||||
|
||||
// Name returns the name of the package validator.
|
||||
func (self *packageValidator) Name() string {
|
||||
return "package"
|
||||
}
|
||||
|
||||
// Validate checks packages and their versions against the spec using the
|
||||
// package manager on the running machine, and returns an error on any
|
||||
// package/version mismatch.
|
||||
func (self *packageValidator) Validate(spec SysSpec) (error, error) {
|
||||
if len(spec.PackageSpecs) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var err error
|
||||
if self.kernelRelease, err = getKernelRelease(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if self.osDistro, err = getOSDistro(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
manager, err := newPackageManager()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
specs := applyPackageSpecOverride(spec.PackageSpecs, spec.PackageSpecOverrides, self.osDistro)
|
||||
return self.validate(specs, manager)
|
||||
}
|
||||
|
||||
// Validate checks packages and their versions against the packageSpecs using
|
||||
// the packageManager, and returns an error on any package/version mismatch.
|
||||
func (self *packageValidator) validate(packageSpecs []PackageSpec, manager packageManager) (error, error) {
|
||||
var errs []error
|
||||
for _, spec := range packageSpecs {
|
||||
// Substitute variables in package name.
|
||||
packageName := resolvePackageName(spec.Name, self.kernelRelease)
|
||||
|
||||
nameWithVerRange := fmt.Sprintf("%s (%s)", packageName, spec.VersionRange)
|
||||
|
||||
// Get the version of the package on the running machine.
|
||||
version, err := manager.getPackageVersion(packageName)
|
||||
if err != nil {
|
||||
glog.V(1).Infof("Failed to get the version for the package %q: %s\n", packageName, err)
|
||||
errs = append(errs, err)
|
||||
self.reporter.Report(nameWithVerRange, "not installed", bad)
|
||||
continue
|
||||
}
|
||||
|
||||
// Version requirement will not be enforced if version range is
|
||||
// not specified in the spec.
|
||||
if spec.VersionRange == "" {
|
||||
self.reporter.Report(packageName, version, good)
|
||||
continue
|
||||
}
|
||||
|
||||
// Convert both the version range in the spec and the version returned
|
||||
// from package manager to semantic version format, and then check if
|
||||
// the version is in the range.
|
||||
sv, err := semver.Make(toSemVer(version))
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to convert %q to semantic version: %s\n", version, err)
|
||||
errs = append(errs, err)
|
||||
self.reporter.Report(nameWithVerRange, "internal error", bad)
|
||||
continue
|
||||
}
|
||||
versionRange := semver.MustParseRange(toSemVerRange(spec.VersionRange))
|
||||
if versionRange(sv) {
|
||||
self.reporter.Report(nameWithVerRange, version, good)
|
||||
} else {
|
||||
errs = append(errs, fmt.Errorf("package \"%s %s\" does not meet the spec \"%s (%s)\"", packageName, sv, packageName, spec.VersionRange))
|
||||
self.reporter.Report(nameWithVerRange, version, bad)
|
||||
}
|
||||
}
|
||||
return nil, errors.NewAggregate(errs)
|
||||
}
|
||||
|
||||
// getKernelRelease returns the kernel release of the local machine.
|
||||
func getKernelRelease() (string, error) {
|
||||
output, err := exec.Command("uname", "-r").Output()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get kernel release: %s", err)
|
||||
}
|
||||
return strings.TrimSpace(string(output)), nil
|
||||
}
|
||||
|
||||
// getOSDistro returns the OS distro of the local machine.
|
||||
func getOSDistro() (string, error) {
|
||||
f := "/etc/lsb-release"
|
||||
b, err := ioutil.ReadFile(f)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to read %q: %s", f, err)
|
||||
}
|
||||
content := string(b)
|
||||
switch {
|
||||
case strings.Contains(content, "Ubuntu"):
|
||||
return "ubuntu", nil
|
||||
	case strings.Contains(content, "Chrome OS"):
		return "cos", nil
	case strings.Contains(content, "CoreOS"):
		return "coreos", nil
	default:
		return "", fmt.Errorf("failed to get OS distro: %s", content)
	}
}

// resolvePackageName substitutes the variables in the packageName with the
// local information.
// E.g., "linux-headers-${KERNEL_RELEASE}" -> "linux-headers-4.4.0-75-generic".
func resolvePackageName(packageName string, kernelRelease string) string {
	packageName = strings.Replace(packageName, "${KERNEL_RELEASE}", kernelRelease, -1)
	return packageName
}

// applyPackageSpecOverride applies the package spec overrides for the given
// osDistro to the packageSpecs and returns the applied result.
func applyPackageSpecOverride(packageSpecs []PackageSpec, overrides []PackageSpecOverride, osDistro string) []PackageSpec {
	var override *PackageSpecOverride
	for _, o := range overrides {
		if o.OSDistro == osDistro {
			override = &o
			break
		}
	}
	if override == nil {
		return packageSpecs
	}

	// Remove packages in the spec that matches the overrides in
	// Subtractions.
	var out []PackageSpec
	subtractions := make(map[string]bool)
	for _, spec := range override.Subtractions {
		subtractions[spec.Name] = true
	}
	for _, spec := range packageSpecs {
		if _, ok := subtractions[spec.Name]; !ok {
			out = append(out, spec)
		}
	}

	// Add packages in the spec that matches the overrides in Additions.
	return append(out, override.Additions...)
}

// extractUpstreamVersion returns the upstream version of the given full
// version in dpkg format. E.g., "1:1.0.6-2ubuntu2.1" -> "1.0.6".
func extractUpstreamVersion(version string) string {
	// The full version is in the format of
	// "[epoch:]upstream_version[-debian_revision]". See
	// https://www.debian.org/doc/debian-policy/ch-controlfields.html#s-f-Version.
	version = strings.Trim(version, " '")
	if i := strings.Index(version, ":"); i != -1 {
		version = version[i+1:]
	}
	if i := strings.Index(version, "-"); i != -1 {
		version = version[:i]
	}
	return version
}

// toSemVerRange converts the input to a semantic version range.
// E.g., ">=1.0" -> ">=1.0.x"
//       ">=1" -> ">=1.x"
//       ">=1 <=2.3" -> ">=1.x <=2.3.x"
//       ">1 || >3.1.0 !4.2" -> ">1.x || >3.1.0 !4.2.x"
func toSemVerRange(input string) string {
	var output []string
	fields := strings.Fields(input)
	for _, f := range fields {
		numDots, hasDigits := 0, false
		for _, c := range f {
			switch {
			case c == '.':
				numDots++
			case c >= '0' && c <= '9':
				hasDigits = true
			}
		}
		if hasDigits && numDots < semVerDotsCount {
			f = strings.TrimRight(f, " ")
			f += ".x"
		}
		output = append(output, f)
	}
	return strings.Join(output, " ")
}

// toSemVer converts the input to a semantic version, and an empty string on
// error.
func toSemVer(version string) string {
	// Remove the first non-digit and non-dot character as well as the ones
	// following it.
	// E.g., "1.8.19p1" -> "1.8.19".
	if i := strings.IndexFunc(version, func(c rune) bool {
		if (c < '0' || c > '9') && c != '.' {
			return true
		}
		return false
	}); i != -1 {
		version = version[:i]
	}

	// Remove the trailing dots if there's any, and then returns an empty
	// string if nothing left.
	version = strings.TrimRight(version, ".")
	if version == "" {
		return ""
	}

	numDots := strings.Count(version, ".")
	switch {
	case numDots < semVerDotsCount:
		// Add minor version and patch version.
		// E.g. "1.18" -> "1.18.0" and "481" -> "481.0.0".
		version += strings.Repeat(".0", semVerDotsCount-numDots)
	case numDots > semVerDotsCount:
		// Remove anything beyond the patch version
		// E.g. "2.0.10.4" -> "2.0.10".
		for numDots != semVerDotsCount {
			if i := strings.LastIndex(version, "."); i != -1 {
				version = version[:i]
				numDots--
			}
		}
	}

	// Remove leading zeros in major/minor/patch version.
	// E.g., "2.02" -> "2.2"
	//       "8.0.0095" -> "8.0.95"
	var subs []string
	for _, s := range strings.Split(version, ".") {
		s := strings.TrimLeft(s, "0")
		if s == "" {
			s = "0"
		}
		subs = append(subs, s)
	}
	return strings.Join(subs, ".")
}
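Taken together, the helpers above normalize both sides of a package check: resolvePackageName expands ${KERNEL_RELEASE}, extractUpstreamVersion strips the dpkg epoch and revision, and toSemVer/toSemVerRange coerce the installed version and the spec range into comparable semver forms. A minimal sketch of how they compose, assuming it lives in the same system package (the helpers are unexported); the normalizePackage wrapper name is hypothetical:

// normalizePackage is a hypothetical wrapper showing how the helpers compose.
func normalizePackage(spec PackageSpec, rawVersion, kernelRelease string) (name, version, versionRange string) {
    name = resolvePackageName(spec.Name, kernelRelease)    // "linux-headers-${KERNEL_RELEASE}" -> "linux-headers-4.4.0-75-generic"
    version = toSemVer(extractUpstreamVersion(rawVersion)) // "1:1.0.6-2ubuntu2.1" -> "1.0.6"
    versionRange = toSemVerRange(spec.VersionRange)        // ">=1.0" -> ">=1.0.x"
    return name, version, versionRange
}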
266
vendor/k8s.io/kubernetes/test/e2e_node/system/package_validator_test.go
generated
vendored
266
vendor/k8s.io/kubernetes/test/e2e_node/system/package_validator_test.go
generated
vendored
@ -1,266 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package system

import (
    "errors"
    "fmt"
    "reflect"
    "testing"
)

func TestExtractUpstreamVersion(t *testing.T) {
    for _, test := range []struct {
        input    string
        expected string
    }{
        {
            input:    "",
            expected: "",
        },
        {
            input:    "1.0.6",
            expected: "1.0.6",
        },
        {
            input:    "1:1.0.6",
            expected: "1.0.6",
        },
        {
            input:    "1.0.6-2ubuntu2.1",
            expected: "1.0.6",
        },
        {
            input:    "1:1.0.6-2ubuntu2.1",
            expected: "1.0.6",
        },
    } {
        got := extractUpstreamVersion(test.input)
        if test.expected != got {
            t.Errorf("extractUpstreamVersion(%q) = %q, want %q", test.input, got, test.expected)
        }
    }
}

func TestToSemVer(t *testing.T) {
    for _, test := range []struct {
        input    string
        expected string
    }{
        {
            input:    "",
            expected: "",
        },
        {
            input:    "1.2.3",
            expected: "1.2.3",
        },
        {
            input:    "1.8.19p1",
            expected: "1.8.19",
        },
        {
            input:    "1.8.19.p1",
            expected: "1.8.19",
        },
        {
            input:    "p1",
            expected: "",
        },
        {
            input:    "1.18",
            expected: "1.18.0",
        },
        {
            input:    "481",
            expected: "481.0.0",
        },
        {
            input:    "2.0.10.4",
            expected: "2.0.10",
        },
        {
            input:    "03",
            expected: "3.0.0",
        },
        {
            input:    "2.02",
            expected: "2.2.0",
        },
        {
            input:    "8.0.0095",
            expected: "8.0.95",
        },
    } {
        got := toSemVer(test.input)
        if test.expected != got {
            t.Errorf("toSemVer(%q) = %q, want %q", test.input, got, test.expected)
        }
    }
}

func TestToSemVerRange(t *testing.T) {
    for _, test := range []struct {
        input    string
        expected string
    }{
        {
            input:    "",
            expected: "",
        },
        {
            input:    ">=1.0.0",
            expected: ">=1.0.0",
        },
        {
            input:    ">=1.0",
            expected: ">=1.0.x",
        },
        {
            input:    ">=1",
            expected: ">=1.x",
        },
        {
            input:    ">=1 || !2.3",
            expected: ">=1.x || !2.3.x",
        },
        {
            input:    ">1 || >3.1.0 !4.2",
            expected: ">1.x || >3.1.0 !4.2.x",
        },
    } {
        got := toSemVerRange(test.input)
        if test.expected != got {
            t.Errorf("toSemVerRange(%q) = %q, want %q", test.input, got, test.expected)
        }
    }
}

// testPackageManager implements the packageManager interface.
type testPackageManager struct {
    packageVersions map[string]string
}

func (m testPackageManager) getPackageVersion(packageName string) (string, error) {
    if v, ok := m.packageVersions[packageName]; ok {
        return v, nil
    }
    return "", fmt.Errorf("package %q does not exist", packageName)
}

func TestValidatePackageVersion(t *testing.T) {
    testKernelRelease := "test-kernel-release"
    manager := testPackageManager{
        packageVersions: map[string]string{
            "foo": "1.0.0",
            "bar": "2.1.0",
            "bar-" + testKernelRelease: "3.0.0",
        },
    }
    v := &packageValidator{
        reporter:      DefaultReporter,
        kernelRelease: testKernelRelease,
    }
    for _, test := range []struct {
        desc  string
        specs []PackageSpec
        err   error
    }{
        {
            desc: "all packages meet the spec",
            specs: []PackageSpec{
                {Name: "foo", VersionRange: ">=1.0"},
                {Name: "bar", VersionRange: ">=2.0 <= 3.0"},
            },
        },
        {
            desc: "package version does not meet the spec",
            specs: []PackageSpec{
                {Name: "foo", VersionRange: ">=1.0"},
                {Name: "bar", VersionRange: ">=3.0"},
            },
            err: errors.New("package \"bar 2.1.0\" does not meet the spec \"bar (>=3.0)\""),
        },
        {
            desc: "package not installed",
            specs: []PackageSpec{
                {Name: "baz"},
            },
            err: errors.New("package \"baz\" does not exist"),
        },
        {
            desc: "use variable in package name",
            specs: []PackageSpec{
                {Name: "bar-${KERNEL_RELEASE}", VersionRange: ">=3.0"},
            },
        },
    } {
        _, err := v.validate(test.specs, manager)
        if test.err == nil && err != nil {
            t.Errorf("%s: v.validate(): err = %s", test.desc, err)
        }
        if test.err != nil {
            if err == nil {
                t.Errorf("%s: v.validate() is expected to fail.", test.desc)
            } else if test.err.Error() != err.Error() {
                t.Errorf("%s: v.validate(): err = %q, want = %q", test.desc, err, test.err)
            }
        }
    }
}

func TestApplyPackageOverride(t *testing.T) {
    for _, test := range []struct {
        overrides []PackageSpecOverride
        osDistro  string
        specs     []PackageSpec
        expected  []PackageSpec
    }{
        {
            specs:    []PackageSpec{{Name: "foo", VersionRange: ">=1.0"}},
            expected: []PackageSpec{{Name: "foo", VersionRange: ">=1.0"}},
        },
        {
            osDistro: "ubuntu",
            overrides: []PackageSpecOverride{
                {
                    OSDistro:     "ubuntu",
                    Subtractions: []PackageSpec{{Name: "foo"}},
                    Additions:    []PackageSpec{{Name: "bar", VersionRange: ">=2.0"}},
                },
            },
            specs:    []PackageSpec{{Name: "foo", VersionRange: ">=1.0"}},
            expected: []PackageSpec{{Name: "bar", VersionRange: ">=2.0"}},
        },
        {
            osDistro: "ubuntu",
            overrides: []PackageSpecOverride{
                {
                    OSDistro:     "debian",
                    Subtractions: []PackageSpec{{Name: "foo"}},
                },
            },
            specs:    []PackageSpec{{Name: "foo", VersionRange: ">=1.0"}},
            expected: []PackageSpec{{Name: "foo", VersionRange: ">=1.0"}},
        },
    } {
        got := applyPackageSpecOverride(test.specs, test.overrides, test.osDistro)
        if !reflect.DeepEqual(test.expected, got) {
            t.Errorf("applyPackageSpecOverride(%+v, %+v, %s) = %+v, want = %+v", test.specs, test.overrides, test.osDistro, got, test.expected)
        }
    }
}
78
vendor/k8s.io/kubernetes/test/e2e_node/system/report.go
generated
vendored
78
vendor/k8s.io/kubernetes/test/e2e_node/system/report.go
generated
vendored
@ -1,78 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package system

import (
    "fmt"
    "io"
    "os"
)

// ValidationResultType is type of the validation result. Different validation results
// corresponds to different colors.
type ValidationResultType int32

const (
    good ValidationResultType = iota
    bad
    warn
)

// color is the color of the message.
type color int32

const (
    red    color = 31
    green        = 32
    yellow       = 33
    white        = 37
)

func colorize(s string, c color) string {
    return fmt.Sprintf("\033[0;%dm%s\033[0m", c, s)
}

// The default reporter for the system verification test
type StreamReporter struct {
    // The stream that this reporter is writing to
    WriteStream io.Writer
}

func (dr *StreamReporter) Report(key, value string, resultType ValidationResultType) error {
    var c color
    switch resultType {
    case good:
        c = green
    case bad:
        c = red
    case warn:
        c = yellow
    default:
        c = white
    }
    if dr.WriteStream == nil {
        return fmt.Errorf("WriteStream has to be defined for this reporter")
    }

    fmt.Fprintf(dr.WriteStream, "%s: %s\n", colorize(key, white), colorize(value, c))
    return nil
}

// DefaultReporter is the default Reporter
var DefaultReporter = &StreamReporter{
    WriteStream: os.Stdout,
}
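As a brief usage sketch (assuming the same system package, since good and bad are unexported): DefaultReporter above is simply a StreamReporter bound to os.Stdout, and any other io.Writer can be substituted, for example to capture output in tests.

// Sketch: drive the reporter directly with one passing and one failing result.
r := &StreamReporter{WriteStream: os.Stdout}
_ = r.Report("OS", "Linux", good)            // rendered in green
_ = r.Report("KERNEL_VERSION", "3.2.0", bad) // rendered in red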
269
vendor/k8s.io/kubernetes/test/e2e_node/system/specs/gke.yaml
generated
vendored
269
vendor/k8s.io/kubernetes/test/e2e_node/system/specs/gke.yaml
generated
vendored
@ -1,269 +0,0 @@
# This is the system spec that must be satisfied by the images running on GKE.

os: Linux

kernelSpec:
  versions:
  # GKE requires kernel version 4.4+.
  - '4\.[4-9].*'
  - '4\.[1-9][0-9].*'
  - '[5-9].*'

  # Required kernel configurations -- the configuration must be set to "y" or
  # "m".
  required:
  # The configurations required by virtual machine or cloud provider.

  - name: BOOTPARAM_HARDLOCKUP_PANIC
    description: 'Enable the kernel to panic on "hard lockups".'
  - name: BOOTPARAM_SOFTLOCKUP_PANIC
    description: 'Enable the kernel to panic on "soft lockups".'
  - name: PANIC_ON_OOPS
    description: 'Enable the kernel to panic when it oops.'
  - name: PVPANIC
    description: 'Enable the VM (guest) to communicate panic events with the
      host.'
  - name: DMIID
    description: 'Make sure /sys/class/dmi is exported - cAdvisor currently
      uses this to determine which the cloud provider it is: aws, azure, or
      gce, etc'
  - name: ACPI_BUTTON
    description: 'Enable the software-controlled power management, and required
      by reset or stop button of GCE console.'

  # The configurations required by network.

  - name: INET
    description: 'Enable TCP/IP networking.'
  - name: VXLAN
    description: 'Required by the overlay networking in Kubernetes.'
  - name: IP_SET
    description: 'Required by Kubernetes network policy.'
  - name: IP_SET_HASH_IP
    description: 'This introduces hash:ip set type support, which is required
      by Kubernetes Calico networking.'
  - name: IPVLAN
    description: 'Required by IPVLAN feature.'
  - name: IPV6
    description: 'Required by IPVLAN feature.'
  - name: IP6_NF_IPTABLES
    description: 'Required by kube-proxy.'
  - name: IP_NF_TARGET_REDIRECT
    alias:
    - NETFILTER_XT_TARGET_REDIRECT
    description: 'Enabled REDIRECT: all incoming connections are mapped onto
      the incoming interface''s address, causing the packets to come to the
      local machine instead of passing through. This is required by
      kube-proxy.'
  - name: NETFILTER_XT_MATCH_COMMENT
    description: 'This option adds a "comment" dummy-match, which allows you to
      put comments in your iptables ruleset. Today''s kube-proxy implementation
      depends on this feature.'
  # This is not critical, but debian-based container-vm kernel module study
  # shows that many customers' nodes have loaded those kernel modules. We
  # suspect sysdig module depends on these set of kernel modules for
  # monitoring.
  - name: PACKET_DIAG
    description: 'Required by ss (similar to netstat) tools to display Linux
      TCP / UDP network and socket information.'
  - name: UNIX_DIAG
    description: 'Required by ss (similar to netstat) tools to display Linux
      TCP / UDP network and socket information.'
  - name: INET_DIAG
    description: 'Required by ss (similar to netstat) tools to display Linux
      TCP / UDP network and socket information.'
  - name: INET_TCP_DIAG
    description: 'Required by ss (similar to netstat) tools to display Linux
      TCP / UDP network and socket information.'
  - name: INET_UDP_DIAG
    description: 'Required by ss (similar to netstat) tools to display Linux
      TCP / UDP network and socket information.'
  - name: NETLINK_DIAG
    description: 'Required by ss (similar to netstat) tools to display Linux
      TCP / UDP network and socket information.'

  # The configurations are required by filesystem.

  - name: EXT4_FS
  - name: DEBUG_FS
  - name: PROC_FS
  - name: XFS_FS
  - name: SCSI_PROC_FS
  # Currently Kubelet supports three docker graph drivers: overlay, aufs, and
  # devicemapper due to the legacy reason. But for GKE, we plan to only support
  # overlayfs.
  - name: OVERLAY_FS
    description: 'Enable OverlayFS, which will be the only docker graph driver
      supported on GKE.'
  - name: NFS_FS
    description: 'Required by NFS support.'
  - name: AUTOFS4_FS
    description: 'Required by NFS support.'
  - name: NFS_FSCACHE
    description: 'Required by NFS support.'
  - name: FSCACHE
    description: 'Required by NFS support.'
  - name: CACHEFILES
    description: 'Required by NFS support.'
  - name: FUSE_FS
    description: 'Required by GlusterFS support.'
  - name: BCACHE
  # TODO(yguo0905): Add a description for BCACHE.

  # The configuration required by the resource isolation, accounting, and
  # management.

  - name: NAMESPACES
    description: 'Required by kubelet and docker. Enabling it allows the
      processes within a pod or a container to have their own view of the
      system.'
  - name: IPC_NS
    description: 'Required by kubelet and docker. Enabling it allows the
      processes within a pod or a container to have their own view of the
      system.'
  - name: NET_NS
    description: 'Required by kubelet and docker. Enabling it allows the
      processes within a pod or a container to have their own view of the
      system.'
  - name: PID_NS
    description: 'Required by kubelet and docker. Enabling it allows the
      processes within a pod or a container to have their own view of the
      system.'
  - name: UTS_NS
    description: 'Required by kubelet and docker. Enabling it allows the
      processes within a pod or a container to have their own view of the
      system.'
  - name: CGROUPS
    description: 'Required by kubelet and docker. The resource usage of the
      processes within a pod or a container can be monitored, accounted, and
      controlled.'
  - name: CGROUP_CPUACCT
    description: 'Required by kubelet and docker. The resource usage of the
      processes within a pod or a container can be monitored, accounted, and
      controlled.'
  - name: CGROUP_DEVICE
    description: 'Required by kubelet and docker. The resource usage of the
      processes within a pod or a container can be monitored, accounted, and
      controlled.'
  - name: CGROUP_SCHED
    description: 'Required by kubelet and docker. The resource usage of the
      processes within a pod or a container can be monitored, accounted, and
      controlled.'
  - name: CPUSETS
    description: 'Required by kubelet and docker. The resource usage of the
      processes within a pod or a container can be monitored, accounted, and
      controlled.'
  - name: MEMCG
    description: 'Required by kubelet and docker. The resource usage of the
      processes within a pod or a container can be monitored, accounted, and
      controlled.'
  - name: QUOTA
    description: 'Required by kubelet to have an accurate and efficient disk
      space and inode accounting, and eventually to limit the usage.'

  # The security-related configurations

  - name: SECCOMP
    description: 'Enabled the SECCOMP application API.'
  - name: SECURITY_APPARMOR
    description: 'Enable for AppArmor support.'
  - name: CC_STACKPROTECTOR_STRONG
    alias:
    - CONFIG_CC_STACKPROTECTOR_REGULAR
      CONFIG_CC_STACKPROTECTOR_ALL
    description: 'Add the stack buffer overflow protections.'
  - name: STRICT_DEVMEM
    description: 'Required for blocking the direct physical memory access.'
  - name: IMA
    description: 'Required for security-related logging and auditing.'
  - name: AUDIT
    description: 'Required for security-related logging and auditing.'
  - name: AUDITSYSCALL
    description: 'Required for security-related logging and auditing.'

  # Misc. configurations

  - name: MODULES
    description: 'Required for loadable module support.'
  - name: PRINTK
    description: 'Required for kernel logging message.'
  - name: MMU
    description: 'Required for memory management hardware and mmap() system
      call.'

packageSpecs:
- name: apparmor
  versionRange: '>=2.10.1'
- name: apparmor-profiles
  versionRange: '>=2.10.1'
- name: audit
  versionRange: '>=2.5.0'
- name: autofs
  versionRange: '>=5.0.7'
- name: bash
  versionRange: '>=4.3'
- name: bridge-utils
  versionRange: '>=1.5'
- name: cloud-init
  versionRange: '>=0.7.6'
- name: coreutils
  versionRange: '>=8.24'
- name: dbus
  versionRange: '>=1.6.8'
- name: e2fsprogs
  versionRange: '>=1.4.3'
- name: ebtables
  versionRange: '>=2.0.10'
- name: ethtool
  versionRange: '>=3.18'
- name: iproute2
  versionRange: '>=4.2.0'
- name: less
  versionRange: '>=481'
- name: netcat-openbsd
  versionRange: '>=1.10'
- name: python
  versionRange: '>=2.7.10'
- name: pv
  versionRange: '>=1.3.4'
- name: sudo
  versionRange: '>=1.8.12'
- name: systemd
  versionRange: '>=225'
- name: tar
  versionRange: '>=1.28'
- name: util-linux
  versionRange: '>=2.27.1'
- name: wget
  versionRange: '>=1.18'
- name: gce-compute-image-packages
  versionRange: '>=20170227'
# TODO(yguo0905): Figure out whether watchdog is required.

# packageSpecOverrides contains the OS distro specific package requirements.
packageSpecOverrides:
# The following overrides apply to all Ubuntu images.
- osDistro: ubuntu
  subtractions:
  - name: apparmor-profiles
    description: 'On Ubuntu the apparmor profiles are shipped with individual
      application package, so the "apparmor-profiles" package is not required.'
  - name: audit
    description: 'On Ubuntu the equivalent package is called "auditd", so the
      "audit" package is not required and "auditd" exists in the additions.'
  - name: wget
    description: 'The Ubuntu 1604-xenial image includes wget 1.17.1, which does
      not satisfy the spec (>=1.18), but meets the functionality requirements.
      Therefore, it is removed from the base spec. See wget in the additions.'
  additions:
  - name: auditd
    versionRange: '>=2.4.5'
    description: 'auditd 2.4.5 currently satisfies the requirements because the
      GKE features that require auditd 2.5 are not yet available.'
  - name: grub-common
    versionRange: '>=2.2'
    description: 'grub is the bootloader on Ubuntu.'
  - name: wget
    versionRange: '>=1.17.1'
    description: 'wget 1.17.1 satisfies the functionality requirements but does
      not meet the spec, which is fine'
124
vendor/k8s.io/kubernetes/test/e2e_node/system/types.go
generated
vendored
124
vendor/k8s.io/kubernetes/test/e2e_node/system/types.go
generated
vendored
@ -1,124 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package system

// KernelConfig defines one kernel configuration item.
type KernelConfig struct {
    // Name is the general name of the kernel configuration. It is used to
    // match kernel configuration.
    Name string `json:"name,omitempty"`
    // TODO(yguo0905): Support the "or" operation, which will be the same
    // as the "aliases".
    //
    // Aliases are aliases of the kernel configuration. Some configuration
    // has different names in different kernel version. Names of different
    // versions will be treated as aliases.
    Aliases []string `json:"aliases,omitempty"`
    // Description is the description of the kernel configuration, for example:
    // * What is it used for?
    // * Why is it needed?
    // * Who needs it?
    Description string `json:"description,omitempty"`
}

// KernelSpec defines the specification for the kernel. Currently, it contains
// specification for:
// * Kernel Version
// * Kernel Configuration
type KernelSpec struct {
    // Versions define supported kernel version. It is a group of regexps.
    Versions []string `json:"versions,omitempty"`
    // Required contains all kernel configurations required to be enabled
    // (built in or as module).
    Required []KernelConfig `json:"required,omitempty"`
    // Optional contains all kernel configurations that are required for optional
    // features.
    Optional []KernelConfig `json:"optional,omitempty"`
    // Forbidden contains all kernel configurations which are forbidden (disabled
    // or not set)
    Forbidden []KernelConfig `json:"forbidden,omitempty"`
}

// DockerSpec defines the requirement configuration for docker. Currently, it only
// contains spec for graph driver.
type DockerSpec struct {
    // Version is a group of regex matching supported docker versions.
    Version []string `json:"version,omitempty"`
    // GraphDriver is the graph drivers supported by kubelet.
    GraphDriver []string `json:"graphDriver,omitempty"`
}

// RuntimeSpec is the abstract layer for different runtimes. Different runtimes
// should put their spec inside the RuntimeSpec.
type RuntimeSpec struct {
    *DockerSpec `json:",inline"`
}

// PackageSpec defines the required packages and their versions.
// PackageSpec is only supported on OS distro with Debian package manager.
//
// TODO(yguo0905): Support operator OR of multiple packages for the case where
// either "foo (>=1.0)" or "bar (>=2.0)" is required.
type PackageSpec struct {
    // Name is the name of the package to be checked.
    Name string `json:"name,omitempty"`
    // VersionRange represents a range of versions that the package must
    // satisfy. Note that the version requirement will not be enforced if
    // the version range is empty. For example,
    // - "" would match any versions but the package must be installed.
    // - ">=1" would match "1.0.0", "1.0.1", "1.1.0", and "2.0".
    // - ">1.0 <2.0" would match between both ranges, so "1.1.1" and "1.8.7"
    //   but not "1.0.0" or "2.0.0".
    // - "<2.0.0 || >=3.0.0" would match "1.0.0" and "3.0.0" but not "2.0.0".
    VersionRange string `json:"versionRange,omitempty"`
    // Description explains the reason behind this package requirements.
    //
    // TODO(yguo0905): Print the description where necessary.
    Description string `json:"description,omitempty"`
}

// PackageSpecOverride defines the overrides on the PackageSpec for an OS
// distro.
type PackageSpecOverride struct {
    // OSDistro identifies to which OS distro this override applies.
    // Must be "ubuntu", "cos" or "coreos".
    OSDistro string `json:"osDistro,omitempty"`
    // Subtractions is a list of package names that are excluded from the
    // package spec.
    Subtractions []PackageSpec `json:"subtractions,omitempty"`
    // Additions is a list of additional package requirements included in the
    // package spec.
    Additions []PackageSpec `json:"additions,omitempty"`
}

// SysSpec defines the requirement of supported system. Currently, it only contains
// spec for OS, Kernel and Cgroups.
type SysSpec struct {
    // OS is the operating system of the SysSpec.
    OS string `json:"os,omitempty"`
    // KernelSpec defines the spec for kernel.
    KernelSpec KernelSpec `json:"kernelSpec,omitempty"`
    // Cgroups is the required cgroups.
    Cgroups []string `json:"cgroups,omitempty"`
    // RuntimeSpec defines the spec for runtime.
    RuntimeSpec RuntimeSpec `json:"runtimeSpec,omitempty"`
    // PackageSpecs defines the required packages and their versions.
    PackageSpecs []PackageSpec `json:"packageSpecs,omitempty"`
    // PackageSpecOverrides defines the overrides of the required packages and their
    // versions for an OS distro.
    PackageSpecOverrides []PackageSpecOverride `json:"packageSpecOverrides,omitempty"`
}
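A short sketch of how these types compose into a spec; the field values below are illustrative only, not a supported baseline:

// Sketch: a minimal SysSpec literal built from the types above.
spec := SysSpec{
    OS: "Linux",
    KernelSpec: KernelSpec{
        Versions: []string{`4\..*`},
        Required: []KernelConfig{{Name: "CGROUPS", Description: "Required for resource accounting."}},
    },
    Cgroups:      []string{"cpu", "memory"},
    PackageSpecs: []PackageSpec{{Name: "sudo", VersionRange: ">=1.8.12"}},
}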
83
vendor/k8s.io/kubernetes/test/e2e_node/system/types_unix.go
generated
vendored
83
vendor/k8s.io/kubernetes/test/e2e_node/system/types_unix.go
generated
vendored
@ -1,83 +0,0 @@
// +build !windows

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package system

import (
    "os/exec"
    "strings"
)

// dockerEndpoint is the os specific endpoint for docker communication
const dockerEndpoint = "unix:///var/run/docker.sock"

// DefaultSysSpec is the default SysSpec for Linux
var DefaultSysSpec = SysSpec{
    OS: "Linux",
    KernelSpec: KernelSpec{
        Versions: []string{`3\.[1-9][0-9].*`, `4\..*`}, // Requires 3.10+ or 4+
        // TODO(random-liu): Add more config
        // TODO(random-liu): Add description for each kernel configuration:
        Required: []KernelConfig{
            {Name: "NAMESPACES"},
            {Name: "NET_NS"},
            {Name: "PID_NS"},
            {Name: "IPC_NS"},
            {Name: "UTS_NS"},
            {Name: "CGROUPS"},
            {Name: "CGROUP_CPUACCT"},
            {Name: "CGROUP_DEVICE"},
            {Name: "CGROUP_FREEZER"},
            {Name: "CGROUP_SCHED"},
            {Name: "CPUSETS"},
            {Name: "MEMCG"},
            {Name: "INET"},
            {Name: "EXT4_FS"},
            {Name: "PROC_FS"},
            {Name: "NETFILTER_XT_TARGET_REDIRECT", Aliases: []string{"IP_NF_TARGET_REDIRECT"}},
            {Name: "NETFILTER_XT_MATCH_COMMENT"},
        },
        Optional: []KernelConfig{
            {Name: "OVERLAY_FS", Aliases: []string{"OVERLAYFS_FS"}, Description: "Required for overlayfs."},
            {Name: "AUFS_FS", Description: "Required for aufs."},
            {Name: "BLK_DEV_DM", Description: "Required for devicemapper."},
        },
        Forbidden: []KernelConfig{},
    },
    Cgroups: []string{"cpu", "cpuacct", "cpuset", "devices", "freezer", "memory"},
    RuntimeSpec: RuntimeSpec{
        DockerSpec: &DockerSpec{
            Version:     []string{`1\.1[1-3]\..*`, `17\.03\..*`}, // Requires [1.11, 17.03]
            GraphDriver: []string{"aufs", "overlay", "overlay2", "devicemapper"},
        },
    },
}

// KernelValidatorHelperImpl is the 'linux' implementation of KernelValidatorHelper
type KernelValidatorHelperImpl struct{}

var _ KernelValidatorHelper = &KernelValidatorHelperImpl{}

// GetKernelReleaseVersion returns the kernel release version (ex. 4.4.0-96-generic) as a string
func (o *KernelValidatorHelperImpl) GetKernelReleaseVersion() (string, error) {
    releaseVersion, err := exec.Command("uname", "-r").CombinedOutput()
    if err != nil {
        return "", err
    }
    return strings.TrimSpace(string(releaseVersion)), nil
}
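For orientation, a hedged sketch of calling the Linux helper above (error handling abbreviated):

// Sketch: read the running kernel release via the helper defined above.
helper := &KernelValidatorHelperImpl{}
if release, err := helper.GetKernelReleaseVersion(); err == nil {
    _ = release // e.g. "4.4.0-96-generic"
}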
60
vendor/k8s.io/kubernetes/test/e2e_node/system/types_windows.go
generated
vendored
60
vendor/k8s.io/kubernetes/test/e2e_node/system/types_windows.go
generated
vendored
@ -1,60 +0,0 @@
// +build windows

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package system

import (
    "os/exec"
    "strings"
)

// dockerEndpoint is the os specific endpoint for docker communication
const dockerEndpoint = "npipe:////./pipe/docker_engine"

// DefaultSysSpec is the default SysSpec for Windows
var DefaultSysSpec = SysSpec{
    OS: "Microsoft Windows Server 2016",
    KernelSpec: KernelSpec{
        Versions:  []string{`10\.0\.1439[3-9]`, `10\.0\.14[4-9][0-9]{2}`, `10\.0\.1[5-9][0-9]{3}`, `10\.0\.[2-9][0-9]{4}`, `10\.[1-9]+\.[0-9]+`}, //requires >= '10.0.14393'
        Required:  []KernelConfig{},
        Optional:  []KernelConfig{},
        Forbidden: []KernelConfig{},
    },
    Cgroups: []string{},
    RuntimeSpec: RuntimeSpec{
        DockerSpec: &DockerSpec{
            Version:     []string{`17\.03\..*`}, //Requires [17.03] or later
            GraphDriver: []string{"windowsfilter"},
        },
    },
}

// KernelValidatorHelperImpl is the 'windows' implementation of KernelValidatorHelper
type KernelValidatorHelperImpl struct{}

var _ KernelValidatorHelper = &KernelValidatorHelperImpl{}

// GetKernelReleaseVersion returns the windows release version (ex. 10.0.14393) as a string
func (o *KernelValidatorHelperImpl) GetKernelReleaseVersion() (string, error) {
    args := []string{"(Get-CimInstance Win32_OperatingSystem).Version"}
    releaseVersion, err := exec.Command("powershell", args...).Output()
    if err != nil {
        return "", err
    }
    return strings.TrimSpace(string(releaseVersion)), nil
}
72
vendor/k8s.io/kubernetes/test/e2e_node/system/validators.go
generated
vendored
72
vendor/k8s.io/kubernetes/test/e2e_node/system/validators.go
generated
vendored
@ -1,72 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package system

import (
    "github.com/golang/glog"
    "k8s.io/apimachinery/pkg/util/errors"
)

// Validator is the interface for all validators.
type Validator interface {
    // Name is the name of the validator.
    Name() string
    // Validate is the validate function.
    Validate(SysSpec) (error, error)
}

// Reporter is the interface for the reporters for the validators.
type Reporter interface {
    // Report reports the results of the system verification
    Report(string, string, ValidationResultType) error
}

// Validate uses validators to validate the system and returns a warning or error.
func Validate(spec SysSpec, validators []Validator) (error, error) {
    var errs []error
    var warns []error

    for _, v := range validators {
        glog.Infof("Validating %s...", v.Name())
        warn, err := v.Validate(spec)
        errs = append(errs, err)
        warns = append(warns, warn)
    }
    return errors.NewAggregate(warns), errors.NewAggregate(errs)
}

// ValidateSpec uses all default validators to validate the system and writes to stdout.
func ValidateSpec(spec SysSpec, runtime string) (error, error) {
    // OS-level validators.
    var osValidators = []Validator{
        &OSValidator{Reporter: DefaultReporter},
        &KernelValidator{Reporter: DefaultReporter},
        &CgroupsValidator{Reporter: DefaultReporter},
        &packageValidator{reporter: DefaultReporter},
    }
    // Docker-specific validators.
    var dockerValidators = []Validator{
        &DockerValidator{Reporter: DefaultReporter},
    }

    validators := osValidators
    switch runtime {
    case "docker":
        validators = append(validators, dockerValidators...)
    }
    return Validate(spec, validators)
}
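A brief usage sketch, assuming the Linux DefaultSysSpec from types_unix.go is in scope: ValidateSpec returns warnings first and hard errors second, each as an aggregate error.

// Sketch: validate the local node against the default spec for the docker runtime.
warns, errs := ValidateSpec(DefaultSysSpec, "docker")
if errs != nil {
    // one or more validators reported a hard failure
}
_ = warns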
39
vendor/k8s.io/kubernetes/test/e2e_node/util.go
generated
vendored
39
vendor/k8s.io/kubernetes/test/e2e_node/util.go
generated
vendored
@ -27,25 +27,30 @@ import (
    "strings"
    "time"

    "github.com/golang/glog"
    "golang.org/x/net/context"
    "k8s.io/klog"

    apiv1 "k8s.io/api/core/v1"
    apiequality "k8s.io/apimachinery/pkg/api/equality"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/client-go/kubernetes/scheme"
    kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1"
    "k8s.io/kubernetes/pkg/features"
    kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
    internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri"
    "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
    kubeletconfigv1beta1 "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1beta1"
    "k8s.io/kubernetes/pkg/kubelet/apis/podresources"
    podresourcesapi "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1"
    stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
    "k8s.io/kubernetes/pkg/kubelet/cm"
    kubeletconfigcodec "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec"
    kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
    "k8s.io/kubernetes/pkg/kubelet/remote"
    "k8s.io/kubernetes/pkg/kubelet/util"
    "k8s.io/kubernetes/test/e2e/framework"
    "k8s.io/kubernetes/test/e2e/framework/metrics"
    frameworkmetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
    imageutils "k8s.io/kubernetes/test/utils/image"

    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"
@ -56,11 +61,15 @@ var kubeletAddress = flag.String("kubelet-address", "http://127.0.0.1:10255", "H

var startServices = flag.Bool("start-services", true, "If true, start local node services")
var stopServices = flag.Bool("stop-services", true, "If true, stop local node services after running tests")
var busyboxImage = "busybox"
var busyboxImage = imageutils.GetE2EImage(imageutils.BusyBox)

const (
    // Kubelet internal cgroup name for node allocatable cgroup.
    defaultNodeAllocatableCgroup = "kubepods"
    // defaultPodResourcesPath is the path to the local endpoint serving the podresources GRPC service.
    defaultPodResourcesPath    = "/var/lib/kubelet/pod-resources"
    defaultPodResourcesTimeout = 10 * time.Second
    defaultPodResourcesMaxSize = 1024 * 1024 * 16 // 16 Mb
)

func getNodeSummary() (*stats.Summary, error) {
@ -91,6 +100,22 @@ func getNodeSummary() (*stats.Summary, error) {
    return &summary, nil
}

func getNodeDevices() (*podresourcesapi.ListPodResourcesResponse, error) {
    endpoint := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket)
    client, conn, err := podresources.GetClient(endpoint, defaultPodResourcesTimeout, defaultPodResourcesMaxSize)
    if err != nil {
        return nil, fmt.Errorf("Error getting grpc client: %v", err)
    }
    defer conn.Close()
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()
    resp, err := client.List(ctx, &podresourcesapi.ListPodResourcesRequest{})
    if err != nil {
        return nil, fmt.Errorf("%v.Get(_) = _, %v", client, err)
    }
    return resp, nil
}

// Returns the current KubeletConfiguration
func getCurrentKubeletConfig() (*kubeletconfig.KubeletConfiguration, error) {
    resp := pollConfigz(5*time.Minute, 5*time.Second)
@ -194,7 +219,7 @@ func setKubeletConfiguration(f *framework.Framework, kubeCfg *kubeletconfig.Kube
        if !apiequality.Semantic.DeepEqual(*kubeCfg, *newKubeCfg) {
            return fmt.Errorf("still waiting for new configuration to take effect, will continue to watch /configz")
        }
        glog.Infof("new configuration has taken effect")
        klog.Infof("new configuration has taken effect")
        return nil
    }, restartGap, pollInterval).Should(BeNil())

@ -237,11 +262,11 @@ func pollConfigz(timeout time.Duration, pollInterval time.Duration) *http.Respon
    Eventually(func() bool {
        resp, err = client.Do(req)
        if err != nil {
            glog.Errorf("Failed to get /configz, retrying. Error: %v", err)
            klog.Errorf("Failed to get /configz, retrying. Error: %v", err)
            return false
        }
        if resp.StatusCode != 200 {
            glog.Errorf("/configz response status not 200, retrying. Response was: %+v", resp)
            klog.Errorf("/configz response status not 200, retrying. Response was: %+v", resp)
            return false
        }
        return true