Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-13 10:33:35 +00:00)

commit 34fc1d847e (parent 4c96ad3c85), committed by mergify[bot]

Changes to accommodate client-go changes and kube vendor update to v1.18.0

Signed-off-by: Humble Chirammal <hchiramm@redhat.com>
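The hunks below are largely the mechanical fallout of the Kubernetes 1.18 client-go API change: every generated typed-client call now takes a context.Context as its first argument and an explicit options struct (metav1.GetOptions, CreateOptions, ListOptions, DeleteOptions) instead of pointer options. The following sketch is not part of the patch; it only illustrates the before/after call shape, and the namespace and pod names are made up.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a clientset from the local kubeconfig (illustrative only).
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Pre-1.18: client.CoreV1().Pods("demo").Get("web", metav1.GetOptions{})
	// 1.18+:    a context.Context comes first, options structs stay explicit.
	pod, err := client.CoreV1().Pods("demo").Get(context.TODO(), "web", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(pod.Name)

	// Delete switched from *metav1.DeleteOptions to a metav1.DeleteOptions value.
	_ = client.CoreV1().Pods("demo").Delete(context.TODO(), "web", metav1.DeleteOptions{})
}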
151  vendor/k8s.io/kubernetes/test/e2e/framework/.import-restrictions  (generated, vendored)
@@ -6,6 +6,8 @@
|
||||
"k8s.io/kubernetes/pkg/api/legacyscheme",
|
||||
"k8s.io/kubernetes/pkg/api/service",
|
||||
"k8s.io/kubernetes/pkg/api/v1/pod",
|
||||
"k8s.io/kubernetes/pkg/api/v1/resource",
|
||||
"k8s.io/kubernetes/pkg/api/v1/service",
|
||||
"k8s.io/kubernetes/pkg/apis/apps",
|
||||
"k8s.io/kubernetes/pkg/apis/apps/validation",
|
||||
"k8s.io/kubernetes/pkg/apis/autoscaling",
|
||||
@ -26,6 +28,7 @@
|
||||
"k8s.io/kubernetes/pkg/apis/storage/v1/util",
|
||||
"k8s.io/kubernetes/pkg/capabilities",
|
||||
"k8s.io/kubernetes/pkg/client/conditions",
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers",
|
||||
"k8s.io/kubernetes/pkg/controller",
|
||||
"k8s.io/kubernetes/pkg/controller/deployment/util",
|
||||
"k8s.io/kubernetes/pkg/controller/nodelifecycle",
|
||||
@ -34,6 +37,11 @@
|
||||
"k8s.io/kubernetes/pkg/controller/util/node",
|
||||
"k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util",
|
||||
"k8s.io/kubernetes/pkg/controller/volume/scheduling",
|
||||
"k8s.io/kubernetes/pkg/credentialprovider",
|
||||
"k8s.io/kubernetes/pkg/credentialprovider/aws",
|
||||
"k8s.io/kubernetes/pkg/credentialprovider/azure",
|
||||
"k8s.io/kubernetes/pkg/credentialprovider/gcp",
|
||||
"k8s.io/kubernetes/pkg/credentialprovider/secrets",
|
||||
"k8s.io/kubernetes/pkg/features",
|
||||
"k8s.io/kubernetes/pkg/fieldpath",
|
||||
"k8s.io/kubernetes/pkg/kubectl",
|
||||
@ -52,51 +60,175 @@
|
||||
"k8s.io/kubernetes/pkg/kubectl/util/resource",
|
||||
"k8s.io/kubernetes/pkg/kubectl/util/slice",
|
||||
"k8s.io/kubernetes/pkg/kubectl/util/storage",
|
||||
"k8s.io/kubernetes/pkg/kubelet",
|
||||
"k8s.io/kubernetes/pkg/kubelet/apis",
|
||||
"k8s.io/kubernetes/pkg/kubelet/apis/config",
|
||||
"k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1",
|
||||
"k8s.io/kubernetes/pkg/kubelet/cadvisor",
|
||||
"k8s.io/kubernetes/pkg/kubelet/certificate",
|
||||
"k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap",
|
||||
"k8s.io/kubernetes/pkg/kubelet/checkpoint",
|
||||
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager",
|
||||
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager/checksum",
|
||||
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors",
|
||||
"k8s.io/kubernetes/pkg/kubelet/cloudresource",
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm",
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager",
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/containermap",
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state",
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology",
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset",
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager",
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/checkpoint",
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager",
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask",
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm/util",
|
||||
"k8s.io/kubernetes/pkg/kubelet/config",
|
||||
"k8s.io/kubernetes/pkg/kubelet/configmap",
|
||||
"k8s.io/kubernetes/pkg/kubelet/container",
|
||||
"k8s.io/kubernetes/pkg/kubelet/dockershim",
|
||||
"k8s.io/kubernetes/pkg/kubelet/dockershim/cm",
|
||||
"k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker",
|
||||
"k8s.io/kubernetes/pkg/kubelet/dockershim/metrics",
|
||||
"k8s.io/kubernetes/pkg/kubelet/dockershim/network",
|
||||
"k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni",
|
||||
"k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport",
|
||||
"k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet",
|
||||
"k8s.io/kubernetes/pkg/kubelet/dockershim/network/metrics",
|
||||
"k8s.io/kubernetes/pkg/kubelet/dockershim/remote",
|
||||
"k8s.io/kubernetes/pkg/kubelet/envvars",
|
||||
"k8s.io/kubernetes/pkg/kubelet/eviction",
|
||||
"k8s.io/kubernetes/pkg/kubelet/eviction/api",
|
||||
"k8s.io/kubernetes/pkg/kubelet/events",
|
||||
"k8s.io/kubernetes/pkg/kubelet/images",
|
||||
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig",
|
||||
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint",
|
||||
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/store",
|
||||
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configfiles",
|
||||
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status",
|
||||
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec",
|
||||
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/files",
|
||||
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log",
|
||||
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/panic",
|
||||
"k8s.io/kubernetes/pkg/kubelet/kuberuntime",
|
||||
"k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs",
|
||||
"k8s.io/kubernetes/pkg/kubelet/leaky",
|
||||
"k8s.io/kubernetes/pkg/kubelet/lifecycle",
|
||||
"k8s.io/kubernetes/pkg/kubelet/logs",
|
||||
"k8s.io/kubernetes/pkg/kubelet/metrics",
|
||||
"k8s.io/kubernetes/pkg/kubelet/network/dns",
|
||||
"k8s.io/kubernetes/pkg/kubelet/nodelease",
|
||||
"k8s.io/kubernetes/pkg/kubelet/nodestatus",
|
||||
"k8s.io/kubernetes/pkg/kubelet/oom",
|
||||
"k8s.io/kubernetes/pkg/kubelet/pleg",
|
||||
"k8s.io/kubernetes/pkg/kubelet/pluginmanager",
|
||||
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache",
|
||||
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/metrics",
|
||||
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/operationexecutor",
|
||||
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher",
|
||||
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta1",
|
||||
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta2",
|
||||
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/reconciler",
|
||||
"k8s.io/kubernetes/pkg/kubelet/pod",
|
||||
"k8s.io/kubernetes/pkg/kubelet/preemption",
|
||||
"k8s.io/kubernetes/pkg/kubelet/prober",
|
||||
"k8s.io/kubernetes/pkg/kubelet/prober/results",
|
||||
"k8s.io/kubernetes/pkg/kubelet/qos",
|
||||
"k8s.io/kubernetes/pkg/kubelet/remote",
|
||||
"k8s.io/kubernetes/pkg/kubelet/runtimeclass",
|
||||
"k8s.io/kubernetes/pkg/kubelet/server",
|
||||
"k8s.io/kubernetes/pkg/kubelet/server/metrics",
|
||||
"k8s.io/kubernetes/pkg/kubelet/server/portforward",
|
||||
"k8s.io/kubernetes/pkg/kubelet/server/remotecommand",
|
||||
"k8s.io/kubernetes/pkg/kubelet/server/stats",
|
||||
"k8s.io/kubernetes/pkg/kubelet/server/streaming",
|
||||
"k8s.io/kubernetes/pkg/kubelet/stats",
|
||||
"k8s.io/kubernetes/pkg/kubelet/stats/pidlimit",
|
||||
"k8s.io/kubernetes/pkg/kubelet/status",
|
||||
"k8s.io/kubernetes/pkg/kubelet/secret",
|
||||
"k8s.io/kubernetes/pkg/kubelet/sysctl",
|
||||
"k8s.io/kubernetes/pkg/kubelet/types",
|
||||
"k8s.io/kubernetes/pkg/kubelet/token",
|
||||
"k8s.io/kubernetes/pkg/kubelet/util",
|
||||
"k8s.io/kubernetes/pkg/kubelet/util/format",
|
||||
"k8s.io/kubernetes/pkg/kubelet/util/manager",
|
||||
"k8s.io/kubernetes/pkg/kubelet/util/store",
|
||||
"k8s.io/kubernetes/pkg/kubelet/volumemanager",
|
||||
"k8s.io/kubernetes/pkg/kubelet/volumemanager/cache",
|
||||
"k8s.io/kubernetes/pkg/kubelet/volumemanager/metrics",
|
||||
"k8s.io/kubernetes/pkg/kubelet/volumemanager/populator",
|
||||
"k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler",
|
||||
"k8s.io/kubernetes/pkg/kubemark",
|
||||
"k8s.io/kubernetes/pkg/master/ports",
|
||||
"k8s.io/kubernetes/pkg/probe",
|
||||
"k8s.io/kubernetes/pkg/probe/exec",
|
||||
"k8s.io/kubernetes/pkg/probe/http",
|
||||
"k8s.io/kubernetes/pkg/probe/tcp",
|
||||
"k8s.io/kubernetes/pkg/proxy",
|
||||
"k8s.io/kubernetes/pkg/proxy/apis",
|
||||
"k8s.io/kubernetes/pkg/proxy/apis/config",
|
||||
"k8s.io/kubernetes/pkg/proxy/apis/config/scheme",
|
||||
"k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1",
|
||||
"k8s.io/kubernetes/pkg/proxy/apis/config/validation",
|
||||
"k8s.io/kubernetes/pkg/proxy/config",
|
||||
"k8s.io/kubernetes/pkg/proxy/healthcheck",
|
||||
"k8s.io/kubernetes/pkg/proxy/iptables",
|
||||
"k8s.io/kubernetes/pkg/proxy/ipvs",
|
||||
"k8s.io/kubernetes/pkg/proxy/metaproxier",
|
||||
"k8s.io/kubernetes/pkg/proxy/metrics",
|
||||
"k8s.io/kubernetes/pkg/proxy/userspace",
|
||||
"k8s.io/kubernetes/pkg/proxy/util",
|
||||
"k8s.io/kubernetes/pkg/registry/core/service/allocator",
|
||||
"k8s.io/kubernetes/pkg/registry/core/service/portallocator",
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm",
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates",
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util",
|
||||
"k8s.io/kubernetes/pkg/scheduler/api",
|
||||
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper",
|
||||
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity",
|
||||
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename",
|
||||
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports",
|
||||
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources",
|
||||
"k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1",
|
||||
"k8s.io/kubernetes/pkg/scheduler/listers",
|
||||
"k8s.io/kubernetes/pkg/scheduler/metrics",
|
||||
"k8s.io/kubernetes/pkg/scheduler/nodeinfo",
|
||||
"k8s.io/kubernetes/pkg/scheduler/util",
|
||||
"k8s.io/kubernetes/pkg/scheduler/volumebinder",
|
||||
"k8s.io/kubernetes/pkg/security/apparmor",
|
||||
"k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp",
|
||||
"k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl",
|
||||
"k8s.io/kubernetes/pkg/security/podsecuritypolicy/util",
|
||||
"k8s.io/kubernetes/pkg/securitycontext",
|
||||
"k8s.io/kubernetes/pkg/serviceaccount",
|
||||
"k8s.io/kubernetes/pkg/ssh",
|
||||
"k8s.io/kubernetes/pkg/util/async",
|
||||
"k8s.io/kubernetes/pkg/util/bandwidth",
|
||||
"k8s.io/kubernetes/pkg/util/config",
|
||||
"k8s.io/kubernetes/pkg/util/configz",
|
||||
"k8s.io/kubernetes/pkg/util/conntrack",
|
||||
"k8s.io/kubernetes/pkg/util/ebtables",
|
||||
"k8s.io/kubernetes/pkg/util/env",
|
||||
"k8s.io/kubernetes/pkg/util/filesystem",
|
||||
"k8s.io/kubernetes/pkg/util/flag",
|
||||
"k8s.io/kubernetes/pkg/util/flock",
|
||||
"k8s.io/kubernetes/pkg/util/goroutinemap",
|
||||
"k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff",
|
||||
"k8s.io/kubernetes/pkg/util/hash",
|
||||
"k8s.io/kubernetes/pkg/util/ipset",
|
||||
"k8s.io/kubernetes/pkg/util/iptables",
|
||||
"k8s.io/kubernetes/pkg/util/ipvs",
|
||||
"k8s.io/kubernetes/pkg/util/labels",
|
||||
"k8s.io/kubernetes/pkg/util/node",
|
||||
"k8s.io/kubernetes/pkg/util/oom",
|
||||
"k8s.io/kubernetes/pkg/util/parsers",
|
||||
"k8s.io/kubernetes/pkg/util/pod",
|
||||
"k8s.io/kubernetes/pkg/util/procfs",
|
||||
"k8s.io/kubernetes/pkg/util/removeall",
|
||||
"k8s.io/kubernetes/pkg/util/resizefs",
|
||||
"k8s.io/kubernetes/pkg/util/rlimit",
|
||||
"k8s.io/kubernetes/pkg/util/selinux",
|
||||
"k8s.io/kubernetes/pkg/util/slice",
|
||||
"k8s.io/kubernetes/pkg/util/sysctl",
|
||||
"k8s.io/kubernetes/pkg/util/system",
|
||||
"k8s.io/kubernetes/pkg/util/tail",
|
||||
"k8s.io/kubernetes/pkg/util/taints",
|
||||
"k8s.io/kubernetes/pkg/volume",
|
||||
"k8s.io/kubernetes/pkg/volume/util",
|
||||
@ -112,17 +244,24 @@
|
||||
{
|
||||
"SelectorRegexp": "k8s[.]io/kubernetes/test/",
|
||||
"AllowedPrefixes": [
|
||||
"k8s.io/kubernetes/test/e2e/framework",
|
||||
"k8s.io/kubernetes/test/e2e/framework/auth",
|
||||
"k8s.io/kubernetes/test/e2e/framework/ginkgowrapper",
|
||||
"k8s.io/kubernetes/test/e2e/framework/kubectl",
|
||||
"k8s.io/kubernetes/test/e2e/framework/log",
|
||||
"k8s.io/kubernetes/test/e2e/framework/metrics",
|
||||
"k8s.io/kubernetes/test/e2e/framework/network",
|
||||
"k8s.io/kubernetes/test/e2e/framework/node",
|
||||
"k8s.io/kubernetes/test/e2e/framework/pod",
|
||||
"k8s.io/kubernetes/test/e2e/framework/rc",
|
||||
"k8s.io/kubernetes/test/e2e/framework/resource",
|
||||
"k8s.io/kubernetes/test/e2e/framework/service",
|
||||
"k8s.io/kubernetes/test/e2e/framework/ssh",
|
||||
"k8s.io/kubernetes/test/e2e/framework/testfiles",
|
||||
"k8s.io/kubernetes/test/e2e/manifest",
|
||||
"k8s.io/kubernetes/test/e2e/perftype",
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils",
|
||||
"k8s.io/kubernetes/test/e2e/system",
|
||||
"k8s.io/kubernetes/test/utils",
|
||||
"k8s.io/kubernetes/test/utils/image"
|
||||
],
|
||||
@ -131,7 +270,7 @@
|
||||
{
|
||||
"SelectorRegexp": "k8s[.]io/kubernetes/third_party/",
|
||||
"AllowedPrefixes": [
|
||||
"k8s.io/kubernetes/third_party/forked/golang/expansion"
|
||||
"k8s.io/kubernetes/third_party/forked/golang/expansion"
|
||||
],
|
||||
"ForbiddenPrefixes": []
|
||||
},
|
||||
@ -140,7 +279,11 @@
|
||||
"AllowedPrefixes": [
|
||||
"k8s.io/utils/buffer",
|
||||
"k8s.io/utils/exec",
|
||||
"k8s.io/utils/inotify",
|
||||
"k8s.io/utils/integer",
|
||||
"k8s.io/utils/io",
|
||||
"k8s.io/utils/keymutex",
|
||||
"k8s.io/utils/mount",
|
||||
"k8s.io/utils/net",
|
||||
"k8s.io/utils/nsenter",
|
||||
"k8s.io/utils/path",
|
||||
|
42  vendor/k8s.io/kubernetes/test/e2e/framework/BUILD  (generated, vendored)
@@ -3,26 +3,20 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"checks.go",
|
||||
"cleanup.go",
|
||||
"exec_util.go",
|
||||
"expect.go",
|
||||
"flake_reporting_util.go",
|
||||
"framework.go",
|
||||
"get-kubemark-resource-usage.go",
|
||||
"google_compute.go",
|
||||
"log.go",
|
||||
"log_size_monitoring.go",
|
||||
"nodes_util.go",
|
||||
"pods.go",
|
||||
"profile_gatherer.go",
|
||||
"provider.go",
|
||||
"psp.go",
|
||||
"rc_util.go",
|
||||
"resource_usage_gatherer.go",
|
||||
"size.go",
|
||||
"skip.go",
|
||||
"suites.go",
|
||||
"test_context.go",
|
||||
"util.go",
|
||||
],
|
||||
@ -30,10 +24,8 @@ go_library(
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//pkg/api/v1/pod:go_default_library",
|
||||
"//pkg/apis/core:go_default_library",
|
||||
"//pkg/client/conditions:go_default_library",
|
||||
"//pkg/controller:go_default_library",
|
||||
"//pkg/features:go_default_library",
|
||||
"//pkg/kubelet/apis/config:go_default_library",
|
||||
"//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
|
||||
"//pkg/kubelet/events:go_default_library",
|
||||
@ -55,11 +47,9 @@ go_library(
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/yaml:go_default_library",
|
||||
"//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
|
||||
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/discovery:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/discovery/cached/memory:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
|
||||
@ -76,6 +66,7 @@ go_library(
|
||||
"//staging/src/k8s.io/component-base/cli/flag:go_default_library",
|
||||
"//test/e2e/framework/auth:go_default_library",
|
||||
"//test/e2e/framework/ginkgowrapper:go_default_library",
|
||||
"//test/e2e/framework/kubectl:go_default_library",
|
||||
"//test/e2e/framework/metrics:go_default_library",
|
||||
"//test/e2e/framework/node:go_default_library",
|
||||
"//test/e2e/framework/pod:go_default_library",
|
||||
@ -95,6 +86,18 @@ go_library(
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["log_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//vendor/github.com/onsi/ginkgo:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo/config:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo/reporters:go_default_library",
|
||||
"//vendor/github.com/onsi/gomega:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
@ -110,14 +113,14 @@ filegroup(
|
||||
"//test/e2e/framework/autoscaling:all-srcs",
|
||||
"//test/e2e/framework/config:all-srcs",
|
||||
"//test/e2e/framework/deployment:all-srcs",
|
||||
"//test/e2e/framework/deviceplugin:all-srcs",
|
||||
"//test/e2e/framework/endpoints:all-srcs",
|
||||
"//test/e2e/framework/events:all-srcs",
|
||||
"//test/e2e/framework/ginkgowrapper:all-srcs",
|
||||
"//test/e2e/framework/gpu:all-srcs",
|
||||
"//test/e2e/framework/ingress:all-srcs",
|
||||
"//test/e2e/framework/job:all-srcs",
|
||||
"//test/e2e/framework/kubectl:all-srcs",
|
||||
"//test/e2e/framework/kubelet:all-srcs",
|
||||
"//test/e2e/framework/lifecycle:all-srcs",
|
||||
"//test/e2e/framework/log:all-srcs",
|
||||
"//test/e2e/framework/metrics:all-srcs",
|
||||
"//test/e2e/framework/network:all-srcs",
|
||||
@ -132,29 +135,18 @@ filegroup(
|
||||
"//test/e2e/framework/providers/openstack:all-srcs",
|
||||
"//test/e2e/framework/providers/vsphere:all-srcs",
|
||||
"//test/e2e/framework/pv:all-srcs",
|
||||
"//test/e2e/framework/rc:all-srcs",
|
||||
"//test/e2e/framework/replicaset:all-srcs",
|
||||
"//test/e2e/framework/resource:all-srcs",
|
||||
"//test/e2e/framework/security:all-srcs",
|
||||
"//test/e2e/framework/service:all-srcs",
|
||||
"//test/e2e/framework/skipper:all-srcs",
|
||||
"//test/e2e/framework/ssh:all-srcs",
|
||||
"//test/e2e/framework/statefulset:all-srcs",
|
||||
"//test/e2e/framework/testfiles:all-srcs",
|
||||
"//test/e2e/framework/timer:all-srcs",
|
||||
"//test/e2e/framework/viperconfig:all-srcs",
|
||||
"//test/e2e/framework/volume:all-srcs",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["log_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//vendor/github.com/onsi/ginkgo:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo/config:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo/reporters:go_default_library",
|
||||
"//vendor/github.com/onsi/gomega:go_default_library",
|
||||
],
|
||||
)
|
||||
|
1  vendor/k8s.io/kubernetes/test/e2e/framework/OWNERS  (generated, vendored)
@@ -8,6 +8,7 @@ approvers:
- pohly
- oomichi
- neolit123
- SataQiu
reviewers:
- sig-testing-reviewers
- timothysc
|
2  vendor/k8s.io/kubernetes/test/e2e/framework/auth/BUILD  (generated, vendored)
@@ -13,7 +13,7 @@ go_library(
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1:go_default_library",
        "//vendor/github.com/onsi/ginkgo:go_default_library",
        "//test/e2e/framework/log:go_default_library",
        "//vendor/github.com/pkg/errors:go_default_library",
    ],
)
|
43  vendor/k8s.io/kubernetes/test/e2e/framework/auth/helpers.go  (generated, vendored)
@@ -17,11 +17,10 @@ limitations under the License.
|
||||
package auth
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
"github.com/pkg/errors"
|
||||
authorizationv1 "k8s.io/api/authorization/v1"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
@ -30,6 +29,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
v1authorization "k8s.io/client-go/kubernetes/typed/authorization/v1"
|
||||
v1rbac "k8s.io/client-go/kubernetes/typed/rbac/v1"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -66,7 +66,7 @@ func WaitForNamedAuthorizationUpdate(c v1authorization.SubjectAccessReviewsGette
|
||||
}
|
||||
|
||||
err := wait.Poll(policyCachePollInterval, policyCachePollTimeout, func() (bool, error) {
|
||||
response, err := c.SubjectAccessReviews().Create(review)
|
||||
response, err := c.SubjectAccessReviews().Create(context.TODO(), review, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@ -86,7 +86,7 @@ func BindClusterRole(c bindingsGetter, clusterRole, ns string, subjects ...rbacv
|
||||
}
|
||||
|
||||
// Since the namespace names are unique, we can leave this lying around so we don't have to race any caches
|
||||
_, err := c.ClusterRoleBindings().Create(&rbacv1.ClusterRoleBinding{
|
||||
_, err := c.ClusterRoleBindings().Create(context.TODO(), &rbacv1.ClusterRoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: ns + "--" + clusterRole,
|
||||
},
|
||||
@ -96,7 +96,7 @@ func BindClusterRole(c bindingsGetter, clusterRole, ns string, subjects ...rbacv
|
||||
Name: clusterRole,
|
||||
},
|
||||
Subjects: subjects,
|
||||
})
|
||||
}, metav1.CreateOptions{})
|
||||
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "binding clusterrole/%s for %q for %v", clusterRole, ns, subjects)
|
||||
@ -123,7 +123,7 @@ func bindInNamespace(c bindingsGetter, roleType, role, ns string, subjects ...rb
|
||||
}
|
||||
|
||||
// Since the namespace names are unique, we can leave this lying around so we don't have to race any caches
|
||||
_, err := c.RoleBindings(ns).Create(&rbacv1.RoleBinding{
|
||||
_, err := c.RoleBindings(ns).Create(context.TODO(), &rbacv1.RoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: ns + "--" + role,
|
||||
},
|
||||
@ -133,7 +133,7 @@ func bindInNamespace(c bindingsGetter, roleType, role, ns string, subjects ...rb
|
||||
Name: role,
|
||||
},
|
||||
Subjects: subjects,
|
||||
})
|
||||
}, metav1.CreateOptions{})
|
||||
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "binding %s/%s into %q for %v", roleType, role, ns, subjects)
|
||||
@ -150,39 +150,18 @@ var (
|
||||
// IsRBACEnabled returns true if RBAC is enabled. Otherwise false.
|
||||
func IsRBACEnabled(crGetter v1rbac.ClusterRolesGetter) bool {
|
||||
isRBACEnabledOnce.Do(func() {
|
||||
crs, err := crGetter.ClusterRoles().List(metav1.ListOptions{})
|
||||
crs, err := crGetter.ClusterRoles().List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
logf("Error listing ClusterRoles; assuming RBAC is disabled: %v", err)
|
||||
e2elog.Logf("Error listing ClusterRoles; assuming RBAC is disabled: %v", err)
|
||||
isRBACEnabled = false
|
||||
} else if crs == nil || len(crs.Items) == 0 {
|
||||
logf("No ClusterRoles found; assuming RBAC is disabled.")
|
||||
e2elog.Logf("No ClusterRoles found; assuming RBAC is disabled.")
|
||||
isRBACEnabled = false
|
||||
} else {
|
||||
logf("Found ClusterRoles; assuming RBAC is enabled.")
|
||||
e2elog.Logf("Found ClusterRoles; assuming RBAC is enabled.")
|
||||
isRBACEnabled = true
|
||||
}
|
||||
})
|
||||
|
||||
return isRBACEnabled
|
||||
}
|
||||
|
||||
// logf logs INFO lines to the GinkgoWriter.
|
||||
// TODO: Log functions like these should be put into their own package,
|
||||
// see: https://github.com/kubernetes/kubernetes/issues/76728
|
||||
func logf(format string, args ...interface{}) {
|
||||
log("INFO", format, args...)
|
||||
}
|
||||
|
||||
// log prints formatted log messages to the global GinkgoWriter.
|
||||
// TODO: Log functions like these should be put into their own package,
|
||||
// see: https://github.com/kubernetes/kubernetes/issues/76728
|
||||
func log(level string, format string, args ...interface{}) {
|
||||
fmt.Fprintf(ginkgo.GinkgoWriter, nowStamp()+": "+level+": "+format+"\n", args...)
|
||||
}
|
||||
|
||||
// nowStamp returns the current time formatted for placement in the logs (time.StampMilli).
|
||||
// TODO: If only used for logging, this should be put into a logging package,
|
||||
// see: https://github.com/kubernetes/kubernetes/issues/76728
|
||||
func nowStamp() string {
|
||||
return time.Now().Format(time.StampMilli)
|
||||
}
|
||||
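For orientation only (not from the patch): with the updated helpers above, callers keep the same public API, BindClusterRole and IsRBACEnabled, while the context.TODO() and CreateOptions plumbing stays inside the package and logging now routes through e2elog.Logf. A hedged usage sketch; the role name and subject are made up.

package e2e

import (
	rbacv1 "k8s.io/api/rbac/v1"

	"k8s.io/kubernetes/test/e2e/framework"
	e2eauth "k8s.io/kubernetes/test/e2e/framework/auth"
)

// grantEditIfRBAC binds the "edit" ClusterRole to the namespace's default
// service account when RBAC is active; it is a no-op otherwise.
func grantEditIfRBAC(f *framework.Framework) {
	if !e2eauth.IsRBACEnabled(f.ClientSet.RbacV1()) {
		return
	}
	err := e2eauth.BindClusterRole(f.ClientSet.RbacV1(), "edit", f.Namespace.Name,
		rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"})
	framework.ExpectNoError(err, "binding edit role in %s", f.Namespace.Name)
}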
|
22  vendor/k8s.io/kubernetes/test/e2e/framework/checks.go  (generated, vendored)
@@ -1,22 +0,0 @@
|
||||
/*
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
// IsAppArmorSupported checks whether the AppArmor is supported by the node OS distro.
|
||||
func IsAppArmorSupported() bool {
|
||||
return NodeOSDistroIs(AppArmorDistros...)
|
||||
}
|
5  vendor/k8s.io/kubernetes/test/e2e/framework/exec_util.go  (generated, vendored)
@@ -18,6 +18,7 @@ package framework
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"io"
|
||||
"net/url"
|
||||
"strings"
|
||||
@ -110,14 +111,14 @@ func (f *Framework) ExecShellInContainer(podName, containerName string, cmd stri
|
||||
}
|
||||
|
||||
func (f *Framework) execCommandInPod(podName string, cmd ...string) string {
|
||||
pod, err := f.PodClient().Get(podName, metav1.GetOptions{})
|
||||
pod, err := f.PodClient().Get(context.TODO(), podName, metav1.GetOptions{})
|
||||
ExpectNoError(err, "failed to get pod %v", podName)
|
||||
gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty())
|
||||
return f.ExecCommandInContainer(podName, pod.Spec.Containers[0].Name, cmd...)
|
||||
}
|
||||
|
||||
func (f *Framework) execCommandInPodWithFullOutput(podName string, cmd ...string) (string, string, error) {
|
||||
pod, err := f.PodClient().Get(podName, metav1.GetOptions{})
|
||||
pod, err := f.PodClient().Get(context.TODO(), podName, metav1.GetOptions{})
|
||||
ExpectNoError(err, "failed to get pod %v", podName)
|
||||
gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty())
|
||||
return f.ExecCommandInContainerWithFullOutput(podName, pod.Spec.Containers[0].Name, cmd...)
|
||||
|
15  vendor/k8s.io/kubernetes/test/e2e/framework/expect.go  (generated, vendored)
@@ -45,3 +45,18 @@ func ExpectNoError(err error, explain ...interface{}) {
func ExpectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
	gomega.ExpectWithOffset(1+offset, err).NotTo(gomega.HaveOccurred(), explain...)
}

// ExpectConsistOf expects actual contains precisely the extra elements. The ordering of the elements does not matter.
func ExpectConsistOf(actual interface{}, extra interface{}, explain ...interface{}) {
	gomega.ExpectWithOffset(1, actual).To(gomega.ConsistOf(extra), explain...)
}

// ExpectHaveKey expects the actual map has the key in the keyset
func ExpectHaveKey(actual interface{}, key interface{}, explain ...interface{}) {
	gomega.ExpectWithOffset(1, actual).To(gomega.HaveKey(key), explain...)
}

// ExpectEmpty expects actual is empty
func ExpectEmpty(actual interface{}, explain ...interface{}) {
	gomega.ExpectWithOffset(1, actual).To(gomega.BeEmpty(), explain...)
}
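A hedged usage sketch (not from the patch) of the new wrappers in a Ginkgo spec; the labels and slices are illustrative.

package e2e

import (
	"github.com/onsi/ginkgo"

	"k8s.io/kubernetes/test/e2e/framework"
)

var _ = ginkgo.Describe("expect helpers", func() {
	ginkgo.It("uses the new matcher wrappers", func() {
		labels := map[string]string{"app": "csi-rbdplugin", "role": "node"}
		framework.ExpectHaveKey(labels, "app", "pod should carry the app label")
		framework.ExpectConsistOf([]string{"b", "a"}, []string{"a", "b"}, "ordering does not matter")
		framework.ExpectEmpty(map[string]string{}, "expected an empty map")
	})
})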
|
197  vendor/k8s.io/kubernetes/test/e2e/framework/framework.go  (generated, vendored)
@@ -22,7 +22,7 @@ limitations under the License.
|
||||
package framework
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
@ -31,6 +31,8 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@ -58,7 +60,6 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
maxKubectlExecRetries = 5
|
||||
// DefaultNamespaceDeletionTimeout is timeout duration for waiting for a namespace deletion.
|
||||
DefaultNamespaceDeletionTimeout = 5 * time.Minute
|
||||
)
|
||||
@ -73,6 +74,7 @@ type Framework struct {
|
||||
// test multiple times in parallel.
|
||||
UniqueName string
|
||||
|
||||
clientConfig *rest.Config
|
||||
ClientSet clientset.Interface
|
||||
KubemarkExternalClusterClientSet clientset.Interface
|
||||
|
||||
@ -104,6 +106,14 @@ type Framework struct {
|
||||
// should abort, the AfterSuite hook should run all Cleanup actions.
|
||||
cleanupHandle CleanupActionHandle
|
||||
|
||||
// afterEaches is a map of name to function to be called after each test. These are not
|
||||
// cleared. The call order is randomized so that no dependencies can grow between
|
||||
// the various afterEaches
|
||||
afterEaches map[string]AfterEachActionFunc
|
||||
|
||||
// beforeEachStarted indicates that BeforeEach has started
|
||||
beforeEachStarted bool
|
||||
|
||||
// configuration for framework's client
|
||||
Options Options
|
||||
|
||||
@ -115,6 +125,9 @@ type Framework struct {
|
||||
clusterAutoscalerMetricsBeforeTest e2emetrics.Collection
|
||||
}
|
||||
|
||||
// AfterEachActionFunc is a function that can be called after each test
|
||||
type AfterEachActionFunc func(f *Framework, failed bool)
|
||||
|
||||
// TestDataSummary is an interface for managing test data.
|
||||
type TestDataSummary interface {
|
||||
SummaryKind() string
|
||||
@ -148,6 +161,20 @@ func NewFramework(baseName string, options Options, client clientset.Interface)
|
||||
ClientSet: client,
|
||||
}
|
||||
|
||||
f.AddAfterEach("dumpNamespaceInfo", func(f *Framework, failed bool) {
|
||||
if !failed {
|
||||
return
|
||||
}
|
||||
if !TestContext.DumpLogsOnFailure {
|
||||
return
|
||||
}
|
||||
if !f.SkipNamespaceCreation {
|
||||
for _, ns := range f.namespacesToDelete {
|
||||
DumpAllNamespaceInfo(f.ClientSet, ns.Name)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
ginkgo.BeforeEach(f.BeforeEach)
|
||||
ginkgo.AfterEach(f.AfterEach)
|
||||
|
||||
@ -156,6 +183,8 @@ func NewFramework(baseName string, options Options, client clientset.Interface)
|
||||
|
||||
// BeforeEach gets a client and makes a namespace.
|
||||
func (f *Framework) BeforeEach() {
|
||||
f.beforeEachStarted = true
|
||||
|
||||
// The fact that we need this feels like a bug in ginkgo.
|
||||
// https://github.com/onsi/ginkgo/issues/222
|
||||
f.cleanupHandle = AddCleanupAction(f.AfterEach)
|
||||
@ -172,6 +201,7 @@ func (f *Framework) BeforeEach() {
|
||||
if TestContext.KubeAPIContentType != "" {
|
||||
config.ContentType = TestContext.KubeAPIContentType
|
||||
}
|
||||
f.clientConfig = rest.CopyConfig(config)
|
||||
f.ClientSet, err = clientset.NewForConfig(config)
|
||||
ExpectNoError(err)
|
||||
f.DynamicClient, err = dynamic.NewForConfig(config)
|
||||
@ -317,10 +347,35 @@ func printSummaries(summaries []TestDataSummary, testBaseName string) {
|
||||
}
|
||||
}
|
||||
|
||||
// AddAfterEach is a way to add a function to be called after every test. The execution order is intentionally random
|
||||
// to avoid growing dependencies. If you register the same name twice, it is a coding error and will panic.
|
||||
func (f *Framework) AddAfterEach(name string, fn AfterEachActionFunc) {
|
||||
if _, ok := f.afterEaches[name]; ok {
|
||||
panic(fmt.Sprintf("%q is already registered", name))
|
||||
}
|
||||
|
||||
if f.afterEaches == nil {
|
||||
f.afterEaches = map[string]AfterEachActionFunc{}
|
||||
}
|
||||
f.afterEaches[name] = fn
|
||||
}
|
||||
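A hedged usage sketch of the new AddAfterEach hook (the hook name and log message are made up): registering the same name twice panics, and hooks run in random order after every test.

package e2e

import (
	"github.com/onsi/ginkgo"

	"k8s.io/kubernetes/test/e2e/framework"
)

var _ = ginkgo.Describe("csi driver", func() {
	f := framework.NewDefaultFramework("csi-hooks")

	// Runs after every test in this Describe; only acts on failure.
	f.AddAfterEach("dumpDriverState", func(f *framework.Framework, failed bool) {
		if !failed {
			return
		}
		framework.Logf("test failed in namespace %q; gather CSI driver state here", f.Namespace.Name)
	})

	ginkgo.It("runs with the cleanup hook installed", func() {})
})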
|
||||
// AfterEach deletes the namespace, after reading its events.
|
||||
func (f *Framework) AfterEach() {
|
||||
// If BeforeEach never started AfterEach should be skipped.
|
||||
// Currently some tests under e2e/storage have this condition.
|
||||
if !f.beforeEachStarted {
|
||||
return
|
||||
}
|
||||
|
||||
RemoveCleanupAction(f.cleanupHandle)
|
||||
|
||||
// This should not happen. Given ClientSet is a public field a test must have updated it!
|
||||
// Error out early before any API calls during cleanup.
|
||||
if f.ClientSet == nil {
|
||||
Failf("The framework ClientSet must not be nil at this point")
|
||||
}
|
||||
|
||||
// DeleteNamespace at the very end in defer, to avoid any
|
||||
// expectation failures preventing deleting the namespace.
|
||||
defer func() {
|
||||
@ -331,9 +386,14 @@ func (f *Framework) AfterEach() {
|
||||
if TestContext.DeleteNamespace && (TestContext.DeleteNamespaceOnFailure || !ginkgo.CurrentGinkgoTestDescription().Failed) {
|
||||
for _, ns := range f.namespacesToDelete {
|
||||
ginkgo.By(fmt.Sprintf("Destroying namespace %q for this suite.", ns.Name))
|
||||
if err := f.ClientSet.CoreV1().Namespaces().Delete(ns.Name, nil); err != nil {
|
||||
if err := f.ClientSet.CoreV1().Namespaces().Delete(context.TODO(), ns.Name, metav1.DeleteOptions{}); err != nil {
|
||||
if !apierrors.IsNotFound(err) {
|
||||
nsDeletionErrors[ns.Name] = err
|
||||
|
||||
// Dump namespace if we are unable to delete the namespace and the dump was not already performed.
|
||||
if !ginkgo.CurrentGinkgoTestDescription().Failed && TestContext.DumpLogsOnFailure {
|
||||
DumpAllNamespaceInfo(f.ClientSet, ns.Name)
|
||||
}
|
||||
} else {
|
||||
Logf("Namespace %v was already deleted", ns.Name)
|
||||
}
|
||||
@ -349,6 +409,7 @@ func (f *Framework) AfterEach() {
|
||||
|
||||
// Paranoia-- prevent reuse!
|
||||
f.Namespace = nil
|
||||
f.clientConfig = nil
|
||||
f.ClientSet = nil
|
||||
f.namespacesToDelete = nil
|
||||
|
||||
@ -362,12 +423,9 @@ func (f *Framework) AfterEach() {
|
||||
}
|
||||
}()
|
||||
|
||||
// Print events if the test failed.
|
||||
if ginkgo.CurrentGinkgoTestDescription().Failed && TestContext.DumpLogsOnFailure {
|
||||
// Pass both unversioned client and versioned clientset, till we have removed all uses of the unversioned client.
|
||||
if !f.SkipNamespaceCreation {
|
||||
DumpAllNamespaceInfo(f.ClientSet, f.Namespace.Name)
|
||||
}
|
||||
// run all aftereach functions in random order to ensure no dependencies grow
|
||||
for _, afterEachFn := range f.afterEaches {
|
||||
afterEachFn(f, ginkgo.CurrentGinkgoTestDescription().Failed)
|
||||
}
|
||||
|
||||
if TestContext.GatherKubeSystemResourceUsageData != "false" && TestContext.GatherKubeSystemResourceUsageData != "none" && f.gatherer != nil {
|
||||
@ -487,6 +545,15 @@ func (f *Framework) WaitForPodNoLongerRunning(podName string) error {
|
||||
return e2epod.WaitForPodNoLongerRunningInNamespace(f.ClientSet, podName, f.Namespace.Name)
|
||||
}
|
||||
|
||||
// ClientConfig an externally accessible method for reading the kube client config.
|
||||
func (f *Framework) ClientConfig() *rest.Config {
|
||||
ret := rest.CopyConfig(f.clientConfig)
|
||||
// json is least common denominator
|
||||
ret.ContentType = runtime.ContentTypeJSON
|
||||
ret.AcceptContentTypes = runtime.ContentTypeJSON
|
||||
return ret
|
||||
}
|
||||
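The new ClientConfig() accessor above returns a copy of the rest config with its content type forced to JSON, so tests can safely derive extra clients from it. A hedged sketch; the dynamic client is chosen purely as an example.

package e2e

import (
	"k8s.io/client-go/dynamic"

	"k8s.io/kubernetes/test/e2e/framework"
)

// extraDynamicClient builds a second client from the framework's rest config.
func extraDynamicClient(f *framework.Framework) (dynamic.Interface, error) {
	cfg := f.ClientConfig() // already a copy, safe to mutate
	return dynamic.NewForConfig(cfg)
}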
|
||||
// TestContainerOutput runs the given pod in the given namespace and waits
|
||||
// for all of the containers in the podSpec to move into the 'Success' status, and tests
|
||||
// the specified container log against the given expected output using a substring matcher.
|
||||
@ -501,46 +568,6 @@ func (f *Framework) TestContainerOutputRegexp(scenarioName string, pod *v1.Pod,
|
||||
f.testContainerOutputMatcher(scenarioName, pod, containerIndex, expectedOutput, gomega.MatchRegexp)
|
||||
}
|
||||
|
||||
// WriteFileViaContainer writes a file using kubectl exec echo <contents> > <path> via specified container
|
||||
// because of the primitive technique we're using here, we only allow ASCII alphanumeric characters
|
||||
func (f *Framework) WriteFileViaContainer(podName, containerName string, path string, contents string) error {
|
||||
ginkgo.By("writing a file in the container")
|
||||
allowedCharacters := "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
|
||||
for _, c := range contents {
|
||||
if !strings.ContainsRune(allowedCharacters, c) {
|
||||
return fmt.Errorf("Unsupported character in string to write: %v", c)
|
||||
}
|
||||
}
|
||||
command := fmt.Sprintf("echo '%s' > '%s'; sync", contents, path)
|
||||
stdout, stderr, err := kubectlExecWithRetry(f.Namespace.Name, podName, containerName, "--", "/bin/sh", "-c", command)
|
||||
if err != nil {
|
||||
Logf("error running kubectl exec to write file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// ReadFileViaContainer reads a file using kubectl exec cat <path>.
|
||||
func (f *Framework) ReadFileViaContainer(podName, containerName string, path string) (string, error) {
|
||||
ginkgo.By("reading a file in the container")
|
||||
|
||||
stdout, stderr, err := kubectlExecWithRetry(f.Namespace.Name, podName, containerName, "--", "cat", path)
|
||||
if err != nil {
|
||||
Logf("error running kubectl exec to read file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
|
||||
}
|
||||
return string(stdout), err
|
||||
}
|
||||
|
||||
// CheckFileSizeViaContainer returns the list of file size under the specified path.
|
||||
func (f *Framework) CheckFileSizeViaContainer(podName, containerName, path string) (string, error) {
|
||||
ginkgo.By("checking a file size in the container")
|
||||
|
||||
stdout, stderr, err := kubectlExecWithRetry(f.Namespace.Name, podName, containerName, "--", "ls", "-l", path)
|
||||
if err != nil {
|
||||
Logf("error running kubectl exec to read file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
|
||||
}
|
||||
return string(stdout), err
|
||||
}
|
||||
|
||||
// CreateServiceForSimpleAppWithPods is a convenience wrapper to create a service and its matching pods all at once.
|
||||
func (f *Framework) CreateServiceForSimpleAppWithPods(contPort int, svcPort int, appName string, podSpec func(n v1.Node) v1.PodSpec, count int, block bool) (*v1.Service, error) {
|
||||
var err error
|
||||
@ -574,7 +601,7 @@ func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName str
|
||||
}}
|
||||
}
|
||||
Logf("Creating a service-for-%v for selecting app=%v-pod", appName, appName)
|
||||
service, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(&v1.Service{
|
||||
service, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), &v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "service-for-" + appName,
|
||||
Labels: map[string]string{
|
||||
@ -585,7 +612,7 @@ func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName str
|
||||
Ports: portsFunc(),
|
||||
Selector: serviceSelector,
|
||||
},
|
||||
})
|
||||
}, metav1.CreateOptions{})
|
||||
ExpectNoError(err)
|
||||
return service
|
||||
}
|
||||
@ -599,13 +626,13 @@ func (f *Framework) CreatePodsPerNodeForSimpleApp(appName string, podSpec func(n
|
||||
}
|
||||
for i, node := range nodes.Items {
|
||||
Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName)
|
||||
_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(&v1.Pod{
|
||||
_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf(appName+"-pod-%v", i),
|
||||
Labels: podLabels,
|
||||
},
|
||||
Spec: podSpec(node),
|
||||
})
|
||||
}, metav1.CreateOptions{})
|
||||
ExpectNoError(err)
|
||||
}
|
||||
return podLabels
|
||||
@ -665,51 +692,6 @@ func (kc *KubeConfig) FindCluster(name string) *KubeCluster {
|
||||
return nil
|
||||
}
|
||||
|
||||
func kubectlExecWithRetry(namespace string, podName, containerName string, args ...string) ([]byte, []byte, error) {
|
||||
for numRetries := 0; numRetries < maxKubectlExecRetries; numRetries++ {
|
||||
if numRetries > 0 {
|
||||
Logf("Retrying kubectl exec (retry count=%v/%v)", numRetries+1, maxKubectlExecRetries)
|
||||
}
|
||||
|
||||
stdOutBytes, stdErrBytes, err := kubectlExec(namespace, podName, containerName, args...)
|
||||
if err != nil {
|
||||
if strings.Contains(strings.ToLower(string(stdErrBytes)), "i/o timeout") {
|
||||
// Retry on "i/o timeout" errors
|
||||
Logf("Warning: kubectl exec encountered i/o timeout.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
|
||||
continue
|
||||
}
|
||||
if strings.Contains(strings.ToLower(string(stdErrBytes)), "container not found") {
|
||||
// Retry on "container not found" errors
|
||||
Logf("Warning: kubectl exec encountered container not found.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
|
||||
time.Sleep(2 * time.Second)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
return stdOutBytes, stdErrBytes, err
|
||||
}
|
||||
err := fmt.Errorf("Failed: kubectl exec failed %d times with \"i/o timeout\". Giving up", maxKubectlExecRetries)
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
func kubectlExec(namespace string, podName, containerName string, args ...string) ([]byte, []byte, error) {
|
||||
var stdout, stderr bytes.Buffer
|
||||
cmdArgs := []string{
|
||||
"exec",
|
||||
fmt.Sprintf("--namespace=%v", namespace),
|
||||
podName,
|
||||
fmt.Sprintf("-c=%v", containerName),
|
||||
}
|
||||
cmdArgs = append(cmdArgs, args...)
|
||||
|
||||
cmd := KubectlCmd(cmdArgs...)
|
||||
cmd.Stdout, cmd.Stderr = &stdout, &stderr
|
||||
|
||||
Logf("Running '%s %s'", cmd.Path, strings.Join(cmdArgs, " "))
|
||||
err := cmd.Run()
|
||||
return stdout.Bytes(), stderr.Bytes(), err
|
||||
}
|
||||
|
||||
// KubeDescribe is wrapper function for ginkgo describe. Adds namespacing.
|
||||
// TODO: Support type safe tagging as well https://github.com/kubernetes/kubernetes/pull/22401.
|
||||
func KubeDescribe(text string, body func()) bool {
|
||||
@ -795,9 +777,9 @@ func filterLabels(selectors map[string]string, cli clientset.Interface, ns strin
|
||||
if len(selectors) > 0 {
|
||||
selector = labels.SelectorFromSet(labels.Set(selectors))
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
pl, err = cli.CoreV1().Pods(ns).List(options)
|
||||
pl, err = cli.CoreV1().Pods(ns).List(context.TODO(), options)
|
||||
} else {
|
||||
pl, err = cli.CoreV1().Pods(ns).List(metav1.ListOptions{})
|
||||
pl, err = cli.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
|
||||
}
|
||||
return pl, err
|
||||
}
|
||||
@ -895,18 +877,3 @@ func (cl *ClusterVerification) ForEach(podFunc func(v1.Pod)) error {
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
const (
|
||||
// preconfiguredRuntimeHandler is the name of the runtime handler that is expected to be
|
||||
// preconfigured in the test environment.
|
||||
preconfiguredRuntimeHandler = "test-handler"
|
||||
)
|
||||
|
||||
// PreconfiguredRuntimeClassHandler returns configured runtime handler.
|
||||
func PreconfiguredRuntimeClassHandler() string {
|
||||
if TestContext.ContainerRuntime == "docker" {
|
||||
return TestContext.ContainerRuntime
|
||||
}
|
||||
|
||||
return preconfiguredRuntimeHandler
|
||||
}
|
||||
|
89  vendor/k8s.io/kubernetes/test/e2e/framework/get-kubemark-resource-usage.go  (generated, vendored)
@@ -1,89 +0,0 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
|
||||
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
|
||||
)
|
||||
|
||||
// KubemarkResourceUsage is a struct for tracking the resource usage of kubemark.
|
||||
type KubemarkResourceUsage struct {
|
||||
Name string
|
||||
MemoryWorkingSetInBytes uint64
|
||||
CPUUsageInCores float64
|
||||
}
|
||||
|
||||
func getMasterUsageByPrefix(prefix string) (string, error) {
|
||||
sshResult, err := e2essh.SSH(fmt.Sprintf("ps ax -o %%cpu,rss,command | tail -n +2 | grep %v | sed 's/\\s+/ /g'", prefix), GetMasterHost()+":22", TestContext.Provider)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return sshResult.Stdout, nil
|
||||
}
|
||||
|
||||
// GetKubemarkMasterComponentsResourceUsage returns the resource usage of kubemark which contains multiple combinations of cpu and memory usage for each pod name.
|
||||
// TODO: figure out how to move this to kubemark directory (need to factor test SSH out of e2e framework)
|
||||
func GetKubemarkMasterComponentsResourceUsage() map[string]*KubemarkResourceUsage {
|
||||
result := make(map[string]*KubemarkResourceUsage)
|
||||
// Get kubernetes component resource usage
|
||||
sshResult, err := getMasterUsageByPrefix("kube")
|
||||
if err != nil {
|
||||
Logf("Error when trying to SSH to master machine. Skipping probe. %v", err)
|
||||
return nil
|
||||
}
|
||||
scanner := bufio.NewScanner(strings.NewReader(sshResult))
|
||||
for scanner.Scan() {
|
||||
var cpu float64
|
||||
var mem uint64
|
||||
var name string
|
||||
fmt.Sscanf(strings.TrimSpace(scanner.Text()), "%f %d /usr/local/bin/kube-%s", &cpu, &mem, &name)
|
||||
if name != "" {
|
||||
// Gatherer expects pod_name/container_name format
|
||||
fullName := name + "/" + name
|
||||
result[fullName] = &KubemarkResourceUsage{Name: fullName, MemoryWorkingSetInBytes: mem * 1024, CPUUsageInCores: cpu / 100}
|
||||
}
|
||||
}
|
||||
// Get etcd resource usage
|
||||
sshResult, err = getMasterUsageByPrefix("bin/etcd")
|
||||
if err != nil {
|
||||
Logf("Error when trying to SSH to master machine. Skipping probe")
|
||||
return nil
|
||||
}
|
||||
scanner = bufio.NewScanner(strings.NewReader(sshResult))
|
||||
for scanner.Scan() {
|
||||
var cpu float64
|
||||
var mem uint64
|
||||
var etcdKind string
|
||||
fmt.Sscanf(strings.TrimSpace(scanner.Text()), "%f %d /bin/sh -c /usr/local/bin/etcd", &cpu, &mem)
|
||||
dataDirStart := strings.Index(scanner.Text(), "--data-dir")
|
||||
if dataDirStart < 0 {
|
||||
continue
|
||||
}
|
||||
fmt.Sscanf(scanner.Text()[dataDirStart:], "--data-dir=/var/%s", &etcdKind)
|
||||
if etcdKind != "" {
|
||||
// Gatherer expects pod_name/container_name format
|
||||
fullName := "etcd/" + etcdKind
|
||||
result[fullName] = &KubemarkResourceUsage{Name: fullName, MemoryWorkingSetInBytes: mem * 1024, CPUUsageInCores: cpu / 100}
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
38  vendor/k8s.io/kubernetes/test/e2e/framework/ginkgowrapper/wrapper.go  (generated, vendored)
@@ -67,44 +67,6 @@ func Fail(message string, callerSkip ...int) {
|
||||
ginkgo.Fail(message, skip)
|
||||
}
|
||||
|
||||
// SkipPanic is the value that will be panicked from Skip.
|
||||
type SkipPanic struct {
|
||||
Message string // The failure message passed to Fail
|
||||
Filename string // The filename that is the source of the failure
|
||||
Line int // The line number of the filename that is the source of the failure
|
||||
FullStackTrace string // A full stack trace starting at the source of the failure
|
||||
}
|
||||
|
||||
// String makes SkipPanic look like the old Ginkgo panic when printed.
|
||||
func (SkipPanic) String() string { return ginkgo.GINKGO_PANIC }
|
||||
|
||||
// Skip wraps ginkgo.Skip so that it panics with more useful
|
||||
// information about why the test is being skipped. This function will
|
||||
// panic with a SkipPanic.
|
||||
func Skip(message string, callerSkip ...int) {
|
||||
skip := 1
|
||||
if len(callerSkip) > 0 {
|
||||
skip += callerSkip[0]
|
||||
}
|
||||
|
||||
_, file, line, _ := runtime.Caller(skip)
|
||||
sp := SkipPanic{
|
||||
Message: message,
|
||||
Filename: file,
|
||||
Line: line,
|
||||
FullStackTrace: pruneStack(skip),
|
||||
}
|
||||
|
||||
defer func() {
|
||||
e := recover()
|
||||
if e != nil {
|
||||
panic(sp)
|
||||
}
|
||||
}()
|
||||
|
||||
ginkgo.Skip(message, skip)
|
||||
}
|
||||
|
||||
// ginkgo adds a lot of test running infrastructure to the stack, so
|
||||
// we filter those out
|
||||
var stackSkipPattern = regexp.MustCompile(`onsi/ginkgo`)
|
||||
|
81  vendor/k8s.io/kubernetes/test/e2e/framework/google_compute.go  (generated, vendored)
@@ -129,84 +129,3 @@ func LogClusterImageSources() {
|
||||
Logf("cluster images sources, could not write to %q: %v", filePath, err)
|
||||
}
|
||||
}
|
||||
|
||||
// CreateManagedInstanceGroup creates a Compute Engine managed instance group.
|
||||
func CreateManagedInstanceGroup(size int64, zone, template string) error {
|
||||
// TODO(verult): make this hit the compute API directly instead of
|
||||
// shelling out to gcloud.
|
||||
_, _, err := retryCmd("gcloud", "compute", "instance-groups", "managed",
|
||||
"create",
|
||||
fmt.Sprintf("--project=%s", TestContext.CloudConfig.ProjectID),
|
||||
fmt.Sprintf("--zone=%s", zone),
|
||||
TestContext.CloudConfig.NodeInstanceGroup,
|
||||
fmt.Sprintf("--size=%d", size),
|
||||
fmt.Sprintf("--template=%s", template))
|
||||
if err != nil {
|
||||
return fmt.Errorf("gcloud compute instance-groups managed create call failed with err: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetManagedInstanceGroupTemplateName returns the list of Google Compute Engine managed instance groups.
|
||||
func GetManagedInstanceGroupTemplateName(zone string) (string, error) {
|
||||
// TODO(verult): make this hit the compute API directly instead of
|
||||
// shelling out to gcloud. Use InstanceGroupManager to get Instance Template name.
|
||||
|
||||
stdout, _, err := retryCmd("gcloud", "compute", "instance-groups", "managed",
|
||||
"list",
|
||||
fmt.Sprintf("--filter=name:%s", TestContext.CloudConfig.NodeInstanceGroup),
|
||||
fmt.Sprintf("--project=%s", TestContext.CloudConfig.ProjectID),
|
||||
fmt.Sprintf("--zones=%s", zone),
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("gcloud compute instance-groups managed list call failed with err: %v", err)
|
||||
}
|
||||
|
||||
templateName, err := parseInstanceTemplateName(stdout)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error parsing gcloud output: %v", err)
|
||||
}
|
||||
return templateName, nil
|
||||
}
|
||||
|
||||
// DeleteManagedInstanceGroup deletes Google Compute Engine managed instance group.
|
||||
func DeleteManagedInstanceGroup(zone string) error {
|
||||
// TODO(verult): make this hit the compute API directly instead of
|
||||
// shelling out to gcloud.
|
||||
_, _, err := retryCmd("gcloud", "compute", "instance-groups", "managed",
|
||||
"delete",
|
||||
fmt.Sprintf("--project=%s", TestContext.CloudConfig.ProjectID),
|
||||
fmt.Sprintf("--zone=%s", zone),
|
||||
TestContext.CloudConfig.NodeInstanceGroup)
|
||||
if err != nil {
|
||||
return fmt.Errorf("gcloud compute instance-groups managed delete call failed with err: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseInstanceTemplateName(gcloudOutput string) (string, error) {
|
||||
const templateNameField = "INSTANCE_TEMPLATE"
|
||||
|
||||
lines := strings.Split(gcloudOutput, "\n")
|
||||
if len(lines) <= 1 { // Empty output or only contains column names
|
||||
return "", fmt.Errorf("the list is empty")
|
||||
}
|
||||
|
||||
// Otherwise, there should be exactly 1 entry, i.e. 2 lines
|
||||
fieldNames := strings.Fields(lines[0])
|
||||
instanceTemplateColumn := 0
|
||||
for instanceTemplateColumn < len(fieldNames) &&
|
||||
fieldNames[instanceTemplateColumn] != templateNameField {
|
||||
instanceTemplateColumn++
|
||||
}
|
||||
|
||||
if instanceTemplateColumn == len(fieldNames) {
|
||||
return "", fmt.Errorf("the list does not contain instance template information")
|
||||
}
|
||||
|
||||
fields := strings.Fields(lines[1])
|
||||
instanceTemplateName := fields[instanceTemplateColumn]
|
||||
|
||||
return instanceTemplateName, nil
|
||||
}
|
||||
|
32  vendor/k8s.io/kubernetes/test/e2e/framework/kubectl/BUILD  (generated, vendored, new file)
@@ -0,0 +1,32 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["kubectl_utils.go"],
|
||||
importpath = "k8s.io/kubernetes/test/e2e/framework/kubectl",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library",
|
||||
"//test/e2e/framework/log:go_default_library",
|
||||
"//test/e2e/framework/pod:go_default_library",
|
||||
"//test/utils:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
203  vendor/k8s.io/kubernetes/test/e2e/framework/kubectl/kubectl_utils.go  (generated, vendored, new file)
@@ -0,0 +1,203 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubectl

import (
	"bytes"
	"context"
	"fmt"
	"os/exec"
	"path/filepath"
	"strings"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	testutils "k8s.io/kubernetes/test/utils"

	"github.com/onsi/ginkgo"
)

const (
	maxKubectlExecRetries = 5
)

// TestKubeconfig is a struct containing the needed attributes from TestContext and Framework(Namespace).
type TestKubeconfig struct {
	CertDir     string
	Host        string
	KubeConfig  string
	KubeContext string
	KubectlPath string
	Namespace   string // Every test has at least one namespace unless creation is skipped
}

// NewTestKubeconfig returns a new Kubeconfig struct instance.
func NewTestKubeconfig(certdir, host, kubeconfig, kubecontext, kubectlpath, namespace string) *TestKubeconfig {
	return &TestKubeconfig{
		CertDir:     certdir,
		Host:        host,
		KubeConfig:  kubeconfig,
		KubeContext: kubecontext,
		KubectlPath: kubectlpath,
		Namespace:   namespace,
	}
}

// KubectlCmd runs the kubectl executable through the wrapper script.
func (tk *TestKubeconfig) KubectlCmd(args ...string) *exec.Cmd {
	defaultArgs := []string{}

	// Reference a --server option so tests can run anywhere.
	if tk.Host != "" {
		defaultArgs = append(defaultArgs, "--"+clientcmd.FlagAPIServer+"="+tk.Host)
	}
	if tk.KubeConfig != "" {
		defaultArgs = append(defaultArgs, "--"+clientcmd.RecommendedConfigPathFlag+"="+tk.KubeConfig)

		// Reference the KubeContext
		if tk.KubeContext != "" {
			defaultArgs = append(defaultArgs, "--"+clientcmd.FlagContext+"="+tk.KubeContext)
		}

	} else {
		if tk.CertDir != "" {
			defaultArgs = append(defaultArgs,
				fmt.Sprintf("--certificate-authority=%s", filepath.Join(tk.CertDir, "ca.crt")),
				fmt.Sprintf("--client-certificate=%s", filepath.Join(tk.CertDir, "kubecfg.crt")),
				fmt.Sprintf("--client-key=%s", filepath.Join(tk.CertDir, "kubecfg.key")))
		}
	}
	kubectlArgs := append(defaultArgs, args...)

	//We allow users to specify path to kubectl, so you can test either "kubectl" or "cluster/kubectl.sh"
	//and so on.
	cmd := exec.Command(tk.KubectlPath, kubectlArgs...)

	//caller will invoke this and wait on it.
	return cmd
}

// LogFailedContainers runs `kubectl logs` on a failed containers.
func LogFailedContainers(c clientset.Interface, ns string, logFunc func(ftm string, args ...interface{})) {
	podList, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		logFunc("Error getting pods in namespace '%s': %v", ns, err)
		return
	}
	logFunc("Running kubectl logs on non-ready containers in %v", ns)
	for _, pod := range podList.Items {
		if res, err := testutils.PodRunningReady(&pod); !res || err != nil {
			kubectlLogPod(c, pod, "", e2elog.Logf)
		}
	}
}

func kubectlLogPod(c clientset.Interface, pod v1.Pod, containerNameSubstr string, logFunc func(ftm string, args ...interface{})) {
	for _, container := range pod.Spec.Containers {
		if strings.Contains(container.Name, containerNameSubstr) {
			// Contains() matches all strings if substr is empty
			logs, err := e2epod.GetPodLogs(c, pod.Namespace, pod.Name, container.Name)
			if err != nil {
				logs, err = e2epod.GetPreviousPodLogs(c, pod.Namespace, pod.Name, container.Name)
				if err != nil {
					logFunc("Failed to get logs of pod %v, container %v, err: %v", pod.Name, container.Name, err)
				}
			}
			logFunc("Logs of %v/%v:%v on node %v", pod.Namespace, pod.Name, container.Name, pod.Spec.NodeName)
			logFunc("%s : STARTLOG\n%s\nENDLOG for container %v:%v:%v", containerNameSubstr, logs, pod.Namespace, pod.Name, container.Name)
		}
	}
}

// WriteFileViaContainer writes a file using kubectl exec echo <contents> > <path> via specified container
// because of the primitive technique we're using here, we only allow ASCII alphanumeric characters
func (tk *TestKubeconfig) WriteFileViaContainer(podName, containerName string, path string, contents string) error {
	ginkgo.By("writing a file in the container")
	allowedCharacters := "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
	for _, c := range contents {
		if !strings.ContainsRune(allowedCharacters, c) {
			return fmt.Errorf("Unsupported character in string to write: %v", c)
		}
	}
	command := fmt.Sprintf("echo '%s' > '%s'; sync", contents, path)
	stdout, stderr, err := tk.kubectlExecWithRetry(tk.Namespace, podName, containerName, "--", "/bin/sh", "-c", command)
	if err != nil {
		e2elog.Logf("error running kubectl exec to write file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
	}
	return err
}

// ReadFileViaContainer reads a file using kubectl exec cat <path>.
func (tk *TestKubeconfig) ReadFileViaContainer(podName, containerName string, path string) (string, error) {
	ginkgo.By("reading a file in the container")

	stdout, stderr, err := tk.kubectlExecWithRetry(tk.Namespace, podName, containerName, "--", "cat", path)
	if err != nil {
		e2elog.Logf("error running kubectl exec to read file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
	}
	return string(stdout), err
}

func (tk *TestKubeconfig) kubectlExecWithRetry(namespace string, podName, containerName string, args ...string) ([]byte, []byte, error) {
	for numRetries := 0; numRetries < maxKubectlExecRetries; numRetries++ {
		if numRetries > 0 {
			e2elog.Logf("Retrying kubectl exec (retry count=%v/%v)", numRetries+1, maxKubectlExecRetries)
		}

		stdOutBytes, stdErrBytes, err := tk.kubectlExec(namespace, podName, containerName, args...)
		if err != nil {
			if strings.Contains(strings.ToLower(string(stdErrBytes)), "i/o timeout") {
				// Retry on "i/o timeout" errors
				e2elog.Logf("Warning: kubectl exec encountered i/o timeout.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
				continue
			}
			if strings.Contains(strings.ToLower(string(stdErrBytes)), "container not found") {
				// Retry on "container not found" errors
				e2elog.Logf("Warning: kubectl exec encountered container not found.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
				time.Sleep(2 * time.Second)
				continue
			}
		}

		return stdOutBytes, stdErrBytes, err
	}
	err := fmt.Errorf("Failed: kubectl exec failed %d times with \"i/o timeout\". Giving up", maxKubectlExecRetries)
	return nil, nil, err
}

func (tk *TestKubeconfig) kubectlExec(namespace string, podName, containerName string, args ...string) ([]byte, []byte, error) {
	var stdout, stderr bytes.Buffer
	cmdArgs := []string{
		"exec",
		fmt.Sprintf("--namespace=%v", namespace),
		podName,
		fmt.Sprintf("-c=%v", containerName),
	}
	cmdArgs = append(cmdArgs, args...)

	cmd := tk.KubectlCmd(cmdArgs...)
	cmd.Stdout, cmd.Stderr = &stdout, &stderr

	e2elog.Logf("Running '%s %s'", cmd.Path, strings.Join(cmdArgs, " "))
	err := cmd.Run()
	return stdout.Bytes(), stderr.Bytes(), err
}
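Illustrative usage sketch for the helpers above (not part of the vendored change): a test can wrap its kubeconfig settings in a TestKubeconfig and read a file back out of a running container. The host, kubeconfig path, namespace, and pod/container names below are hypothetical placeholders.

package kubectlsketch

import (
	"fmt"

	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
)

func readValueFromPod() error {
	// Empty fields are simply omitted from the generated kubectl command line,
	// so only the settings a test actually has need to be filled in.
	tk := e2ekubectl.NewTestKubeconfig(
		"",                       // certDir
		"https://127.0.0.1:6443", // host (hypothetical API server)
		"/tmp/kubeconfig",        // kubeconfig path (hypothetical)
		"",                       // kubeContext
		"kubectl",                // kubectlPath
		"e2e-example",            // namespace (hypothetical)
	)

	// ReadFileViaContainer shells out to `kubectl exec ... cat` and retries
	// transient "i/o timeout" and "container not found" failures.
	out, err := tk.ReadFileViaContainer("test-pod", "test-container", "/tmp/value")
	if err != nil {
		return err
	}
	fmt.Println(out)
	return nil
}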
23
vendor/k8s.io/kubernetes/test/e2e/framework/log.go
generated
vendored
@ -17,10 +17,10 @@ limitations under the License.
|
||||
package framework
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"runtime/debug"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
@ -78,12 +78,14 @@ var codeFilterRE = regexp.MustCompile(`/github.com/onsi/ginkgo/`)
|
||||
// This is a modified copy of PruneStack in https://github.com/onsi/ginkgo/blob/f90f37d87fa6b1dd9625e2b1e83c23ffae3de228/internal/codelocation/code_location.go#L25:
|
||||
// - simplified API and thus renamed (calls debug.Stack() instead of taking a parameter)
|
||||
// - source code filtering updated to be specific to Kubernetes
|
||||
func PrunedStack(skip int) string {
|
||||
fullStackTrace := string(debug.Stack())
|
||||
stack := strings.Split(fullStackTrace, "\n")
|
||||
// - optimized to use bytes and in-place slice filtering from
|
||||
// https://github.com/golang/go/wiki/SliceTricks#filter-in-place
|
||||
func PrunedStack(skip int) []byte {
|
||||
fullStackTrace := debug.Stack()
|
||||
stack := bytes.Split(fullStackTrace, []byte("\n"))
|
||||
// Ensure that the even entries are the method names and the
|
||||
// the odd entries the source code information.
|
||||
if len(stack) > 0 && strings.HasPrefix(stack[0], "goroutine ") {
|
||||
if len(stack) > 0 && bytes.HasPrefix(stack[0], []byte("goroutine ")) {
|
||||
// Ignore "goroutine 29 [running]:" line.
|
||||
stack = stack[1:]
|
||||
}
|
||||
@ -94,13 +96,16 @@ func PrunedStack(skip int) string {
|
||||
if len(stack) > 2*skip {
|
||||
stack = stack[2*skip:]
|
||||
}
|
||||
prunedStack := []string{}
|
||||
n := 0
|
||||
for i := 0; i < len(stack)/2; i++ {
|
||||
// We filter out based on the source code file name.
|
||||
if !codeFilterRE.Match([]byte(stack[i*2+1])) {
|
||||
prunedStack = append(prunedStack, stack[i*2])
|
||||
prunedStack = append(prunedStack, stack[i*2+1])
|
||||
stack[n] = stack[i*2]
|
||||
stack[n+1] = stack[i*2+1]
|
||||
n += 2
|
||||
}
|
||||
}
|
||||
return strings.Join(prunedStack, "\n")
|
||||
stack = stack[:n]
|
||||
|
||||
return bytes.Join(stack, []byte("\n"))
|
||||
}
|
||||
|
11
vendor/k8s.io/kubernetes/test/e2e/framework/log/logger.go
generated
vendored
@ -52,14 +52,3 @@ func FailfWithOffset(offset int, format string, args ...interface{}) {
	log("FAIL", msg)
	ginkgowrapper.Fail(nowStamp()+": "+msg, 1+offset)
}

// Fail is a replacement for ginkgo.Fail which logs the problem as it occurs
// and then calls ginkgowrapper.Fail.
func Fail(msg string, callerSkip ...int) {
	skip := 1
	if len(callerSkip) > 0 {
		skip += callerSkip[0]
	}
	log("FAIL", msg)
	ginkgowrapper.Fail(nowStamp()+": "+msg, skip)
}
4
vendor/k8s.io/kubernetes/test/e2e/framework/metrics/BUILD
generated
vendored
@ -19,20 +19,20 @@ go_library(
        "metrics_grabber.go",
        "pod.go",
        "scheduler_metrics.go",
        "scheduling.go",
    ],
    importpath = "k8s.io/kubernetes/test/e2e/framework/metrics",
    deps = [
        "//pkg/apis/core:go_default_library",
        "//pkg/kubelet/dockershim/metrics:go_default_library",
        "//pkg/kubelet/metrics:go_default_library",
        "//pkg/master/ports:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/component-base/metrics/testutil:go_default_library",
        "//test/e2e/framework/log:go_default_library",
        "//test/e2e/framework/pod:go_default_library",
        "//test/e2e/perftype:go_default_library",
        "//test/e2e/system:go_default_library",
        "//vendor/k8s.io/klog:go_default_library",
8
vendor/k8s.io/kubernetes/test/e2e/framework/metrics/api_server_metrics.go
generated
vendored
@ -16,7 +16,11 @@ limitations under the License.

package metrics

import "k8s.io/component-base/metrics/testutil"
import (
	"context"

	"k8s.io/component-base/metrics/testutil"
)

// APIServerMetrics is metrics for API server
type APIServerMetrics testutil.Metrics
@ -40,7 +44,7 @@ func parseAPIServerMetrics(data string) (APIServerMetrics, error) {
}

func (g *Grabber) getMetricsFromAPIServer() (string, error) {
	rawOutput, err := g.client.CoreV1().RESTClient().Get().RequestURI("/metrics").Do().Raw()
	rawOutput, err := g.client.CoreV1().RESTClient().Get().RequestURI("/metrics").Do(context.TODO()).Raw()
	if err != nil {
		return "", err
	}
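Illustrative sketch of the calling convention shown in the hunk above, where the request is now finalized with a context; the function name and the way the clientset is obtained are assumptions for the sake of a self-contained example.

package metricssketch

import (
	"context"

	clientset "k8s.io/client-go/kubernetes"
)

// scrapeAPIServerMetrics mirrors the updated call above: REST requests are
// completed with Do(context.TODO()) instead of the old context-free Do().
func scrapeAPIServerMetrics(c clientset.Interface) (string, error) {
	raw, err := c.CoreV1().RESTClient().Get().RequestURI("/metrics").Do(context.TODO()).Raw()
	if err != nil {
		return "", err
	}
	return string(raw), nil
}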
3
vendor/k8s.io/kubernetes/test/e2e/framework/metrics/kubelet_metrics.go
generated
vendored
@ -17,6 +17,7 @@ limitations under the License.
package metrics

import (
	"context"
	"fmt"
	"io/ioutil"
	"net/http"
@ -85,7 +86,7 @@ func (g *Grabber) getMetricsFromNode(nodeName string, kubeletPort int) (string,
			SubResource("proxy").
			Name(fmt.Sprintf("%v:%v", nodeName, kubeletPort)).
			Suffix("metrics").
			Do().Raw()
			Do(context.TODO()).Raw()
		finished <- struct{}{}
	}()
	select {
43
vendor/k8s.io/kubernetes/test/e2e/framework/metrics/latencies.go
generated
vendored
@ -17,35 +17,9 @@ limitations under the License.
package metrics

import (
	"fmt"
	"math"
	"time"

	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

const (
	// SingleCallTimeout is how long to try single API calls (like 'get' or 'list'). Used to prevent
	// transient failures from failing tests.
	// TODO: client should not apply this timeout to Watch calls. Increased from 30s until that is fixed.
	SingleCallTimeout = 5 * time.Minute
)

// VerifyLatencyWithinThreshold verifies whether 50, 90 and 99th percentiles of a latency metric are
// within the expected threshold.
func VerifyLatencyWithinThreshold(threshold, actual LatencyMetric, metricName string) error {
	if actual.Perc50 > threshold.Perc50 {
		return fmt.Errorf("too high %v latency 50th percentile: %v", metricName, actual.Perc50)
	}
	if actual.Perc90 > threshold.Perc90 {
		return fmt.Errorf("too high %v latency 90th percentile: %v", metricName, actual.Perc90)
	}
	if actual.Perc99 > threshold.Perc99 {
		return fmt.Errorf("too high %v latency 99th percentile: %v", metricName, actual.Perc99)
	}
	return nil
}

// PodLatencyData encapsulates pod startup latency information.
type PodLatencyData struct {
	// Name of the pod
@ -62,20 +36,3 @@ type LatencySlice []PodLatencyData
func (a LatencySlice) Len() int           { return len(a) }
func (a LatencySlice) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a LatencySlice) Less(i, j int) bool { return a[i].Latency < a[j].Latency }

// ExtractLatencyMetrics returns latency metrics for each percentile(50th, 90th and 99th).
func ExtractLatencyMetrics(latencies []PodLatencyData) LatencyMetric {
	length := len(latencies)
	perc50 := latencies[int(math.Ceil(float64(length*50)/100))-1].Latency
	perc90 := latencies[int(math.Ceil(float64(length*90)/100))-1].Latency
	perc99 := latencies[int(math.Ceil(float64(length*99)/100))-1].Latency
	perc100 := latencies[length-1].Latency
	return LatencyMetric{Perc50: perc50, Perc90: perc90, Perc99: perc99, Perc100: perc100}
}

// PrintLatencies outputs latencies to log with readable format.
func PrintLatencies(latencies []PodLatencyData, header string) {
	metrics := ExtractLatencyMetrics(latencies)
	e2elog.Logf("10%% %s: %v", header, latencies[(len(latencies)*9)/10:])
	e2elog.Logf("perc50: %v, perc90: %v, perc99: %v", metrics.Perc50, metrics.Perc90, metrics.Perc99)
}
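Illustrative sketch of the latency helpers above; it assumes PodLatencyData carries a time.Duration Latency field, as implied by LatencySlice's Less method, and the sample values are made up.

package metricssketch

import (
	"sort"
	"time"

	e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
)

func summarizeStartupLatencies() e2emetrics.LatencyMetric {
	samples := []e2emetrics.PodLatencyData{
		{Name: "pod-a", Latency: 2 * time.Second},
		{Name: "pod-b", Latency: 500 * time.Millisecond},
		{Name: "pod-c", Latency: time.Second},
	}

	// ExtractLatencyMetrics indexes percentiles positionally, so the slice
	// must be sorted ascending first; LatencySlice provides that ordering.
	sort.Sort(e2emetrics.LatencySlice(samples))
	return e2emetrics.ExtractLatencyMetrics(samples)
}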
57
vendor/k8s.io/kubernetes/test/e2e/framework/metrics/metrics_grabber.go
generated
vendored
@ -17,13 +17,17 @@ limitations under the License.
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
"k8s.io/kubernetes/pkg/master/ports"
|
||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||
"k8s.io/kubernetes/test/e2e/system"
|
||||
|
||||
"k8s.io/klog"
|
||||
@ -40,22 +44,23 @@ type Collection struct {
|
||||
|
||||
// Grabber provides functions which grab metrics from components
|
||||
type Grabber struct {
|
||||
client clientset.Interface
|
||||
externalClient clientset.Interface
|
||||
grabFromAPIServer bool
|
||||
grabFromControllerManager bool
|
||||
grabFromKubelets bool
|
||||
grabFromScheduler bool
|
||||
grabFromClusterAutoscaler bool
|
||||
masterName string
|
||||
registeredMaster bool
|
||||
client clientset.Interface
|
||||
externalClient clientset.Interface
|
||||
grabFromAPIServer bool
|
||||
grabFromControllerManager bool
|
||||
grabFromKubelets bool
|
||||
grabFromScheduler bool
|
||||
grabFromClusterAutoscaler bool
|
||||
masterName string
|
||||
registeredMaster bool
|
||||
waitForControllerManagerReadyOnce sync.Once
|
||||
}
|
||||
|
||||
// NewMetricsGrabber returns new metrics which are initialized.
|
||||
func NewMetricsGrabber(c clientset.Interface, ec clientset.Interface, kubelets bool, scheduler bool, controllers bool, apiServer bool, clusterAutoscaler bool) (*Grabber, error) {
|
||||
registeredMaster := false
|
||||
masterName := ""
|
||||
nodeList, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
|
||||
nodeList, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -100,7 +105,7 @@ func (g *Grabber) HasRegisteredMaster() bool {
|
||||
|
||||
// GrabFromKubelet returns metrics from kubelet
|
||||
func (g *Grabber) GrabFromKubelet(nodeName string) (KubeletMetrics, error) {
|
||||
nodes, err := g.client.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{api.ObjectNameField: nodeName}.AsSelector().String()})
|
||||
nodes, err := g.client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{FieldSelector: fields.Set{"metadata.name": nodeName}.AsSelector().String()})
|
||||
if err != nil {
|
||||
return KubeletMetrics{}, err
|
||||
}
|
||||
@ -160,7 +165,29 @@ func (g *Grabber) GrabFromControllerManager() (ControllerManagerMetrics, error)
|
||||
if !g.registeredMaster {
|
||||
return ControllerManagerMetrics{}, fmt.Errorf("Master's Kubelet is not registered. Skipping ControllerManager's metrics gathering")
|
||||
}
|
||||
output, err := g.getMetricsFromPod(g.client, fmt.Sprintf("%v-%v", "kube-controller-manager", g.masterName), metav1.NamespaceSystem, ports.InsecureKubeControllerManagerPort)
|
||||
|
||||
var err error
|
||||
podName := fmt.Sprintf("%v-%v", "kube-controller-manager", g.masterName)
|
||||
g.waitForControllerManagerReadyOnce.Do(func() {
|
||||
if readyErr := e2epod.WaitForPodsReady(g.client, metav1.NamespaceSystem, podName, 0); readyErr != nil {
|
||||
err = fmt.Errorf("error waiting for controller manager pod to be ready: %w", readyErr)
|
||||
return
|
||||
}
|
||||
|
||||
var lastMetricsFetchErr error
|
||||
if metricsWaitErr := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
|
||||
_, lastMetricsFetchErr = g.getMetricsFromPod(g.client, podName, metav1.NamespaceSystem, ports.InsecureKubeControllerManagerPort)
|
||||
return lastMetricsFetchErr == nil, nil
|
||||
}); metricsWaitErr != nil {
|
||||
err = fmt.Errorf("error waiting for controller manager pod to expose metrics: %v; %v", metricsWaitErr, lastMetricsFetchErr)
|
||||
return
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
return ControllerManagerMetrics{}, err
|
||||
}
|
||||
|
||||
output, err := g.getMetricsFromPod(g.client, podName, metav1.NamespaceSystem, ports.InsecureKubeControllerManagerPort)
|
||||
if err != nil {
|
||||
return ControllerManagerMetrics{}, err
|
||||
}
|
||||
@ -214,7 +241,7 @@ func (g *Grabber) Grab() (Collection, error) {
|
||||
}
|
||||
if g.grabFromKubelets {
|
||||
result.KubeletMetrics = make(map[string]KubeletMetrics)
|
||||
nodes, err := g.client.CoreV1().Nodes().List(metav1.ListOptions{})
|
||||
nodes, err := g.client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
} else {
|
||||
@ -241,7 +268,7 @@ func (g *Grabber) getMetricsFromPod(client clientset.Interface, podName string,
|
||||
SubResource("proxy").
|
||||
Name(fmt.Sprintf("%v:%v", podName, port)).
|
||||
Suffix("metrics").
|
||||
Do().Raw()
|
||||
Do(context.TODO()).Raw()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
44
vendor/k8s.io/kubernetes/test/e2e/framework/metrics/scheduling.go
generated
vendored
@ -1,44 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package metrics

// SchedulingMetrics is a struct for managing scheduling metrics.
type SchedulingMetrics struct {
	PredicateEvaluationLatency  LatencyMetric `json:"predicateEvaluationLatency"`
	PriorityEvaluationLatency   LatencyMetric `json:"priorityEvaluationLatency"`
	PreemptionEvaluationLatency LatencyMetric `json:"preemptionEvaluationLatency"`
	BindingLatency              LatencyMetric `json:"bindingLatency"`
	ThroughputAverage           float64       `json:"throughputAverage"`
	ThroughputPerc50            float64       `json:"throughputPerc50"`
	ThroughputPerc90            float64       `json:"throughputPerc90"`
	ThroughputPerc99            float64       `json:"throughputPerc99"`
}

// SummaryKind returns the summary of scheduling metrics.
func (l *SchedulingMetrics) SummaryKind() string {
	return "SchedulingMetrics"
}

// PrintHumanReadable returns scheduling metrics with JSON format.
func (l *SchedulingMetrics) PrintHumanReadable() string {
	return PrettyPrintJSON(l)
}

// PrintJSON returns scheduling metrics with JSON format.
func (l *SchedulingMetrics) PrintJSON() string {
	return PrettyPrintJSON(l)
}
5
vendor/k8s.io/kubernetes/test/e2e/framework/node/BUILD
generated
vendored
@ -4,13 +4,14 @@ go_library(
    name = "go_default_library",
    srcs = [
        "resource.go",
        "runtimeclass.go",
        "wait.go",
    ],
    importpath = "k8s.io/kubernetes/test/e2e/framework/node",
    visibility = ["//visibility:public"],
    deps = [
        "//pkg/apis/core/v1/helper:go_default_library",
        "//pkg/controller/nodelifecycle:go_default_library",
        "//pkg/scheduler/algorithm/predicates:go_default_library",
        "//pkg/scheduler/nodeinfo:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@ -22,6 +23,8 @@ go_library(
        "//test/e2e/framework/log:go_default_library",
        "//test/e2e/system:go_default_library",
        "//test/utils:go_default_library",
        "//test/utils/image:go_default_library",
        "//vendor/k8s.io/utils/pointer:go_default_library",
    ],
)
109
vendor/k8s.io/kubernetes/test/e2e/framework/node/resource.go
generated
vendored
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package node
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"strings"
|
||||
@ -24,17 +25,14 @@ import (
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/util/rand"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
|
||||
nodectlr "k8s.io/kubernetes/pkg/controller/nodelifecycle"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
|
||||
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
"k8s.io/kubernetes/test/e2e/system"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -43,7 +41,6 @@ const (
|
||||
|
||||
// singleCallTimeout is how long to try single API calls (like 'get' or 'list'). Used to prevent
|
||||
// transient failures from failing tests.
|
||||
// TODO: client should not apply this timeout to Watch calls. Increased from 30s until that is fixed.
|
||||
singleCallTimeout = 5 * time.Minute
|
||||
|
||||
// ssh port
|
||||
@ -59,21 +56,17 @@ type PodNode struct {
|
||||
}
|
||||
|
||||
// FirstAddress returns the first address of the given type of each node.
|
||||
// TODO: Use return type string instead of []string
|
||||
func FirstAddress(nodelist *v1.NodeList, addrType v1.NodeAddressType) []string {
|
||||
hosts := []string{}
|
||||
func FirstAddress(nodelist *v1.NodeList, addrType v1.NodeAddressType) string {
|
||||
for _, n := range nodelist.Items {
|
||||
for _, addr := range n.Status.Addresses {
|
||||
if addr.Type == addrType && addr.Address != "" {
|
||||
hosts = append(hosts, addr.Address)
|
||||
break
|
||||
return addr.Address
|
||||
}
|
||||
}
|
||||
}
|
||||
return hosts
|
||||
return ""
|
||||
}
|
||||
|
||||
// TODO: better to change to a easy read name
|
||||
func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionType, wantTrue, silent bool) bool {
|
||||
// Check the node readiness condition (logging all).
|
||||
for _, cond := range node.Status.Conditions {
|
||||
@ -144,8 +137,8 @@ func IsConditionSetAsExpectedSilent(node *v1.Node, conditionType v1.NodeConditio
|
||||
return isNodeConditionSetAsExpected(node, conditionType, wantTrue, true)
|
||||
}
|
||||
|
||||
// IsConditionUnset returns true if conditions of the given node do not have a match to the given conditionType, otherwise false.
|
||||
func IsConditionUnset(node *v1.Node, conditionType v1.NodeConditionType) bool {
|
||||
// isConditionUnset returns true if conditions of the given node do not have a match to the given conditionType, otherwise false.
|
||||
func isConditionUnset(node *v1.Node, conditionType v1.NodeConditionType) bool {
|
||||
for _, cond := range node.Status.Conditions {
|
||||
if cond.Type == conditionType {
|
||||
return false
|
||||
@ -156,7 +149,6 @@ func IsConditionUnset(node *v1.Node, conditionType v1.NodeConditionType) bool {
|
||||
|
||||
// Filter filters nodes in NodeList in place, removing nodes that do not
|
||||
// satisfy the given condition
|
||||
// TODO: consider merging with pkg/client/cache.NodeLister
|
||||
func Filter(nodeList *v1.NodeList, fn func(node v1.Node) bool) {
|
||||
var l []v1.Node
|
||||
|
||||
@ -193,63 +185,6 @@ func TotalReady(c clientset.Interface) (int, error) {
|
||||
return len(nodes.Items), nil
|
||||
}
|
||||
|
||||
// getSvcNodePort returns the node port for the given service:port.
|
||||
func getSvcNodePort(client clientset.Interface, ns, name string, svcPort int) (int, error) {
|
||||
svc, err := client.CoreV1().Services(ns).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
for _, p := range svc.Spec.Ports {
|
||||
if p.Port == int32(svcPort) {
|
||||
if p.NodePort != 0 {
|
||||
return int(p.NodePort), nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return 0, fmt.Errorf(
|
||||
"No node port found for service %v, port %v", name, svcPort)
|
||||
}
|
||||
|
||||
// GetPortURL returns the url to a nodeport Service.
|
||||
func GetPortURL(client clientset.Interface, ns, name string, svcPort int) (string, error) {
|
||||
nodePort, err := getSvcNodePort(client, ns, name, svcPort)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
// This list of nodes must not include the master, which is marked
|
||||
// unschedulable, since the master doesn't run kube-proxy. Without
|
||||
// kube-proxy NodePorts won't work.
|
||||
var nodes *v1.NodeList
|
||||
if wait.PollImmediate(poll, singleCallTimeout, func() (bool, error) {
|
||||
nodes, err = client.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
|
||||
"spec.unschedulable": "false",
|
||||
}.AsSelector().String()})
|
||||
if err != nil {
|
||||
if testutils.IsRetryableAPIError(err) {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
}) != nil {
|
||||
return "", err
|
||||
}
|
||||
if len(nodes.Items) == 0 {
|
||||
return "", fmt.Errorf("Unable to list nodes in cluster")
|
||||
}
|
||||
for _, node := range nodes.Items {
|
||||
for _, address := range node.Status.Addresses {
|
||||
if address.Type == v1.NodeExternalIP {
|
||||
if address.Address != "" {
|
||||
host := net.JoinHostPort(address.Address, fmt.Sprint(nodePort))
|
||||
return fmt.Sprintf("http://%s", host), nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("Failed to find external address for service %v", name)
|
||||
}
|
||||
|
||||
// GetExternalIP returns node external IP concatenated with port 22 for ssh
|
||||
// e.g. 1.2.3.4:22
|
||||
func GetExternalIP(node *v1.Node) (string, error) {
|
||||
@ -271,11 +206,9 @@ func GetExternalIP(node *v1.Node) (string, error) {
|
||||
func GetInternalIP(node *v1.Node) (string, error) {
|
||||
host := ""
|
||||
for _, address := range node.Status.Addresses {
|
||||
if address.Type == v1.NodeInternalIP {
|
||||
if address.Address != "" {
|
||||
host = net.JoinHostPort(address.Address, sshPort)
|
||||
break
|
||||
}
|
||||
if address.Type == v1.NodeInternalIP && address.Address != "" {
|
||||
host = net.JoinHostPort(address.Address, sshPort)
|
||||
break
|
||||
}
|
||||
}
|
||||
if host == "" {
|
||||
@ -336,14 +269,13 @@ func GetPublicIps(c clientset.Interface) ([]string, error) {
|
||||
// 2) Needs to be ready.
|
||||
// If EITHER 1 or 2 is not true, most tests will want to ignore the node entirely.
|
||||
// If there are no nodes that are both ready and schedulable, this will return an error.
|
||||
// TODO: remove references in framework/util.go.
|
||||
func GetReadySchedulableNodes(c clientset.Interface) (nodes *v1.NodeList, err error) {
|
||||
nodes, err = checkWaitListSchedulableNodes(c)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("listing schedulable nodes error: %s", err)
|
||||
}
|
||||
Filter(nodes, func(node v1.Node) bool {
|
||||
return IsNodeSchedulable(&node) && IsNodeUntainted(&node)
|
||||
return IsNodeSchedulable(&node) && isNodeUntainted(&node)
|
||||
})
|
||||
if len(nodes.Items) == 0 {
|
||||
return nil, fmt.Errorf("there are currently no ready, schedulable nodes in the cluster")
|
||||
@ -401,23 +333,23 @@ func GetReadyNodesIncludingTainted(c clientset.Interface) (nodes *v1.NodeList, e
|
||||
func GetMasterAndWorkerNodes(c clientset.Interface) (sets.String, *v1.NodeList, error) {
|
||||
nodes := &v1.NodeList{}
|
||||
masters := sets.NewString()
|
||||
all, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
|
||||
all, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("get nodes error: %s", err)
|
||||
}
|
||||
for _, n := range all.Items {
|
||||
if system.DeprecatedMightBeMasterNode(n.Name) {
|
||||
masters.Insert(n.Name)
|
||||
} else if IsNodeSchedulable(&n) && IsNodeUntainted(&n) {
|
||||
} else if IsNodeSchedulable(&n) && isNodeUntainted(&n) {
|
||||
nodes.Items = append(nodes.Items, n)
|
||||
}
|
||||
}
|
||||
return masters, nodes, nil
|
||||
}
|
||||
|
||||
// IsNodeUntainted tests whether a fake pod can be scheduled on "node", given its current taints.
|
||||
// isNodeUntainted tests whether a fake pod can be scheduled on "node", given its current taints.
|
||||
// TODO: need to discuss wether to return bool and error type
|
||||
func IsNodeUntainted(node *v1.Node) bool {
|
||||
func isNodeUntainted(node *v1.Node) bool {
|
||||
return isNodeUntaintedWithNonblocking(node, "")
|
||||
}
|
||||
|
||||
@ -466,12 +398,15 @@ func isNodeUntaintedWithNonblocking(node *v1.Node, nonblockingTaints string) boo
|
||||
nodeInfo.SetNode(node)
|
||||
}
|
||||
|
||||
fit, _, err := predicates.PodToleratesNodeTaints(fakePod, nil, nodeInfo)
|
||||
taints, err := nodeInfo.Taints()
|
||||
if err != nil {
|
||||
e2elog.Failf("Can't test predicates for node %s: %v", node.Name, err)
|
||||
return false
|
||||
}
|
||||
return fit
|
||||
|
||||
return v1helper.TolerationsTolerateTaintsWithFilter(fakePod.Spec.Tolerations, taints, func(t *v1.Taint) bool {
|
||||
return t.Effect == v1.TaintEffectNoExecute || t.Effect == v1.TaintEffectNoSchedule
|
||||
})
|
||||
}
|
||||
|
||||
// IsNodeSchedulable returns true if:
|
||||
@ -489,7 +424,7 @@ func IsNodeSchedulable(node *v1.Node) bool {
|
||||
// 2) doesn't have NetworkUnavailable condition set to true
|
||||
func IsNodeReady(node *v1.Node) bool {
|
||||
nodeReady := IsConditionSetAsExpected(node, v1.NodeReady, true)
|
||||
networkReady := IsConditionUnset(node, v1.NodeNetworkUnavailable) ||
|
||||
networkReady := isConditionUnset(node, v1.NodeNetworkUnavailable) ||
|
||||
IsConditionSetAsExpectedSilent(node, v1.NodeNetworkUnavailable, false)
|
||||
return nodeReady && networkReady
|
||||
}
|
||||
@ -522,7 +457,7 @@ func hasNonblockingTaint(node *v1.Node, nonblockingTaints string) bool {
|
||||
func PodNodePairs(c clientset.Interface, ns string) ([]PodNode, error) {
|
||||
var result []PodNode
|
||||
|
||||
podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{})
|
||||
podList, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return result, err
|
||||
}
|
||||
|
56
vendor/k8s.io/kubernetes/test/e2e/framework/node/runtimeclass.go
generated
vendored
Normal file
@ -0,0 +1,56 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package node

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	imageutils "k8s.io/kubernetes/test/utils/image"
	utilpointer "k8s.io/utils/pointer"
)

// PreconfiguredRuntimeClassHandler returns configured runtime handler.
func PreconfiguredRuntimeClassHandler(handler string) string {
	if handler == "docker" {
		return handler
	}

	// test-handler is the name of the runtime handler that is expected to be
	// preconfigured in the test environment.
	return "test-handler"
}

// NewRuntimeClassPod returns a test pod with the given runtimeClassName
func NewRuntimeClassPod(runtimeClassName string) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: fmt.Sprintf("test-runtimeclass-%s-", runtimeClassName),
		},
		Spec: v1.PodSpec{
			RuntimeClassName: &runtimeClassName,
			Containers: []v1.Container{{
				Name:    "test",
				Image:   imageutils.GetE2EImage(imageutils.BusyBox),
				Command: []string{"true"},
			}},
			RestartPolicy:                v1.RestartPolicyNever,
			AutomountServiceAccountToken: utilpointer.BoolPtr(false),
		},
	}
}
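Illustrative sketch combining the two helpers above; the clientset, the namespace, and the assumption that a RuntimeClass object named after the handler exists are hypothetical.

package nodesketch

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

func createRuntimeClassPod(c clientset.Interface, containerRuntime string) error {
	// "docker" is passed through unchanged; every other runtime is expected
	// to have the preconfigured "test-handler" available.
	handler := e2enode.PreconfiguredRuntimeClassHandler(containerRuntime)

	// Simplification: assumes a RuntimeClass named after the handler exists.
	pod := e2enode.NewRuntimeClassPod(handler)

	// Create uses the context-aware v1.18 signature, as elsewhere in this diff.
	_, err := c.CoreV1().Pods("e2e-runtimeclass-example").Create(context.TODO(), pod, metav1.CreateOptions{})
	return err
}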
15
vendor/k8s.io/kubernetes/test/e2e/framework/node/wait.go
generated
vendored
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package node
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"time"
|
||||
@ -56,7 +57,7 @@ func WaitForTotalHealthy(c clientset.Interface, timeout time.Duration) error {
|
||||
err := wait.PollImmediate(poll, timeout, func() (bool, error) {
|
||||
notReady = nil
|
||||
// It should be OK to list unschedulable Nodes here.
|
||||
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{ResourceVersion: "0"})
|
||||
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{ResourceVersion: "0"})
|
||||
if err != nil {
|
||||
if testutils.IsRetryableAPIError(err) {
|
||||
return false, nil
|
||||
@ -68,7 +69,7 @@ func WaitForTotalHealthy(c clientset.Interface, timeout time.Duration) error {
|
||||
notReady = append(notReady, node)
|
||||
}
|
||||
}
|
||||
pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{ResourceVersion: "0"})
|
||||
pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{ResourceVersion: "0"})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@ -122,7 +123,7 @@ func WaitForTotalHealthy(c clientset.Interface, timeout time.Duration) error {
|
||||
func WaitConditionToBe(c clientset.Interface, name string, conditionType v1.NodeConditionType, wantTrue bool, timeout time.Duration) bool {
|
||||
e2elog.Logf("Waiting up to %v for node %s condition %s to be %t", timeout, name, conditionType, wantTrue)
|
||||
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
|
||||
node, err := c.CoreV1().Nodes().Get(name, metav1.GetOptions{})
|
||||
node, err := c.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
e2elog.Logf("Couldn't get node %s", name)
|
||||
continue
|
||||
@ -163,7 +164,7 @@ func CheckReady(c clientset.Interface, size int, timeout time.Duration) ([]v1.No
|
||||
// Filter out not-ready nodes.
|
||||
Filter(nodes, func(node v1.Node) bool {
|
||||
nodeReady := IsConditionSetAsExpected(&node, v1.NodeReady, true)
|
||||
networkReady := IsConditionUnset(&node, v1.NodeNetworkUnavailable) || IsConditionSetAsExpected(&node, v1.NodeNetworkUnavailable, false)
|
||||
networkReady := isConditionUnset(&node, v1.NodeNetworkUnavailable) || IsConditionSetAsExpected(&node, v1.NodeNetworkUnavailable, false)
|
||||
return nodeReady && networkReady
|
||||
})
|
||||
numReady := len(nodes.Items)
|
||||
@ -182,7 +183,7 @@ func waitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) {
|
||||
var nodes *v1.NodeList
|
||||
var err error
|
||||
if wait.PollImmediate(poll, singleCallTimeout, func() (bool, error) {
|
||||
nodes, err = c.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
|
||||
nodes, err = c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{FieldSelector: fields.Set{
|
||||
"spec.unschedulable": "false",
|
||||
}.AsSelector().String()})
|
||||
if err != nil {
|
||||
@ -219,7 +220,7 @@ func CheckReadyForTests(c clientset.Interface, nonblockingTaints string, allowed
|
||||
ResourceVersion: "0",
|
||||
FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector().String(),
|
||||
}
|
||||
nodes, err := c.CoreV1().Nodes().List(opts)
|
||||
nodes, err := c.CoreV1().Nodes().List(context.TODO(), opts)
|
||||
if err != nil {
|
||||
e2elog.Logf("Unexpected error listing nodes: %v", err)
|
||||
if testutils.IsRetryableAPIError(err) {
|
||||
@ -273,7 +274,7 @@ func readyForTests(node *v1.Node, nonblockingTaints string) bool {
|
||||
return false
|
||||
}
|
||||
} else {
|
||||
if !IsNodeSchedulable(node) || !IsNodeUntainted(node) {
|
||||
if !IsNodeSchedulable(node) || !isNodeUntainted(node) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
22
vendor/k8s.io/kubernetes/test/e2e/framework/nodes_util.go
generated
vendored
@ -47,12 +47,12 @@ func EtcdUpgrade(targetStorage, targetVersion string) error {
|
||||
}
|
||||
|
||||
// MasterUpgrade upgrades master node on GCE/GKE.
|
||||
func MasterUpgrade(v string) error {
|
||||
func MasterUpgrade(f *Framework, v string) error {
|
||||
switch TestContext.Provider {
|
||||
case "gce":
|
||||
return masterUpgradeGCE(v, false)
|
||||
case "gke":
|
||||
return masterUpgradeGKE(v)
|
||||
return masterUpgradeGKE(f.Namespace.Name, v)
|
||||
case "kubernetes-anywhere":
|
||||
return masterUpgradeKubernetesAnywhere(v)
|
||||
default:
|
||||
@ -113,7 +113,7 @@ func appendContainerCommandGroupIfNeeded(args []string) []string {
|
||||
return args
|
||||
}
|
||||
|
||||
func masterUpgradeGKE(v string) error {
|
||||
func masterUpgradeGKE(namespace string, v string) error {
|
||||
Logf("Upgrading master to %q", v)
|
||||
args := []string{
|
||||
"container",
|
||||
@ -131,7 +131,7 @@ func masterUpgradeGKE(v string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
waitForSSHTunnels()
|
||||
waitForSSHTunnels(namespace)
|
||||
|
||||
return nil
|
||||
}
|
||||
@ -181,7 +181,7 @@ func NodeUpgrade(f *Framework, v string, img string) error {
|
||||
case "gce":
|
||||
err = nodeUpgradeGCE(v, img, false)
|
||||
case "gke":
|
||||
err = nodeUpgradeGKE(v, img)
|
||||
err = nodeUpgradeGKE(f.Namespace.Name, v, img)
|
||||
default:
|
||||
err = fmt.Errorf("NodeUpgrade() is not implemented for provider %s", TestContext.Provider)
|
||||
}
|
||||
@ -230,7 +230,7 @@ func nodeUpgradeGCE(rawV, img string, enableKubeProxyDaemonSet bool) error {
|
||||
return err
|
||||
}
|
||||
|
||||
func nodeUpgradeGKE(v string, img string) error {
|
||||
func nodeUpgradeGKE(namespace string, v string, img string) error {
|
||||
Logf("Upgrading nodes to version %q and image %q", v, img)
|
||||
nps, err := nodePoolsGKE()
|
||||
if err != nil {
|
||||
@ -258,7 +258,7 @@ func nodeUpgradeGKE(v string, img string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
waitForSSHTunnels()
|
||||
waitForSSHTunnels(namespace)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@ -290,18 +290,18 @@ func gceUpgradeScript() string {
|
||||
return TestContext.GCEUpgradeScript
|
||||
}
|
||||
|
||||
func waitForSSHTunnels() {
|
||||
func waitForSSHTunnels(namespace string) {
|
||||
Logf("Waiting for SSH tunnels to establish")
|
||||
RunKubectl("run", "ssh-tunnel-test",
|
||||
RunKubectl(namespace, "run", "ssh-tunnel-test",
|
||||
"--image=busybox",
|
||||
"--restart=Never",
|
||||
"--command", "--",
|
||||
"echo", "Hello")
|
||||
defer RunKubectl("delete", "pod", "ssh-tunnel-test")
|
||||
defer RunKubectl(namespace, "delete", "pod", "ssh-tunnel-test")
|
||||
|
||||
// allow up to a minute for new ssh tunnels to establish
|
||||
wait.PollImmediate(5*time.Second, time.Minute, func() (bool, error) {
|
||||
_, err := RunKubectl("logs", "ssh-tunnel-test")
|
||||
_, err := RunKubectl(namespace, "logs", "ssh-tunnel-test")
|
||||
return err == nil, nil
|
||||
})
|
||||
}
|
||||
|
16
vendor/k8s.io/kubernetes/test/e2e/framework/pod/BUILD
generated
vendored
@ -1,4 +1,4 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
@ -14,7 +14,6 @@ go_library(
    deps = [
        "//pkg/api/v1/pod:go_default_library",
        "//pkg/client/conditions:go_default_library",
        "//pkg/controller:go_default_library",
        "//pkg/kubelet/types:go_default_library",
        "//pkg/kubelet/util/format:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
@ -23,7 +22,6 @@ go_library(
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
@ -49,3 +47,15 @@ filegroup(
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

go_test(
    name = "go_default_test",
    srcs = ["resource_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
    ],
)
53
vendor/k8s.io/kubernetes/test/e2e/framework/pod/create.go
generated
vendored
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package pod
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
@ -24,7 +25,6 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
|
||||
@ -33,44 +33,10 @@ var (
|
||||
BusyBoxImage = imageutils.GetE2EImage(imageutils.BusyBox)
|
||||
)
|
||||
|
||||
// CreateWaitAndDeletePod creates the test pod, wait for (hopefully) success, and then delete the pod.
|
||||
// Note: need named return value so that the err assignment in the defer sets the returned error.
|
||||
// Has been shown to be necessary using Go 1.7.
|
||||
func CreateWaitAndDeletePod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, command string) (err error) {
|
||||
e2elog.Logf("Creating nfs test pod")
|
||||
pod := MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, command)
|
||||
runPod, err := c.CoreV1().Pods(ns).Create(pod)
|
||||
if err != nil {
|
||||
return fmt.Errorf("pod Create API error: %v", err)
|
||||
}
|
||||
defer func() {
|
||||
delErr := DeletePodWithWait(c, runPod)
|
||||
if err == nil { // don't override previous err value
|
||||
err = delErr // assign to returned err, can be nil
|
||||
}
|
||||
}()
|
||||
|
||||
err = testPodSuccessOrFail(c, ns, runPod)
|
||||
if err != nil {
|
||||
return fmt.Errorf("pod %q did not exit with Success: %v", runPod.Name, err)
|
||||
}
|
||||
return // note: named return value
|
||||
}
|
||||
|
||||
// testPodSuccessOrFail tests whether the pod's exit code is zero.
|
||||
func testPodSuccessOrFail(c clientset.Interface, ns string, pod *v1.Pod) error {
|
||||
e2elog.Logf("Pod should terminate with exitcode 0 (success)")
|
||||
if err := WaitForPodSuccessInNamespace(c, pod.Name, ns); err != nil {
|
||||
return fmt.Errorf("pod %q failed to reach Success: %v", pod.Name, err)
|
||||
}
|
||||
e2elog.Logf("Pod %v succeeded ", pod.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateUnschedulablePod with given claims based on node selector
|
||||
func CreateUnschedulablePod(client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) (*v1.Pod, error) {
|
||||
pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command)
|
||||
pod, err := client.CoreV1().Pods(namespace).Create(pod)
|
||||
pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("pod Create API error: %v", err)
|
||||
}
|
||||
@ -80,7 +46,7 @@ func CreateUnschedulablePod(client clientset.Interface, namespace string, nodeSe
|
||||
return pod, fmt.Errorf("pod %q is not Unschedulable: %v", pod.Name, err)
|
||||
}
|
||||
// get fresh pod info
|
||||
pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
|
||||
pod, err = client.CoreV1().Pods(namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return pod, fmt.Errorf("pod Get API error: %v", err)
|
||||
}
|
||||
@ -95,7 +61,7 @@ func CreateClientPod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeC
|
||||
// CreatePod with given claims based on node selector
|
||||
func CreatePod(client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) (*v1.Pod, error) {
|
||||
pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command)
|
||||
pod, err := client.CoreV1().Pods(namespace).Create(pod)
|
||||
pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("pod Create API error: %v", err)
|
||||
}
|
||||
@ -105,7 +71,7 @@ func CreatePod(client clientset.Interface, namespace string, nodeSelector map[st
|
||||
return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err)
|
||||
}
|
||||
// get fresh pod info
|
||||
pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
|
||||
pod, err = client.CoreV1().Pods(namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return pod, fmt.Errorf("pod Get API error: %v", err)
|
||||
}
|
||||
@ -120,12 +86,9 @@ func CreateSecPod(client clientset.Interface, namespace string, pvclaims []*v1.P
|
||||
// CreateSecPodWithNodeSelection creates security pod with given claims
|
||||
func CreateSecPodWithNodeSelection(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, inlineVolumeSources []*v1.VolumeSource, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64, node NodeSelection, timeout time.Duration) (*v1.Pod, error) {
|
||||
pod := MakeSecPod(namespace, pvclaims, inlineVolumeSources, isPrivileged, command, hostIPC, hostPID, seLinuxLabel, fsGroup)
|
||||
// Setting node
|
||||
pod.Spec.NodeName = node.Name
|
||||
pod.Spec.NodeSelector = node.Selector
|
||||
pod.Spec.Affinity = node.Affinity
|
||||
SetNodeSelection(&pod.Spec, node)
|
||||
|
||||
pod, err := client.CoreV1().Pods(namespace).Create(pod)
|
||||
pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("pod Create API error: %v", err)
|
||||
}
|
||||
@ -136,7 +99,7 @@ func CreateSecPodWithNodeSelection(client clientset.Interface, namespace string,
|
||||
return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err)
|
||||
}
|
||||
// get fresh pod info
|
||||
pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
|
||||
pod, err = client.CoreV1().Pods(namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return pod, fmt.Errorf("pod Get API error: %v", err)
|
||||
}
|
||||
|
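Illustrative sketch of the updated CreatePod helper above; the namespace, claim, and container command are hypothetical.

package podsketch

import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// runPodWithClaim creates a pod mounting the given claim, waits for it to be
// Running, and returns a freshly fetched copy (so scheduling details are populated).
func runPodWithClaim(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.Pod, error) {
	return e2epod.CreatePod(c, ns,
		nil,                              // nodeSelector
		[]*v1.PersistentVolumeClaim{pvc}, // claims to mount
		false,                            // isPrivileged
		"sleep 3600",                     // container command (hypothetical)
	)
}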
10
vendor/k8s.io/kubernetes/test/e2e/framework/pod/delete.go
generated
vendored
@ -17,13 +17,15 @@ limitations under the License.
package pod

import (
	"context"
	"fmt"
	"time"

	"github.com/onsi/ginkgo"

	v1 "k8s.io/api/core/v1"
	apierrs "k8s.io/apimachinery/pkg/api/errors"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)
@ -36,7 +38,7 @@ const (
// DeletePodOrFail deletes the pod of the specified namespace and name.
func DeletePodOrFail(c clientset.Interface, ns, name string) {
	ginkgo.By(fmt.Sprintf("Deleting pod %s in namespace %s", name, ns))
	err := c.CoreV1().Pods(ns).Delete(name, nil)
	err := c.CoreV1().Pods(ns).Delete(context.TODO(), name, metav1.DeleteOptions{})
	expectNoError(err, "failed to delete pod %s in namespace %s", name, ns)
}

@ -53,9 +55,9 @@ func DeletePodWithWait(c clientset.Interface, pod *v1.Pod) error {
// not existing.
func DeletePodWithWaitByName(c clientset.Interface, podName, podNamespace string) error {
	e2elog.Logf("Deleting pod %q in namespace %q", podName, podNamespace)
	err := c.CoreV1().Pods(podNamespace).Delete(podName, nil)
	err := c.CoreV1().Pods(podNamespace).Delete(context.TODO(), podName, metav1.DeleteOptions{})
	if err != nil {
		if apierrs.IsNotFound(err) {
		if apierrors.IsNotFound(err) {
			return nil // assume pod was already deleted
		}
		return fmt.Errorf("pod Delete API error: %v", err)
46
vendor/k8s.io/kubernetes/test/e2e/framework/pod/node_selection.go
generated
vendored
@ -48,6 +48,28 @@ func setNodeAffinityRequirement(nodeSelection *NodeSelection, operator v1.NodeSe
	})
}

// SetNodeAffinityTopologyRequirement sets node affinity to a specified topology
func SetNodeAffinityTopologyRequirement(nodeSelection *NodeSelection, topology map[string]string) {
	if nodeSelection.Affinity == nil {
		nodeSelection.Affinity = &v1.Affinity{}
	}
	if nodeSelection.Affinity.NodeAffinity == nil {
		nodeSelection.Affinity.NodeAffinity = &v1.NodeAffinity{}
	}
	if nodeSelection.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
		nodeSelection.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = &v1.NodeSelector{}
	}
	for k, v := range topology {
		nodeSelection.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms = append(nodeSelection.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms,
			v1.NodeSelectorTerm{
				MatchExpressions: []v1.NodeSelectorRequirement{
					{Key: k, Operator: v1.NodeSelectorOpIn, Values: []string{v}},
				},
			})

	}
}

// SetAffinity sets affinity to nodeName to nodeSelection
func SetAffinity(nodeSelection *NodeSelection, nodeName string) {
	setNodeAffinityRequirement(nodeSelection, v1.NodeSelectorOpIn, nodeName)
@ -57,3 +79,27 @@ func SetAffinity(nodeSelection *NodeSelection, nodeName string) {
func SetAntiAffinity(nodeSelection *NodeSelection, nodeName string) {
	setNodeAffinityRequirement(nodeSelection, v1.NodeSelectorOpNotIn, nodeName)
}

// SetNodeAffinity modifies the given pod object with
// NodeAffinity to the given node name.
func SetNodeAffinity(podSpec *v1.PodSpec, nodeName string) {
	nodeSelection := &NodeSelection{}
	SetAffinity(nodeSelection, nodeName)
	podSpec.Affinity = nodeSelection.Affinity
}

// SetNodeSelection modifies the given pod object with
// the specified NodeSelection
func SetNodeSelection(podSpec *v1.PodSpec, nodeSelection NodeSelection) {
	podSpec.NodeSelector = nodeSelection.Selector
	podSpec.Affinity = nodeSelection.Affinity
	// pod.Spec.NodeName should not be set directly because
	// it will bypass the scheduler, potentially causing
	// kubelet to Fail the pod immediately if it's out of
	// resources. Instead, we want the pod to remain
	// pending in the scheduler until the node has resources
	// freed up.
	if nodeSelection.Name != "" {
		SetNodeAffinity(podSpec, nodeSelection.Name)
	}
}
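Illustrative sketch of the node-selection helpers added above; the node name and topology label value are assumptions.

package podsketch

import (
	v1 "k8s.io/api/core/v1"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

func constrainPodSpec(spec *v1.PodSpec, nodeName string) {
	sel := e2epod.NodeSelection{Name: nodeName}

	// Restrict scheduling to a zone through node affinity.
	e2epod.SetNodeAffinityTopologyRequirement(&sel, map[string]string{
		"topology.kubernetes.io/zone": "zone-a", // hypothetical label value
	})

	// SetNodeSelection copies Selector/Affinity onto the spec and converts a
	// non-empty Name into node affinity instead of a hard NodeName binding,
	// so the scheduler still admits the pod.
	e2epod.SetNodeSelection(spec, sel)
}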
110
vendor/k8s.io/kubernetes/test/e2e/framework/pod/resource.go
generated
vendored
@@ -30,7 +30,6 @@ import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
@@ -87,7 +86,7 @@ func NewProxyResponseChecker(c clientset.Interface, ns string, label labels.Sele
func (r ProxyResponseChecker) CheckAllResponses() (done bool, err error) {
	successes := 0
	options := metav1.ListOptions{LabelSelector: r.label.String()}
	currentPods, err := r.c.CoreV1().Pods(r.ns).List(options)
	currentPods, err := r.c.CoreV1().Pods(r.ns).List(context.TODO(), options)
	expectNoError(err, "Failed to get list of currentPods in namespace: %s", r.ns)
	for i, pod := range r.pods.Items {
		// Check that the replica list remains unchanged, otherwise we have problems.
@@ -99,12 +98,11 @@ func (r ProxyResponseChecker) CheckAllResponses() (done bool, err error) {
		defer cancel()

		body, err := r.c.CoreV1().RESTClient().Get().
			Context(ctx).
			Namespace(r.ns).
			Resource("pods").
			SubResource("proxy").
			Name(string(pod.Name)).
			Do().
			Do(ctx).
			Raw()

		if err != nil {
@ -147,36 +145,9 @@ func (r ProxyResponseChecker) CheckAllResponses() (done bool, err error) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// CountRemainingPods queries the server to count number of remaining pods, and number of pods that had a missing deletion timestamp.
|
||||
func CountRemainingPods(c clientset.Interface, namespace string) (int, int, error) {
|
||||
// check for remaining pods
|
||||
pods, err := c.CoreV1().Pods(namespace).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
|
||||
// nothing remains!
|
||||
if len(pods.Items) == 0 {
|
||||
return 0, 0, nil
|
||||
}
|
||||
|
||||
// stuff remains, log about it
|
||||
LogPodStates(pods.Items)
|
||||
|
||||
// check if there were any pods with missing deletion timestamp
|
||||
numPods := len(pods.Items)
|
||||
missingTimestamp := 0
|
||||
for _, pod := range pods.Items {
|
||||
if pod.DeletionTimestamp == nil {
|
||||
missingTimestamp++
|
||||
}
|
||||
}
|
||||
return numPods, missingTimestamp, nil
|
||||
}
|
||||
|
||||
func podRunning(c clientset.Interface, podName, namespace string) wait.ConditionFunc {
|
||||
return func() (bool, error) {
|
||||
pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{})
|
||||
pod, err := c.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@ -192,7 +163,7 @@ func podRunning(c clientset.Interface, podName, namespace string) wait.Condition
|
||||
|
||||
func podCompleted(c clientset.Interface, podName, namespace string) wait.ConditionFunc {
|
||||
return func() (bool, error) {
|
||||
pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{})
|
||||
pod, err := c.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@ -206,23 +177,26 @@ func podCompleted(c clientset.Interface, podName, namespace string) wait.Conditi
|
||||
|
||||
func podRunningAndReady(c clientset.Interface, podName, namespace string) wait.ConditionFunc {
|
||||
return func() (bool, error) {
|
||||
pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{})
|
||||
pod, err := c.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
switch pod.Status.Phase {
|
||||
case v1.PodFailed, v1.PodSucceeded:
|
||||
e2elog.Logf("The status of Pod %s is %s which is unexpected", podName, pod.Status.Phase)
|
||||
return false, conditions.ErrPodCompleted
|
||||
case v1.PodRunning:
|
||||
e2elog.Logf("The status of Pod %s is %s (Ready = %v)", podName, pod.Status.Phase, podutil.IsPodReady(pod))
|
||||
return podutil.IsPodReady(pod), nil
|
||||
}
|
||||
e2elog.Logf("The status of Pod %s is %s, waiting for it to be Running (with Ready = true)", podName, pod.Status.Phase)
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
func podNotPending(c clientset.Interface, podName, namespace string) wait.ConditionFunc {
|
||||
return func() (bool, error) {
|
||||
pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{})
|
||||
pod, err := c.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@ -248,7 +222,7 @@ func PodsCreatedByLabel(c clientset.Interface, ns, name string, replicas int32,
|
||||
options := metav1.ListOptions{LabelSelector: label.String()}
|
||||
|
||||
// List the pods, making sure we observe all the replicas.
|
||||
pods, err := c.CoreV1().Pods(ns).List(options)
|
||||
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -321,15 +295,6 @@ func podsRunning(c clientset.Interface, pods *v1.PodList) []error {
|
||||
return e
|
||||
}
|
||||
|
||||
// DumpAllPodInfo logs basic info for all pods.
|
||||
func DumpAllPodInfo(c clientset.Interface) {
|
||||
pods, err := c.CoreV1().Pods("").List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
e2elog.Logf("unable to fetch pod debug info: %v", err)
|
||||
}
|
||||
LogPodStates(pods.Items)
|
||||
}
|
||||
|
||||
// LogPodStates logs basic info of provided pods for debugging.
|
||||
func LogPodStates(pods []v1.Pod) {
|
||||
// Find maximum widths for pod, node, and phase strings for column printing.
|
||||
@ -385,7 +350,7 @@ func logPodTerminationMessages(pods []v1.Pod) {
|
||||
|
||||
// DumpAllPodInfoForNamespace logs all pod information for a given namespace.
|
||||
func DumpAllPodInfoForNamespace(c clientset.Interface, namespace string) {
|
||||
pods, err := c.CoreV1().Pods(namespace).List(metav1.ListOptions{})
|
||||
pods, err := c.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
e2elog.Logf("unable to fetch pod debug info: %v", err)
|
||||
}
|
||||
@ -471,10 +436,10 @@ func CreateExecPodOrFail(client clientset.Interface, ns, generateName string, tw
|
||||
if tweak != nil {
|
||||
tweak(pod)
|
||||
}
|
||||
execPod, err := client.CoreV1().Pods(ns).Create(pod)
|
||||
execPod, err := client.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
|
||||
expectNoError(err, "failed to create new exec pod in namespace: %s", ns)
|
||||
err = wait.PollImmediate(poll, 5*time.Minute, func() (bool, error) {
|
||||
retrievedPod, err := client.CoreV1().Pods(execPod.Namespace).Get(execPod.Name, metav1.GetOptions{})
|
||||
retrievedPod, err := client.CoreV1().Pods(execPod.Namespace).Get(context.TODO(), execPod.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
if testutils.IsRetryableAPIError(err) {
|
||||
return false, nil
|
||||
@ -532,7 +497,6 @@ func checkPodsCondition(c clientset.Interface, ns string, podNames []string, tim
|
||||
}
|
||||
|
||||
// GetPodLogs returns the logs of the specified container (namespace/pod/container).
|
||||
// TODO(random-liu): Change this to be a member function of the framework.
|
||||
func GetPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) {
|
||||
return getPodLogsInternal(c, namespace, podName, containerName, false)
|
||||
}
|
||||
@ -551,12 +515,12 @@ func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName
|
||||
Name(podName).SubResource("log").
|
||||
Param("container", containerName).
|
||||
Param("previous", strconv.FormatBool(previous)).
|
||||
Do().
|
||||
Do(context.TODO()).
|
||||
Raw()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if err == nil && strings.Contains(string(logs), "Internal Error") {
|
||||
if strings.Contains(string(logs), "Internal Error") {
|
||||
return "", fmt.Errorf("Fetched log contains \"Internal Error\": %q", string(logs))
|
||||
}
|
||||
return string(logs), err
|
||||
@ -564,13 +528,14 @@ func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName
|
||||
|
||||
// GetPodsInNamespace returns the pods in the given namespace.
|
||||
func GetPodsInNamespace(c clientset.Interface, ns string, ignoreLabels map[string]string) ([]*v1.Pod, error) {
|
||||
pods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{})
|
||||
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return []*v1.Pod{}, err
|
||||
}
|
||||
ignoreSelector := labels.SelectorFromSet(ignoreLabels)
|
||||
filtered := []*v1.Pod{}
|
||||
for _, p := range pods.Items {
|
||||
var filtered []*v1.Pod
|
||||
for i := range pods.Items {
|
||||
p := pods.Items[i]
|
||||
if len(ignoreLabels) != 0 && ignoreSelector.Matches(labels.Set(p.Labels)) {
|
||||
continue
|
||||
}
|
||||
@ -578,40 +543,3 @@ func GetPodsInNamespace(c clientset.Interface, ns string, ignoreLabels map[strin
|
||||
}
|
||||
return filtered, nil
|
||||
}
|
||||
|
||||
// GetPodsScheduled returns a number of currently scheduled and not scheduled Pods.
|
||||
func GetPodsScheduled(masterNodes sets.String, pods *v1.PodList) (scheduledPods, notScheduledPods []v1.Pod) {
|
||||
for _, pod := range pods.Items {
|
||||
if !masterNodes.Has(pod.Spec.NodeName) {
|
||||
if pod.Spec.NodeName != "" {
|
||||
_, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
|
||||
gomega.Expect(scheduledCondition != nil).To(gomega.Equal(true))
|
||||
gomega.Expect(scheduledCondition.Status).To(gomega.Equal(v1.ConditionTrue))
|
||||
scheduledPods = append(scheduledPods, pod)
|
||||
} else {
|
||||
_, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
|
||||
gomega.Expect(scheduledCondition != nil).To(gomega.Equal(true))
|
||||
gomega.Expect(scheduledCondition.Status).To(gomega.Equal(v1.ConditionFalse))
|
||||
if scheduledCondition.Reason == "Unschedulable" {
|
||||
|
||||
notScheduledPods = append(notScheduledPods, pod)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// PatchContainerImages replaces the specified Container Registry with a custom
|
||||
// one provided via the KUBE_TEST_REPO_LIST env variable
|
||||
func PatchContainerImages(containers []v1.Container) error {
|
||||
var err error
|
||||
for _, c := range containers {
|
||||
c.Image, err = imageutils.ReplaceRegistryInImageURL(c.Image)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
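Most of the resource.go churn above follows the client-go v1.18 signature change: every clientset verb now takes a context.Context plus typed options. A minimal sketch of the before/after call shape, assuming an ordinary clientset value c:

```go
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// listPods shows the new call shape: List takes a context plus typed ListOptions.
// Before the bump this read c.CoreV1().Pods(ns).List(metav1.ListOptions{}).
func listPods(c clientset.Interface, ns string) error {
	_, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
	return err
}

// getPod shows the same change for Get; all clientset verbs gained a context argument.
func getPod(c clientset.Interface, ns, name string) error {
	_, err := c.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{})
	return err
}
```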
84
vendor/k8s.io/kubernetes/test/e2e/framework/pod/wait.go
generated
vendored
@@ -18,6 +18,7 @@ package pod

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"sync"
@@ -27,14 +28,13 @@ import (
	"github.com/onsi/ginkgo"

	v1 "k8s.io/api/core/v1"
	apierrs "k8s.io/apimachinery/pkg/api/errors"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
	"k8s.io/kubernetes/pkg/controller"
	"k8s.io/kubernetes/pkg/kubelet/util/format"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	e2eresource "k8s.io/kubernetes/test/e2e/framework/resource"
@ -54,18 +54,13 @@ const (
|
||||
podScheduledBeforeTimeout = podListTimeout + (20 * time.Second)
|
||||
|
||||
// podStartTimeout is how long to wait for the pod to be started.
|
||||
// Initial pod start can be delayed O(minutes) by slow docker pulls.
|
||||
// TODO: Make this 30 seconds once #4566 is resolved.
|
||||
podStartTimeout = 5 * time.Minute
|
||||
|
||||
// poll is how often to poll pods, nodes and claims.
|
||||
poll = 2 * time.Second
|
||||
pollShortTimeout = 1 * time.Minute
|
||||
pollLongTimeout = 5 * time.Minute
|
||||
poll = 2 * time.Second
|
||||
|
||||
// singleCallTimeout is how long to try single API calls (like 'get' or 'list'). Used to prevent
|
||||
// transient failures from failing tests.
|
||||
// TODO: client should not apply this timeout to Watch calls. Increased from 30s until that is fixed.
|
||||
singleCallTimeout = 5 * time.Minute
|
||||
|
||||
// Some pods can take much longer to get ready due to volume attach/detach latency.
|
||||
@ -126,7 +121,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
|
||||
// checked.
|
||||
replicas, replicaOk := int32(0), int32(0)
|
||||
|
||||
rcList, err := c.CoreV1().ReplicationControllers(ns).List(metav1.ListOptions{})
|
||||
rcList, err := c.CoreV1().ReplicationControllers(ns).List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
e2elog.Logf("Error getting replication controllers in namespace '%s': %v", ns, err)
|
||||
if testutils.IsRetryableAPIError(err) {
|
||||
@ -139,7 +134,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
|
||||
replicaOk += rc.Status.ReadyReplicas
|
||||
}
|
||||
|
||||
rsList, err := c.AppsV1().ReplicaSets(ns).List(metav1.ListOptions{})
|
||||
rsList, err := c.AppsV1().ReplicaSets(ns).List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
e2elog.Logf("Error getting replication sets in namespace %q: %v", ns, err)
|
||||
if testutils.IsRetryableAPIError(err) {
|
||||
@ -152,7 +147,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
|
||||
replicaOk += rs.Status.ReadyReplicas
|
||||
}
|
||||
|
||||
podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{})
|
||||
podList, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
e2elog.Logf("Error getting pods in namespace '%s': %v", ns, err)
|
||||
if testutils.IsRetryableAPIError(err) {
|
||||
@ -212,9 +207,9 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
|
||||
func WaitForPodCondition(c clientset.Interface, ns, podName, desc string, timeout time.Duration, condition podCondition) error {
|
||||
e2elog.Logf("Waiting up to %v for pod %q in namespace %q to be %q", timeout, podName, ns, desc)
|
||||
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
|
||||
pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
|
||||
pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
if apierrs.IsNotFound(err) {
|
||||
if apierrors.IsNotFound(err) {
|
||||
e2elog.Logf("Pod %q in namespace %q not found. Error: %v", podName, ns, err)
|
||||
return err
|
||||
}
|
||||
@ -254,8 +249,8 @@ func WaitForPodTerminatedInNamespace(c clientset.Interface, podName, reason, nam
|
||||
}
|
||||
|
||||
// waitForPodSuccessInNamespaceTimeout returns nil if the pod reached state success, or an error if it reached failure or ran too long.
|
||||
func waitForPodSuccessInNamespaceTimeout(c clientset.Interface, podName string, namespace string, timeout time.Duration) error {
|
||||
return WaitForPodCondition(c, namespace, podName, "success or failure", timeout, func(pod *v1.Pod) (bool, error) {
|
||||
func waitForPodSuccessInNamespaceTimeout(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
|
||||
return WaitForPodCondition(c, namespace, podName, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout, func(pod *v1.Pod) (bool, error) {
|
||||
if pod.Spec.RestartPolicy == v1.RestartPolicyAlways {
|
||||
return true, fmt.Errorf("pod %q will never terminate with a succeeded state since its restart policy is Always", podName)
|
||||
}
|
||||
@ -276,12 +271,12 @@ func waitForPodSuccessInNamespaceTimeout(c clientset.Interface, podName string,
|
||||
// if the pod Get api returns an error (IsNotFound or other), or if the pod failed with an unexpected reason.
|
||||
// Typically called to test that the passed-in pod is Pending and Unschedulable.
|
||||
func WaitForPodNameUnschedulableInNamespace(c clientset.Interface, podName, namespace string) error {
|
||||
return WaitForPodCondition(c, namespace, podName, "Unschedulable", podStartTimeout, func(pod *v1.Pod) (bool, error) {
|
||||
return WaitForPodCondition(c, namespace, podName, v1.PodReasonUnschedulable, podStartTimeout, func(pod *v1.Pod) (bool, error) {
|
||||
// Only consider Failed pods. Successful pods will be deleted and detected in
|
||||
// waitForPodCondition's Get call returning `IsNotFound`
|
||||
if pod.Status.Phase == v1.PodPending {
|
||||
for _, cond := range pod.Status.Conditions {
|
||||
if cond.Type == v1.PodScheduled && cond.Status == v1.ConditionFalse && cond.Reason == "Unschedulable" {
|
||||
if cond.Type == v1.PodScheduled && cond.Status == v1.ConditionFalse && cond.Reason == v1.PodReasonUnschedulable {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
@ -298,7 +293,7 @@ func WaitForPodNameUnschedulableInNamespace(c clientset.Interface, podName, name
|
||||
func WaitForMatchPodsCondition(c clientset.Interface, opts metav1.ListOptions, desc string, timeout time.Duration, condition podCondition) error {
|
||||
e2elog.Logf("Waiting up to %v for matching pods' status to be %s", timeout, desc)
|
||||
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
|
||||
pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(opts)
|
||||
pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), opts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -387,8 +382,8 @@ func WaitForPodSuccessInNamespaceSlow(c clientset.Interface, podName string, nam
|
||||
// than "not found" then that error is returned and the wait stops.
|
||||
func WaitForPodNotFoundInNamespace(c clientset.Interface, podName, ns string, timeout time.Duration) error {
|
||||
return wait.PollImmediate(poll, timeout, func() (bool, error) {
|
||||
_, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
|
||||
if apierrs.IsNotFound(err) {
|
||||
_, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
|
||||
if apierrors.IsNotFound(err) {
|
||||
return true, nil // done
|
||||
}
|
||||
if err != nil {
|
||||
@ -403,7 +398,7 @@ func WaitForPodToDisappear(c clientset.Interface, ns, podName string, label labe
|
||||
return wait.PollImmediate(interval, timeout, func() (bool, error) {
|
||||
e2elog.Logf("Waiting for pod %s to disappear", podName)
|
||||
options := metav1.ListOptions{LabelSelector: label.String()}
|
||||
pods, err := c.CoreV1().Pods(ns).List(options)
|
||||
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options)
|
||||
if err != nil {
|
||||
if testutils.IsRetryableAPIError(err) {
|
||||
return false, nil
|
||||
@ -490,7 +485,7 @@ func WaitForPodsWithLabelScheduled(c clientset.Interface, ns string, label label
|
||||
func WaitForPodsWithLabel(c clientset.Interface, ns string, label labels.Selector) (pods *v1.PodList, err error) {
|
||||
for t := time.Now(); time.Since(t) < podListTimeout; time.Sleep(poll) {
|
||||
options := metav1.ListOptions{LabelSelector: label.String()}
|
||||
pods, err = c.CoreV1().Pods(ns).List(options)
|
||||
pods, err = c.CoreV1().Pods(ns).List(context.TODO(), options)
|
||||
if err != nil {
|
||||
if testutils.IsRetryableAPIError(err) {
|
||||
continue
|
||||
@ -536,55 +531,12 @@ func WaitForPodsWithLabelRunningReady(c clientset.Interface, ns string, label la
|
||||
return pods, err
|
||||
}
|
||||
|
||||
// WaitForPodsInactive waits until there are no active pods left in the PodStore.
|
||||
// This is to make a fair comparison of deletion time between DeleteRCAndPods
|
||||
// and DeleteRCAndWaitForGC, because the RC controller decreases status.replicas
|
||||
// when the pod is inactive.
|
||||
func WaitForPodsInactive(ps *testutils.PodStore, interval, timeout time.Duration) error {
|
||||
var activePods []*v1.Pod
|
||||
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
|
||||
pods := ps.List()
|
||||
activePods = controller.FilterActivePods(pods)
|
||||
if len(activePods) != 0 {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
|
||||
if err == wait.ErrWaitTimeout {
|
||||
for _, pod := range activePods {
|
||||
e2elog.Logf("ERROR: Pod %q running on %q is still active", pod.Name, pod.Spec.NodeName)
|
||||
}
|
||||
return fmt.Errorf("there are %d active pods. E.g. %q on node %q", len(activePods), activePods[0].Name, activePods[0].Spec.NodeName)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// WaitForPodsGone waits until there are no pods left in the PodStore.
|
||||
func WaitForPodsGone(ps *testutils.PodStore, interval, timeout time.Duration) error {
|
||||
var pods []*v1.Pod
|
||||
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
|
||||
if pods = ps.List(); len(pods) == 0 {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
|
||||
if err == wait.ErrWaitTimeout {
|
||||
for _, pod := range pods {
|
||||
e2elog.Logf("ERROR: Pod %q still exists. Node: %q", pod.Name, pod.Spec.NodeName)
|
||||
}
|
||||
return fmt.Errorf("there are %d pods left. E.g. %q on node %q", len(pods), pods[0].Name, pods[0].Spec.NodeName)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// WaitForPodsReady waits for the pods to become ready.
|
||||
func WaitForPodsReady(c clientset.Interface, ns, name string, minReadySeconds int) error {
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
|
||||
options := metav1.ListOptions{LabelSelector: label.String()}
|
||||
return wait.Poll(poll, 5*time.Minute, func() (bool, error) {
|
||||
pods, err := c.CoreV1().Pods(ns).List(options)
|
||||
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options)
|
||||
if err != nil {
|
||||
return false, nil
|
||||
}
|
||||
|
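wait.go keeps its polling structure; the changes are the apierrs → apierrors alias rename and threading a context into the Get calls. A small sketch of that pattern under the new signatures (helper name and poll interval are assumptions):

```go
package example

import (
	"context"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// waitForPodGone polls until the pod is no longer found, mirroring the
// WaitForPodNotFoundInNamespace shape shown in the diff above.
func waitForPodGone(c clientset.Interface, ns, podName string, timeout time.Duration) error {
	return wait.PollImmediate(2*time.Second, timeout, func() (bool, error) {
		_, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			return true, nil // pod is gone, we are done
		}
		if err != nil {
			return false, err
		}
		return false, nil // pod still exists, keep polling
	})
}
```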
69
vendor/k8s.io/kubernetes/test/e2e/framework/pods.go
generated
vendored
@@ -17,13 +17,14 @@ limitations under the License.
package framework

import (
	"context"
	"fmt"
	"regexp"
	"sync"
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/sets"
@ -78,24 +79,20 @@ type PodClient struct {
|
||||
// Create creates a new pod according to the framework specifications (don't wait for it to start).
|
||||
func (c *PodClient) Create(pod *v1.Pod) *v1.Pod {
|
||||
c.mungeSpec(pod)
|
||||
p, err := c.PodInterface.Create(pod)
|
||||
p, err := c.PodInterface.Create(context.TODO(), pod, metav1.CreateOptions{})
|
||||
ExpectNoError(err, "Error creating Pod")
|
||||
return p
|
||||
}
|
||||
|
||||
// CreateSyncInNamespace creates a new pod according to the framework specifications in the given namespace, and waits for it to start.
|
||||
func (c *PodClient) CreateSyncInNamespace(pod *v1.Pod, namespace string) *v1.Pod {
|
||||
p := c.Create(pod)
|
||||
ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(c.f.ClientSet, p.Name, namespace))
|
||||
// Get the newest pod after it becomes running, some status may change after pod created, such as pod ip.
|
||||
p, err := c.Get(p.Name, metav1.GetOptions{})
|
||||
ExpectNoError(err)
|
||||
return p
|
||||
}
|
||||
|
||||
// CreateSync creates a new pod according to the framework specifications, and wait for it to start.
|
||||
func (c *PodClient) CreateSync(pod *v1.Pod) *v1.Pod {
|
||||
return c.CreateSyncInNamespace(pod, c.f.Namespace.Name)
|
||||
namespace := c.f.Namespace.Name
|
||||
p := c.Create(pod)
|
||||
ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(c.f.ClientSet, p.Name, namespace))
|
||||
// Get the newest pod after it becomes running, some status may change after pod created, such as pod ip.
|
||||
p, err := c.Get(context.TODO(), p.Name, metav1.GetOptions{})
|
||||
ExpectNoError(err)
|
||||
return p
|
||||
}
|
||||
|
||||
// CreateBatch create a batch of pods. All pods are created before waiting.
|
||||
@ -115,21 +112,21 @@ func (c *PodClient) CreateBatch(pods []*v1.Pod) []*v1.Pod {
|
||||
}
|
||||
|
||||
// Update updates the pod object. It retries if there is a conflict, throw out error if
|
||||
// there is any other errors. name is the pod name, updateFn is the function updating the
|
||||
// there is any other apierrors. name is the pod name, updateFn is the function updating the
|
||||
// pod object.
|
||||
func (c *PodClient) Update(name string, updateFn func(pod *v1.Pod)) {
|
||||
ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) {
|
||||
pod, err := c.PodInterface.Get(name, metav1.GetOptions{})
|
||||
pod, err := c.PodInterface.Get(context.TODO(), name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to get pod %q: %v", name, err)
|
||||
}
|
||||
updateFn(pod)
|
||||
_, err = c.PodInterface.Update(pod)
|
||||
_, err = c.PodInterface.Update(context.TODO(), pod, metav1.UpdateOptions{})
|
||||
if err == nil {
|
||||
Logf("Successfully updated pod %q", name)
|
||||
return true, nil
|
||||
}
|
||||
if errors.IsConflict(err) {
|
||||
if apierrors.IsConflict(err) {
|
||||
Logf("Conflicting update to pod %q, re-get and re-update: %v", name, err)
|
||||
return false, nil
|
||||
}
|
||||
@ -139,15 +136,10 @@ func (c *PodClient) Update(name string, updateFn func(pod *v1.Pod)) {
|
||||
|
||||
// DeleteSync deletes the pod and wait for the pod to disappear for `timeout`. If the pod doesn't
|
||||
// disappear before the timeout, it will fail the test.
|
||||
func (c *PodClient) DeleteSync(name string, options *metav1.DeleteOptions, timeout time.Duration) {
|
||||
c.DeleteSyncInNamespace(name, c.f.Namespace.Name, options, timeout)
|
||||
}
|
||||
|
||||
// DeleteSyncInNamespace deletes the pod from the namespace and wait for the pod to disappear for `timeout`. If the pod doesn't
|
||||
// disappear before the timeout, it will fail the test.
|
||||
func (c *PodClient) DeleteSyncInNamespace(name string, namespace string, options *metav1.DeleteOptions, timeout time.Duration) {
|
||||
err := c.Delete(name, options)
|
||||
if err != nil && !errors.IsNotFound(err) {
|
||||
func (c *PodClient) DeleteSync(name string, options metav1.DeleteOptions, timeout time.Duration) {
|
||||
namespace := c.f.Namespace.Name
|
||||
err := c.Delete(context.TODO(), name, options)
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
Failf("Failed to delete pod %q: %v", name, err)
|
||||
}
|
||||
gomega.Expect(e2epod.WaitForPodToDisappear(c.f.ClientSet, namespace, name, labels.Everything(),
|
||||
@ -194,7 +186,7 @@ func (c *PodClient) mungeSpec(pod *v1.Pod) {
|
||||
// TODO(random-liu): Move pod wait function into this file
|
||||
func (c *PodClient) WaitForSuccess(name string, timeout time.Duration) {
|
||||
f := c.f
|
||||
gomega.Expect(e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, "success or failure", timeout,
|
||||
gomega.Expect(e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout,
|
||||
func(pod *v1.Pod) (bool, error) {
|
||||
switch pod.Status.Phase {
|
||||
case v1.PodFailed:
|
||||
@ -208,27 +200,10 @@ func (c *PodClient) WaitForSuccess(name string, timeout time.Duration) {
|
||||
)).To(gomega.Succeed(), "wait for pod %q to success", name)
|
||||
}
|
||||
|
||||
// WaitForFailure waits for pod to fail.
|
||||
func (c *PodClient) WaitForFailure(name string, timeout time.Duration) {
|
||||
f := c.f
|
||||
gomega.Expect(e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, "success or failure", timeout,
|
||||
func(pod *v1.Pod) (bool, error) {
|
||||
switch pod.Status.Phase {
|
||||
case v1.PodFailed:
|
||||
return true, nil
|
||||
case v1.PodSucceeded:
|
||||
return true, fmt.Errorf("pod %q successed with reason: %q, message: %q", name, pod.Status.Reason, pod.Status.Message)
|
||||
default:
|
||||
return false, nil
|
||||
}
|
||||
},
|
||||
)).To(gomega.Succeed(), "wait for pod %q to fail", name)
|
||||
}
|
||||
|
||||
// WaitForFinish waits for pod to finish running, regardless of success or failure.
|
||||
func (c *PodClient) WaitForFinish(name string, timeout time.Duration) {
|
||||
f := c.f
|
||||
gomega.Expect(e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, "success or failure", timeout,
|
||||
gomega.Expect(e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout,
|
||||
func(pod *v1.Pod) (bool, error) {
|
||||
switch pod.Status.Phase {
|
||||
case v1.PodFailed:
|
||||
@ -252,7 +227,7 @@ func (c *PodClient) WaitForErrorEventOrSuccess(pod *v1.Pod) (*v1.Event, error) {
|
||||
}
|
||||
for _, e := range evnts.Items {
|
||||
switch e.Reason {
|
||||
case events.KillingContainer, events.FailedToCreateContainer, sysctl.UnsupportedReason, sysctl.ForbiddenReason:
|
||||
case events.KillingContainer, events.FailedToCreateContainer, sysctl.ForbiddenReason:
|
||||
ev = &e
|
||||
return true, nil
|
||||
case events.StartedContainer:
|
||||
@ -285,7 +260,7 @@ func (c *PodClient) MatchContainerOutput(name string, containerName string, expe
|
||||
|
||||
// PodIsReady returns true if the specified pod is ready. Otherwise false.
|
||||
func (c *PodClient) PodIsReady(name string) bool {
|
||||
pod, err := c.Get(name, metav1.GetOptions{})
|
||||
pod, err := c.Get(context.TODO(), name, metav1.GetOptions{})
|
||||
ExpectNoError(err)
|
||||
return podutil.IsPodReady(pod)
|
||||
}
|
||||
|
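PodClient.DeleteSync now takes metav1.DeleteOptions by value and always operates in the framework's namespace. A hypothetical call site under the updated signature might look like this (the f.PodClient() accessor and the timeout are assumed for the example):

```go
package example

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

// deleteTestPod shows the updated DeleteSync call: options are passed by value
// (metav1.DeleteOptions{}), and the pod is deleted in the framework's namespace.
func deleteTestPod(f *framework.Framework, name string) {
	f.PodClient().DeleteSync(name, metav1.DeleteOptions{}, 5*time.Minute)
}
```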
215
vendor/k8s.io/kubernetes/test/e2e/framework/profile_gatherer.go
generated
vendored
@ -1,215 +0,0 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
|
||||
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultCPUProfileSeconds is default value for how long the CPU profile is gathered for.
|
||||
DefaultCPUProfileSeconds = 30
|
||||
)
|
||||
|
||||
func getProfilesDirectoryPath() string {
|
||||
return path.Join(TestContext.ReportDir, "profiles")
|
||||
}
|
||||
|
||||
func createProfilesDirectoryIfNeeded() error {
|
||||
profileDirPath := getProfilesDirectoryPath()
|
||||
if _, err := os.Stat(profileDirPath); os.IsNotExist(err) {
|
||||
if mkdirErr := os.Mkdir(profileDirPath, 0777); mkdirErr != nil {
|
||||
return fmt.Errorf("Failed to create profiles dir: %v", mkdirErr)
|
||||
}
|
||||
} else if err != nil {
|
||||
return fmt.Errorf("Failed to check existence of profiles dir: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkProfileGatheringPrerequisites() error {
|
||||
if !TestContext.AllowGatheringProfiles {
|
||||
return fmt.Errorf("Can't gather profiles as --allow-gathering-profiles is false")
|
||||
}
|
||||
if TestContext.ReportDir == "" {
|
||||
return fmt.Errorf("Can't gather profiles as --report-dir is empty")
|
||||
}
|
||||
if err := createProfilesDirectoryIfNeeded(); err != nil {
|
||||
return fmt.Errorf("Failed to ensure profiles dir: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getPortForComponent(componentName string) (int, error) {
|
||||
switch componentName {
|
||||
case "kube-apiserver":
|
||||
return 8080, nil
|
||||
case "kube-scheduler":
|
||||
return 10251, nil
|
||||
case "kube-controller-manager":
|
||||
return 10252, nil
|
||||
}
|
||||
return -1, fmt.Errorf("Port for component %v unknown", componentName)
|
||||
}
|
||||
|
||||
// Gathers profiles from a master component through SSH. E.g usages:
|
||||
// - gatherProfile("kube-apiserver", "someTest", "heap")
|
||||
// - gatherProfile("kube-scheduler", "someTest", "profile")
|
||||
// - gatherProfile("kube-controller-manager", "someTest", "profile?seconds=20")
|
||||
//
|
||||
// We don't export this method but wrappers around it (see below).
|
||||
func gatherProfile(componentName, profileBaseName, profileKind string) error {
|
||||
if err := checkProfileGatheringPrerequisites(); err != nil {
|
||||
return fmt.Errorf("Profile gathering pre-requisite failed: %v", err)
|
||||
}
|
||||
profilePort, err := getPortForComponent(componentName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Profile gathering failed finding component port: %v", err)
|
||||
}
|
||||
if profileBaseName == "" {
|
||||
profileBaseName = time.Now().Format(time.RFC3339)
|
||||
}
|
||||
|
||||
// Get the profile data over SSH.
|
||||
getCommand := fmt.Sprintf("curl -s localhost:%v/debug/pprof/%s", profilePort, profileKind)
|
||||
sshResult, err := e2essh.SSH(getCommand, GetMasterHost()+":22", TestContext.Provider)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to execute curl command on master through SSH: %v", err)
|
||||
}
|
||||
|
||||
profilePrefix := componentName
|
||||
switch {
|
||||
case profileKind == "heap":
|
||||
profilePrefix += "_MemoryProfile_"
|
||||
case strings.HasPrefix(profileKind, "profile"):
|
||||
profilePrefix += "_CPUProfile_"
|
||||
default:
|
||||
return fmt.Errorf("Unknown profile kind provided: %s", profileKind)
|
||||
}
|
||||
|
||||
// Write the profile data to a file.
|
||||
rawprofilePath := path.Join(getProfilesDirectoryPath(), profilePrefix+profileBaseName+".pprof")
|
||||
rawprofile, err := os.Create(rawprofilePath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to create file for the profile graph: %v", err)
|
||||
}
|
||||
defer rawprofile.Close()
|
||||
|
||||
if _, err := rawprofile.Write([]byte(sshResult.Stdout)); err != nil {
|
||||
return fmt.Errorf("Failed to write file with profile data: %v", err)
|
||||
}
|
||||
if err := rawprofile.Close(); err != nil {
|
||||
return fmt.Errorf("Failed to close file: %v", err)
|
||||
}
|
||||
// Create a graph from the data and write it to a pdf file.
|
||||
var cmd *exec.Cmd
|
||||
switch {
|
||||
// TODO: Support other profile kinds if needed (e.g inuse_space, alloc_objects, mutex, etc)
|
||||
case profileKind == "heap":
|
||||
cmd = exec.Command("go", "tool", "pprof", "-pdf", "-symbolize=none", "--alloc_space", rawprofile.Name())
|
||||
case strings.HasPrefix(profileKind, "profile"):
|
||||
cmd = exec.Command("go", "tool", "pprof", "-pdf", "-symbolize=none", rawprofile.Name())
|
||||
default:
|
||||
return fmt.Errorf("Unknown profile kind provided: %s", profileKind)
|
||||
}
|
||||
outfilePath := path.Join(getProfilesDirectoryPath(), profilePrefix+profileBaseName+".pdf")
|
||||
outfile, err := os.Create(outfilePath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to create file for the profile graph: %v", err)
|
||||
}
|
||||
defer outfile.Close()
|
||||
cmd.Stdout = outfile
|
||||
stderr := bytes.NewBuffer(nil)
|
||||
cmd.Stderr = stderr
|
||||
if err := cmd.Run(); nil != err {
|
||||
return fmt.Errorf("Failed to run 'go tool pprof': %v, stderr: %#v", err, stderr.String())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// The below exposed functions can take a while to execute as they SSH to the master,
|
||||
// collect and copy the profile over and then graph it. To allow waiting for these to
|
||||
// finish before the parent goroutine itself finishes, we accept a sync.WaitGroup
|
||||
// argument in these functions. Typically you would use the following pattern:
|
||||
//
|
||||
// func TestFoo() {
|
||||
// var wg sync.WaitGroup
|
||||
// wg.Add(3)
|
||||
// go framework.GatherCPUProfile("kube-apiserver", "before_foo", &wg)
|
||||
// go framework.GatherMemoryProfile("kube-apiserver", "before_foo", &wg)
|
||||
// <<<< some code doing foo >>>>>>
|
||||
// go framework.GatherCPUProfile("kube-scheduler", "after_foo", &wg)
|
||||
// wg.Wait()
|
||||
// }
|
||||
//
|
||||
// If you do not wish to exercise the waiting logic, pass a nil value for the
|
||||
// waitgroup argument instead. However, then you would be responsible for ensuring
|
||||
// that the function finishes. There's also a polling-based gatherer utility for
|
||||
// CPU profiles available below.
|
||||
|
||||
// GatherCPUProfile gathers CPU profile.
|
||||
func GatherCPUProfile(componentName string, profileBaseName string, wg *sync.WaitGroup) {
|
||||
GatherCPUProfileForSeconds(componentName, profileBaseName, DefaultCPUProfileSeconds, wg)
|
||||
}
|
||||
|
||||
// GatherCPUProfileForSeconds gathers CPU profile for specified seconds.
|
||||
func GatherCPUProfileForSeconds(componentName string, profileBaseName string, seconds int, wg *sync.WaitGroup) {
|
||||
if wg != nil {
|
||||
defer wg.Done()
|
||||
}
|
||||
if err := gatherProfile(componentName, profileBaseName, fmt.Sprintf("profile?seconds=%v", seconds)); err != nil {
|
||||
Logf("Failed to gather %v CPU profile: %v", componentName, err)
|
||||
}
|
||||
}
|
||||
|
||||
// GatherMemoryProfile gathers memory profile.
|
||||
func GatherMemoryProfile(componentName string, profileBaseName string, wg *sync.WaitGroup) {
|
||||
if wg != nil {
|
||||
defer wg.Done()
|
||||
}
|
||||
if err := gatherProfile(componentName, profileBaseName, "heap"); err != nil {
|
||||
Logf("Failed to gather %v memory profile: %v", componentName, err)
|
||||
}
|
||||
}
|
||||
|
||||
// StartCPUProfileGatherer performs polling-based gathering of the component's CPU
|
||||
// profile. It takes the interval b/w consecutive gatherings as an argument and
|
||||
// starts the gathering goroutine. To stop the gatherer, close the returned channel.
|
||||
func StartCPUProfileGatherer(componentName string, profileBaseName string, interval time.Duration) chan struct{} {
|
||||
stopCh := make(chan struct{})
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-time.After(interval):
|
||||
GatherCPUProfile(componentName, profileBaseName+"_"+time.Now().Format(time.RFC3339), nil)
|
||||
case <-stopCh:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
return stopCh
|
||||
}
|
20
vendor/k8s.io/kubernetes/test/e2e/framework/psp.go
generated
vendored
@ -17,13 +17,14 @@ limitations under the License.
|
||||
package framework
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
policyv1beta1 "k8s.io/api/policy/v1beta1"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apiserver/pkg/authentication/serviceaccount"
|
||||
@ -83,7 +84,7 @@ func privilegedPSP(name string) *policyv1beta1.PodSecurityPolicy {
|
||||
// IsPodSecurityPolicyEnabled returns true if PodSecurityPolicy is enabled. Otherwise false.
|
||||
func IsPodSecurityPolicyEnabled(kubeClient clientset.Interface) bool {
|
||||
isPSPEnabledOnce.Do(func() {
|
||||
psps, err := kubeClient.PolicyV1beta1().PodSecurityPolicies().List(metav1.ListOptions{})
|
||||
psps, err := kubeClient.PolicyV1beta1().PodSecurityPolicies().List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
Logf("Error listing PodSecurityPolicies; assuming PodSecurityPolicy is disabled: %v", err)
|
||||
isPSPEnabled = false
|
||||
@ -109,23 +110,22 @@ func CreatePrivilegedPSPBinding(kubeClient clientset.Interface, namespace string
|
||||
}
|
||||
// Create the privileged PSP & role
|
||||
privilegedPSPOnce.Do(func() {
|
||||
_, err := kubeClient.PolicyV1beta1().PodSecurityPolicies().Get(
|
||||
podSecurityPolicyPrivileged, metav1.GetOptions{})
|
||||
if !apierrs.IsNotFound(err) {
|
||||
_, err := kubeClient.PolicyV1beta1().PodSecurityPolicies().Get(context.TODO(), podSecurityPolicyPrivileged, metav1.GetOptions{})
|
||||
if !apierrors.IsNotFound(err) {
|
||||
// Privileged PSP was already created.
|
||||
ExpectNoError(err, "Failed to get PodSecurityPolicy %s", podSecurityPolicyPrivileged)
|
||||
return
|
||||
}
|
||||
|
||||
psp := privilegedPSP(podSecurityPolicyPrivileged)
|
||||
_, err = kubeClient.PolicyV1beta1().PodSecurityPolicies().Create(psp)
|
||||
if !apierrs.IsAlreadyExists(err) {
|
||||
_, err = kubeClient.PolicyV1beta1().PodSecurityPolicies().Create(context.TODO(), psp, metav1.CreateOptions{})
|
||||
if !apierrors.IsAlreadyExists(err) {
|
||||
ExpectNoError(err, "Failed to create PSP %s", podSecurityPolicyPrivileged)
|
||||
}
|
||||
|
||||
if auth.IsRBACEnabled(kubeClient.RbacV1()) {
|
||||
// Create the Role to bind it to the namespace.
|
||||
_, err = kubeClient.RbacV1().ClusterRoles().Create(&rbacv1.ClusterRole{
|
||||
_, err = kubeClient.RbacV1().ClusterRoles().Create(context.TODO(), &rbacv1.ClusterRole{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: podSecurityPolicyPrivileged},
|
||||
Rules: []rbacv1.PolicyRule{{
|
||||
APIGroups: []string{"extensions"},
|
||||
@ -133,8 +133,8 @@ func CreatePrivilegedPSPBinding(kubeClient clientset.Interface, namespace string
|
||||
ResourceNames: []string{podSecurityPolicyPrivileged},
|
||||
Verbs: []string{"use"},
|
||||
}},
|
||||
})
|
||||
if !apierrs.IsAlreadyExists(err) {
|
||||
}, metav1.CreateOptions{})
|
||||
if !apierrors.IsAlreadyExists(err) {
|
||||
ExpectNoError(err, "Failed to create PSP role")
|
||||
}
|
||||
}
|
||||
|
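The PSP helper keeps its create-and-tolerate-AlreadyExists idiom; only the call signatures and the errors alias change. A brief sketch of that idiom under the v1.18 clientset, with an assumed clientset c and a caller-supplied ClusterRole:

```go
package example

import (
	"context"

	rbacv1 "k8s.io/api/rbac/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// ensureClusterRole creates the role and tolerates AlreadyExists, matching the
// pattern CreatePrivilegedPSPBinding uses after the v1.18 signature change.
func ensureClusterRole(c clientset.Interface, role *rbacv1.ClusterRole) error {
	_, err := c.RbacV1().ClusterRoles().Create(context.TODO(), role, metav1.CreateOptions{})
	if err != nil && !apierrors.IsAlreadyExists(err) {
		return err
	}
	return nil
}
```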
3
vendor/k8s.io/kubernetes/test/e2e/framework/pv/BUILD
generated
vendored
@ -16,8 +16,7 @@ go_library(
|
||||
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//test/e2e/framework:go_default_library",
|
||||
"//test/e2e/framework/log:go_default_library",
|
||||
"//test/e2e/framework/pod:go_default_library",
|
||||
"//test/e2e/framework/skipper:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo:go_default_library",
|
||||
],
|
||||
)
|
||||
|
115
vendor/k8s.io/kubernetes/test/e2e/framework/pv/pv.go
generated
vendored
@ -17,12 +17,13 @@ limitations under the License.
|
||||
package framework
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
@ -31,8 +32,7 @@ import (
|
||||
storageutil "k8s.io/kubernetes/pkg/apis/storage/v1/util"
|
||||
"k8s.io/kubernetes/pkg/volume/util"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -143,7 +143,7 @@ func PVPVCCleanup(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc
|
||||
errs = append(errs, fmt.Errorf("failed to delete PVC %q: %v", pvc.Name, err))
|
||||
}
|
||||
} else {
|
||||
e2elog.Logf("pvc is nil")
|
||||
framework.Logf("pvc is nil")
|
||||
}
|
||||
if pv != nil {
|
||||
err := DeletePersistentVolume(c, pv.Name)
|
||||
@ -151,7 +151,7 @@ func PVPVCCleanup(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc
|
||||
errs = append(errs, fmt.Errorf("failed to delete PV %q: %v", pv.Name, err))
|
||||
}
|
||||
} else {
|
||||
e2elog.Logf("pv is nil")
|
||||
framework.Logf("pv is nil")
|
||||
}
|
||||
return errs
|
||||
}
|
||||
@ -185,9 +185,9 @@ func PVPVCMapCleanup(c clientset.Interface, ns string, pvols PVMap, claims PVCMa
|
||||
// DeletePersistentVolume deletes the PV.
|
||||
func DeletePersistentVolume(c clientset.Interface, pvName string) error {
|
||||
if c != nil && len(pvName) > 0 {
|
||||
e2elog.Logf("Deleting PersistentVolume %q", pvName)
|
||||
err := c.CoreV1().PersistentVolumes().Delete(pvName, nil)
|
||||
if err != nil && !apierrs.IsNotFound(err) {
|
||||
framework.Logf("Deleting PersistentVolume %q", pvName)
|
||||
err := c.CoreV1().PersistentVolumes().Delete(context.TODO(), pvName, metav1.DeleteOptions{})
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
return fmt.Errorf("PV Delete API error: %v", err)
|
||||
}
|
||||
}
|
||||
@ -197,9 +197,9 @@ func DeletePersistentVolume(c clientset.Interface, pvName string) error {
|
||||
// DeletePersistentVolumeClaim deletes the Claim.
|
||||
func DeletePersistentVolumeClaim(c clientset.Interface, pvcName string, ns string) error {
|
||||
if c != nil && len(pvcName) > 0 {
|
||||
e2elog.Logf("Deleting PersistentVolumeClaim %q", pvcName)
|
||||
err := c.CoreV1().PersistentVolumeClaims(ns).Delete(pvcName, nil)
|
||||
if err != nil && !apierrs.IsNotFound(err) {
|
||||
framework.Logf("Deleting PersistentVolumeClaim %q", pvcName)
|
||||
err := c.CoreV1().PersistentVolumeClaims(ns).Delete(context.TODO(), pvcName, metav1.DeleteOptions{})
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
return fmt.Errorf("PVC Delete API error: %v", err)
|
||||
}
|
||||
}
|
||||
@ -211,21 +211,21 @@ func DeletePersistentVolumeClaim(c clientset.Interface, pvcName string, ns strin
|
||||
// phase value to expect for the pv bound to the to-be-deleted claim.
|
||||
func DeletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, expectPVPhase v1.PersistentVolumePhase) error {
|
||||
pvname := pvc.Spec.VolumeName
|
||||
e2elog.Logf("Deleting PVC %v to trigger reclamation of PV %v", pvc.Name, pvname)
|
||||
framework.Logf("Deleting PVC %v to trigger reclamation of PV %v", pvc.Name, pvname)
|
||||
err := DeletePersistentVolumeClaim(c, pvc.Name, ns)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Wait for the PV's phase to return to be `expectPVPhase`
|
||||
e2elog.Logf("Waiting for reclaim process to complete.")
|
||||
framework.Logf("Waiting for reclaim process to complete.")
|
||||
err = WaitForPersistentVolumePhase(expectPVPhase, c, pv.Name, framework.Poll, PVReclaimingTimeout)
|
||||
if err != nil {
|
||||
return fmt.Errorf("pv %q phase did not become %v: %v", pv.Name, expectPVPhase, err)
|
||||
}
|
||||
|
||||
// examine the pv's ClaimRef and UID and compare to expected values
|
||||
pv, err = c.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
|
||||
pv, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("PV Get API error: %v", err)
|
||||
}
|
||||
@ -243,7 +243,7 @@ func DeletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.Persistent
|
||||
}
|
||||
}
|
||||
|
||||
e2elog.Logf("PV %v now in %q phase", pv.Name, expectPVPhase)
|
||||
framework.Logf("PV %v now in %q phase", pv.Name, expectPVPhase)
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -256,7 +256,7 @@ func DeletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols PVMap,
|
||||
var boundPVs, deletedPVCs int
|
||||
|
||||
for pvName := range pvols {
|
||||
pv, err := c.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
|
||||
pv, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("PV Get API error: %v", err)
|
||||
}
|
||||
@ -271,15 +271,15 @@ func DeletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols PVMap,
|
||||
return fmt.Errorf("internal: claims map is missing pvc %q", pvcKey)
|
||||
}
|
||||
// get the pvc for the delete call below
|
||||
pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(cr.Name, metav1.GetOptions{})
|
||||
pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(context.TODO(), cr.Name, metav1.GetOptions{})
|
||||
if err == nil {
|
||||
if err = DeletePVCandValidatePV(c, ns, pvc, pv, expectPVPhase); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if !apierrs.IsNotFound(err) {
|
||||
} else if !apierrors.IsNotFound(err) {
|
||||
return fmt.Errorf("PVC Get API error: %v", err)
|
||||
}
|
||||
// delete pvckey from map even if apierrs.IsNotFound above is true and thus the
|
||||
// delete pvckey from map even if apierrors.IsNotFound above is true and thus the
|
||||
// claim was not actually deleted here
|
||||
delete(claims, pvcKey)
|
||||
deletedPVCs++
|
||||
@ -293,7 +293,7 @@ func DeletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols PVMap,
|
||||
|
||||
// create the PV resource. Fails test on error.
|
||||
func createPV(c clientset.Interface, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
|
||||
pv, err := c.CoreV1().PersistentVolumes().Create(pv)
|
||||
pv, err := c.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("PV Create API error: %v", err)
|
||||
}
|
||||
@ -307,7 +307,7 @@ func CreatePV(c clientset.Interface, pv *v1.PersistentVolume) (*v1.PersistentVol
|
||||
|
||||
// CreatePVC creates the PVC resource. Fails test on error.
|
||||
func CreatePVC(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) {
|
||||
pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Create(pvc)
|
||||
pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), pvc, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("PVC Create API error: %v", err)
|
||||
}
|
||||
@ -360,7 +360,7 @@ func CreatePVPVC(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConf
|
||||
if preBind {
|
||||
preBindMsg = " pre-bound"
|
||||
}
|
||||
e2elog.Logf("Creating a PV followed by a%s PVC", preBindMsg)
|
||||
framework.Logf("Creating a PV followed by a%s PVC", preBindMsg)
|
||||
|
||||
// make the pv and pvc definitions
|
||||
pv := MakePersistentVolume(pvConfig)
|
||||
@ -433,7 +433,7 @@ func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, ns string, pvConf
|
||||
// WaitOnPVandPVC waits for the pv and pvc to bind to each other.
|
||||
func WaitOnPVandPVC(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) error {
|
||||
// Wait for newly created PVC to bind to the PV
|
||||
e2elog.Logf("Waiting for PV %v to bind to PVC %v", pv.Name, pvc.Name)
|
||||
framework.Logf("Waiting for PV %v to bind to PVC %v", pv.Name, pvc.Name)
|
||||
err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, framework.Poll, ClaimBindingTimeout)
|
||||
if err != nil {
|
||||
return fmt.Errorf("PVC %q did not become Bound: %v", pvc.Name, err)
|
||||
@ -447,11 +447,11 @@ func WaitOnPVandPVC(c clientset.Interface, ns string, pv *v1.PersistentVolume, p
|
||||
}
|
||||
|
||||
// Re-get the pv and pvc objects
|
||||
pv, err = c.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
|
||||
pv, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("PV Get API error: %v", err)
|
||||
}
|
||||
pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{})
|
||||
pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(context.TODO(), pvc.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("PVC Get API error: %v", err)
|
||||
}
|
||||
@ -489,15 +489,15 @@ func WaitAndVerifyBinds(c clientset.Interface, ns string, pvols PVMap, claims PV
|
||||
for pvName := range pvols {
|
||||
err := WaitForPersistentVolumePhase(v1.VolumeBound, c, pvName, framework.Poll, PVBindingTimeout)
|
||||
if err != nil && len(pvols) > len(claims) {
|
||||
e2elog.Logf("WARN: pv %v is not bound after max wait", pvName)
|
||||
e2elog.Logf(" This may be ok since there are more pvs than pvcs")
|
||||
framework.Logf("WARN: pv %v is not bound after max wait", pvName)
|
||||
framework.Logf(" This may be ok since there are more pvs than pvcs")
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("PV %q did not become Bound: %v", pvName, err)
|
||||
}
|
||||
|
||||
pv, err := c.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
|
||||
pv, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("PV Get API error: %v", err)
|
||||
}
|
||||
@ -604,7 +604,7 @@ func MakePersistentVolumeClaim(cfg PersistentVolumeClaimConfig, ns string) *v1.P
|
||||
}
|
||||
|
||||
if cfg.VolumeMode != nil && *cfg.VolumeMode == "" {
|
||||
e2elog.Logf("Warning: Making PVC: VolumeMode specified as invalid empty string, treating as nil")
|
||||
framework.Logf("Warning: Making PVC: VolumeMode specified as invalid empty string, treating as nil")
|
||||
cfg.VolumeMode = nil
|
||||
}
|
||||
|
||||
@ -634,10 +634,10 @@ func createPDWithRetry(zone string) (string, error) {
|
||||
for start := time.Now(); time.Since(start) < pdRetryTimeout; time.Sleep(pdRetryPollTime) {
|
||||
newDiskName, err = createPD(zone)
|
||||
if err != nil {
|
||||
e2elog.Logf("Couldn't create a new PD, sleeping 5 seconds: %v", err)
|
||||
framework.Logf("Couldn't create a new PD, sleeping 5 seconds: %v", err)
|
||||
continue
|
||||
}
|
||||
e2elog.Logf("Successfully created a new PD: %q.", newDiskName)
|
||||
framework.Logf("Successfully created a new PD: %q.", newDiskName)
|
||||
return newDiskName, nil
|
||||
}
|
||||
return "", err
|
||||
@ -659,10 +659,10 @@ func DeletePDWithRetry(diskName string) error {
|
||||
for start := time.Now(); time.Since(start) < pdRetryTimeout; time.Sleep(pdRetryPollTime) {
|
||||
err = deletePD(diskName)
|
||||
if err != nil {
|
||||
e2elog.Logf("Couldn't delete PD %q, sleeping %v: %v", diskName, pdRetryPollTime, err)
|
||||
framework.Logf("Couldn't delete PD %q, sleeping %v: %v", diskName, pdRetryPollTime, err)
|
||||
continue
|
||||
}
|
||||
e2elog.Logf("Successfully deleted PD %q.", diskName)
|
||||
framework.Logf("Successfully deleted PD %q.", diskName)
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("unable to delete PD %q: %v", diskName, err)
|
||||
@ -679,12 +679,6 @@ func deletePD(pdName string) error {
|
||||
return framework.TestContext.CloudConfig.Provider.DeletePD(pdName)
|
||||
}
|
||||
|
||||
// MakeWritePod returns a pod definition based on the namespace. The pod references the PVC's
|
||||
// name.
|
||||
func MakeWritePod(ns string, pvc *v1.PersistentVolumeClaim) *v1.Pod {
|
||||
return e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')")
|
||||
}
|
||||
|
||||
// WaitForPVClaimBoundPhase waits until all pvcs phase set to bound
|
||||
func WaitForPVClaimBoundPhase(client clientset.Interface, pvclaims []*v1.PersistentVolumeClaim, timeout time.Duration) ([]*v1.PersistentVolume, error) {
|
||||
persistentvolumes := make([]*v1.PersistentVolume, len(pvclaims))
|
||||
@ -695,12 +689,12 @@ func WaitForPVClaimBoundPhase(client clientset.Interface, pvclaims []*v1.Persist
|
||||
return persistentvolumes, err
|
||||
}
|
||||
// Get new copy of the claim
|
||||
claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
|
||||
claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(context.TODO(), claim.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return persistentvolumes, fmt.Errorf("PVC Get API error: %v", err)
|
||||
}
|
||||
// Get the bounded PV
|
||||
persistentvolumes[index], err = client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
|
||||
persistentvolumes[index], err = client.CoreV1().PersistentVolumes().Get(context.TODO(), claim.Spec.VolumeName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return persistentvolumes, fmt.Errorf("PV Get API error: %v", err)
|
||||
}
|
||||
@ -710,18 +704,18 @@ func WaitForPVClaimBoundPhase(client clientset.Interface, pvclaims []*v1.Persist
|
||||
|
||||
// WaitForPersistentVolumePhase waits for a PersistentVolume to be in a specific phase or until timeout occurs, whichever comes first.
|
||||
func WaitForPersistentVolumePhase(phase v1.PersistentVolumePhase, c clientset.Interface, pvName string, Poll, timeout time.Duration) error {
|
||||
e2elog.Logf("Waiting up to %v for PersistentVolume %s to have phase %s", timeout, pvName, phase)
|
||||
framework.Logf("Waiting up to %v for PersistentVolume %s to have phase %s", timeout, pvName, phase)
|
||||
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
|
||||
pv, err := c.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
|
||||
pv, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
e2elog.Logf("Get persistent volume %s in failed, ignoring for %v: %v", pvName, Poll, err)
|
||||
framework.Logf("Get persistent volume %s in failed, ignoring for %v: %v", pvName, Poll, err)
|
||||
continue
|
||||
}
|
||||
if pv.Status.Phase == phase {
|
||||
e2elog.Logf("PersistentVolume %s found and phase=%s (%v)", pvName, phase, time.Since(start))
|
||||
framework.Logf("PersistentVolume %s found and phase=%s (%v)", pvName, phase, time.Since(start))
|
||||
return nil
|
||||
}
|
||||
e2elog.Logf("PersistentVolume %s found but phase is %s instead of %s.", pvName, pv.Status.Phase, phase)
|
||||
framework.Logf("PersistentVolume %s found but phase is %s instead of %s.", pvName, pv.Status.Phase, phase)
|
||||
}
|
||||
return fmt.Errorf("PersistentVolume %s not in phase %s within %v", pvName, phase, timeout)
|
||||
}
|
||||
@ -737,22 +731,22 @@ func WaitForPersistentVolumeClaimsPhase(phase v1.PersistentVolumeClaimPhase, c c
|
||||
if len(pvcNames) == 0 {
|
||||
return fmt.Errorf("Incorrect parameter: Need at least one PVC to track. Found 0")
|
||||
}
|
||||
e2elog.Logf("Waiting up to %v for PersistentVolumeClaims %v to have phase %s", timeout, pvcNames, phase)
|
||||
framework.Logf("Waiting up to %v for PersistentVolumeClaims %v to have phase %s", timeout, pvcNames, phase)
|
||||
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
|
||||
phaseFoundInAllClaims := true
|
||||
for _, pvcName := range pvcNames {
|
||||
pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(pvcName, metav1.GetOptions{})
|
||||
pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(context.TODO(), pvcName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
e2elog.Logf("Failed to get claim %q, retrying in %v. Error: %v", pvcName, Poll, err)
|
||||
framework.Logf("Failed to get claim %q, retrying in %v. Error: %v", pvcName, Poll, err)
|
||||
continue
|
||||
}
|
||||
if pvc.Status.Phase == phase {
|
||||
e2elog.Logf("PersistentVolumeClaim %s found and phase=%s (%v)", pvcName, phase, time.Since(start))
|
||||
framework.Logf("PersistentVolumeClaim %s found and phase=%s (%v)", pvcName, phase, time.Since(start))
|
||||
if matchAny {
|
||||
return nil
|
||||
}
|
||||
} else {
|
||||
e2elog.Logf("PersistentVolumeClaim %s found but phase is %s instead of %s.", pvcName, pvc.Status.Phase, phase)
|
||||
framework.Logf("PersistentVolumeClaim %s found but phase is %s instead of %s.", pvcName, pvc.Status.Phase, phase)
|
||||
phaseFoundInAllClaims = false
|
||||
}
|
||||
}
|
||||
@ -777,22 +771,9 @@ func DeletePVSource(pvSource *v1.PersistentVolumeSource) error {
|
||||
return framework.TestContext.CloudConfig.Provider.DeletePVSource(pvSource)
|
||||
}
|
||||
|
||||
// GetBoundPV returns a PV details.
|
||||
func GetBoundPV(client clientset.Interface, pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolume, error) {
|
||||
// Get new copy of the claim
|
||||
claim, err := client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get the bound PV
|
||||
pv, err := client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
|
||||
return pv, err
|
||||
}
|
||||
|
||||
// GetDefaultStorageClassName returns default storageClass or return error
|
||||
func GetDefaultStorageClassName(c clientset.Interface) (string, error) {
|
||||
list, err := c.StorageV1().StorageClasses().List(metav1.ListOptions{})
|
||||
list, err := c.StorageV1().StorageClasses().List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Error listing storage classes: %v", err)
|
||||
}
|
||||
@ -808,7 +789,7 @@ func GetDefaultStorageClassName(c clientset.Interface) (string, error) {
|
||||
if len(scName) == 0 {
|
||||
return "", fmt.Errorf("No default storage class found")
|
||||
}
|
||||
e2elog.Logf("Default storage class: %q", scName)
|
||||
framework.Logf("Default storage class: %q", scName)
|
||||
return scName, nil
|
||||
}
|
||||
|
||||
@ -816,6 +797,6 @@ func GetDefaultStorageClassName(c clientset.Interface) (string, error) {
|
||||
func SkipIfNoDefaultStorageClass(c clientset.Interface) {
|
||||
_, err := GetDefaultStorageClassName(c)
|
||||
if err != nil {
|
||||
framework.Skipf("error finding default storageClass : %v", err)
|
||||
e2eskipper.Skipf("error finding default storageClass : %v", err)
|
||||
}
|
||||
}
|
||||
|
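Nearly every hunk in this vendor bump follows the same client-go v1.18 migration: typed client calls now take a context.Context as their first argument, and the e2e helpers log through framework.Logf rather than the e2elog alias. A minimal sketch of the new call shape, assuming an already constructed clientset c; the function and variable names here are illustrative, not part of the vendored code:

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// getPVPhase shows the post-v1.18 signature: a context plus GetOptions.
// Before this update the call was c.CoreV1().PersistentVolumes().Get(name, metav1.GetOptions{}).
func getPVPhase(c kubernetes.Interface, name string) (string, error) {
	pv, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", fmt.Errorf("PV Get API error: %v", err)
	}
	return string(pv.Status.Phase), nil
}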
86
vendor/k8s.io/kubernetes/test/e2e/framework/rc_util.go
generated
vendored
@ -1,86 +0,0 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
scaleclient "k8s.io/client-go/scale"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
)
|
||||
|
||||
// RcByNameContainer returns a ReplicationController with specified name and container
|
||||
func RcByNameContainer(name string, replicas int32, image string, labels map[string]string, c v1.Container,
|
||||
gracePeriod *int64) *v1.ReplicationController {
|
||||
|
||||
zeroGracePeriod := int64(0)
|
||||
|
||||
// Add "name": name to the labels, overwriting if it exists.
|
||||
labels["name"] = name
|
||||
if gracePeriod == nil {
|
||||
gracePeriod = &zeroGracePeriod
|
||||
}
|
||||
return &v1.ReplicationController{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "ReplicationController",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: v1.ReplicationControllerSpec{
|
||||
Replicas: func(i int32) *int32 { return &i }(replicas),
|
||||
Selector: map[string]string{
|
||||
"name": name,
|
||||
},
|
||||
Template: &v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: labels,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{c},
|
||||
TerminationGracePeriodSeconds: gracePeriod,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// DeleteRCAndWaitForGC deletes only the Replication Controller and waits for GC to delete the pods.
|
||||
func DeleteRCAndWaitForGC(c clientset.Interface, ns, name string) error {
|
||||
return DeleteResourceAndWaitForGC(c, api.Kind("ReplicationController"), ns, name)
|
||||
}
|
||||
|
||||
// ScaleRC scales Replication Controller to be desired size.
|
||||
func ScaleRC(clientset clientset.Interface, scalesGetter scaleclient.ScalesGetter, ns, name string, size uint, wait bool) error {
|
||||
return ScaleResource(clientset, scalesGetter, ns, name, size, wait, api.Kind("ReplicationController"), api.SchemeGroupVersion.WithResource("replicationcontrollers"))
|
||||
}
|
||||
|
||||
// RunRC Launches (and verifies correctness) of a Replication Controller
|
||||
// and will wait for all pods it spawns to become "Running".
|
||||
func RunRC(config testutils.RCConfig) error {
|
||||
ginkgo.By(fmt.Sprintf("creating replication controller %s in namespace %s", config.Name, config.Namespace))
|
||||
config.NodeDumpFunc = DumpNodeDebugInfo
|
||||
config.ContainerDumpFunc = LogFailedContainers
|
||||
return testutils.RunRC(config)
|
||||
}
|
4
vendor/k8s.io/kubernetes/test/e2e/framework/resource/BUILD
generated
vendored
@ -6,10 +6,6 @@ go_library(
importpath = "k8s.io/kubernetes/test/e2e/framework/resource",
visibility = ["//visibility:public"],
deps = [
"//pkg/apis/apps:go_default_library",
"//pkg/apis/batch:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/apis/extensions:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/batch/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
35
vendor/k8s.io/kubernetes/test/e2e/framework/resource/runtimeobj.go
generated
vendored
@ -17,6 +17,7 @@ limitations under the License.
package resource

import (
"context"
"fmt"

appsv1 "k8s.io/api/apps/v1"
@ -28,26 +29,32 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
clientset "k8s.io/client-go/kubernetes"
appsinternal "k8s.io/kubernetes/pkg/apis/apps"
batchinternal "k8s.io/kubernetes/pkg/apis/batch"
api "k8s.io/kubernetes/pkg/apis/core"
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
)

var (
kindReplicationController = schema.GroupKind{Kind: "ReplicationController"}
kindExtensionsReplicaSet = schema.GroupKind{Group: "extensions", Kind: "ReplicaSet"}
kindAppsReplicaSet = schema.GroupKind{Group: "apps", Kind: "ReplicaSet"}
kindExtensionsDeployment = schema.GroupKind{Group: "extensions", Kind: "Deployment"}
kindAppsDeployment = schema.GroupKind{Group: "apps", Kind: "Deployment"}
kindExtensionsDaemonSet = schema.GroupKind{Group: "extensions", Kind: "DaemonSet"}
kindBatchJob = schema.GroupKind{Group: "batch", Kind: "Job"}
)

// GetRuntimeObjectForKind returns a runtime.Object based on its GroupKind,
// namespace and name.
func GetRuntimeObjectForKind(c clientset.Interface, kind schema.GroupKind, ns, name string) (runtime.Object, error) {
switch kind {
case api.Kind("ReplicationController"):
return c.CoreV1().ReplicationControllers(ns).Get(name, metav1.GetOptions{})
case extensionsinternal.Kind("ReplicaSet"), appsinternal.Kind("ReplicaSet"):
return c.AppsV1().ReplicaSets(ns).Get(name, metav1.GetOptions{})
case extensionsinternal.Kind("Deployment"), appsinternal.Kind("Deployment"):
return c.AppsV1().Deployments(ns).Get(name, metav1.GetOptions{})
case extensionsinternal.Kind("DaemonSet"):
return c.AppsV1().DaemonSets(ns).Get(name, metav1.GetOptions{})
case batchinternal.Kind("Job"):
return c.BatchV1().Jobs(ns).Get(name, metav1.GetOptions{})
case kindReplicationController:
return c.CoreV1().ReplicationControllers(ns).Get(context.TODO(), name, metav1.GetOptions{})
case kindExtensionsReplicaSet, kindAppsReplicaSet:
return c.AppsV1().ReplicaSets(ns).Get(context.TODO(), name, metav1.GetOptions{})
case kindExtensionsDeployment, kindAppsDeployment:
return c.AppsV1().Deployments(ns).Get(context.TODO(), name, metav1.GetOptions{})
case kindExtensionsDaemonSet:
return c.AppsV1().DaemonSets(ns).Get(context.TODO(), name, metav1.GetOptions{})
case kindBatchJob:
return c.BatchV1().Jobs(ns).Get(context.TODO(), name, metav1.GetOptions{})
default:
return nil, fmt.Errorf("Unsupported kind when getting runtime object: %v", kind)
}
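With the internal Kind() helpers replaced by package-level GroupKind values, callers of the refactored helper pass a plain schema.GroupKind. A small usage sketch under that assumption; the wrapper function below is hypothetical, while the import path matches the BUILD file above:

package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	clientset "k8s.io/client-go/kubernetes"
	e2eresource "k8s.io/kubernetes/test/e2e/framework/resource"
)

// getDeploymentObject fetches a Deployment through GetRuntimeObjectForKind,
// passing a plain GroupKind instead of the internal API kind helpers.
func getDeploymentObject(c clientset.Interface, ns, name string) (runtime.Object, error) {
	kind := schema.GroupKind{Group: "apps", Kind: "Deployment"}
	return e2eresource.GetRuntimeObjectForKind(c, kind, ns, name)
}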
80
vendor/k8s.io/kubernetes/test/e2e/framework/resource_usage_gatherer.go
generated
vendored
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package framework
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
@ -36,6 +37,9 @@ import (
|
||||
kubeletstatsv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
|
||||
"k8s.io/kubernetes/pkg/master/ports"
|
||||
"k8s.io/kubernetes/test/e2e/system"
|
||||
|
||||
// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
|
||||
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
|
||||
)
|
||||
|
||||
// ResourceConstraint is a struct to hold constraints.
|
||||
@ -70,9 +74,6 @@ type ResourceUsagePerContainer map[string]*ContainerResourceUsage
|
||||
// we can't have int here, as JSON does not accept integer keys.
|
||||
type ResourceUsageSummary map[string][]SingleContainerSummary
|
||||
|
||||
// NoCPUConstraint is the number of constraint for CPU.
|
||||
const NoCPUConstraint = math.MaxFloat64
|
||||
|
||||
// PrintHumanReadable prints resource usage summary in human readable.
|
||||
func (s *ResourceUsageSummary) PrintHumanReadable() string {
|
||||
buf := &bytes.Buffer{}
|
||||
@ -183,8 +184,8 @@ type resourceGatherWorker struct {
|
||||
func (w *resourceGatherWorker) singleProbe() {
|
||||
data := make(ResourceUsagePerContainer)
|
||||
if w.inKubemark {
|
||||
kubemarkData := GetKubemarkMasterComponentsResourceUsage()
|
||||
if data == nil {
|
||||
kubemarkData := getKubemarkMasterComponentsResourceUsage()
|
||||
if kubemarkData == nil {
|
||||
return
|
||||
}
|
||||
for k, v := range kubemarkData {
|
||||
@ -293,12 +294,11 @@ func getStatsSummary(c clientset.Interface, nodeName string) (*kubeletstatsv1alp
|
||||
defer cancel()
|
||||
|
||||
data, err := c.CoreV1().RESTClient().Get().
|
||||
Context(ctx).
|
||||
Resource("nodes").
|
||||
SubResource("proxy").
|
||||
Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)).
|
||||
Suffix("stats/summary").
|
||||
Do().Raw()
|
||||
Do(ctx).Raw()
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -397,7 +397,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
|
||||
// Tracks kube-system pods if no valid PodList is passed in.
|
||||
var err error
|
||||
if pods == nil {
|
||||
pods, err = c.CoreV1().Pods("kube-system").List(metav1.ListOptions{})
|
||||
pods, err = c.CoreV1().Pods("kube-system").List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
Logf("Error while listing Pods: %v", err)
|
||||
return nil, err
|
||||
@ -421,7 +421,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
|
||||
dnsNodes[pod.Spec.NodeName] = true
|
||||
}
|
||||
}
|
||||
nodeList, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
|
||||
nodeList, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
Logf("Error while listing Nodes: %v", err)
|
||||
return nil, err
|
||||
@ -556,3 +556,65 @@ func (g *ContainerResourceGatherer) StopAndSummarize(percentiles []int, constrai
|
||||
}
|
||||
return &summary, nil
|
||||
}
|
||||
|
||||
// kubemarkResourceUsage is a struct for tracking the resource usage of kubemark.
|
||||
type kubemarkResourceUsage struct {
|
||||
Name string
|
||||
MemoryWorkingSetInBytes uint64
|
||||
CPUUsageInCores float64
|
||||
}
|
||||
|
||||
func getMasterUsageByPrefix(prefix string) (string, error) {
|
||||
sshResult, err := e2essh.SSH(fmt.Sprintf("ps ax -o %%cpu,rss,command | tail -n +2 | grep %v | sed 's/\\s+/ /g'", prefix), GetMasterHost()+":22", TestContext.Provider)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return sshResult.Stdout, nil
|
||||
}
|
||||
|
||||
// getKubemarkMasterComponentsResourceUsage returns the resource usage of kubemark which contains multiple combinations of cpu and memory usage for each pod name.
|
||||
func getKubemarkMasterComponentsResourceUsage() map[string]*kubemarkResourceUsage {
|
||||
result := make(map[string]*kubemarkResourceUsage)
|
||||
// Get kubernetes component resource usage
|
||||
sshResult, err := getMasterUsageByPrefix("kube")
|
||||
if err != nil {
|
||||
Logf("Error when trying to SSH to master machine. Skipping probe. %v", err)
|
||||
return nil
|
||||
}
|
||||
scanner := bufio.NewScanner(strings.NewReader(sshResult))
|
||||
for scanner.Scan() {
|
||||
var cpu float64
|
||||
var mem uint64
|
||||
var name string
|
||||
fmt.Sscanf(strings.TrimSpace(scanner.Text()), "%f %d /usr/local/bin/kube-%s", &cpu, &mem, &name)
|
||||
if name != "" {
|
||||
// Gatherer expects pod_name/container_name format
|
||||
fullName := name + "/" + name
|
||||
result[fullName] = &kubemarkResourceUsage{Name: fullName, MemoryWorkingSetInBytes: mem * 1024, CPUUsageInCores: cpu / 100}
|
||||
}
|
||||
}
|
||||
// Get etcd resource usage
|
||||
sshResult, err = getMasterUsageByPrefix("bin/etcd")
|
||||
if err != nil {
|
||||
Logf("Error when trying to SSH to master machine. Skipping probe")
|
||||
return nil
|
||||
}
|
||||
scanner = bufio.NewScanner(strings.NewReader(sshResult))
|
||||
for scanner.Scan() {
|
||||
var cpu float64
|
||||
var mem uint64
|
||||
var etcdKind string
|
||||
fmt.Sscanf(strings.TrimSpace(scanner.Text()), "%f %d /bin/sh -c /usr/local/bin/etcd", &cpu, &mem)
|
||||
dataDirStart := strings.Index(scanner.Text(), "--data-dir")
|
||||
if dataDirStart < 0 {
|
||||
continue
|
||||
}
|
||||
fmt.Sscanf(scanner.Text()[dataDirStart:], "--data-dir=/var/%s", &etcdKind)
|
||||
if etcdKind != "" {
|
||||
// Gatherer expects pod_name/container_name format
|
||||
fullName := "etcd/" + etcdKind
|
||||
result[fullName] = &kubemarkResourceUsage{Name: fullName, MemoryWorkingSetInBytes: mem * 1024, CPUUsageInCores: cpu / 100}
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
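The kubemark probe added above recovers per-component usage by scraping ps output over SSH and parsing each line with fmt.Sscanf. A standalone sketch of just that parsing step, using a made-up sample line, shows how the format string splits CPU percent, RSS in KiB, and the component name:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Made-up sample of one "ps ax -o %cpu,rss,command" line for a master component.
	line := " 2.5 123456 /usr/local/bin/kube-apiserver --v=2"
	var cpu float64
	var mem uint64
	var name string
	// Same format string as getKubemarkMasterComponentsResourceUsage uses above.
	fmt.Sscanf(strings.TrimSpace(line), "%f %d /usr/local/bin/kube-%s", &cpu, &mem, &name)
	// The gatherer records RSS*1024 as working-set bytes and %cpu/100 as cores.
	fmt.Printf("%s/%s: %.3f cores, %d bytes\n", name, name, cpu/100, mem*1024)
}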
36
vendor/k8s.io/kubernetes/test/e2e/framework/skipper/BUILD
generated
vendored
Normal file
@ -0,0 +1,36 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
name = "go_default_library",
srcs = ["skipper.go"],
importpath = "k8s.io/kubernetes/test/e2e/framework/skipper",
visibility = ["//visibility:public"],
deps = [
"//pkg/features:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/client-go/discovery:go_default_library",
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/ssh:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
],
)

filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)

filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
@ -14,12 +14,21 @@ See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
package skipper
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"runtime/debug"
|
||||
"strings"
|
||||
|
||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
||||
"github.com/onsi/ginkgo"
|
||||
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
utilversion "k8s.io/apimachinery/pkg/util/version"
|
||||
@ -28,16 +37,83 @@ import (
|
||||
"k8s.io/client-go/dynamic"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
|
||||
// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
|
||||
"k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
|
||||
)
|
||||
|
||||
// TestContext should be used by all tests to access common context data.
|
||||
var TestContext framework.TestContextType
|
||||
|
||||
func skipInternalf(caller int, format string, args ...interface{}) {
|
||||
msg := fmt.Sprintf(format, args...)
|
||||
log("INFO", msg)
|
||||
ginkgowrapper.Skip(msg, caller+1)
|
||||
framework.Logf(msg)
|
||||
skip(msg, caller+1)
|
||||
}
|
||||
|
||||
// SkipPanic is the value that will be panicked from Skip.
|
||||
type SkipPanic struct {
|
||||
Message string // The failure message passed to Fail
|
||||
Filename string // The filename that is the source of the failure
|
||||
Line int // The line number of the filename that is the source of the failure
|
||||
FullStackTrace string // A full stack trace starting at the source of the failure
|
||||
}
|
||||
|
||||
// String makes SkipPanic look like the old Ginkgo panic when printed.
|
||||
func (SkipPanic) String() string { return ginkgo.GINKGO_PANIC }
|
||||
|
||||
// Skip wraps ginkgo.Skip so that it panics with more useful
|
||||
// information about why the test is being skipped. This function will
|
||||
// panic with a SkipPanic.
|
||||
func skip(message string, callerSkip ...int) {
|
||||
skip := 1
|
||||
if len(callerSkip) > 0 {
|
||||
skip += callerSkip[0]
|
||||
}
|
||||
|
||||
_, file, line, _ := runtime.Caller(skip)
|
||||
sp := SkipPanic{
|
||||
Message: message,
|
||||
Filename: file,
|
||||
Line: line,
|
||||
FullStackTrace: pruneStack(skip),
|
||||
}
|
||||
|
||||
defer func() {
|
||||
e := recover()
|
||||
if e != nil {
|
||||
panic(sp)
|
||||
}
|
||||
}()
|
||||
|
||||
ginkgo.Skip(message, skip)
|
||||
}
|
||||
|
||||
// ginkgo adds a lot of test running infrastructure to the stack, so
|
||||
// we filter those out
|
||||
var stackSkipPattern = regexp.MustCompile(`onsi/ginkgo`)
|
||||
|
||||
func pruneStack(skip int) string {
|
||||
skip += 2 // one for pruneStack and one for debug.Stack
|
||||
stack := debug.Stack()
|
||||
scanner := bufio.NewScanner(bytes.NewBuffer(stack))
|
||||
var prunedStack []string
|
||||
|
||||
// skip the top of the stack
|
||||
for i := 0; i < 2*skip+1; i++ {
|
||||
scanner.Scan()
|
||||
}
|
||||
|
||||
for scanner.Scan() {
|
||||
if stackSkipPattern.Match(scanner.Bytes()) {
|
||||
scanner.Scan() // these come in pairs
|
||||
} else {
|
||||
prunedStack = append(prunedStack, scanner.Text())
|
||||
scanner.Scan() // these come in pairs
|
||||
prunedStack = append(prunedStack, scanner.Text())
|
||||
}
|
||||
}
|
||||
|
||||
return strings.Join(prunedStack, "\n")
|
||||
}
|
||||
|
||||
// Skipf skips with information about why the test is being skipped.
|
||||
@ -62,13 +138,13 @@ func SkipUnlessLocalEphemeralStorageEnabled() {
|
||||
// SkipIfMissingResource skips if the gvr resource is missing.
|
||||
func SkipIfMissingResource(dynamicClient dynamic.Interface, gvr schema.GroupVersionResource, namespace string) {
|
||||
resourceClient := dynamicClient.Resource(gvr).Namespace(namespace)
|
||||
_, err := resourceClient.List(metav1.ListOptions{})
|
||||
_, err := resourceClient.List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
// not all resources support list, so we ignore those
|
||||
if apierrs.IsMethodNotSupported(err) || apierrs.IsNotFound(err) || apierrs.IsForbidden(err) {
|
||||
if apierrors.IsMethodNotSupported(err) || apierrors.IsNotFound(err) || apierrors.IsForbidden(err) {
|
||||
skipInternalf(1, "Could not find %s resource, skipping test: %#v", gvr, err)
|
||||
}
|
||||
Failf("Unexpected error getting %v: %v", gvr, err)
|
||||
framework.Failf("Unexpected error getting %v: %v", gvr, err)
|
||||
}
|
||||
}
|
||||
|
||||
@ -88,21 +164,21 @@ func SkipUnlessNodeCountIsAtMost(maxNodeCount int) {
|
||||
|
||||
// SkipIfProviderIs skips if the provider is included in the unsupportedProviders.
|
||||
func SkipIfProviderIs(unsupportedProviders ...string) {
|
||||
if ProviderIs(unsupportedProviders...) {
|
||||
if framework.ProviderIs(unsupportedProviders...) {
|
||||
skipInternalf(1, "Not supported for providers %v (found %s)", unsupportedProviders, TestContext.Provider)
|
||||
}
|
||||
}
|
||||
|
||||
// SkipUnlessProviderIs skips if the provider is not included in the supportedProviders.
|
||||
func SkipUnlessProviderIs(supportedProviders ...string) {
|
||||
if !ProviderIs(supportedProviders...) {
|
||||
if !framework.ProviderIs(supportedProviders...) {
|
||||
skipInternalf(1, "Only supported for providers %v (not %s)", supportedProviders, TestContext.Provider)
|
||||
}
|
||||
}
|
||||
|
||||
// SkipUnlessMultizone skips if the cluster does not have multizone.
|
||||
func SkipUnlessMultizone(c clientset.Interface) {
|
||||
zones, err := GetClusterZones(c)
|
||||
zones, err := framework.GetClusterZones(c)
|
||||
if err != nil {
|
||||
skipInternalf(1, "Error listing cluster zones")
|
||||
}
|
||||
@ -113,7 +189,7 @@ func SkipUnlessMultizone(c clientset.Interface) {
|
||||
|
||||
// SkipIfMultizone skips if the cluster has multizone.
|
||||
func SkipIfMultizone(c clientset.Interface) {
|
||||
zones, err := GetClusterZones(c)
|
||||
zones, err := framework.GetClusterZones(c)
|
||||
if err != nil {
|
||||
skipInternalf(1, "Error listing cluster zones")
|
||||
}
|
||||
@ -124,21 +200,21 @@ func SkipIfMultizone(c clientset.Interface) {
|
||||
|
||||
// SkipUnlessMasterOSDistroIs skips if the master OS distro is not included in the supportedMasterOsDistros.
|
||||
func SkipUnlessMasterOSDistroIs(supportedMasterOsDistros ...string) {
|
||||
if !MasterOSDistroIs(supportedMasterOsDistros...) {
|
||||
if !framework.MasterOSDistroIs(supportedMasterOsDistros...) {
|
||||
skipInternalf(1, "Only supported for master OS distro %v (not %s)", supportedMasterOsDistros, TestContext.MasterOSDistro)
|
||||
}
|
||||
}
|
||||
|
||||
// SkipUnlessNodeOSDistroIs skips if the node OS distro is not included in the supportedNodeOsDistros.
|
||||
func SkipUnlessNodeOSDistroIs(supportedNodeOsDistros ...string) {
|
||||
if !NodeOSDistroIs(supportedNodeOsDistros...) {
|
||||
if !framework.NodeOSDistroIs(supportedNodeOsDistros...) {
|
||||
skipInternalf(1, "Only supported for node OS distro %v (not %s)", supportedNodeOsDistros, TestContext.NodeOSDistro)
|
||||
}
|
||||
}
|
||||
|
||||
// SkipIfNodeOSDistroIs skips if the node OS distro is included in the unsupportedNodeOsDistros.
|
||||
func SkipIfNodeOSDistroIs(unsupportedNodeOsDistros ...string) {
|
||||
if NodeOSDistroIs(unsupportedNodeOsDistros...) {
|
||||
if framework.NodeOSDistroIs(unsupportedNodeOsDistros...) {
|
||||
skipInternalf(1, "Not supported for node OS distro %v (is %s)", unsupportedNodeOsDistros, TestContext.NodeOSDistro)
|
||||
}
|
||||
}
|
||||
@ -147,7 +223,7 @@ func SkipIfNodeOSDistroIs(unsupportedNodeOsDistros ...string) {
|
||||
func SkipUnlessServerVersionGTE(v *utilversion.Version, c discovery.ServerVersionInterface) {
|
||||
gte, err := serverVersionGTE(v, c)
|
||||
if err != nil {
|
||||
Failf("Failed to get server version: %v", err)
|
||||
framework.Failf("Failed to get server version: %v", err)
|
||||
}
|
||||
if !gte {
|
||||
skipInternalf(1, "Not supported for server versions before %q", v)
|
||||
@ -161,10 +237,7 @@ func SkipUnlessSSHKeyPresent() {
|
||||
}
|
||||
}
|
||||
|
||||
// serverVersionGTE returns true if v is greater than or equal to the server
|
||||
// version.
|
||||
//
|
||||
// TODO(18726): This should be incorporated into client.VersionInterface.
|
||||
// serverVersionGTE returns true if v is greater than or equal to the server version.
|
||||
func serverVersionGTE(v *utilversion.Version, c discovery.ServerVersionInterface) (bool, error) {
|
||||
serverVersion, err := c.ServerVersion()
|
||||
if err != nil {
|
||||
@ -184,3 +257,23 @@ var AppArmorDistros = []string{"gci", "ubuntu"}
|
||||
func SkipIfAppArmorNotSupported() {
|
||||
SkipUnlessNodeOSDistroIs(AppArmorDistros...)
|
||||
}
|
||||
|
||||
// RunIfContainerRuntimeIs runs if the container runtime is included in the runtimes.
|
||||
func RunIfContainerRuntimeIs(runtimes ...string) {
|
||||
for _, containerRuntime := range runtimes {
|
||||
if containerRuntime == TestContext.ContainerRuntime {
|
||||
return
|
||||
}
|
||||
}
|
||||
skipInternalf(1, "Skipped because container runtime %q is not in %s", TestContext.ContainerRuntime, runtimes)
|
||||
}
|
||||
|
||||
// RunIfSystemSpecNameIs runs if the system spec name is included in the names.
|
||||
func RunIfSystemSpecNameIs(names ...string) {
|
||||
for _, name := range names {
|
||||
if name == TestContext.SystemSpecName {
|
||||
return
|
||||
}
|
||||
}
|
||||
skipInternalf(1, "Skipped because system spec name %q is not in %v", TestContext.SystemSpecName, names)
|
||||
}
|
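After this move the skip helpers live in their own package and delegate provider, distro, and zone lookups back to framework. A minimal sketch of how a test would call them through the e2eskipper alias used elsewhere in this commit; the Describe and It bodies are illustrative only:

package example

import (
	"github.com/onsi/ginkgo"

	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
)

var _ = ginkgo.Describe("example suite", func() {
	ginkgo.It("skips on unsupported providers or node OS distros", func() {
		// Previously these helpers were part of the framework package itself.
		e2eskipper.SkipUnlessProviderIs("gce", "gke")
		e2eskipper.SkipIfNodeOSDistroIs("windows")
	})
})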
4
vendor/k8s.io/kubernetes/test/e2e/framework/ssh/ssh.go
generated
vendored
@ -18,6 +18,7 @@ package ssh

import (
"bytes"
"context"
"fmt"
"net"
"os"
@ -45,7 +46,6 @@ const (

// singleCallTimeout is how long to try single API calls (like 'get' or 'list'). Used to prevent
// transient failures from failing tests.
// TODO: client should not apply this timeout to Watch calls. Increased from 30s until that is fixed.
singleCallTimeout = 5 * time.Minute
)

@ -319,7 +319,7 @@ func waitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) {
var nodes *v1.NodeList
var err error
if wait.PollImmediate(pollNodeInterval, singleCallTimeout, func() (bool, error) {
nodes, err = c.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
nodes, err = c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
if err != nil {
86
vendor/k8s.io/kubernetes/test/e2e/framework/suites.go
generated
vendored
@ -1,86 +0,0 @@
|
||||
/*
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"path"
|
||||
"time"
|
||||
|
||||
// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
|
||||
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
|
||||
)
|
||||
|
||||
// CleanupSuite is the boilerplate that can be used after tests on ginkgo were run, on the SynchronizedAfterSuite step.
|
||||
// Similar to SynchronizedBeforeSuite, we want to run some operations only once (such as collecting cluster logs).
|
||||
// Here, the order of functions is reversed; first, the function which runs everywhere,
|
||||
// and then the function that only runs on the first Ginkgo node.
|
||||
func CleanupSuite() {
|
||||
// Run on all Ginkgo nodes
|
||||
Logf("Running AfterSuite actions on all nodes")
|
||||
RunCleanupActions()
|
||||
}
|
||||
|
||||
// AfterSuiteActions are actions that are run on ginkgo's SynchronizedAfterSuite
|
||||
func AfterSuiteActions() {
|
||||
// Run only Ginkgo on node 1
|
||||
Logf("Running AfterSuite actions on node 1")
|
||||
if TestContext.ReportDir != "" {
|
||||
CoreDump(TestContext.ReportDir)
|
||||
}
|
||||
if TestContext.GatherSuiteMetricsAfterTest {
|
||||
if err := gatherTestSuiteMetrics(); err != nil {
|
||||
Logf("Error gathering metrics: %v", err)
|
||||
}
|
||||
}
|
||||
if TestContext.NodeKiller.Enabled {
|
||||
close(TestContext.NodeKiller.NodeKillerStopCh)
|
||||
}
|
||||
}
|
||||
|
||||
func gatherTestSuiteMetrics() error {
|
||||
Logf("Gathering metrics")
|
||||
c, err := LoadClientset()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error loading client: %v", err)
|
||||
}
|
||||
|
||||
// Grab metrics for apiserver, scheduler, controller-manager, kubelet (for non-kubemark case) and cluster autoscaler (optionally).
|
||||
grabber, err := e2emetrics.NewMetricsGrabber(c, nil, !ProviderIs("kubemark"), true, true, true, TestContext.IncludeClusterAutoscalerMetrics)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create MetricsGrabber: %v", err)
|
||||
}
|
||||
|
||||
received, err := grabber.Grab()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to grab metrics: %v", err)
|
||||
}
|
||||
|
||||
metricsForE2E := (*e2emetrics.ComponentCollection)(&received)
|
||||
metricsJSON := metricsForE2E.PrintJSON()
|
||||
if TestContext.ReportDir != "" {
|
||||
filePath := path.Join(TestContext.ReportDir, "MetricsForE2ESuite_"+time.Now().Format(time.RFC3339)+".json")
|
||||
if err := ioutil.WriteFile(filePath, []byte(metricsJSON), 0644); err != nil {
|
||||
return fmt.Errorf("error writing to %q: %v", filePath, err)
|
||||
}
|
||||
} else {
|
||||
Logf("\n\nTest Suite Metrics:\n%s\n", metricsJSON)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
12
vendor/k8s.io/kubernetes/test/e2e/framework/test_context.go
generated
vendored
@ -149,8 +149,6 @@ type TestContextType struct {
|
||||
FeatureGates map[string]bool
|
||||
// Node e2e specific test context
|
||||
NodeTestContextType
|
||||
// Monitoring solution that is used in current cluster.
|
||||
ClusterMonitoringMode string
|
||||
|
||||
// Indicates what path the kubernetes-anywhere is installed on
|
||||
KubernetesAnywherePath string
|
||||
@ -169,6 +167,12 @@ type TestContextType struct {
|
||||
|
||||
// ProgressReportURL is the URL which progress updates will be posted to as tests complete. If empty, no updates are sent.
|
||||
ProgressReportURL string
|
||||
|
||||
// SriovdpConfigMapFile is the path to the ConfigMap to configure the SRIOV device plugin on this host.
|
||||
SriovdpConfigMapFile string
|
||||
|
||||
// SpecSummaryOutput is the file to write ginkgo.SpecSummary objects to as tests complete. Useful for debugging and test introspection.
|
||||
SpecSummaryOutput string
|
||||
}
|
||||
|
||||
// NodeKillerConfig describes configuration of NodeKiller -- a utility to
|
||||
@ -297,6 +301,7 @@ func RegisterCommonFlags(flags *flag.FlagSet) {
|
||||
flags.StringVar(&TestContext.KubectlPath, "kubectl-path", "kubectl", "The kubectl binary to use. For development, you might use 'cluster/kubectl.sh' here.")
|
||||
|
||||
flags.StringVar(&TestContext.ProgressReportURL, "progress-report-url", "", "The URL to POST progress updates to as the suite runs to assist in aiding integrations. If empty, no messages sent.")
|
||||
flags.StringVar(&TestContext.SpecSummaryOutput, "spec-dump", "", "The file to dump all ginkgo.SpecSummary to after tests run. If empty, no objects are saved/printed.")
|
||||
}
|
||||
|
||||
// RegisterClusterFlags registers flags specific to the cluster e2e test suite.
|
||||
@ -315,7 +320,6 @@ func RegisterClusterFlags(flags *flag.FlagSet) {
|
||||
flags.StringVar(&TestContext.Prefix, "prefix", "e2e", "A prefix to be added to cloud resources created during testing.")
|
||||
flags.StringVar(&TestContext.MasterOSDistro, "master-os-distro", "debian", "The OS distribution of cluster master (debian, ubuntu, gci, coreos, or custom).")
|
||||
flags.StringVar(&TestContext.NodeOSDistro, "node-os-distro", "debian", "The OS distribution of cluster VM instances (debian, ubuntu, gci, coreos, or custom).")
|
||||
flags.StringVar(&TestContext.ClusterMonitoringMode, "cluster-monitoring-mode", "standalone", "The monitoring solution that is used in the cluster.")
|
||||
flags.StringVar(&TestContext.ClusterDNSDomain, "dns-domain", "cluster.local", "The DNS Domain of the cluster.")
|
||||
|
||||
// TODO: Flags per provider? Rename gce-project/gce-zone?
|
||||
@ -336,7 +340,7 @@ func RegisterClusterFlags(flags *flag.FlagSet) {
|
||||
flags.StringVar(&cloudConfig.MasterTag, "master-tag", "", "Network tags used on master instances. Valid only for gce, gke")
|
||||
|
||||
flags.StringVar(&cloudConfig.ClusterTag, "cluster-tag", "", "Tag used to identify resources. Only required if provider is aws.")
|
||||
flags.StringVar(&cloudConfig.ConfigFile, "cloud-config-file", "", "Cloud config file. Only required if provider is azure.")
|
||||
flags.StringVar(&cloudConfig.ConfigFile, "cloud-config-file", "", "Cloud config file. Only required if provider is azure or vsphere.")
|
||||
flags.IntVar(&TestContext.MinStartupPods, "minStartupPods", 0, "The number of pods which we need to see in 'Running' state with a 'Ready' condition of true, before we try running tests. This is useful in any cluster which needs some base pod-based services running before it can be used.")
|
||||
flags.DurationVar(&TestContext.SystemPodsStartupTimeout, "system-pods-startup-timeout", 10*time.Minute, "Timeout for waiting for all system pods to be running before starting tests.")
|
||||
flags.DurationVar(&TestContext.NodeSchedulableTimeout, "node-schedulable-timeout", 30*time.Minute, "Timeout for waiting for all nodes to be schedulable.")
|
||||
|
319
vendor/k8s.io/kubernetes/test/e2e/framework/util.go
generated
vendored
@ -30,7 +30,6 @@ import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
@ -39,6 +38,7 @@ import (
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/websocket"
|
||||
|
||||
"k8s.io/klog"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
@ -47,7 +47,7 @@ import (
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
@ -75,6 +75,7 @@ import (
|
||||
uexec "k8s.io/utils/exec"
|
||||
|
||||
// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
|
||||
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
|
||||
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
|
||||
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
|
||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||
@ -86,8 +87,6 @@ const (
|
||||
// PodListTimeout is how long to wait for the pod to be listable.
|
||||
PodListTimeout = time.Minute
|
||||
// PodStartTimeout is how long to wait for the pod to be started.
|
||||
// Initial pod start can be delayed O(minutes) by slow docker pulls.
|
||||
// TODO: Make this 30 seconds once #4566 is resolved.
|
||||
PodStartTimeout = 5 * time.Minute
|
||||
|
||||
// PodStartShortTimeout is same as `PodStartTimeout` to wait for the pod to be started, but shorter.
|
||||
@ -104,12 +103,6 @@ const (
|
||||
// PodEventTimeout is how much we wait for a pod event to occur.
|
||||
PodEventTimeout = 2 * time.Minute
|
||||
|
||||
// NamespaceCleanupTimeout is how long to wait for the namespace to be deleted.
|
||||
// If there are any orphaned namespaces to clean up, this test is running
|
||||
// on a long lived cluster. A long wait here is preferably to spurious test
|
||||
// failures caused by leaked resources from a previous test run.
|
||||
NamespaceCleanupTimeout = 15 * time.Minute
|
||||
|
||||
// ServiceStartTimeout is how long to wait for a service endpoint to be resolvable.
|
||||
ServiceStartTimeout = 3 * time.Minute
|
||||
|
||||
@ -126,7 +119,6 @@ const (
|
||||
|
||||
// SingleCallTimeout is how long to try single API calls (like 'get' or 'list'). Used to prevent
|
||||
// transient failures from failing tests.
|
||||
// TODO: client should not apply this timeout to Watch calls. Increased from 30s until that is fixed.
|
||||
SingleCallTimeout = 5 * time.Minute
|
||||
|
||||
// NodeReadyInitialTimeout is how long nodes have to be "ready" when a test begins. They should already
|
||||
@ -140,12 +132,8 @@ const (
|
||||
// Use it case by case when we are sure this timeout is enough.
|
||||
ClaimProvisionShortTimeout = 1 * time.Minute
|
||||
|
||||
// ClaimDeletingTimeout is How long claims have to become deleted.
|
||||
ClaimDeletingTimeout = 3 * time.Minute
|
||||
|
||||
// RecreateNodeReadyAgainTimeout is how long a node is allowed to become "Ready" after it is recreated before
|
||||
// the test is considered failed.
|
||||
RecreateNodeReadyAgainTimeout = 10 * time.Minute
|
||||
// ClaimProvisionTimeout is how long claims have to become dynamically provisioned.
|
||||
ClaimProvisionTimeout = 5 * time.Minute
|
||||
|
||||
// RestartNodeReadyAgainTimeout is how long a node is allowed to become "Ready" after it is restarted before
|
||||
// the test is considered failed.
|
||||
@ -173,9 +161,6 @@ const (
|
||||
)
|
||||
|
||||
var (
|
||||
// ClaimProvisionTimeout is how long claims have to become dynamically provisioned.
|
||||
ClaimProvisionTimeout = 5 * time.Minute
|
||||
|
||||
// BusyBoxImage is the image URI of BusyBox.
|
||||
BusyBoxImage = imageutils.GetE2EImage(imageutils.BusyBox)
|
||||
|
||||
@ -203,26 +188,6 @@ func GetMasterHost() string {
|
||||
return masterURL.Hostname()
|
||||
}
|
||||
|
||||
// RunIfContainerRuntimeIs runs if the container runtime is included in the runtimes.
|
||||
func RunIfContainerRuntimeIs(runtimes ...string) {
|
||||
for _, containerRuntime := range runtimes {
|
||||
if containerRuntime == TestContext.ContainerRuntime {
|
||||
return
|
||||
}
|
||||
}
|
||||
skipInternalf(1, "Skipped because container runtime %q is not in %s", TestContext.ContainerRuntime, runtimes)
|
||||
}
|
||||
|
||||
// RunIfSystemSpecNameIs runs if the system spec name is included in the names.
|
||||
func RunIfSystemSpecNameIs(names ...string) {
|
||||
for _, name := range names {
|
||||
if name == TestContext.SystemSpecName {
|
||||
return
|
||||
}
|
||||
}
|
||||
skipInternalf(1, "Skipped because system spec name %q is not in %v", TestContext.SystemSpecName, names)
|
||||
}
|
||||
|
||||
// ProviderIs returns true if the provider is included is the providers. Otherwise false.
|
||||
func ProviderIs(providers ...string) bool {
|
||||
for _, provider := range providers {
|
||||
@ -253,44 +218,12 @@ func NodeOSDistroIs(supportedNodeOsDistros ...string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func kubectlLogPod(c clientset.Interface, pod v1.Pod, containerNameSubstr string, logFunc func(ftm string, args ...interface{})) {
|
||||
for _, container := range pod.Spec.Containers {
|
||||
if strings.Contains(container.Name, containerNameSubstr) {
|
||||
// Contains() matches all strings if substr is empty
|
||||
logs, err := e2epod.GetPodLogs(c, pod.Namespace, pod.Name, container.Name)
|
||||
if err != nil {
|
||||
logs, err = e2epod.GetPreviousPodLogs(c, pod.Namespace, pod.Name, container.Name)
|
||||
if err != nil {
|
||||
logFunc("Failed to get logs of pod %v, container %v, err: %v", pod.Name, container.Name, err)
|
||||
}
|
||||
}
|
||||
logFunc("Logs of %v/%v:%v on node %v", pod.Namespace, pod.Name, container.Name, pod.Spec.NodeName)
|
||||
logFunc("%s : STARTLOG\n%s\nENDLOG for container %v:%v:%v", containerNameSubstr, logs, pod.Namespace, pod.Name, container.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// LogFailedContainers runs `kubectl logs` on a failed containers.
|
||||
func LogFailedContainers(c clientset.Interface, ns string, logFunc func(ftm string, args ...interface{})) {
|
||||
podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
logFunc("Error getting pods in namespace '%s': %v", ns, err)
|
||||
return
|
||||
}
|
||||
logFunc("Running kubectl logs on non-ready containers in %v", ns)
|
||||
for _, pod := range podList.Items {
|
||||
if res, err := testutils.PodRunningReady(&pod); !res || err != nil {
|
||||
kubectlLogPod(c, pod, "", Logf)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeleteNamespaces deletes all namespaces that match the given delete and skip filters.
|
||||
// Filter is by simple strings.Contains; first skip filter, then delete filter.
|
||||
// Returns the list of deleted namespaces or an error.
|
||||
func DeleteNamespaces(c clientset.Interface, deleteFilter, skipFilter []string) ([]string, error) {
|
||||
ginkgo.By("Deleting namespaces")
|
||||
nsList, err := c.CoreV1().Namespaces().List(metav1.ListOptions{})
|
||||
nsList, err := c.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{})
|
||||
ExpectNoError(err, "Failed to get namespace list")
|
||||
var deleted []string
|
||||
var wg sync.WaitGroup
|
||||
@ -320,7 +253,7 @@ OUTER:
|
||||
go func(nsName string) {
|
||||
defer wg.Done()
|
||||
defer ginkgo.GinkgoRecover()
|
||||
gomega.Expect(c.CoreV1().Namespaces().Delete(nsName, nil)).To(gomega.Succeed())
|
||||
gomega.Expect(c.CoreV1().Namespaces().Delete(context.TODO(), nsName, metav1.DeleteOptions{})).To(gomega.Succeed())
|
||||
Logf("namespace : %v api call to delete is complete ", nsName)
|
||||
}(item.Name)
|
||||
}
|
||||
@ -338,7 +271,7 @@ func WaitForNamespacesDeleted(c clientset.Interface, namespaces []string, timeou
|
||||
//Now POLL until all namespaces have been eradicated.
|
||||
return wait.Poll(2*time.Second, timeout,
|
||||
func() (bool, error) {
|
||||
nsList, err := c.CoreV1().Namespaces().List(metav1.ListOptions{})
|
||||
nsList, err := c.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@ -352,7 +285,7 @@ func WaitForNamespacesDeleted(c clientset.Interface, namespaces []string, timeou
|
||||
}
|
||||
|
||||
func waitForServiceAccountInNamespace(c clientset.Interface, ns, serviceAccountName string, timeout time.Duration) error {
|
||||
w, err := c.CoreV1().ServiceAccounts(ns).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: serviceAccountName}))
|
||||
w, err := c.CoreV1().ServiceAccounts(ns).Watch(context.TODO(), metav1.SingleObject(metav1.ObjectMeta{Name: serviceAccountName}))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -373,12 +306,12 @@ func WaitForDefaultServiceAccountInNamespace(c clientset.Interface, namespace st
|
||||
func WaitForPersistentVolumeDeleted(c clientset.Interface, pvName string, Poll, timeout time.Duration) error {
|
||||
Logf("Waiting up to %v for PersistentVolume %s to get deleted", timeout, pvName)
|
||||
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
|
||||
pv, err := c.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
|
||||
pv, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{})
|
||||
if err == nil {
|
||||
Logf("PersistentVolume %s found and phase=%s (%v)", pvName, pv.Status.Phase, time.Since(start))
|
||||
continue
|
||||
}
|
||||
if apierrs.IsNotFound(err) {
|
||||
if apierrors.IsNotFound(err) {
|
||||
Logf("PersistentVolume %s was removed", pvName)
|
||||
return nil
|
||||
}
|
||||
@ -392,12 +325,12 @@ func findAvailableNamespaceName(baseName string, c clientset.Interface) (string,
|
||||
var name string
|
||||
err := wait.PollImmediate(Poll, 30*time.Second, func() (bool, error) {
|
||||
name = fmt.Sprintf("%v-%v", baseName, RandomSuffix())
|
||||
_, err := c.CoreV1().Namespaces().Get(name, metav1.GetOptions{})
|
||||
_, err := c.CoreV1().Namespaces().Get(context.TODO(), name, metav1.GetOptions{})
|
||||
if err == nil {
|
||||
// Already taken
|
||||
return false, nil
|
||||
}
|
||||
if apierrs.IsNotFound(err) {
|
||||
if apierrors.IsNotFound(err) {
|
||||
return true, nil
|
||||
}
|
||||
Logf("Unexpected error while getting namespace: %v", err)
|
||||
@ -434,7 +367,7 @@ func CreateTestingNS(baseName string, c clientset.Interface, labels map[string]s
|
||||
var got *v1.Namespace
|
||||
if err := wait.PollImmediate(Poll, 30*time.Second, func() (bool, error) {
|
||||
var err error
|
||||
got, err = c.CoreV1().Namespaces().Create(namespaceObj)
|
||||
got, err = c.CoreV1().Namespaces().Create(context.TODO(), namespaceObj, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
Logf("Unexpected error while creating namespace: %v", err)
|
||||
return false, nil
|
||||
@ -473,7 +406,7 @@ func CheckTestingNSDeletedExcept(c clientset.Interface, skip string) error {
|
||||
|
||||
Logf("Waiting for terminating namespaces to be deleted...")
|
||||
for start := time.Now(); time.Since(start) < timeout; time.Sleep(15 * time.Second) {
|
||||
namespaces, err := c.CoreV1().Namespaces().List(metav1.ListOptions{})
|
||||
namespaces, err := c.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
Logf("Listing namespaces failed: %v", err)
|
||||
continue
|
||||
@ -497,12 +430,12 @@ func CheckTestingNSDeletedExcept(c clientset.Interface, skip string) error {
|
||||
// WaitForService waits until the service appears (exist == true), or disappears (exist == false)
|
||||
func WaitForService(c clientset.Interface, namespace, name string, exist bool, interval, timeout time.Duration) error {
|
||||
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
|
||||
_, err := c.CoreV1().Services(namespace).Get(name, metav1.GetOptions{})
|
||||
_, err := c.CoreV1().Services(namespace).Get(context.TODO(), name, metav1.GetOptions{})
|
||||
switch {
|
||||
case err == nil:
|
||||
Logf("Service %s in namespace %s found.", name, namespace)
|
||||
return exist, nil
|
||||
case apierrs.IsNotFound(err):
|
||||
case apierrors.IsNotFound(err):
|
||||
Logf("Service %s in namespace %s disappeared.", name, namespace)
|
||||
return !exist, nil
|
||||
case !testutils.IsRetryableAPIError(err):
|
||||
@ -524,7 +457,7 @@ func WaitForService(c clientset.Interface, namespace, name string, exist bool, i
|
||||
func WaitForServiceEndpointsNum(c clientset.Interface, namespace, serviceName string, expectNum int, interval, timeout time.Duration) error {
|
||||
return wait.Poll(interval, timeout, func() (bool, error) {
|
||||
Logf("Waiting for amount of service:%s endpoints to be %d", serviceName, expectNum)
|
||||
list, err := c.CoreV1().Endpoints(namespace).List(metav1.ListOptions{})
|
||||
list, err := c.CoreV1().Endpoints(namespace).List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@ -589,6 +522,14 @@ func LoadConfig() (config *restclient.Config, err error) {
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
// In case Host is not set in TestContext, sets it as
|
||||
// CurrentContext Server for k8s API client to connect to.
|
||||
if TestContext.Host == "" && c.Clusters != nil {
|
||||
currentContext, ok := c.Clusters[c.CurrentContext]
|
||||
if ok {
|
||||
TestContext.Host = currentContext.Server
|
||||
}
|
||||
}
|
||||
|
||||
return clientcmd.NewDefaultClientConfig(*c, &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: TestContext.Host}}).ClientConfig()
|
||||
}
|
||||
@ -602,11 +543,7 @@ func LoadClientset() (*clientset.Clientset, error) {
|
||||
return clientset.NewForConfig(config)
|
||||
}
|
||||
|
||||
// RandomSuffix provides a random string to append to pods,services,rcs.
|
||||
// TODO: Allow service names to have the same form as names
|
||||
// for pods and replication controllers so we don't
|
||||
// need to use such a function and can instead
|
||||
// use the UUID utility function.
|
||||
// RandomSuffix provides a random sequence to append to pods,services,rcs.
|
||||
func RandomSuffix() string {
|
||||
return strconv.Itoa(rand.Intn(10000))
|
||||
}
|
||||
@ -618,7 +555,7 @@ func Cleanup(filePath, ns string, selectors ...string) {
|
||||
if ns != "" {
|
||||
nsArg = fmt.Sprintf("--namespace=%s", ns)
|
||||
}
|
||||
RunKubectlOrDie("delete", "--grace-period=0", "-f", filePath, nsArg)
|
||||
RunKubectlOrDie(ns, "delete", "--grace-period=0", "-f", filePath, nsArg)
|
||||
AssertCleanup(ns, selectors...)
|
||||
}
|
||||
|
||||
@ -633,12 +570,12 @@ func AssertCleanup(ns string, selectors ...string) {
|
||||
verifyCleanupFunc := func() (bool, error) {
|
||||
e = nil
|
||||
for _, selector := range selectors {
|
||||
resources := RunKubectlOrDie("get", "rc,svc", "-l", selector, "--no-headers", nsArg)
|
||||
resources := RunKubectlOrDie(ns, "get", "rc,svc", "-l", selector, "--no-headers", nsArg)
|
||||
if resources != "" {
|
||||
e = fmt.Errorf("Resources left running after stop:\n%s", resources)
|
||||
return false, nil
|
||||
}
|
||||
pods := RunKubectlOrDie("get", "pods", "-l", selector, nsArg, "-o", "go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ \"\\n\" }}{{ end }}{{ end }}")
|
||||
pods := RunKubectlOrDie(ns, "get", "pods", "-l", selector, nsArg, "-o", "go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ \"\\n\" }}{{ end }}{{ end }}")
|
||||
if pods != "" {
|
||||
e = fmt.Errorf("Pods left unterminated after stop:\n%s", pods)
|
||||
return false, nil
|
||||
@ -652,57 +589,23 @@ func AssertCleanup(ns string, selectors ...string) {
|
||||
}
|
||||
}
|
||||
|
||||
// KubectlCmd runs the kubectl executable through the wrapper script.
|
||||
func KubectlCmd(args ...string) *exec.Cmd {
|
||||
defaultArgs := []string{}
|
||||
|
||||
// Reference a --server option so tests can run anywhere.
|
||||
if TestContext.Host != "" {
|
||||
defaultArgs = append(defaultArgs, "--"+clientcmd.FlagAPIServer+"="+TestContext.Host)
|
||||
}
|
||||
if TestContext.KubeConfig != "" {
|
||||
defaultArgs = append(defaultArgs, "--"+clientcmd.RecommendedConfigPathFlag+"="+TestContext.KubeConfig)
|
||||
|
||||
// Reference the KubeContext
|
||||
if TestContext.KubeContext != "" {
|
||||
defaultArgs = append(defaultArgs, "--"+clientcmd.FlagContext+"="+TestContext.KubeContext)
|
||||
}
|
||||
|
||||
} else {
|
||||
if TestContext.CertDir != "" {
|
||||
defaultArgs = append(defaultArgs,
|
||||
fmt.Sprintf("--certificate-authority=%s", filepath.Join(TestContext.CertDir, "ca.crt")),
|
||||
fmt.Sprintf("--client-certificate=%s", filepath.Join(TestContext.CertDir, "kubecfg.crt")),
|
||||
fmt.Sprintf("--client-key=%s", filepath.Join(TestContext.CertDir, "kubecfg.key")))
|
||||
}
|
||||
}
|
||||
kubectlArgs := append(defaultArgs, args...)
|
||||
|
||||
//We allow users to specify path to kubectl, so you can test either "kubectl" or "cluster/kubectl.sh"
|
||||
//and so on.
|
||||
cmd := exec.Command(TestContext.KubectlPath, kubectlArgs...)
|
||||
|
||||
//caller will invoke this and wait on it.
|
||||
return cmd
|
||||
}
// LookForStringInPodExec looks for the given string in the output of a command
// executed in a specific pod container.
// TODO(alejandrox1): move to pod/ subpkg once kubectl methods are refactored.
func LookForStringInPodExec(ns, podName string, command []string, expectedString string, timeout time.Duration) (result string, err error) {
return LookForString(expectedString, timeout, func() string {
return lookForString(expectedString, timeout, func() string {
// use the first container
args := []string{"exec", podName, fmt.Sprintf("--namespace=%v", ns), "--"}
args = append(args, command...)
return RunKubectlOrDie(args...)
return RunKubectlOrDie(ns, args...)
})
}

// LookForString looks for the given string in the output of fn, repeatedly calling fn until
// lookForString looks for the given string in the output of fn, repeatedly calling fn until
// the timeout is reached or the string is found. Returns last log and possibly
// error if the string was not found.
// TODO(alejandrox1): move to pod/ subpkg once kubectl methods are refactored.
func LookForString(expectedString string, timeout time.Duration, fn func() string) (result string, err error) {
func lookForString(expectedString string, timeout time.Duration, fn func() string) (result string, err error) {
for t := time.Now(); time.Since(t) < timeout; time.Sleep(Poll) {
result = fn()
if strings.Contains(result, expectedString) {
@ -721,9 +624,10 @@ type KubectlBuilder struct {
}

// NewKubectlCommand returns a KubectlBuilder for running kubectl.
func NewKubectlCommand(args ...string) *KubectlBuilder {
func NewKubectlCommand(namespace string, args ...string) *KubectlBuilder {
b := new(KubectlBuilder)
b.cmd = KubectlCmd(args...)
tk := e2ekubectl.NewTestKubeconfig(TestContext.CertDir, TestContext.Host, TestContext.KubeConfig, TestContext.KubeContext, TestContext.KubectlPath, namespace)
b.cmd = tk.KubectlCmd(args...)
return b
}
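A minimal usage sketch (not part of the vendored diff) of the namespace-aware builder above; the helper name and the namespace/manifest values are illustrative placeholders:

// applyManifestExample pipes a manifest into kubectl apply for one test
// namespace, using the new namespace-first builder and ExecOrDie signature.
func applyManifestExample(ns, manifest string) string {
	return NewKubectlCommand(ns, "apply", "-f", "-").
		WithStdinData(manifest).
		ExecOrDie(ns)
}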
@ -752,14 +656,14 @@ func (b KubectlBuilder) WithStdinReader(reader io.Reader) *KubectlBuilder {
}

// ExecOrDie runs the kubectl executable or dies if error occurs.
func (b KubectlBuilder) ExecOrDie() string {
func (b KubectlBuilder) ExecOrDie(namespace string) string {
str, err := b.Exec()
// In case of i/o timeout error, try talking to the apiserver again after 2s before dying.
// Note that we're still dying after retrying so that we can get visibility to triage it further.
if isTimeout(err) {
Logf("Hit i/o timeout error, talking to the server 2s later to see if it's temporary.")
time.Sleep(2 * time.Second)
retryStr, retryErr := RunKubectl("version")
retryStr, retryErr := RunKubectl(namespace, "version")
Logf("stdout: %q", retryStr)
Logf("err: %v", retryErr)
}
@ -818,23 +722,23 @@ func (b KubectlBuilder) Exec() (string, error) {
}

// RunKubectlOrDie is a convenience wrapper over kubectlBuilder
func RunKubectlOrDie(args ...string) string {
return NewKubectlCommand(args...).ExecOrDie()
func RunKubectlOrDie(namespace string, args ...string) string {
return NewKubectlCommand(namespace, args...).ExecOrDie(namespace)
}

// RunKubectl is a convenience wrapper over kubectlBuilder
func RunKubectl(args ...string) (string, error) {
return NewKubectlCommand(args...).Exec()
func RunKubectl(namespace string, args ...string) (string, error) {
return NewKubectlCommand(namespace, args...).Exec()
}

// RunKubectlOrDieInput is a convenience wrapper over kubectlBuilder that takes input to stdin
func RunKubectlOrDieInput(data string, args ...string) string {
return NewKubectlCommand(args...).WithStdinData(data).ExecOrDie()
func RunKubectlOrDieInput(namespace string, data string, args ...string) string {
return NewKubectlCommand(namespace, args...).WithStdinData(data).ExecOrDie(namespace)
}

// RunKubectlInput is a convenience wrapper over kubectlBuilder that takes input to stdin
func RunKubectlInput(data string, args ...string) (string, error) {
return NewKubectlCommand(args...).WithStdinData(data).Exec()
func RunKubectlInput(namespace string, data string, args ...string) (string, error) {
return NewKubectlCommand(namespace, args...).WithStdinData(data).Exec()
}
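A hedged sketch of the namespace-first calling convention used by the RunKubectl* wrappers after this change; the helper name, pod name, and arguments are placeholders:

// runKubectlExamples shows both the die-on-error and error-returning wrappers.
func runKubectlExamples(ns, podName string) {
	// Namespace now comes first, followed by the usual kubectl arguments.
	out := RunKubectlOrDie(ns, "get", "pods", "-o", "wide")
	Logf("pods in %s:\n%s", ns, out)

	// Or keep the error and handle it at the call site.
	if logs, err := RunKubectl(ns, "logs", podName, "--tail=10"); err == nil {
		Logf("last log lines of %s:\n%s", podName, logs)
	}
}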
// RunKubemciWithKubeconfig is a convenience wrapper over RunKubemciCmd
@ -910,14 +814,14 @@ func (f *Framework) MatchContainerOutput(
createdPod := podClient.Create(pod)
defer func() {
ginkgo.By("delete the pod")
podClient.DeleteSync(createdPod.Name, &metav1.DeleteOptions{}, DefaultPodDeletionTimeout)
podClient.DeleteSync(createdPod.Name, metav1.DeleteOptions{}, DefaultPodDeletionTimeout)
}()

// Wait for client pod to complete.
podErr := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, createdPod.Name, ns)

// Grab its logs. Get host first.
podStatus, err := podClient.Get(createdPod.Name, metav1.GetOptions{})
podStatus, err := podClient.Get(context.TODO(), createdPod.Name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("failed to get pod status: %v", err)
}
@ -987,7 +891,7 @@ func dumpEventsInNamespace(eventsLister EventsLister, namespace string) {
// DumpAllNamespaceInfo dumps events, pods and nodes information in the given namespace.
func DumpAllNamespaceInfo(c clientset.Interface, namespace string) {
dumpEventsInNamespace(func(opts metav1.ListOptions, ns string) (*v1.EventList, error) {
return c.CoreV1().Events(ns).List(opts)
return c.CoreV1().Events(ns).List(context.TODO(), opts)
}, namespace)

e2epod.DumpAllPodInfoForNamespace(c, namespace)
@ -997,7 +901,7 @@ func DumpAllNamespaceInfo(c clientset.Interface, namespace string) {
// 2. there are so many of them that working with them are mostly impossible
// So we dump them only if the cluster is relatively small.
maxNodesForDump := TestContext.MaxNodesToGather
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
if err != nil {
Logf("unable to fetch node list: %v", err)
return
@ -1034,7 +938,7 @@ func dumpAllNodeInfo(c clientset.Interface, nodes *v1.NodeList) {
func DumpNodeDebugInfo(c clientset.Interface, nodeNames []string, logFunc func(fmt string, args ...interface{})) {
for _, n := range nodeNames {
logFunc("\nLogging node info for node %v", n)
node, err := c.CoreV1().Nodes().Get(n, metav1.GetOptions{})
node, err := c.CoreV1().Nodes().Get(context.TODO(), n, metav1.GetOptions{})
if err != nil {
logFunc("Error getting node info %v", err)
}
@ -1078,7 +982,7 @@ func getKubeletPods(c clientset.Interface, node string) (*v1.PodList, error) {
SubResource("proxy").
Name(fmt.Sprintf("%v:%v", node, ports.KubeletPort)).
Suffix("pods").
Do()
Do(context.TODO())

finished <- struct{}{}
}()
@ -1105,7 +1009,7 @@ func getNodeEvents(c clientset.Interface, nodeName string) []v1.Event {
"source": "kubelet",
}.AsSelector().String()
options := metav1.ListOptions{FieldSelector: selector}
events, err := c.CoreV1().Events(metav1.NamespaceSystem).List(options)
events, err := c.CoreV1().Events(metav1.NamespaceSystem).List(context.TODO(), options)
if err != nil {
Logf("Unexpected error retrieving node events %v", err)
return []v1.Event{}
@ -1141,7 +1045,7 @@ func GetPodSecretUpdateTimeout(c clientset.Interface) time.Duration {
}

func getNodeTTLAnnotationValue(c clientset.Interface) (time.Duration, error) {
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
if err != nil || len(nodes.Items) == 0 {
return time.Duration(0), fmt.Errorf("Couldn't list any nodes to get TTL annotation: %v", err)
}
@ -1170,7 +1074,7 @@ func AddOrUpdateLabelOnNode(c clientset.Interface, nodeName string, labelKey, la
// ExpectNodeHasLabel expects that the given node has the given label pair.
func ExpectNodeHasLabel(c clientset.Interface, nodeName string, labelKey string, labelValue string) {
ginkgo.By("verifying the node has the label " + labelKey + " " + labelValue)
node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
ExpectNoError(err)
ExpectEqual(node.Labels[labelKey], labelValue)
}
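The client-go calls throughout this file gain a leading context.Context in the v1.18 update; a minimal sketch of the new call shape (the helper name is hypothetical):

// getNodeLabelsExample illustrates the updated client-go signature: every
// CoreV1 Get/List now takes a context as its first argument.
func getNodeLabelsExample(c clientset.Interface, nodeName string) (map[string]string, error) {
	node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	return node.Labels, nil
}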
@ -1198,7 +1102,7 @@ func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKey string)

func verifyThatTaintIsGone(c clientset.Interface, nodeName string, taint *v1.Taint) {
ginkgo.By("verifying the node doesn't have the taint " + taint.ToString())
nodeUpdated, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
nodeUpdated, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
ExpectNoError(err)
if taintutils.TaintExists(nodeUpdated.Spec.Taints, taint) {
Failf("Failed removing taint " + taint.ToString() + " of the node " + nodeName)
@ -1216,7 +1120,7 @@ func ExpectNodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint)

// NodeHasTaint returns true if the node has the given taint, else returns false.
func NodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) (bool, error) {
node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
if err != nil {
return false, err
}
@ -1255,7 +1159,7 @@ func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns

rtObject, err := e2eresource.GetRuntimeObjectForKind(c, kind, ns, name)
if err != nil {
if apierrs.IsNotFound(err) {
if apierrors.IsNotFound(err) {
Logf("%v %s not found: %v", kind, name, err)
return nil
}
@ -1277,7 +1181,7 @@ func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns

defer ps.Stop()
falseVar := false
deleteOption := &metav1.DeleteOptions{OrphanDependents: &falseVar}
deleteOption := metav1.DeleteOptions{OrphanDependents: &falseVar}
startTime := time.Now()
if err := testutils.DeleteResourceWithRetries(c, kind, ns, name, deleteOption); err != nil {
return err
@ -1302,7 +1206,7 @@ func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns
timeout = timeout + 3*time.Minute
}

err = e2epod.WaitForPodsInactive(ps, interval, timeout)
err = waitForPodsInactive(ps, interval, timeout)
if err != nil {
return fmt.Errorf("error while waiting for pods to become inactive %s: %v", name, err)
}
@ -1312,17 +1216,60 @@ func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns
// In gce, at any point, small percentage of nodes can disappear for
// ~10 minutes due to hostError. 20 minutes should be long enough to
// restart VM in that case and delete the pod.
err = e2epod.WaitForPodsGone(ps, interval, 20*time.Minute)
err = waitForPodsGone(ps, interval, 20*time.Minute)
if err != nil {
return fmt.Errorf("error while waiting for pods gone %s: %v", name, err)
}
return nil
}
// waitForPodsGone waits until there are no pods left in the PodStore.
func waitForPodsGone(ps *testutils.PodStore, interval, timeout time.Duration) error {
var pods []*v1.Pod
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
if pods = ps.List(); len(pods) == 0 {
return true, nil
}
return false, nil
})

if err == wait.ErrWaitTimeout {
for _, pod := range pods {
Logf("ERROR: Pod %q still exists. Node: %q", pod.Name, pod.Spec.NodeName)
}
return fmt.Errorf("there are %d pods left. E.g. %q on node %q", len(pods), pods[0].Name, pods[0].Spec.NodeName)
}
return err
}

// waitForPodsInactive waits until there are no active pods left in the PodStore.
// This is to make a fair comparison of deletion time between DeleteRCAndPods
// and DeleteRCAndWaitForGC, because the RC controller decreases status.replicas
// when the pod is inactive.
func waitForPodsInactive(ps *testutils.PodStore, interval, timeout time.Duration) error {
var activePods []*v1.Pod
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
pods := ps.List()
activePods = controller.FilterActivePods(pods)
if len(activePods) != 0 {
return false, nil
}
return true, nil
})

if err == wait.ErrWaitTimeout {
for _, pod := range activePods {
Logf("ERROR: Pod %q running on %q is still active", pod.Name, pod.Spec.NodeName)
}
return fmt.Errorf("there are %d active pods. E.g. %q on node %q", len(activePods), activePods[0].Name, activePods[0].Spec.NodeName)
}
return err
}

// RunHostCmd runs the given cmd in the context of the given pod using `kubectl exec`
// inside of a shell.
func RunHostCmd(ns, name, cmd string) (string, error) {
return RunKubectl("exec", fmt.Sprintf("--namespace=%v", ns), name, "--", "/bin/sh", "-x", "-c", cmd)
return RunKubectl(ns, "exec", fmt.Sprintf("--namespace=%v", ns), name, "--", "/bin/sh", "-x", "-c", cmd)
}
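An illustrative call to RunHostCmd with the namespace threaded through first; the helper name and command are placeholders:

// checkHostnameExample runs a shell command inside a pod via kubectl exec.
func checkHostnameExample(ns, podName string) {
	out, err := RunHostCmd(ns, podName, "hostname")
	if err != nil {
		Logf("RunHostCmd failed: %v", err)
		return
	}
	Logf("pod %s reports hostname %q", podName, out)
}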
// RunHostCmdOrDie calls RunHostCmd and dies on error.
@ -1362,7 +1309,7 @@ func AllNodesReady(c clientset.Interface, timeout time.Duration) error {
err := wait.PollImmediate(Poll, timeout, func() (bool, error) {
notReady = nil
// It should be OK to list unschedulable Nodes here.
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
if err != nil {
if testutils.IsRetryableAPIError(err) {
return false, nil
@ -1445,7 +1392,7 @@ func RestartKubelet(host string) error {
}

// RestartApiserver restarts the kube-apiserver.
func RestartApiserver(cs clientset.Interface) error {
func RestartApiserver(namespace string, cs clientset.Interface) error {
// TODO: Make it work for all providers.
if !ProviderIs("gce", "gke", "aws") {
return fmt.Errorf("unsupported provider for RestartApiserver: %s", TestContext.Provider)
@ -1466,7 +1413,7 @@ func RestartApiserver(cs clientset.Interface) error {
if err != nil {
return err
}
return masterUpgradeGKE(v.GitVersion[1:]) // strip leading 'v'
return masterUpgradeGKE(namespace, v.GitVersion[1:]) // strip leading 'v'
}

func sshRestartMaster() error {
@ -1508,7 +1455,7 @@ func waitForApiserverRestarted(c clientset.Interface, initialRestartCount int32)
func getApiserverRestartCount(c clientset.Interface) (int32, error) {
label := labels.SelectorFromSet(labels.Set(map[string]string{"component": "kube-apiserver"}))
listOpts := metav1.ListOptions{LabelSelector: label.String()}
pods, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(listOpts)
pods, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(context.TODO(), listOpts)
if err != nil {
return -1, err
}
@ -1575,7 +1522,11 @@ func headersForConfig(c *restclient.Config, url *url.URL) (http.Header, error) {
if err != nil {
return nil, err
}
if _, err := rt.RoundTrip(&http.Request{URL: url}); err != nil {
request, err := http.NewRequest("GET", url.String(), nil)
if err != nil {
return nil, err
}
if _, err := rt.RoundTrip(request); err != nil {
return nil, err
}
return extract.Header, nil
@ -1609,8 +1560,8 @@ func OpenWebSocketForURL(url *url.URL, config *restclient.Config, protocols []st

// LookForStringInLog looks for the given string in the log of a specific pod container
func LookForStringInLog(ns, podName, container, expectedString string, timeout time.Duration) (result string, err error) {
return LookForString(expectedString, timeout, func() string {
return RunKubectlOrDie("logs", podName, container, fmt.Sprintf("--namespace=%v", ns))
return lookForString(expectedString, timeout, func() string {
return RunKubectlOrDie(ns, "logs", podName, container, fmt.Sprintf("--namespace=%v", ns))
})
}
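A sketch of building another poller on the now-unexported lookForString, mirroring LookForStringInLog above; the helper name, expected string, and timeout are placeholders:

// waitForReadyLineExample polls a container's logs until the expected string
// shows up or the timeout expires.
func waitForReadyLineExample(ns, podName, container string) (string, error) {
	return lookForString("server is ready", 2*time.Minute, func() string {
		return RunKubectlOrDie(ns, "logs", podName, container, fmt.Sprintf("--namespace=%v", ns))
	})
}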
@ -1736,22 +1687,6 @@ func RunCmdEnv(env []string, command string, args ...string) (string, string, er
return stdout, stderr, nil
}

// retryCmd runs cmd using args and retries it for up to SingleCallTimeout if
// it returns an error. It returns stdout and stderr.
func retryCmd(command string, args ...string) (string, string, error) {
var err error
stdout, stderr := "", ""
wait.Poll(Poll, SingleCallTimeout, func() (bool, error) {
stdout, stderr, err = RunCmd(command, args...)
if err != nil {
Logf("Got %v", err)
return false, nil
}
return true, nil
})
return stdout, stderr, err
}

// E2ETestNodePreparer implements testutils.TestNodePreparer interface, which is used
// to create/modify Nodes before running a test.
type E2ETestNodePreparer struct {
@ -1817,7 +1752,7 @@ func getMasterAddresses(c clientset.Interface) (string, string, string) {
var externalIP, internalIP, hostname string

// Populate the internal IP.
eps, err := c.CoreV1().Endpoints(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{})
eps, err := c.CoreV1().Endpoints(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{})
if err != nil {
Failf("Failed to get kubernetes endpoints: %v", err)
}
@ -1868,7 +1803,7 @@ func GetAllMasterAddresses(c clientset.Interface) []string {
func DescribeIng(ns string) {
Logf("\nOutput of kubectl describe ing:\n")
desc, _ := RunKubectl(
"describe", "ing", fmt.Sprintf("--namespace=%v", ns))
ns, "describe", "ing", fmt.Sprintf("--namespace=%v", ns))
Logf(desc)
}
@ -1915,25 +1850,24 @@ func (f *Framework) NewAgnhostPod(name string, args ...string) *v1.Pod {
// CreateEmptyFileOnPod creates empty file at given path on the pod.
// TODO(alejandrox1): move to subpkg pod once kubectl methods have been refactored.
func CreateEmptyFileOnPod(namespace string, podName string, filePath string) error {
_, err := RunKubectl("exec", fmt.Sprintf("--namespace=%s", namespace), podName, "--", "/bin/sh", "-c", fmt.Sprintf("touch %s", filePath))
_, err := RunKubectl(namespace, "exec", fmt.Sprintf("--namespace=%s", namespace), podName, "--", "/bin/sh", "-c", fmt.Sprintf("touch %s", filePath))
return err
}

// DumpDebugInfo dumps debug info of tests.
func DumpDebugInfo(c clientset.Interface, ns string) {
sl, _ := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
sl, _ := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String()})
for _, s := range sl.Items {
desc, _ := RunKubectl("describe", "po", s.Name, fmt.Sprintf("--namespace=%v", ns))
desc, _ := RunKubectl(ns, "describe", "po", s.Name, fmt.Sprintf("--namespace=%v", ns))
Logf("\nOutput of kubectl describe %v:\n%v", s.Name, desc)

l, _ := RunKubectl("logs", s.Name, fmt.Sprintf("--namespace=%v", ns), "--tail=100")
l, _ := RunKubectl(ns, "logs", s.Name, fmt.Sprintf("--namespace=%v", ns), "--tail=100")
Logf("\nLast 100 log lines of %v:\n%v", s.Name, l)
}
}

// DsFromManifest reads a .json/yaml file and returns the daemonset in it.
func DsFromManifest(url string) (*appsv1.DaemonSet, error) {
var ds appsv1.DaemonSet
Logf("Parsing ds from %v", url)

var response *http.Response
@ -1959,7 +1893,12 @@ func DsFromManifest(url string) (*appsv1.DaemonSet, error) {
if err != nil {
return nil, fmt.Errorf("Failed to read html response body: %v", err)
}
return DsFromData(data)
}

// DsFromData reads a byte slice and returns the daemonset in it.
func DsFromData(data []byte) (*appsv1.DaemonSet, error) {
var ds appsv1.DaemonSet
dataJSON, err := utilyaml.ToJSON(data)
if err != nil {
return nil, fmt.Errorf("Failed to parse data to json: %v", err)
@ -1974,7 +1913,7 @@ func DsFromManifest(url string) (*appsv1.DaemonSet, error) {

// GetClusterZones returns the values of zone label collected from all nodes.
func GetClusterZones(c clientset.Interface) (sets.String, error) {
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
if err != nil {
return nil, fmt.Errorf("Error getting nodes while attempting to list cluster zones: %v", err)
}